├── Dockerfile
├── LICENSE
├── Makefile
├── README.md
├── circle.yml
├── deployments
│   └── k8s-http-statistics.yaml
├── ebpf-http-statistics.c
├── http-statistics.py
├── imgs
│   └── http-statistics.png
├── test-http-statistics.sh
└── tools
    ├── .gitignore
    ├── README.md
    ├── circle.yml
    ├── cmd
    │   └── wcloud
    │       ├── Makefile
    │       ├── cli.go
    │       ├── client.go
    │       └── types.go
    ├── cover
    │   ├── Makefile
    │   ├── cover.go
    │   └── gather_coverage.sh
    ├── files-with-type
    ├── image-tag
    ├── integration
    │   ├── assert.sh
    │   ├── config.sh
    │   ├── gce.sh
    │   ├── run_all.sh
    │   └── sanity_check.sh
    ├── lint
    ├── publish-site
    ├── rebuild-image
    ├── runner
    │   ├── Makefile
    │   └── runner.go
    ├── sched
    ├── scheduler
    │   ├── .gitignore
    │   ├── README.md
    │   ├── app.yaml
    │   ├── appengine_config.py
    │   ├── cron.yaml
    │   ├── main.py
    │   └── requirements.txt
    ├── shell-lint
    ├── socks
    │   ├── Dockerfile
    │   ├── Makefile
    │   ├── README.md
    │   ├── connect.sh
    │   └── main.go
    └── test

/Dockerfile:
--------------------------------------------------------------------------------
FROM ubuntu:xenial
MAINTAINER Weaveworks Inc
LABEL works.weave.role=system

# Install BCC
RUN echo "deb [trusted=yes] http://repo.iovisor.org/apt/xenial xenial-nightly main" | tee /etc/apt/sources.list.d/iovisor.list
RUN apt-get update && apt-get install -y libbcc libbcc-examples python-bcc

# Add our plugin
ADD ./ebpf-http-statistics.c ./http-statistics.py /usr/bin/
ENTRYPOINT ["/usr/bin/http-statistics.py"]

--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
                                 Apache License
                           Version 2.0, January 2004
                        http://www.apache.org/licenses/

   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION

   1. Definitions.

      "License" shall mean the terms and conditions for use, reproduction,
      and distribution as defined by Sections 1 through 9 of this document.

      "Licensor" shall mean the copyright owner or entity authorized by
      the copyright owner that is granting the License.

      "Legal Entity" shall mean the union of the acting entity and all
      other entities that control, are controlled by, or are under common
      control with that entity. For the purposes of this definition,
      "control" means (i) the power, direct or indirect, to cause the
      direction or management of such entity, whether by contract or
      otherwise, or (ii) ownership of fifty percent (50%) or more of the
      outstanding shares, or (iii) beneficial ownership of such entity.

      "You" (or "Your") shall mean an individual or Legal Entity
      exercising permissions granted by this License.

      "Source" form shall mean the preferred form for making modifications,
      including but not limited to software source code, documentation
      source, and configuration files.

      "Object" form shall mean any form resulting from mechanical
      transformation or translation of a Source form, including but
      not limited to compiled object code, generated documentation,
      and conversions to other media types.

      "Work" shall mean the work of authorship, whether in Source or
      Object form, made available under the License, as indicated by a
      copyright notice that is included in or attached to the work
      (an example is provided in the Appendix below).
      "Derivative Works" shall mean any work, whether in Source or Object
      form, that is based on (or derived from) the Work and for which the
      editorial revisions, annotations, elaborations, or other modifications
      represent, as a whole, an original work of authorship. For the purposes
      of this License, Derivative Works shall not include works that remain
      separable from, or merely link (or bind by name) to the interfaces of,
      the Work and Derivative Works thereof.

      "Contribution" shall mean any work of authorship, including
      the original version of the Work and any modifications or additions
      to that Work or Derivative Works thereof, that is intentionally
      submitted to Licensor for inclusion in the Work by the copyright owner
      or by an individual or Legal Entity authorized to submit on behalf of
      the copyright owner. For the purposes of this definition, "submitted"
      means any form of electronic, verbal, or written communication sent
      to the Licensor or its representatives, including but not limited to
      communication on electronic mailing lists, source code control systems,
      and issue tracking systems that are managed by, or on behalf of, the
      Licensor for the purpose of discussing and improving the Work, but
      excluding communication that is conspicuously marked or otherwise
      designated in writing by the copyright owner as "Not a Contribution."

      "Contributor" shall mean Licensor and any individual or Legal Entity
      on behalf of whom a Contribution has been received by Licensor and
      subsequently incorporated within the Work.

   2. Grant of Copyright License. Subject to the terms and conditions of
      this License, each Contributor hereby grants to You a perpetual,
      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
      copyright license to reproduce, prepare Derivative Works of,
      publicly display, publicly perform, sublicense, and distribute the
      Work and such Derivative Works in Source or Object form.

   3. Grant of Patent License. Subject to the terms and conditions of
      this License, each Contributor hereby grants to You a perpetual,
      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
      (except as stated in this section) patent license to make, have made,
      use, offer to sell, sell, import, and otherwise transfer the Work,
      where such license applies only to those patent claims licensable
      by such Contributor that are necessarily infringed by their
      Contribution(s) alone or by combination of their Contribution(s)
      with the Work to which such Contribution(s) was submitted. If You
      institute patent litigation against any entity (including a
      cross-claim or counterclaim in a lawsuit) alleging that the Work
      or a Contribution incorporated within the Work constitutes direct
      or contributory patent infringement, then any patent licenses
      granted to You under this License for that Work shall terminate
      as of the date such litigation is filed.

   4. Redistribution. You may reproduce and distribute copies of the
      Work or Derivative Works thereof in any medium, with or without
      modifications, and in Source or Object form, provided that You
      meet the following conditions:

      (a) You must give any other recipients of the Work or
          Derivative Works a copy of this License; and

      (b) You must cause any modified files to carry prominent notices
          stating that You changed the files; and

      (c) You must retain, in the Source form of any Derivative Works
          that You distribute, all copyright, patent, trademark, and
          attribution notices from the Source form of the Work,
          excluding those notices that do not pertain to any part of
          the Derivative Works; and

      (d) If the Work includes a "NOTICE" text file as part of its
          distribution, then any Derivative Works that You distribute must
          include a readable copy of the attribution notices contained
          within such NOTICE file, excluding those notices that do not
          pertain to any part of the Derivative Works, in at least one
          of the following places: within a NOTICE text file distributed
          as part of the Derivative Works; within the Source form or
          documentation, if provided along with the Derivative Works; or,
          within a display generated by the Derivative Works, if and
          wherever such third-party notices normally appear. The contents
          of the NOTICE file are for informational purposes only and
          do not modify the License. You may add Your own attribution
          notices within Derivative Works that You distribute, alongside
          or as an addendum to the NOTICE text from the Work, provided
          that such additional attribution notices cannot be construed
          as modifying the License.

      You may add Your own copyright statement to Your modifications and
      may provide additional or different license terms and conditions
      for use, reproduction, or distribution of Your modifications, or
      for any such Derivative Works as a whole, provided Your use,
      reproduction, and distribution of the Work otherwise complies with
      the conditions stated in this License.

   5. Submission of Contributions. Unless You explicitly state otherwise,
      any Contribution intentionally submitted for inclusion in the Work
      by You to the Licensor shall be under the terms and conditions of
      this License, without any additional terms or conditions.
      Notwithstanding the above, nothing herein shall supersede or modify
      the terms of any separate license agreement you may have executed
      with Licensor regarding such Contributions.

   6. Trademarks. This License does not grant permission to use the trade
      names, trademarks, service marks, or product names of the Licensor,
      except as required for reasonable and customary use in describing the
      origin of the Work and reproducing the content of the NOTICE file.

   7. Disclaimer of Warranty. Unless required by applicable law or
      agreed to in writing, Licensor provides the Work (and each
      Contributor provides its Contributions) on an "AS IS" BASIS,
      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
      implied, including, without limitation, any warranties or conditions
      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
      PARTICULAR PURPOSE. You are solely responsible for determining the
      appropriateness of using or redistributing the Work and assume any
      risks associated with Your exercise of permissions under this License.

   8. Limitation of Liability. In no event and under no legal theory,
      whether in tort (including negligence), contract, or otherwise,
      unless required by applicable law (such as deliberate and grossly
      negligent acts) or agreed to in writing, shall any Contributor be
      liable to You for damages, including any direct, indirect, special,
      incidental, or consequential damages of any character arising as a
      result of this License or out of the use or inability to use the
      Work (including but not limited to damages for loss of goodwill,
      work stoppage, computer failure or malfunction, or any and all
      other commercial damages or losses), even if such Contributor
      has been advised of the possibility of such damages.

   9. Accepting Warranty or Additional Liability. While redistributing
      the Work or Derivative Works thereof, You may choose to offer,
      and charge a fee for, acceptance of support, warranty, indemnity,
      or other liability obligations and/or rights consistent with this
      License. However, in accepting such obligations, You may act only
      on Your own behalf and on Your sole responsibility, not on behalf
      of any other Contributor, and only if You agree to indemnify,
      defend, and hold each Contributor harmless for any liability
      incurred by, or claims asserted against, such Contributor by reason
      of your accepting any such warranty or additional liability.

   END OF TERMS AND CONDITIONS

   APPENDIX: How to apply the Apache License to your work.

      To apply the Apache License to your work, attach the following
      boilerplate notice, with the fields enclosed by brackets "{}"
      replaced with your own identifying information. (Don't include
      the brackets!) The text should be enclosed in the appropriate
      comment syntax for the file format. We also recommend that a
      file or class name and description of purpose be included on the
      same "printed page" as the copyright notice for easier
      identification within third-party archives.

   Copyright {yyyy} {name of copyright owner}

   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
   You may obtain a copy of the License at

       http://www.apache.org/licenses/LICENSE-2.0

   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
   limitations under the License.
--------------------------------------------------------------------------------
/Makefile:
--------------------------------------------------------------------------------
.PHONY: run clean

SUDO=$(shell docker info >/dev/null 2>&1 || echo "sudo -E")
EXE=http-statistics
ORGANIZATION=weaveworksplugins
IMAGE=$(ORGANIZATION)/scope-$(EXE)
NAME=$(ORGANIZATION)-scope-$(EXE)

UPTODATE=.$(EXE).uptodate

run: $(UPTODATE)
	$(SUDO) docker run --rm -it \
		--privileged --net=host --pid=host \
		-v /lib/modules:/lib/modules \
		-v /usr/src:/usr/src \
		-v /sys/kernel/debug/:/sys/kernel/debug/ \
		-v /var/run/scope/plugins:/var/run/scope/plugins \
		--name $(NAME) $(IMAGE)

$(UPTODATE): Dockerfile http-statistics.py ebpf-http-statistics.c
	$(SUDO) docker build -t $(IMAGE) .
	touch $@

clean:
	- rm -rf $(UPTODATE)
	- $(SUDO) docker rmi $(IMAGE)

--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
# DEPRECATED: Scope HTTP Statistics Plugin

The Scope HTTP Statistics plugin provides HTTP traffic statistics to the [Weave Scope](https://github.com/weaveworks/scope) user using [eBPF](http://man7.org/linux/man-pages/man2/bpf.2.html).
The HTTP Statistics plugin does not use any application-level instrumentation and incurs negligible performance overhead: metrics are gathered in-kernel, without copying any packets to user space.

**Note:** This plugin needs a [recent kernel version with eBPF support](https://github.com/iovisor/bcc/blob/master/INSTALL.md#kernel-configuration).
It will not compile on current [dlite](https://github.com/nlf/dlite) and boot2docker hosts.
You also need `/sys/kernel/debug/` to be available in order to build the eBPF program generated by the plugin.

![Scope HTTP Statistics plugin screenshot](imgs/http-statistics.png)

## How to Run Scope HTTP Statistics Plugin

This plugin requires:

* kernel version [>=4.2](https://github.com/iovisor/bcc/blob/master/docs/kernel-versions.md):
  * Attaching to kprobes requires >=4.1.
  * `bpf_probe_read()` requires >=4.1.
  * `bpf_get_current_pid_tgid()` requires >=4.2.
* The kernel build directory to be available in `/lib/modules/<kernel version>/build`. Depending on your distribution you might need to add this symlink: `ln -s /lib/modules/<kernel version>/build /lib/modules/<kernel version>/source`.

The Scope HTTP Statistics plugin can be executed standalone.
It responds to `GET /report` requests on the `/var/run/scope/plugins/http-statistics/http-statistics.sock` Unix socket with a report in JSON format.
If the running plugin has been registered by Scope, you will see it in the list of `PLUGINS` in the bottom right of the UI (see the green rectangle in the above figure).
The measured values are shown in the *STATUS* section (see the circle in the above figure).

### Using a pre-built Docker image

To make sure you are running the latest available version of the plugin, pull the image from Docker Hub:

```console
docker pull weaveworksplugins/scope-http-statistics:latest
```

To run the Scope HTTP Statistics plugin, run the following command.

```console
sudo docker run --rm -it \
    --privileged --net=host --pid=host \
    -v /lib/modules:/lib/modules \
    -v /usr/src:/usr/src \
    -v /sys/kernel/debug/:/sys/kernel/debug/ \
    -v /var/run/scope/plugins:/var/run/scope/plugins \
    --name weaveworksplugins-scope-http-statistics weaveworksplugins/scope-http-statistics
```

### Kubernetes

To use the Scope HTTP Statistics plugin in an existing Kubernetes cluster that is already running Weave Scope, run:

```console
kubectl create -f https://raw.githubusercontent.com/weaveworks-plugins/scope-http-statistics/master/deployments/k8s-http-statistics.yaml
```

### Recompiling

To rebuild and run the HTTP Statistics plugin:

* `git clone git@github.com:weaveworks-plugins/scope-http-statistics.git`
* `cd scope-http-statistics; make`

## Testing

* Run an `nginx` instance: `docker run --rm --name http-statistics-nginx -p 8080:80 nginx`
* Run `sh test-http-statistics.sh`; press Ctrl+C to terminate the test.
* Go to the Weave Scope UI at [http://localhost:4040](http://localhost:4040).
* Open the `http-statistics-nginx` container.
* Click on `nginx: worker process` in the "Processes" table.

## Metrics

The HTTP Statistics plugin currently implements the following metrics:

* HTTP Requests rate (per second)
* HTTP Response Codes rate (per second), for the most common HTTP response codes

### HTTP Requests

The HTTP Requests metric is the number of requests per second received by a process.
The Weave Scope UI shows this information as a single value and a graph, both updated every second.
The single value is the latest measurement, while the graph is the historical representation of recent values.

### HTTP Response Codes

Currently the following HTTP response codes are tracked:

* 100: Continue
* 200: OK
* 201: Created
* 202: Accepted
* 204: No Content
* 308: Permanent Redirect
* 400: Bad Request
* 401: Unauthorized
* 403: Forbidden
* 404: Not Found
* 408: Request Timeout
* 451: Unavailable For Legal Reasons
* 500: Internal Server Error
* 501: Not Implemented
* 502: Bad Gateway
* 503: Service Unavailable
* OTHERS: every other code not listed above

For each code, the Scope UI shows a single value and a graph, both updated every second.
The single value is the latest measurement, while the graph is the historical representation of recent values.
**Note**: If an HTTP response code is never sent, the plugin does not report it, to avoid displaying lots of empty graphs. When the traffic stops containing a specific HTTP response code, the corresponding graph is removed from the Weave Scope UI.

## Plugin internals

Below you see the `Plugins` section of the [reporter interface](https://github.com/weaveworks/scope/tree/master/examples/plugins#reporter-interface).

```json
{
  "Plugins": [
    {
      "id": "http-statistics",
      "label": "HTTP Statistics",
      "description": "Adds http request metrics to processes",
      "interfaces": ["reporter"],
      "api_version": "1"
    }
  ],
  ...
}
```

The HTTP Statistics plugin's report also has a `Process` section, where the per-process metrics are stored; an example of which you'll find below.

```json
{
  ...
  "Process": {
    "metric_templates": {
      "http_requests_per_second": {
        "id": "http_requests_per_second",
        "label": "HTTP Req/Second",
        "priority": 0.1
      },
      "http_200_responses_per_second": {
        "id": "http_200_responses_per_second",
        "label": "HTTP Resp 200 /Second",
        "priority": 0.2
      }
    },
    "nodes": {
      "example.org;29770": {
        "metrics": {
          "http_requests_per_second": {
            "samples": [
              {
                "date": "2016-09-21T07:22:24.293175Z",
                "value": 1.0
              }
            ]
          }
        }
      }
    }
  }
}
```

`Process` has a single `metric_templates` field containing one or more entries.
These entries describe the various types of metrics contained by each element of the `nodes` field.
Each element in the `nodes` section identifies a process on a specific host.
The element identifier is generated by concatenating the hostname and the PID with `;` as delimiter (e.g. `example.org;29770`).
Each element contains the metrics data provided by the HTTP Statistics plugin (just one metric in the example).
The data may contain many samples (just one in the example).

## Getting help

We love hearing from you and encourage you to join our community. For more
information on how to get help or get in touch, see [Scope's help
section](https://github.com/weaveworks/scope/#help).

--------------------------------------------------------------------------------
/circle.yml:
--------------------------------------------------------------------------------
general:
  branches:
    ignore:
      - gh-pages

machine:
  services:
    - docker
  environment:
    GOPATH: /home/ubuntu
    SRCDIR: /home/ubuntu/scope-http-statistics
    PATH: $PATH:$HOME/.local/bin

dependencies:
  cache_directories:
    - "~/docker"
  override:
    - echo "no dependencies"

test:
  override:
    - cd $SRCDIR && make .http-statistics.uptodate && docker tag weaveworksplugins/scope-http-statistics weaveworksplugins/scope-http-statistics:$(./tools/image-tag):
        parallel: false
        timeout: 300

deployment:
  hub:
    branch: master
    commands:
      - |
        test -z "${DOCKER_USER}" || (
          docker login -e $DOCKER_EMAIL -u $DOCKER_USER -p $DOCKER_PASS &&
          (test "${DOCKER_ORGANIZATION:-$DOCKER_USER}" == "weaveworksplugins" || (
            docker tag weaveworksplugins/scope-http-statistics:latest ${DOCKER_ORGANIZATION:-$DOCKER_USER}/scope-http-statistics:latest &&
            docker tag weaveworksplugins/scope-http-statistics:$(./tools/image-tag) ${DOCKER_ORGANIZATION:-$DOCKER_USER}/scope-http-statistics:$(./tools/image-tag)
          )) &&
          docker push ${DOCKER_ORGANIZATION:-$DOCKER_USER}/scope-http-statistics &&
          docker push ${DOCKER_ORGANIZATION:-$DOCKER_USER}/scope-http-statistics:$(./tools/image-tag)
        )
  hub-dev:
    branch: /^((?!master).)*$/ # not the master branch
    commands:
      - >
        test -z "${DEPLOY_BRANCH}" || test -z "${DOCKER_USER}" || (
        docker login -e $DOCKER_EMAIL -u $DOCKER_USER -p $DOCKER_PASS &&
        docker tag weaveworksplugins/scope-http-statistics:latest ${DOCKER_ORGANIZATION:-$DOCKER_USER}/scope-http-statistics:${CIRCLE_BRANCH//\//-} &&
        docker push ${DOCKER_ORGANIZATION:-$DOCKER_USER}/scope-http-statistics:${CIRCLE_BRANCH//\//-}
        )

--------------------------------------------------------------------------------
/deployments/k8s-http-statistics.yaml:
--------------------------------------------------------------------------------
apiVersion: extensions/v1beta1
kind: DaemonSet
metadata:
  labels:
    app: weavescope
    weavescope-component: weavescope-http-statistics-plugin
  name: weavescope-http-statistics-plugin
spec:
  template:
    metadata:
      labels:
        app: weavescope
        weavescope-component: weavescope-http-statistics-plugin
    spec:
      hostPID: true
      hostNetwork: true
      containers:
        - name: weavescope-http-statistics-plugin
          image: weaveworksplugins/scope-http-statistics:latest
          securityContext:
            privileged: true
          volumeMounts:
            - name: scope-plugins
              mountPath: /var/run/scope/plugins
            - name: lib-modules
              mountPath: /lib/modules
            - name: usr-src
              mountPath: /usr/src
            - name: sys-kernel-debug
              mountPath: /sys/kernel/debug
      volumes:
        - name: scope-plugins
          hostPath:
            path: /var/run/scope/plugins
        - name: lib-modules
          hostPath:
            path: /lib/modules
        - name: usr-src
          hostPath:
            path: /usr/src
        - name: sys-kernel-debug
          hostPath:
            path: /sys/kernel/debug

--------------------------------------------------------------------------------
/ebpf-http-statistics.c:
--------------------------------------------------------------------------------
#include <uapi/linux/ptrace.h>
#include <net/sock.h>

/* Request tracking */

/* http://stackoverflow.com/questions/25047905/http-request-minimum-size-in-bytes
 * The minimum length of an http request is always greater than 7 bytes.
 */
#define HTTP_REQUEST_MIN_LEN 7

/* Table from (Task group id|Task id) to (Number of received http requests).
 * We need to gather requests per task and not only per task group (i.e. userspace pid)
 * so that entries can be cleared up independently when a task exits.
 * This implies that userspace needs to do the per-process aggregation.
 */
BPF_HASH(received_http_requests, u64, u64);

/* skb_copy_datagram_iter() (Kernels >= 3.19) is in charge of copying socket
 * buffers from kernel to userspace.
 *
 * skb_copy_datagram_iter() has an associated tracepoint
 * (trace_skb_copy_datagram_iovec), which would be more stable than a kprobe but
 * it lacks the offset argument.
 */
int kprobe__skb_copy_datagram_iter(struct pt_regs *ctx, const struct sk_buff *skb, int offset, void *unused_iovec, int len)
{
	/* Inspect the beginning of socket buffers copied to user-space to determine
	 * if they correspond to http requests.
	 *
	 * Caveats:
	 *
	 * Requests may not appear at the beginning of a packet due to:
	 * - Persistent connections.
	 * - Packet fragmentation.
	 *
	 * We could inspect the full packet but:
	 * - It's very inefficient.
	 * - Examining the non-linear (paginated) area of a socket buffer would be
	 *   really tricky from ebpf.
	 */

	/* Verify it's a TCP socket.
	 * TODO: is it worth caching it in a socket table?
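	 *
	 * The checks below accept TCP sockets on PF_INET/PF_INET6, as well as
	 * PF_UNIX stream sockets, whose protocol field is left as IPPROTO_IP.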
	 */
	struct sock *sk = skb->sk;
	unsigned short skc_family = sk->__sk_common.skc_family;
	switch (skc_family) {
	case PF_INET:
	case PF_INET6:
	case PF_UNIX:
		break;
	default:
		return 0;
	}
	/* The socket type and protocol are not directly addressable since they are
	 * bitfields. We access them by assuming sk_write_queue is immediately before
	 * them (admittedly pretty hacky).
	 */
	unsigned int flags = 0;
	size_t flags_offset = offsetof(typeof(struct sock), sk_write_queue) + sizeof(sk->sk_write_queue);
	bpf_probe_read(&flags, sizeof(flags), ((u8*)sk) + flags_offset);
	u16 sk_type = flags >> 16;
	if (sk_type != SOCK_STREAM) {
		return 0;
	}
	u8 sk_protocol = flags >> 8 & 0xFF;
	/* The protocol is unset (IPPROTO_IP) in Unix sockets. */
	if ((sk_protocol != IPPROTO_TCP) && ((skc_family == PF_UNIX) && (sk_protocol != IPPROTO_IP))) {
		return 0;
	}

	/* Inline implementation of skb_headlen(). */
	unsigned int head_len = skb->len - skb->data_len;
	unsigned int available_data = head_len - offset;
	if (available_data < HTTP_REQUEST_MIN_LEN) {
		return 0;
	}

	/* Check if the buffer begins with a method name followed by a space.
	 *
	 * To avoid false positives it would be good to do a deeper inspection
	 * (i.e. fully ensure a 'Method SP Request-URI SP HTTP-Version CRLF'
	 * structure) but loops are not allowed in ebpf, making variable-size-data
	 * parsers infeasible.
	 */
	u8 data[8] = {};
	if (available_data > HTTP_REQUEST_MIN_LEN) {
		/* We have confirmed having access to 7 bytes, but need 8 bytes to check the
		 * space after OPTIONS. bpf_probe_read() requires its second argument to be
		 * an immediate, so we obtain the data in this unsexy way.
		 */
		bpf_probe_read(&data, 8, skb->data + offset);
	} else {
		bpf_probe_read(&data, 7, skb->data + offset);
	}

	switch (data[0]) {
	/* DELETE */
	case 'D':
		if ((data[1] != 'E') || (data[2] != 'L') || (data[3] != 'E') || (data[4] != 'T') || (data[5] != 'E') || (data[6] != ' ')) {
			return 0;
		}
		break;

	/* GET */
	case 'G':
		if ((data[1] != 'E') || (data[2] != 'T') || (data[3] != ' ')) {
			return 0;
		}
		break;

	/* HEAD */
	case 'H':
		if ((data[1] != 'E') || (data[2] != 'A') || (data[3] != 'D') || (data[4] != ' ')) {
			return 0;
		}
		break;

	/* OPTIONS */
	case 'O':
		if (available_data < 8 || (data[1] != 'P') || (data[2] != 'T') || (data[3] != 'I') || (data[4] != 'O') || (data[5] != 'N') || (data[6] != 'S') || (data[7] != ' ')) {
			return 0;
		}
		break;

	/* PATCH/POST/PUT */
	case 'P':
		switch (data[1]) {
		case 'A':
			if ((data[2] != 'T') || (data[3] != 'C') || (data[4] != 'H') || (data[5] != ' ')) {
				return 0;
			}
			break;
		case 'O':
			if ((data[2] != 'S') || (data[3] != 'T') || (data[4] != ' ')) {
				return 0;
			}
			break;
		case 'U':
			if ((data[2] != 'T') || (data[3] != ' ')) {
				return 0;
			}
			break;
		}
		break;

	default:
		return 0;
	}

	/* Finally, bump the request counter for the current task. */
	u64 pid_tgid = bpf_get_current_pid_tgid();
	received_http_requests.increment(pid_tgid);

	return 0;
}

/* Responses tracking.
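 *
 * Pipeline implemented by the hooks below:
 *   1. kprobe__tcp_sendmsg() flags the current task in
 *      tcp_sendmsg_copy_pending, so that we only consider buffers copied
 *      on behalf of tcp_sendmsg().
 *   2. kprobe__copy_from_iter() records the userspace buffer address and
 *      length in copy_from_iter_args_table.
 *   3. kretprobe__copy_from_iter() reads the first 12 bytes of that
 *      buffer, matches them against "HTTP/1.1 XXX" and bumps the
 *      per-task counter in sent_http_responses.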
 */
enum http_codes {
	_100 = 0,	/* Continue */

	_200,		/* OK */
	_201,		/* Created */
	_202,		/* Accepted */
	_204,		/* No Content */

	_308,		/* Permanent Redirect */

	_400,		/* Bad Request */
	_401,		/* Unauthorized */
	_403,		/* Forbidden */
	_404,		/* Not Found */
	_408,		/* Request Timeout */
	_451,		/* Unavailable For Legal Reasons */

	_500,		/* Internal Server Error */
	_501,		/* Not Implemented */
	_502,		/* Bad Gateway */
	_503,		/* Service Unavailable */

	HTTP_CODE_OTHER,	/* Catch all */
	LAST_HTTP_CODE,
};

struct http_response_codes_t {
	u32 codes[LAST_HTTP_CODE];
};

/* HTTP responses look like "HTTP/1.1 XXX". We only need to read the first 12 characters. */
#define HTTP_CODE_MSG_LEN 12

/* Keep the copy_from_iter context between the kprobe and the kretprobe. */
struct copy_from_iter_args_t {
	void *data;
	size_t bytes;
};

/* Hash map from (Task group id|Task id) to (Number of sent http responses' codes).
 * We need to gather responses per task and not only per task group (i.e. userspace pid)
 * so that entries can be cleared up independently when a task exits.
 * This implies that userspace needs to do the per-process aggregation.
 */
BPF_HASH(sent_http_responses, u64, struct http_response_codes_t);

/* Hash map from (Task group id|Task id) to (Pointer to data to send).
 * We need to save the pointer to where the data are written by copy_from_iter().
 */
BPF_HASH(copy_from_iter_args_table, u64, struct copy_from_iter_args_t);

/* Hash map from (Task group id|Task id) to (Flag if copy is pending).
 * We need to save the data to the copy_from_iter_args_table hash map only when
 * copy_from_iter() is called by tcp_sendmsg(), and only the first time it is called.
 * We only check whether an element is present in the hash map; the value is ignored.
 */
BPF_HASH(tcp_sendmsg_copy_pending, u64, int);

/* Parse an HTTP code from string to int. */
static int http_code_atoi(char hundreds, char tens, char units)
{
	if (hundreds < '0' || hundreds > '9') {
		return -1;
	} else {
		hundreds -= '0';
	}
	if (tens < '0' || tens > '9') {
		return -1;
	} else {
		tens -= '0';
	}
	if (units < '0' || units > '9') {
		return -1;
	} else {
		units -= '0';
	}

	return (hundreds * 100 + tens * 10 + units);
}

/* Update HTTP codes in the BPF hash table.
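 *
 * lookup_or_init() returns the task's current counters, inserting a zeroed
 * struct on first use; we copy the struct, bump the slot matching http_code
 * (or HTTP_CODE_OTHER for codes we do not track) and write the copy back
 * with update().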
 */
static int update_sent_http_responses_codes(u64 pid_tgid, int http_code)
{
	struct http_response_codes_t new_codes_counts = {0, };

	struct http_response_codes_t *current_codes_counts = sent_http_responses.lookup_or_init(&pid_tgid, &new_codes_counts);
	new_codes_counts = *current_codes_counts;

	switch (http_code) {
	case 100:
		new_codes_counts.codes[_100]++;
		break;

	case 200:
		new_codes_counts.codes[_200]++;
		break;
	case 201:
		new_codes_counts.codes[_201]++;
		break;
	case 202:
		new_codes_counts.codes[_202]++;
		break;
	case 204:
		new_codes_counts.codes[_204]++;
		break;

	case 308:
		new_codes_counts.codes[_308]++;
		break;

	case 400:
		new_codes_counts.codes[_400]++;
		break;
	case 401:
		new_codes_counts.codes[_401]++;
		break;
	case 403:
		new_codes_counts.codes[_403]++;
		break;
	case 404:
		new_codes_counts.codes[_404]++;
		break;
	case 408:
		new_codes_counts.codes[_408]++;
		break;
	case 451:
		new_codes_counts.codes[_451]++;
		break;

	case 500:
		new_codes_counts.codes[_500]++;
		break;
	case 501:
		new_codes_counts.codes[_501]++;
		break;
	case 502:
		new_codes_counts.codes[_502]++;
		break;
	case 503:
		new_codes_counts.codes[_503]++;
		break;

	default:
		/* HTTP response code not tracked, use the catch all */
		new_codes_counts.codes[HTTP_CODE_OTHER]++;
	}
	sent_http_responses.update(&pid_tgid, &new_codes_counts);
	return 0;
}

/* When tcp_sendmsg is invoked, we use tcp_sendmsg_copy_pending to signal that
 * {kprobe,kretprobe}__copy_from_iter will have to analyze the copied data.
 */
int kprobe__tcp_sendmsg(struct pt_regs *ctx, struct sock *sk, struct msghdr *msg, size_t size)
{
	struct sock *skp = 0;
	bpf_probe_read(&skp, sizeof(struct sock *), &sk);
	unsigned short skc_family = 0;
	bpf_probe_read(&skc_family, sizeof(unsigned short), &skp->__sk_common.skc_family);

	/* We make sure to track only inet sockets. */
	if (skc_family != PF_INET && skc_family != PF_INET6)
		return 0;

	u64 pid_tgid = bpf_get_current_pid_tgid();
	int val = 0;
	tcp_sendmsg_copy_pending.update(&pid_tgid, &val);
	return 0;
}

/* Clean up when tcp_sendmsg() returns. */
int kretprobe__tcp_sendmsg(struct pt_regs *ctx)
{
	u64 pid_tgid = bpf_get_current_pid_tgid();
	/* Extra safety delete in case copy_from_iter() was not called (e.g. the iovec is empty). */
	tcp_sendmsg_copy_pending.delete(&pid_tgid);
	return 0;
}

/* copy_from_iter() is called within tcp_sendmsg() but it could also be called
 * from elsewhere, so we check whether we were called from within tcp_sendmsg().
 *
 * See http://lxr.free-electrons.com/source/include/net/sock.h#L1771 for more functions to track.
 * We cannot hook skb_do_copy_data_nocache() because it is inlined, so we need to hook the functions called by it.
 * We track only copy_from_iter(); this seems sufficient in the container context because network cards are virtual.
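 *
 * Both hash maps are keyed by bpf_get_current_pid_tgid(), which pairs each
 * kprobe with its kretprobe on the same task even when many tasks send
 * concurrently.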
 */
int kprobe__copy_from_iter(struct pt_regs *ctx, void *addr, size_t bytes, struct iov_iter *i)
{
	u64 pid_tgid = bpf_get_current_pid_tgid();
	int *copy_pending = (int *)tcp_sendmsg_copy_pending.lookup(&pid_tgid);

	/* Check if we have some pending copy.
	 * copy_from_iter() may be called by functions other than tcp_sendmsg(), but we care only for data from it.
	 */
	if (copy_pending == NULL) {
		return 0;
	}

	/* We are in the tcp_sendmsg function, save the buffer pointer.
	 * No risk of overwriting because of copy_pending != NULL.
	 */
	struct copy_from_iter_args_t cfia = {0,};
	bpf_probe_read(&cfia.data, sizeof(void *), &addr);
	bpf_probe_read(&cfia.bytes, sizeof(size_t), &bytes);
	copy_from_iter_args_table.update(&pid_tgid, &cfia);
	return 0;
}

int kretprobe__copy_from_iter(struct pt_regs *ctx)
{
	u64 pid_tgid = bpf_get_current_pid_tgid();
	struct copy_from_iter_args_t *cfia_p = (struct copy_from_iter_args_t *)copy_from_iter_args_table.lookup(&pid_tgid);
	if (cfia_p == NULL) {
		return 0;
	}

	/* Remove the hash table entry before processing the pending copy */
	tcp_sendmsg_copy_pending.delete(&pid_tgid);
	/* Remove the hash table entry before reading the buffer */
	copy_from_iter_args_table.delete(&pid_tgid);

	struct copy_from_iter_args_t cfia = {0,};
	bpf_probe_read(&cfia, sizeof(struct copy_from_iter_args_t), cfia_p);

	unsigned char data[HTTP_CODE_MSG_LEN] = {0,};
	bpf_probe_read(&data, HTTP_CODE_MSG_LEN, cfia.data);

	/* eBPF does not have strncmp() yet, see https://github.com/iovisor/bcc/issues/691
	 * Compare the buffer to "HTTP/1.1 ".
	 */
	if (data[0] != 'H' || data[1] != 'T' || data[2] != 'T' || data[3] != 'P' ||
	    data[4] != '/' || data[5] != '1' || data[6] != '.' || data[7] != '1' || data[8] != ' ') {
		return 0;
	}

	int http_code = http_code_atoi(data[9], data[10], data[11]);
	update_sent_http_responses_codes(pid_tgid, http_code);

	return 0;
}

/* Clear out response count entries of tasks on exit */
int kprobe__do_exit(struct pt_regs *ctx) {
	u64 pid_tgid = bpf_get_current_pid_tgid();
	received_http_requests.delete(&pid_tgid);
	sent_http_responses.delete(&pid_tgid);
	return 0;
}

--------------------------------------------------------------------------------
/http-statistics.py:
--------------------------------------------------------------------------------
#!/usr/bin/env python
import bcc

import time
import collections
import datetime
import os
import signal
import errno
import json
try:
    from urllib.parse import urlparse
except ImportError:
    from urlparse import urlparse
import threading
import socket
import BaseHTTPServer
import SocketServer
import string
import shutil

EBPF_PROGRAM = "ebpf-http-statistics.c"
EBPF_REQUEST_RATE_TABLE_NAME = "received_http_requests"
EBPF_RESPONSE_CODE_TABLE_NAME = "sent_http_responses"
PLUGIN_ID = "http-statistics"
PLUGIN_UNIX_SOCK = "/var/run/scope/plugins/" + PLUGIN_ID + "/" + PLUGIN_ID + ".sock"

# Keep in sync with the http_codes enum in ebpf-http-statistics.c
idx_to_http_code = {
    "0": "100",

    "1": "200",
    "2": "201",
    "3": "202",
    "4": "204",

    "5": "308",

    "6": "400",
    "7": "401",
    "8": "403",
    "9": "404",
    "10": "408",
    "11": "451",

    "12": "500",
    "13": "501",
    "14": "502",
    "15": "503",

    "16": "OTHERS",
}


class KernelInspector(threading.Thread):
    def __init__(self):
        super(KernelInspector, self).__init__()
        self.bpf = bcc.BPF(EBPF_PROGRAM)
        self.http_request_rate_per_pid = dict()
        self.http_resp_code_rate_per_pid = dict()
        self.lock = threading.Lock()

    def update_http_request_rate_per_pid(self, last_req_count_snapshot):
        # Aggregate the kernel's per-task http request counts into userland's
        # per-process counts
        req_count_table = self.bpf.get_table(EBPF_REQUEST_RATE_TABLE_NAME)
        new_req_count_snapshot = collections.defaultdict(int)
        for pid_tgid, req_count in req_count_table.iteritems():
            # Note that the kernel's tgid maps into userland's pid
            # (not to be confused with the kernel's pid, which is
            # the unique identifier of a kernel task)
            pid = pid_tgid.value >> 32
            new_req_count_snapshot[pid] += req_count.value

        # Compute request rate
        new_http_request_rate_per_pid = dict()
        for pid, req_count in new_req_count_snapshot.iteritems():
            request_delta = req_count
            if pid in last_req_count_snapshot:
                request_delta -= last_req_count_snapshot[pid]
            new_http_request_rate_per_pid[pid] = request_delta

        self.lock.acquire()
        self.http_request_rate_per_pid = new_http_request_rate_per_pid
        self.lock.release()

        return new_req_count_snapshot

    def update_http_resp_per_pid(self, last_resp_count_snapshot):
        # Aggregate the kernel's per-task http response code counts into
        # userland's per-process counts
        resp_count_table = self.bpf.get_table(EBPF_RESPONSE_CODE_TABLE_NAME)
        new_resp_count_snapshot = collections.defaultdict(dict)

        for pid_tgid, codes_counts in resp_count_table.iteritems():
            # Note that the kernel's tgid maps into userland's pid
            # (not to be confused with the kernel's pid, which is
            # the unique identifier of a kernel task)
            pid = pid_tgid.value >> 32
            if pid not in new_resp_count_snapshot:
                new_resp_count_snapshot[pid] = collections.defaultdict(int)

            for code in range(len(codes_counts.codes)):
                code_count = codes_counts.codes[code]
                if code not in new_resp_count_snapshot[pid]:
                    new_resp_count_snapshot[pid][code] = 0
                new_resp_count_snapshot[pid][code] += code_count

        # Compute response codes rate
        new_http_resp_code_rate_per_pid = dict()
        for pid, resp_codes in new_resp_count_snapshot.iteritems():
            if pid not in new_http_resp_code_rate_per_pid:
                new_http_resp_code_rate_per_pid[pid] = collections.defaultdict(dict)
            for code in resp_codes:
                resp_code_delta = resp_codes[code]
                if pid in last_resp_count_snapshot:
                    resp_code_delta -= last_resp_count_snapshot[pid][code]
                new_http_resp_code_rate_per_pid[pid][code] = resp_code_delta

        self.lock.acquire()
        self.http_resp_code_rate_per_pid = new_http_resp_code_rate_per_pid
        self.lock.release()

        return new_resp_count_snapshot

    def on_http_request_rate_per_pid(self, f):
        self.lock.acquire()
        r = f(self.http_request_rate_per_pid)
        self.lock.release()
        return r

    def on_http_resp_per_pid(self, f):
        self.lock.acquire()
        r = f(self.http_resp_code_rate_per_pid)
        self.lock.release()
        return r

    def on_http_stats(self, f):
        self.lock.acquire()
        r = f(self.http_request_rate_per_pid, self.http_resp_code_rate_per_pid)
        self.lock.release()
        return r

    def run(self):
        # Compute request rates based on the request counts from the last
        # second. It would be simpler to clear the table and wait one second,
        # but clear() is expensive (each entry is individually cleared with a
        # system call) and less robust (it contends with the increments done
        # by the kernel probe).
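        # Instead, we keep the previous absolute counts and diff a fresh
        # snapshot against them once per second; table entries are only
        # removed by the kernel side when a task exits (kprobe__do_exit).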
        req_count_snapshot = collections.defaultdict(int)
        resp_count_snapshot = collections.defaultdict(dict)
        while True:
            time.sleep(1)
            req_count_snapshot = self.update_http_request_rate_per_pid(req_count_snapshot)
            resp_count_snapshot = self.update_http_resp_per_pid(resp_count_snapshot)


class PluginRequestHandler(BaseHTTPServer.BaseHTTPRequestHandler):
    protocol_version = 'HTTP/1.1'

    def __init__(self, *args, **kwargs):
        self.request_log = ''
        BaseHTTPServer.BaseHTTPRequestHandler.__init__(self, *args, **kwargs)

    def do_GET(self):
        self.request_log = ''
        path = urlparse(self.path)[2].lower()
        if path == '/report':
            self.do_report()
        else:
            self.send_response(404)
            self.send_header('Content-length', 0)
            self.end_headers()

    def get_process_nodes(self, http_request_rate_per_pid, http_resp_code_rate_per_pid):
        # Get the current timestamp in RFC3339
        date = datetime.datetime.utcnow()
        date = date.isoformat('T') + 'Z'
        process_nodes = collections.defaultdict(dict)
        for pid, http_request_rate in http_request_rate_per_pid.iteritems():
            node_key = "%s;%d" % (self.server.hostname, pid)
            if node_key not in process_nodes:
                process_nodes[node_key] = collections.defaultdict(dict)
                process_nodes[node_key]['metrics'] = collections.defaultdict(dict)

            process_nodes[node_key]['metrics']['http_requests_per_second'] = {
                'samples': [{
                    'date': date,
                    'value': float(http_request_rate),
                }]
            }
        response_code_key_list = list()
        for pid, http_responses_code_rate in http_resp_code_rate_per_pid.iteritems():
            node_key = "%s;%d" % (self.server.hostname, pid)
            for code, rate in http_responses_code_rate.iteritems():
                if rate == 0:
                    continue
                if node_key not in process_nodes:
                    process_nodes[node_key] = collections.defaultdict(dict)
                    process_nodes[node_key]['metrics'] = collections.defaultdict(dict)

                response_code_key = 'http_' + idx_to_http_code[str(code)] + '_responses_per_second'
                response_code_key_list.append(response_code_key)
                process_nodes[node_key]['metrics'][response_code_key] = {
                    'samples': [{
                        'date': date,
                        'value': float(rate),
                    }]
                }

        return process_nodes, response_code_key_list

    def do_report(self):
        kernel_inspector = self.server.kernel_inspector
        process_nodes, response_code_key_list = kernel_inspector.on_http_stats(self.get_process_nodes)
        metric_templates = collections.defaultdict(dict)
        priority = 0.1
        metric_templates['http_requests_per_second'] = {
            'id': 'http_requests_per_second',
            'label': 'HTTP Req/Second',
            'priority': priority,
        }
        for response_code_key in response_code_key_list:
            http_code = string.split(response_code_key, '_')[1]
            http_code_priority = http_code
            if http_code == "OTHERS":
                http_code_priority = "1000"
            metric_templates[response_code_key] = {
                'id': response_code_key,
                'label': 'HTTP Resp ' + http_code + '/Second',
                'priority': (float(http_code_priority) / 1000),
            }
        report = {
            'Process': {
                'nodes': process_nodes,
                'metric_templates': metric_templates,
            },
            'Plugins': [
                {
                    'id': PLUGIN_ID,
                    'label': 'HTTP Statistics',
                    'description': 'Adds http request metrics to processes',
                    'interfaces': ['reporter'],
                    'api_version': '1',
                }
            ]
        }
        body = json.dumps(report)
        self.request_log = "resp_size=%d, resp_entry_count=%d" % (len(body), len(process_nodes))
        self.send_response(200)
        self.send_header('Content-type', 'application/json')
        self.send_header('Content-length', len(body))
        self.end_headers()
        self.wfile.write(body)

    def log_request(self, code='-', size='-'):
        request_log = ''
        if self.request_log:
            request_log = ' (%s)' % self.request_log
        self.log_message('"%s" %s %s%s',
                         self.requestline, str(code), str(size), request_log)


class PluginServer(SocketServer.ThreadingUnixStreamServer):
    daemon_threads = True

    def __init__(self, socket_file, kernel_inspector):
        self.socket_file = socket_file
        self.delete_plugin_directory()
        mkdir_p(os.path.dirname(socket_file))
        self.kernel_inspector = kernel_inspector
        self.hostname = socket.gethostname()
        SocketServer.UnixStreamServer.__init__(self, socket_file, PluginRequestHandler)

    def finish_request(self, request, _):
        # Make the logger happy by providing a phony client_address
        self.RequestHandlerClass(request, '-', self)

    def delete_plugin_directory(self):
        if os.path.exists(os.path.dirname(self.socket_file)):
            shutil.rmtree(os.path.dirname(self.socket_file), ignore_errors=True)


def mkdir_p(path):
    try:
        # We set the permissions to 0700 because only the owner and root
        # should be able to access the plugin directory.
        os.makedirs(path, mode=0o700)
    except OSError as exc:
        if exc.errno == errno.EEXIST and os.path.isdir(path):
            pass
        else:
            raise


if __name__ == '__main__':
    kernel_inspector = KernelInspector()
    kernel_inspector.setDaemon(True)
    kernel_inspector.start()
    plugin_server = PluginServer(PLUGIN_UNIX_SOCK, kernel_inspector)

    def sig_handler(b, a):
        plugin_server.delete_plugin_directory()
        exit(0)
    signal.signal(signal.SIGTERM, sig_handler)
    signal.signal(signal.SIGINT, sig_handler)
    try:
        plugin_server.serve_forever()
    except:
        plugin_server.delete_plugin_directory()
        raise

--------------------------------------------------------------------------------
/imgs/http-statistics.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/weaveworks-plugins/scope-http-statistics/e9470d5b570432bca5dc80b7b71bb18e6936cc7e/imgs/http-statistics.png

--------------------------------------------------------------------------------
/test-http-statistics.sh:
--------------------------------------------------------------------------------
#!/bin/sh -e

TARGET_IP="127.0.0.1"
TARGET_PORT="8080"

# This ensures that there is always at least one connection visible in the httpd view.
# This is a workaround until https://github.com/weaveworks/scope/issues/1257 is fixed.
/bin/nc $TARGET_IP $TARGET_PORT &

while true; do
	curl http://$TARGET_IP:$TARGET_PORT >/dev/null 2>&1
	curl http://$TARGET_IP:$TARGET_PORT/not/exist >/dev/null 2>&1
	sleep 0.2
done

--------------------------------------------------------------------------------
/tools/.gitignore:
--------------------------------------------------------------------------------
cover/cover
socks/proxy
socks/image.tar
runner/runner
cmd/wcloud/wcloud
*.pyc
*~

--------------------------------------------------------------------------------
/tools/README.md:
--------------------------------------------------------------------------------
# Weaveworks Build Tools

Included in this repo are tools shared by weave.git and scope.git. They include:

- ```cover```: a tool which merges overlapping coverage reports generated by go
  test
- ```files-with-type```: a tool to search directories for files of a given
  MIME type
- ```lint```: a script to lint Go projects; runs various tools like golint, go
  vet, errcheck etc.
- ```rebuild-image```: a script to rebuild docker images when their input files
  change; useful when you're using docker images to build your software, but you
  don't want to rebuild the image every time.
- ```shell-lint```: a script to lint multiple shell files with
  [shellcheck](http://www.shellcheck.net/)
- ```socks```: a simple, dockerised SOCKS proxy for getting your laptop onto
  the Weave network
- ```test```: a script to run all go unit tests in subdirectories, gather the
  coverage results, and merge them into a single report.
- ```runner```: a tool for running tests in parallel; given each test is
  suffixed with the number of hosts it requires, and the hosts available are
  contained in the environment variable HOSTS, the tool will run tests in
  parallel, on different hosts.
- ```scheduler```: an appengine application that can be used to distribute
  tests across different shards in CircleCI.

## Using build-tools.git

To allow you to tie your code to a specific version of build-tools.git, such
that future changes don't break you, we recommend that you [`git subtree`]
this repository into your own repository:

[`git subtree`]: http://blogs.atlassian.com/2013/05/alternatives-to-git-submodule-git-subtree/

```
git subtree add --prefix tools https://github.com/weaveworks/build-tools.git master --squash
```

To update the code in build-tools.git, the process is therefore:
- PR into build-tools.git, go through normal review process etc.
- Do `git subtree pull --prefix tools https://github.com/weaveworks/build-tools.git master --squash`
  in your repo, and PR that.

--------------------------------------------------------------------------------
/tools/circle.yml:
--------------------------------------------------------------------------------
machine:
  services:
    - docker
  environment:
    GOPATH: /home/ubuntu
    SRCDIR: /home/ubuntu/src/github.com/weaveworks/tools
    PATH: $PATH:$HOME/bin

dependencies:
  post:
    - go clean -i net
    - go install -tags netgo std
    - mkdir -p $(dirname $SRCDIR)
    - cp -r $(pwd)/ $SRCDIR
    - go get github.com/golang/lint/golint github.com/fzipp/gocyclo github.com/kisielk/errcheck

test:
  override:
    - cd $SRCDIR; ./lint .
    - cd $SRCDIR/cover; make
    - cd $SRCDIR/socks; make
    - cd $SRCDIR/runner; make
    - cd $SRCDIR/cmd/wcloud; make

--------------------------------------------------------------------------------
/tools/cmd/wcloud/Makefile:
--------------------------------------------------------------------------------
.PHONY: all clean

all: wcloud

wcloud: *.go
	go get ./$(@D)
	go build -o $@ ./$(@D)

clean:
	rm -rf wcloud
	go clean ./...

--------------------------------------------------------------------------------
/tools/cmd/wcloud/cli.go:
--------------------------------------------------------------------------------
package main

import (
	"bytes"
	"encoding/json"
	"flag"
	"fmt"
	"io/ioutil"
	"os"
	"os/user"
	"path/filepath"
	"strings"
	"time"

	"github.com/olekukonko/tablewriter"
	"gopkg.in/yaml.v2"
)

// ArrayFlags allows you to collect repeated flags
type ArrayFlags []string

func (a *ArrayFlags) String() string {
	return strings.Join(*a, ",")
}

// Set implements flags.Value
func (a *ArrayFlags) Set(value string) error {
	*a = append(*a, value)
	return nil
}

func env(key, def string) string {
	if val, ok := os.LookupEnv(key); ok {
		return val
	}
	return def
}

var (
	token   = env("SERVICE_TOKEN", "")
	baseURL = env("BASE_URL", "https://cloud.weave.works")
)

func usage() {
	fmt.Println(`Usage:
	deploy <image>:<version>   Deploy image to your configured env
	list                       List recent deployments
	config (<filename>)        Get (or set) the configured env
	logs <deployment>          Show logs for the given deployment
	events                     Show raw events from the deploy service`)
}

func main() {
	if len(os.Args) <= 1 {
		usage()
		os.Exit(1)
	}

	c := NewClient(token, baseURL)

	switch os.Args[1] {
	case "deploy":
		deploy(c, os.Args[2:])
	case "list":
		list(c, os.Args[2:])
	case "config":
		config(c, os.Args[2:])
	case "logs":
		logs(c, os.Args[2:])
	case "events":
		events(c, os.Args[2:])
	case "help":
		usage()
	default:
		usage()
	}
}

func deploy(c Client, args []string) {
	var (
		flags    = flag.NewFlagSet("", flag.ContinueOnError)
		username = flags.String("u", "", "Username to report to deploy service (default will be the current user)")
		services ArrayFlags
	)
	flags.Var(&services, "service", "Service to update (can be repeated)")
	if err := flags.Parse(args); err != nil {
		usage()
		return
	}
	args = flags.Args()
	if len(args) != 1 {
		usage()
		return
	}
	parts := strings.SplitN(args[0], ":", 2)
	if len(parts) < 2 {
		usage()
		return
	}
	if *username == "" {
		user, err := user.Current()
		if err != nil {
			fmt.Println(err.Error())
			os.Exit(1)
		}
		*username = user.Username
	}
	deployment := Deployment{
		ImageName:        parts[0],
		Version:          parts[1],
		TriggeringUser:   *username,
		IntendedServices: services,
	}
	if err := c.Deploy(deployment); err != nil {
		fmt.Println(err.Error())
		os.Exit(1)
	}
}

func list(c Client, args []string) {
	var (
		flags = flag.NewFlagSet("", flag.ContinueOnError)
		since = flags.Duration("since", 7*24*time.Hour, "How far back to fetch results")
	)
	if err := flags.Parse(args); err != nil {
		usage()
		return
	}
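	// Fetch the deployments in the window [now-since, now] and render
	// them as a table on stdout.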
128 | through := time.Now() 129 | from := through.Add(-*since) 130 | deployments, err := c.GetDeployments(from.Unix(), through.Unix()) 131 | if err != nil { 132 | fmt.Println(err.Error()) 133 | os.Exit(1) 134 | } 135 | 136 | table := tablewriter.NewWriter(os.Stdout) 137 | table.SetHeader([]string{"Created", "ID", "Image", "Version", "State"}) 138 | table.SetBorder(false) 139 | table.SetColumnSeparator(" ") 140 | for _, deployment := range deployments { 141 | table.Append([]string{ 142 | deployment.CreatedAt.Format(time.RFC822), 143 | deployment.ID, 144 | deployment.ImageName, 145 | deployment.Version, 146 | deployment.State, 147 | }) 148 | } 149 | table.Render() 150 | } 151 | 152 | func events(c Client, args []string) { 153 | var ( 154 | flags = flag.NewFlagSet("", flag.ContinueOnError) 155 | since = flags.Duration("since", 7*24*time.Hour, "How far back to fetch results") 156 | ) 157 | if err := flags.Parse(args); err != nil { 158 | usage() 159 | return 160 | } 161 | through := time.Now() 162 | from := through.Add(-*since) 163 | events, err := c.GetEvents(from.Unix(), through.Unix()) 164 | if err != nil { 165 | fmt.Println(err.Error()) 166 | os.Exit(1) 167 | } 168 | 169 | fmt.Println("events: ", string(events)) 170 | } 171 | 172 | func loadConfig(filename string) (*Config, error) { 173 | extension := filepath.Ext(filename) 174 | var config Config 175 | buf, err := ioutil.ReadFile(filename) 176 | if err != nil { 177 | return nil, err 178 | } 179 | if extension == ".yaml" || extension == ".yml" { 180 | if err := yaml.Unmarshal(buf, &config); err != nil { 181 | return nil, err 182 | } 183 | } else { 184 | if err := json.NewDecoder(bytes.NewReader(buf)).Decode(&config); err != nil { 185 | return nil, err 186 | } 187 | } 188 | return &config, nil 189 | } 190 | 191 | func config(c Client, args []string) { 192 | if len(args) > 1 { 193 | usage() 194 | return 195 | } 196 | 197 | if len(args) == 1 { 198 | config, err := loadConfig(args[0]) 199 | if err != nil { 200 | fmt.Println("Error reading config:", err) 201 | os.Exit(1) 202 | } 203 | 204 | if err := c.SetConfig(config); err != nil { 205 | fmt.Println(err.Error()) 206 | os.Exit(1) 207 | } 208 | } else { 209 | config, err := c.GetConfig() 210 | if err != nil { 211 | fmt.Println(err.Error()) 212 | os.Exit(1) 213 | } 214 | 215 | buf, err := yaml.Marshal(config) 216 | if err != nil { 217 | fmt.Println(err.Error()) 218 | os.Exit(1) 219 | } 220 | 221 | fmt.Println(string(buf)) 222 | } 223 | } 224 | 225 | func logs(c Client, args []string) { 226 | if len(args) != 1 { 227 | usage() 228 | return 229 | } 230 | 231 | output, err := c.GetLogs(args[0]) 232 | if err != nil { 233 | fmt.Println(err.Error()) 234 | os.Exit(1) 235 | } 236 | 237 | fmt.Println(string(output)) 238 | } 239 | -------------------------------------------------------------------------------- /tools/cmd/wcloud/client.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "bytes" 5 | "encoding/json" 6 | "fmt" 7 | "io" 8 | "io/ioutil" 9 | "net/http" 10 | ) 11 | 12 | // Client for the deployment service 13 | type Client struct { 14 | token string 15 | baseURL string 16 | } 17 | 18 | // NewClient makes a new Client 19 | func NewClient(token, baseURL string) Client { 20 | return Client{ 21 | token: token, 22 | baseURL: baseURL, 23 | } 24 | } 25 | 26 | func (c Client) newRequest(method, path string, body io.Reader) (*http.Request, error) { 27 | req, err := http.NewRequest(method, c.baseURL+path, body) 28 | if err != nil { 29 | 
return nil, err 30 | } 31 | req.Header.Add("Authorization", fmt.Sprintf("Scope-Probe token=%s", c.token)) 32 | return req, nil 33 | } 34 | 35 | // Deploy notifies the deployment service about a new deployment 36 | func (c Client) Deploy(deployment Deployment) error { 37 | var buf bytes.Buffer 38 | if err := json.NewEncoder(&buf).Encode(deployment); err != nil { 39 | return err 40 | } 41 | req, err := c.newRequest("POST", "/api/deploy/deploy", &buf) 42 | if err != nil { 43 | return err 44 | } 45 | res, err := http.DefaultClient.Do(req) 46 | if err != nil { 47 | return err 48 | } 49 | if res.StatusCode != 204 { 50 | return fmt.Errorf("Error making request: %s", res.Status) 51 | } 52 | return nil 53 | } 54 | 55 | // GetDeployments returns a list of deployments 56 | func (c Client) GetDeployments(from, through int64) ([]Deployment, error) { 57 | req, err := c.newRequest("GET", fmt.Sprintf("/api/deploy/deploy?from=%d&through=%d", from, through), nil) 58 | if err != nil { 59 | return nil, err 60 | } 61 | res, err := http.DefaultClient.Do(req) 62 | if err != nil { 63 | return nil, err 64 | } 65 | if res.StatusCode != 200 { 66 | return nil, fmt.Errorf("Error making request: %s", res.Status) 67 | } 68 | var response struct { 69 | Deployments []Deployment `json:"deployments"` 70 | } 71 | if err := json.NewDecoder(res.Body).Decode(&response); err != nil { 72 | return nil, err 73 | } 74 | return response.Deployments, nil 75 | } 76 | 77 | // GetEvents returns the raw events. 78 | func (c Client) GetEvents(from, through int64) ([]byte, error) { 79 | req, err := c.newRequest("GET", fmt.Sprintf("/api/deploy/event?from=%d&through=%d", from, through), nil) 80 | if err != nil { 81 | return nil, err 82 | } 83 | res, err := http.DefaultClient.Do(req) 84 | if err != nil { 85 | return nil, err 86 | } 87 | if res.StatusCode != 200 { 88 | return nil, fmt.Errorf("Error making request: %s", res.Status) 89 | } 90 | return ioutil.ReadAll(res.Body) 91 | } 92 | 93 | // GetConfig returns the current Config 94 | func (c Client) GetConfig() (*Config, error) { 95 | req, err := c.newRequest("GET", "/api/config/deploy", nil) 96 | if err != nil { 97 | return nil, err 98 | } 99 | res, err := http.DefaultClient.Do(req) 100 | if err != nil { 101 | return nil, err 102 | } 103 | if res.StatusCode == 404 { 104 | return nil, fmt.Errorf("No configuration uploaded yet.") 105 | } 106 | if res.StatusCode != 200 { 107 | return nil, fmt.Errorf("Error making request: %s", res.Status) 108 | } 109 | var config Config 110 | if err := json.NewDecoder(res.Body).Decode(&config); err != nil { 111 | return nil, err 112 | } 113 | return &config, nil 114 | } 115 | 116 | // SetConfig sets the current Config 117 | func (c Client) SetConfig(config *Config) error { 118 | var buf bytes.Buffer 119 | if err := json.NewEncoder(&buf).Encode(config); err != nil { 120 | return err 121 | } 122 | req, err := c.newRequest("POST", "/api/config/deploy", &buf) 123 | if err != nil { 124 | return err 125 | } 126 | res, err := http.DefaultClient.Do(req) 127 | if err != nil { 128 | return err 129 | } 130 | if res.StatusCode != 204 { 131 | return fmt.Errorf("Error making request: %s", res.Status) 132 | } 133 | return nil 134 | } 135 | 136 | // GetLogs returns the logs for a given deployment. 
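// The log is returned as raw bytes, exactly as the deploy service sent it.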
137 | func (c Client) GetLogs(deployID string) ([]byte, error) {
138 | req, err := c.newRequest("GET", fmt.Sprintf("/api/deploy/deploy/%s/log", deployID), nil)
139 | if err != nil {
140 | return nil, err
141 | }
142 | res, err := http.DefaultClient.Do(req)
143 | if err != nil {
144 | return nil, err
145 | }
146 | if res.StatusCode != 200 {
147 | return nil, fmt.Errorf("Error making request: %s", res.Status)
148 | }
149 | return ioutil.ReadAll(res.Body)
150 | }
151 |
--------------------------------------------------------------------------------
/tools/cmd/wcloud/types.go:
--------------------------------------------------------------------------------
1 | package main
2 |
3 | import (
4 | "time"
5 | )
6 |
7 | // Deployment describes a deployment
8 | type Deployment struct {
9 | ID string `json:"id"`
10 | CreatedAt time.Time `json:"created_at"`
11 | ImageName string `json:"image_name"`
12 | Version string `json:"version"`
13 | Priority int `json:"priority"`
14 | State string `json:"status"`
15 |
16 | TriggeringUser string `json:"triggering_user"`
17 | IntendedServices []string `json:"intended_services"`
18 | }
19 |
20 | // Config for the deployment system for a user.
21 | type Config struct {
22 | RepoURL string `json:"repo_url" yaml:"repo_url"`
23 | RepoBranch string `json:"repo_branch" yaml:"repo_branch"`
24 | RepoPath string `json:"repo_path" yaml:"repo_path"`
25 | RepoKey string `json:"repo_key" yaml:"repo_key"`
26 | KubeconfigPath string `json:"kubeconfig_path" yaml:"kubeconfig_path"`
27 | AutoApply bool `json:"auto_apply" yaml:"auto_apply"`
28 |
29 | Notifications []NotificationConfig `json:"notifications" yaml:"notifications"`
30 |
31 | // Globs of files not to change, relative to the root of the repo
32 | ConfigFileBlackList []string `json:"config_file_black_list" yaml:"config_file_black_list"`
33 |
34 | CommitMessageTemplate string `json:"commit_message_template" yaml:"commit_message_template"` // See https://golang.org/pkg/text/template/
35 | }
36 |
37 | // NotificationConfig describes how to send notifications
38 | type NotificationConfig struct {
39 | SlackWebhookURL string `json:"slack_webhook_url" yaml:"slack_webhook_url"`
40 | SlackUsername string `json:"slack_username" yaml:"slack_username"`
41 | MessageTemplate string `json:"message_template" yaml:"message_template"`
42 | ApplyMessageTemplate string `json:"apply_message_template" yaml:"apply_message_template"`
43 | }
44 |
--------------------------------------------------------------------------------
/tools/cover/Makefile:
--------------------------------------------------------------------------------
1 | .PHONY: all clean
2 |
3 | all: cover
4 |
5 | cover: *.go
6 | go get -tags netgo ./$(@D)
7 | go build -ldflags "-extldflags \"-static\" -linkmode=external" -tags netgo -o $@ ./$(@D)
8 |
9 | clean:
10 | rm -rf cover
11 | go clean ./...
12 |
--------------------------------------------------------------------------------
/tools/cover/cover.go:
--------------------------------------------------------------------------------
1 | package main
2 |
3 | import (
4 | "fmt"
5 | "os"
6 | "sort"
7 |
8 | "golang.org/x/tools/cover"
9 | )
10 |
11 | func merge(p1, p2 *cover.Profile) *cover.Profile {
12 | output := cover.Profile{
13 | FileName: p1.FileName,
14 | Mode: p1.Mode,
15 | }
16 |
17 | i, j := 0, 0
18 | for i < len(p1.Blocks) && j < len(p2.Blocks) {
19 | bi, bj := p1.Blocks[i], p2.Blocks[j]
20 | if bi.StartLine == bj.StartLine && bi.StartCol == bj.StartCol {
21 |
22 | if bi.EndLine != bj.EndLine ||
23 | bi.EndCol != bj.EndCol ||
24 | bi.NumStmt != bj.NumStmt {
25 | panic("Not run on same source!")
26 | }
27 |
28 | output.Blocks = append(output.Blocks, cover.ProfileBlock{
29 | StartLine: bi.StartLine,
30 | StartCol: bi.StartCol,
31 | EndLine: bi.EndLine,
32 | EndCol: bi.EndCol,
33 | NumStmt: bi.NumStmt,
34 | Count: bi.Count + bj.Count,
35 | })
36 | i++
37 | j++
38 | } else if bi.StartLine < bj.StartLine || bi.StartLine == bj.StartLine && bi.StartCol < bj.StartCol {
39 | output.Blocks = append(output.Blocks, bi)
40 | i++
41 | } else {
42 | output.Blocks = append(output.Blocks, bj)
43 | j++
44 | }
45 | }
46 |
47 | for ; i < len(p1.Blocks); i++ {
48 | output.Blocks = append(output.Blocks, p1.Blocks[i])
49 | }
50 |
51 | for ; j < len(p2.Blocks); j++ {
52 | output.Blocks = append(output.Blocks, p2.Blocks[j])
53 | }
54 |
55 | return &output
56 | }
57 |
58 | func print(profiles []*cover.Profile) {
59 | fmt.Println("mode: atomic")
60 | for _, profile := range profiles {
61 | for _, block := range profile.Blocks {
62 | fmt.Printf("%s:%d.%d,%d.%d %d %d\n", profile.FileName, block.StartLine, block.StartCol,
63 | block.EndLine, block.EndCol, block.NumStmt, block.Count)
64 | }
65 | }
66 | }
67 |
68 | // Copied from https://github.com/golang/tools/blob/master/cover/profile.go
69 | type byFileName []*cover.Profile
70 |
71 | func (p byFileName) Len() int { return len(p) }
72 | func (p byFileName) Less(i, j int) bool { return p[i].FileName < p[j].FileName }
73 | func (p byFileName) Swap(i, j int) { p[i], p[j] = p[j], p[i] }
74 |
75 | func main() {
76 | outputProfiles := map[string]*cover.Profile{}
77 | for _, input := range os.Args[1:] {
78 | inputProfiles, err := cover.ParseProfiles(input)
79 | if err != nil {
80 | panic(fmt.Sprintf("Error parsing %s: %v", input, err))
81 | }
82 | for _, ip := range inputProfiles {
83 | op := outputProfiles[ip.FileName]
84 | if op == nil {
85 | outputProfiles[ip.FileName] = ip
86 | } else {
87 | outputProfiles[ip.FileName] = merge(op, ip)
88 | }
89 | }
90 | }
91 | profiles := make([]*cover.Profile, 0, len(outputProfiles))
92 | for _, profile := range outputProfiles {
93 | profiles = append(profiles, profile)
94 | }
95 | sort.Sort(byFileName(profiles))
96 | print(profiles)
97 | }
98 |
--------------------------------------------------------------------------------
/tools/cover/gather_coverage.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | # This script copies all the coverage reports from various circle shards,
3 | # merges them and produces a complete report.
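# Usage: gather_coverage.sh <destination dir> <source dir on the circle shards>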
4 |
5 | set -ex
6 | DESTINATION=$1
7 | FROMDIR=$2
8 | mkdir -p "$DESTINATION"
9 |
10 | if [ -n "$CIRCLECI" ]; then
11 | for i in $(seq 1 $((CIRCLE_NODE_TOTAL - 1))); do
12 | scp "node$i:$FROMDIR"/* "$DESTINATION" || true
13 | done
14 | fi
15 |
16 | go get github.com/weaveworks/build-tools/cover
17 | cover "$DESTINATION"/* >profile.cov
18 | go tool cover -html=profile.cov -o coverage.html
19 | go tool cover -func=profile.cov -o coverage.txt
20 | tar czf coverage.tar.gz "$DESTINATION"
21 |
--------------------------------------------------------------------------------
/tools/files-with-type:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 | #
3 | # Find all files with a given MIME type.
4 | #
5 | # e.g.
6 | # $ files-with-type text/x-shellscript k8s infra
7 |
8 | mime_type=$1
9 | shift
10 |
11 | git ls-files "$@" | grep -vE '^vendor/' | xargs file --mime-type | grep "${mime_type}" | sed -e 's/:.*$//'
12 |
--------------------------------------------------------------------------------
/tools/image-tag:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 |
3 | set -o errexit
4 | set -o nounset
5 | set -o pipefail
6 |
7 | WORKING_SUFFIX=$(if ! git diff --exit-code --quiet HEAD >&2; \
8 | then echo "-WIP"; \
9 | else echo ""; \
10 | fi)
11 | BRANCH_PREFIX=$(git rev-parse --abbrev-ref HEAD)
12 | echo "${BRANCH_PREFIX//\//-}-$(git rev-parse --short HEAD)$WORKING_SUFFIX"
13 |
--------------------------------------------------------------------------------
/tools/integration/assert.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | # assert.sh 1.1 - bash unit testing framework
3 | # Copyright (C) 2009-2015 Robert Lehmann
4 | #
5 | # http://github.com/lehmannro/assert.sh
6 | #
7 | # This program is free software: you can redistribute it and/or modify
8 | # it under the terms of the GNU Lesser General Public License as published
9 | # by the Free Software Foundation, either version 3 of the License, or
10 | # (at your option) any later version.
11 | #
12 | # This program is distributed in the hope that it will be useful,
13 | # but WITHOUT ANY WARRANTY; without even the implied warranty of
14 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 | # GNU Lesser General Public License for more details.
16 | #
17 | # You should have received a copy of the GNU Lesser General Public License
18 | # along with this program. If not, see <http://www.gnu.org/licenses/>.
19 |
20 | export DISCOVERONLY=${DISCOVERONLY:-}
21 | export DEBUG=${DEBUG:-}
22 | export STOP=${STOP:-}
23 | export INVARIANT=${INVARIANT:-}
24 | export CONTINUE=${CONTINUE:-}
25 |
26 | args="$(getopt -n "$0" -l \
27 | verbose,help,stop,discover,invariant,continue vhxdic "$@")" \
28 | || exit -1
29 | for arg in $args; do
30 | case "$arg" in
31 | -h)
32 | echo "$0 [-vxidc]" \
33 | "[--verbose] [--stop] [--invariant] [--discover] [--continue]"
34 | echo "$(sed 's/./ /g' <<< "$0") [-h] [--help]"
35 | exit 0;;
36 | --help)
37 | cat < [stdin]
103 | (( tests_ran++ )) || :
104 | [[ -z "$DISCOVERONLY" ]] || return
105 | expected=$(echo -ne "${2:-}")
106 | result="$(eval 2>/dev/null "$1" <<< "${3:-}")" || true
107 | if [[ "$result" == "$expected" ]]; then
108 | [[ -z "$DEBUG" ]] || echo -n .
109 | return
110 | fi
111 | result="$(sed -e :a -e '$!N;s/\n/\\n/;ta' <<< "$result")"
112 | [[ -z "$result" ]] && result="nothing" || result="\"$result\""
113 | [[ -z "$2" ]] && expected="nothing" || expected="\"$2\""
114 | _assert_fail "expected $expected${_indent}got $result" "$1" "$3"
115 | }
116 |
117 | assert_raises() {
118 | # assert_raises <command> <expected code> [stdin]
119 | (( tests_ran++ )) || :
120 | [[ -z "$DISCOVERONLY" ]] || return
121 | status=0
122 | (eval "$1" <<< "${3:-}") > /dev/null 2>&1 || status=$?
123 | expected=${2:-0}
124 | if [[ "$status" -eq "$expected" ]]; then
125 | [[ -z "$DEBUG" ]] || echo -n .
126 | return
127 | fi
128 | _assert_fail "program terminated with code $status instead of $expected" "$1" "$3"
129 | }
130 |
131 | _assert_fail() {
132 | # _assert_fail <failure> <command> <stdin>
133 | [[ -n "$DEBUG" ]] && echo -n X
134 | report="test #$tests_ran \"$2${3:+ <<< $3}\" failed:${_indent}$1"
135 | if [[ -n "$STOP" ]]; then
136 | [[ -n "$DEBUG" ]] && echo
137 | echo "$report"
138 | exit 1
139 | fi
140 | tests_errors[$tests_failed]="$report"
141 | (( tests_failed++ )) || :
142 | }
143 |
144 | skip_if() {
145 | # skip_if <command ..>
146 | (eval "$@") > /dev/null 2>&1 && status=0 || status=$?
147 | [[ "$status" -eq 0 ]] || return
148 | skip
149 | }
150 |
151 | skip() {
152 | # skip (no arguments)
153 | shopt -q extdebug && tests_extdebug=0 || tests_extdebug=1
154 | shopt -q -o errexit && tests_errexit=0 || tests_errexit=1
155 | # enable extdebug so returning 1 in a DEBUG trap handler skips next command
156 | shopt -s extdebug
157 | # disable errexit (set -e) so we can safely return 1 without causing exit
158 | set +o errexit
159 | tests_trapped=0
160 | trap _skip DEBUG
161 | }
162 | _skip() {
163 | if [[ $tests_trapped -eq 0 ]]; then
164 | # DEBUG trap for command we want to skip. Do not remove the handler
165 | # yet because *after* the command we need to reset extdebug/errexit (in
166 | # another DEBUG trap.)
167 | tests_trapped=1
168 | [[ -z "$DEBUG" ]] || echo -n s
169 | return 1
170 | else
171 | trap - DEBUG
172 | [[ $tests_extdebug -eq 0 ]] || shopt -u extdebug
173 | [[ $tests_errexit -eq 1 ]] || set -o errexit
174 | return 0
175 | fi
176 | }
177 |
178 |
179 | _assert_reset
180 | : ${tests_suite_status:=0} # remember if any of the tests failed so far
181 | _assert_cleanup() {
182 | local status=$?
183 | # modify exit code if it's not already non-zero
184 | [[ $status -eq 0 && -z $CONTINUE ]] && exit $tests_suite_status
185 | }
186 | trap _assert_cleanup EXIT
187 |
--------------------------------------------------------------------------------
/tools/integration/config.sh:
--------------------------------------------------------------------------------
1 | # NB only to be sourced
2 |
3 | set -e
4 |
5 | DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
6 |
7 | # Protect against being sourced multiple times to prevent
8 | # overwriting assert.sh global state
9 | if ! [ -z "$SOURCED_CONFIG_SH" ]; then
10 | return
11 | fi
12 | SOURCED_CONFIG_SH=true
13 |
14 | # these ought to match what is in Vagrantfile
15 | N_MACHINES=${N_MACHINES:-3}
16 | IP_PREFIX=${IP_PREFIX:-192.168.48}
17 | IP_SUFFIX_BASE=${IP_SUFFIX_BASE:-10}
18 |
19 | if [ -z "$HOSTS" ] ; then
20 | for i in $(seq 1 $N_MACHINES); do
21 | IP="${IP_PREFIX}.$((${IP_SUFFIX_BASE}+$i))"
22 | HOSTS="$HOSTS $IP"
23 | done
24 | fi
25 |
26 | # these are used by the tests
27 | HOST1=$(echo $HOSTS | cut -f 1 -d ' ')
28 | HOST2=$(echo $HOSTS | cut -f 2 -d ' ')
29 | HOST3=$(echo $HOSTS | cut -f 3 -d ' ')
30 |
31 | .
"$DIR/assert.sh" 32 | 33 | SSH_DIR=${SSH_DIR:-$DIR} 34 | SSH=${SSH:-ssh -l vagrant -i "$SSH_DIR/insecure_private_key" -o "UserKnownHostsFile=$SSH_DIR/.ssh_known_hosts" -o CheckHostIP=no -o StrictHostKeyChecking=no} 35 | 36 | SMALL_IMAGE="alpine" 37 | TEST_IMAGES="$SMALL_IMAGE" 38 | 39 | PING="ping -nq -W 1 -c 1" 40 | DOCKER_PORT=2375 41 | 42 | remote() { 43 | rem=$1 44 | shift 1 45 | "$@" > >(while read line; do echo -e $'\e[0;34m'"$rem>"$'\e[0m'" $line"; done) 46 | } 47 | 48 | colourise() { 49 | [ -t 0 ] && echo -ne $'\e['$1'm' || true 50 | shift 51 | # It's important that we don't do this in a subshell, as some 52 | # commands we execute need to modify global state 53 | "$@" 54 | [ -t 0 ] && echo -ne $'\e[0m' || true 55 | } 56 | 57 | whitely() { 58 | colourise '1;37' "$@" 59 | } 60 | 61 | greyly () { 62 | colourise '0;37' "$@" 63 | } 64 | 65 | redly() { 66 | colourise '1;31' "$@" 67 | } 68 | 69 | greenly() { 70 | colourise '1;32' "$@" 71 | } 72 | 73 | run_on() { 74 | host=$1 75 | shift 1 76 | [ -z "$DEBUG" ] || greyly echo "Running on $host: $@" >&2 77 | remote $host $SSH $host "$@" 78 | } 79 | 80 | docker_on() { 81 | host=$1 82 | shift 1 83 | [ -z "$DEBUG" ] || greyly echo "Docker on $host:$DOCKER_PORT: $@" >&2 84 | docker -H tcp://$host:$DOCKER_PORT "$@" 85 | } 86 | 87 | weave_on() { 88 | host=$1 89 | shift 1 90 | [ -z "$DEBUG" ] || greyly echo "Weave on $host:$DOCKER_PORT: $@" >&2 91 | DOCKER_HOST=tcp://$host:$DOCKER_PORT $WEAVE "$@" 92 | } 93 | 94 | exec_on() { 95 | host=$1 96 | container=$2 97 | shift 2 98 | docker -H tcp://$host:$DOCKER_PORT exec $container "$@" 99 | } 100 | 101 | rm_containers() { 102 | host=$1 103 | shift 104 | [ $# -eq 0 ] || docker_on $host rm -f "$@" >/dev/null 105 | } 106 | 107 | start_suite() { 108 | for host in $HOSTS; do 109 | [ -z "$DEBUG" ] || echo "Cleaning up on $host: removing all containers and resetting weave" 110 | PLUGIN_ID=$(docker_on $host ps -aq --filter=name=weaveplugin) 111 | PLUGIN_FILTER="cat" 112 | [ -n "$PLUGIN_ID" ] && PLUGIN_FILTER="grep -v $PLUGIN_ID" 113 | rm_containers $host $(docker_on $host ps -aq 2>/dev/null | $PLUGIN_FILTER) 114 | run_on $host "docker network ls | grep -q ' weave ' && docker network rm weave" || true 115 | weave_on $host reset 2>/dev/null 116 | done 117 | whitely echo "$@" 118 | } 119 | 120 | end_suite() { 121 | whitely assert_end 122 | } 123 | 124 | WEAVE=$DIR/../weave 125 | 126 | -------------------------------------------------------------------------------- /tools/integration/gce.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # This script has a bunch of GCE-related functions: 3 | # ./gce.sh setup - starts two VMs on GCE and configures them to run our integration tests 4 | # . ./gce.sh; ./run_all.sh - set a bunch of environment variables for the tests 5 | # ./gce.sh destroy - tear down the VMs 6 | # ./gce.sh make_template - make a fresh VM template; update TEMPLATE_NAME first! 
7 |
8 | set -e
9 |
10 | : "${KEY_FILE:=/tmp/gce_private_key.json}"
11 | : "${SSH_KEY_FILE:=$HOME/.ssh/gce_ssh_key}"
12 | : "${IMAGE:=ubuntu-14-04}"
13 | : "${ZONE:=us-central1-a}"
14 | : "${PROJECT:=}"
15 | : "${TEMPLATE_NAME:=}"
16 | : "${NUM_HOSTS:=}"
17 |
18 | if [ -z "${PROJECT}" ] || [ -z "${NUM_HOSTS}" ] || [ -z "${TEMPLATE_NAME}" ]; then
19 | echo "Must specify PROJECT, NUM_HOSTS and TEMPLATE_NAME"
20 | exit 1
21 | fi
22 |
23 | SUFFIX=""
24 | if [ -n "$CIRCLECI" ]; then
25 | SUFFIX="-${CIRCLE_BUILD_NUM}-$CIRCLE_NODE_INDEX"
26 | fi
27 |
28 | # Setup authentication
29 | gcloud auth activate-service-account --key-file "$KEY_FILE" 1>/dev/null
30 | gcloud config set project "$PROJECT"
31 |
32 | function vm_names {
33 | local names=
34 | for i in $(seq 1 "$NUM_HOSTS"); do
35 | names=( "host$i$SUFFIX" "${names[@]}" )
36 | done
37 | echo "${names[@]}"
38 | }
39 |
40 | # Delete all vms in this account
41 | function destroy {
42 | local names
43 | names="$(vm_names)"
44 | if [ "$(gcloud compute instances list --zone "$ZONE" -q "$names" | wc -l)" -le 1 ] ; then
45 | return 0
46 | fi
47 | for i in {0..10}; do
48 | # gcloud instances delete can sometimes hang.
49 | case $(set +e; timeout 60s /bin/bash -c "gcloud compute instances delete --zone $ZONE -q $names >/dev/null 2>&1"; echo $?) in
50 | 0)
51 | return 0
52 | ;;
53 | 124)
54 | # 124 means it timed out
55 | break
56 | ;;
57 | *)
58 | return 1
59 | esac
60 | done
61 | }
62 |
63 | function internal_ip {
64 | jq -r ".[] | select(.name == \"$2\") | .networkInterfaces[0].networkIP" "$1"
65 | }
66 |
67 | function external_ip {
68 | jq -r ".[] | select(.name == \"$2\") | .networkInterfaces[0].accessConfigs[0].natIP" "$1"
69 | }
70 |
71 | function try_connect {
72 | for i in {0..10}; do
73 | ssh -t "$1" true && return
74 | sleep 2
75 | done
76 | }
77 |
78 | function install_docker_on {
79 | name=$1
80 | ssh -t "$name" sudo bash -x -s <<EOF
81 | curl -sSL https://get.docker.com/ | sh
86 | echo 'DOCKER_OPTS="-H unix:///var/run/docker.sock -H tcp://0.0.0.0:2375"' >> /etc/default/docker;
87 | service docker restart
88 | EOF
89 | # It seems we need a short delay for docker to start up, so I put this in
90 | # a separate ssh connection. This installs nsenter.
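# (the jpetazzo/nsenter image copies the nsenter binary into the mounted /target,
# i.e. the host's /usr/local/bin)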
91 | ssh -t "$name" sudo docker run --rm -v /usr/local/bin:/target jpetazzo/nsenter 92 | } 93 | 94 | function copy_hosts { 95 | hostname=$1 96 | hosts=$2 97 | ssh -t "$hostname" "sudo -- sh -c \"cat >>/etc/hosts\"" < "$hosts" 98 | } 99 | 100 | # Create new set of VMs 101 | function setup { 102 | destroy 103 | 104 | names=( $(vm_names) ) 105 | gcloud compute instances create "${names[@]}" --image "$TEMPLATE_NAME" --zone "$ZONE" 106 | gcloud compute config-ssh --ssh-key-file "$SSH_KEY_FILE" 107 | sed -i '/UserKnownHostsFile=\/dev\/null/d' ~/.ssh/config 108 | 109 | # build an /etc/hosts file for these vms 110 | hosts=$(mktemp hosts.XXXXXXXXXX) 111 | json=$(mktemp json.XXXXXXXXXX) 112 | gcloud compute instances list --format=json > "$json" 113 | for name in "${names[@]}"; do 114 | echo "$(internal_ip "$json" "$name") $name.$ZONE.$PROJECT" >> "$hosts" 115 | done 116 | 117 | for name in "${names[@]}"; do 118 | hostname="$name.$ZONE.$PROJECT" 119 | 120 | # Add the remote ip to the local /etc/hosts 121 | sudo sed -i "/$hostname/d" /etc/hosts 122 | sudo sh -c "echo \"$(external_ip "$json" "$name") $hostname\" >>/etc/hosts" 123 | try_connect "$hostname" 124 | 125 | copy_hosts "$hostname" "$hosts" & 126 | done 127 | 128 | wait 129 | 130 | rm "$hosts" "$json" 131 | } 132 | 133 | function make_template { 134 | gcloud compute instances create "$TEMPLATE_NAME" --image "$IMAGE" --zone "$ZONE" 135 | gcloud compute config-ssh --ssh-key-file "$SSH_KEY_FILE" 136 | name="$TEMPLATE_NAME.$ZONE.$PROJECT" 137 | try_connect "$name" 138 | install_docker_on "$name" 139 | gcloud -q compute instances delete "$TEMPLATE_NAME" --keep-disks boot --zone "$ZONE" 140 | gcloud compute images create "$TEMPLATE_NAME" --source-disk "$TEMPLATE_NAME" --source-disk-zone "$ZONE" 141 | } 142 | 143 | function hosts { 144 | hosts= 145 | args= 146 | json=$(mktemp json.XXXXXXXXXX) 147 | gcloud compute instances list --format=json > "$json" 148 | for name in $(vm_names); do 149 | hostname="$name.$ZONE.$PROJECT" 150 | hosts=( $hostname "${hosts[@]}" ) 151 | args=( "--add-host=$hostname:$(internal_ip "$json" "$name")" "${args[@]}" ) 152 | done 153 | echo export SSH=\"ssh -l vagrant\" 154 | echo "export HOSTS=\"${hosts[*]}\"" 155 | echo "export ADD_HOST_ARGS=\"${args[*]}\"" 156 | rm "$json" 157 | } 158 | 159 | case "$1" in 160 | setup) 161 | setup 162 | ;; 163 | 164 | hosts) 165 | hosts 166 | ;; 167 | 168 | destroy) 169 | destroy 170 | ;; 171 | 172 | make_template) 173 | # see if template exists 174 | if ! gcloud compute images list | grep "$PROJECT" | grep "$TEMPLATE_NAME"; then 175 | make_template 176 | fi 177 | esac 178 | -------------------------------------------------------------------------------- /tools/integration/run_all.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -ex 4 | 5 | DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" 6 | # shellcheck disable=SC1090 7 | . "$DIR/config.sh" 8 | 9 | whitely echo Sanity checks 10 | if ! bash "$DIR/sanity_check.sh"; then 11 | whitely echo ...failed 12 | exit 1 13 | fi 14 | whitely echo ...ok 15 | 16 | # shellcheck disable=SC2068 17 | TESTS=( ${@:-$(find . 
-name '*_test.sh')} )
18 | RUNNER_ARGS=( )
19 |
20 | # If running on circle, use the scheduler to work out what tests to run
21 | if [ -n "$CIRCLECI" ] && [ -z "$NO_SCHEDULER" ]; then
22 | RUNNER_ARGS=( "${RUNNER_ARGS[@]}" -scheduler )
23 | fi
24 |
25 | # If running on circle or PARALLEL is not empty, run tests in parallel
26 | if [ -n "$CIRCLECI" ] || [ -n "$PARALLEL" ]; then
27 | RUNNER_ARGS=( "${RUNNER_ARGS[@]}" -parallel )
28 | fi
29 |
30 | make -C "${DIR}/../runner"
31 | HOSTS="$HOSTS" "${DIR}/../runner/runner" "${RUNNER_ARGS[@]}" "${TESTS[@]}"
32 |
--------------------------------------------------------------------------------
/tools/integration/sanity_check.sh:
--------------------------------------------------------------------------------
1 | #! /bin/bash
2 | # shellcheck disable=SC1091
3 | . ./config.sh
4 |
5 | set -e
6 |
7 | whitely echo Ping each host from the other
8 | for host in $HOSTS; do
9 | for other in $HOSTS; do
10 | [ "$host" = "$other" ] || run_on "$host" "$PING" "$other"
11 | done
12 | done
13 |
14 | whitely echo Check we can reach docker
15 |
16 | for host in $HOSTS; do
17 | echo
18 | echo "Host Version Info: $host"
19 | echo "====================================="
20 | echo "# docker version"
21 | docker_on "$host" version
22 | echo "# docker info"
23 | docker_on "$host" info
24 | echo "# weave version"
25 | weave_on "$host" version
26 | done
27 |
--------------------------------------------------------------------------------
/tools/lint:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | # This script lints go files for common errors.
3 | #
4 | # It runs gofmt and go vet, and optionally golint and
5 | # gocyclo, if they are installed.
6 | #
7 | # With no arguments, it lints the current files staged
8 | # for git commit. Or you can pass it explicit filenames
9 | # (or directories) and it will lint them.
10 | #
11 | # To use this script automatically, run:
12 | # ln -s ../../bin/lint .git/hooks/pre-commit
13 |
14 | set -e
15 |
16 | IGNORE_LINT_COMMENT=
17 | IGNORE_TEST_PACKAGES=
18 | IGNORE_SPELLINGS=
19 | while true; do
20 | case "$1" in
21 | -nocomment)
22 | IGNORE_LINT_COMMENT=1
23 | shift 1
24 | ;;
25 | -notestpackage)
26 | IGNORE_TEST_PACKAGES=1
27 | shift 1
28 | ;;
29 | -ignorespelling)
30 | IGNORE_SPELLINGS="$2,$IGNORE_SPELLINGS"
31 | shift 2
32 | ;;
33 | *)
34 | break
35 | esac
36 | done
37 |
38 |
39 | function spell_check {
40 | filename="$1"
41 | local lint_result=0
42 |
43 | # we don't want to spell check tarballs, binaries, Makefile and json files
44 | if file "$filename" | grep executable >/dev/null 2>&1; then
45 | return $lint_result
46 | fi
47 | if [[ $filename == *".tar" || $filename == *".gz" || $filename == *".json" || $(basename "$filename") == "Makefile" ]]; then
48 | return $lint_result
49 | fi
50 |
51 | # misspell is completely optional. If you don't like it
52 | # don't have it installed.
53 | if ! type misspell >/dev/null 2>&1; then
54 | return $lint_result
55 | fi
56 |
57 | if !
misspell -error -i "$IGNORE_SPELLINGS" "${filename}"; then 58 | lint_result=1 59 | fi 60 | 61 | return $lint_result 62 | } 63 | 64 | function test_mismatch { 65 | filename="$1" 66 | package=$(grep '^package ' "$filename" | awk '{print $2}') 67 | local lint_result=0 68 | 69 | if [[ $package == "main" ]]; then 70 | return # in package main, all bets are off 71 | fi 72 | 73 | if [[ $filename == *"_internal_test.go" ]]; then 74 | if [[ $package == *"_test" ]]; then 75 | lint_result=1 76 | echo "${filename}: should not be part of a _test package" 77 | fi 78 | else 79 | if [[ ! $package == *"_test" ]]; then 80 | lint_result=1 81 | echo "${filename}: should be part of a _test package" 82 | fi 83 | fi 84 | 85 | return $lint_result 86 | } 87 | 88 | function lint_go { 89 | filename="$1" 90 | local lint_result=0 91 | 92 | if [ -n "$(gofmt -s -l "${filename}")" ]; then 93 | lint_result=1 94 | echo "${filename}: run gofmt -s -w ${filename}!" 95 | fi 96 | 97 | go tool vet "${filename}" || lint_result=$? 98 | 99 | # golint is completely optional. If you don't like it 100 | # don't have it installed. 101 | if type golint >/dev/null 2>&1; then 102 | # golint doesn't set an exit code it seems 103 | if [ -z "$IGNORE_LINT_COMMENT" ]; then 104 | lintoutput=$(golint "${filename}") 105 | else 106 | lintoutput=$(golint "${filename}" | grep -vE 'comment|dot imports|ALL_CAPS') 107 | fi 108 | if [ -n "$lintoutput" ]; then 109 | lint_result=1 110 | echo "$lintoutput" 111 | fi 112 | fi 113 | 114 | # gocyclo is completely optional. If you don't like it 115 | # don't have it installed. Also never blocks a commit, 116 | # it just warns. 117 | if type gocyclo >/dev/null 2>&1; then 118 | gocyclo -over 25 "${filename}" | while read -r line; do 119 | echo "${filename}": higher than 25 cyclomatic complexity - "${line}" 120 | done 121 | fi 122 | 123 | return $lint_result 124 | } 125 | 126 | function lint { 127 | filename="$1" 128 | ext="${filename##*\.}" 129 | local lint_result=0 130 | 131 | # Don't lint deleted files 132 | if [ ! -f "$filename" ]; then 133 | return 134 | fi 135 | 136 | # Don't lint this script or static.go 137 | case "$(basename "${filename}")" in 138 | lint) return;; 139 | static.go) return;; 140 | coverage.html) return;; 141 | esac 142 | 143 | case "$ext" in 144 | go) lint_go "${filename}" || lint_result=1 145 | ;; 146 | esac 147 | 148 | if [ -z "$IGNORE_TEST_PACKAGES" ]; then 149 | if [[ "$filename" == *"_test.go" ]]; then 150 | test_mismatch "${filename}" || lint_result=1 151 | fi 152 | fi 153 | 154 | spell_check "${filename}" || lint_result=1 155 | 156 | return $lint_result 157 | } 158 | 159 | function lint_files { 160 | local lint_result=0 161 | while read -r filename; do 162 | lint "${filename}" || lint_result=1 163 | done 164 | exit $lint_result 165 | } 166 | 167 | function list_files { 168 | if [ $# -gt 0 ]; then 169 | git ls-files --exclude-standard | grep -vE '(^|/)vendor/' 170 | else 171 | git diff --cached --name-only 172 | fi 173 | } 174 | 175 | list_files "$@" | lint_files 176 | -------------------------------------------------------------------------------- /tools/publish-site: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -e 4 | set -o pipefail 5 | 6 | : "${PRODUCT:=}" 7 | 8 | fatal() { 9 | echo "$@" >&2 10 | exit 1 11 | } 12 | 13 | if [ ! -d .git ] ; then 14 | fatal "Current directory is not a git clone" 15 | fi 16 | 17 | if [ -z "${PRODUCT}" ]; then 18 | fatal "Must specify PRODUCT" 19 | fi 20 | 21 | if ! 
BRANCH=$(git symbolic-ref --short HEAD) || [ -z "$BRANCH" ] ; then
22 | fatal "Could not determine branch"
23 | fi
24 |
25 | case "$BRANCH" in
26 | issues/*)
27 | VERSION="${BRANCH#issues/}"
28 | TAGS="$VERSION"
29 | ;;
30 | *)
31 | if echo "$BRANCH" | grep -qE '^[0-9]+\.[0-9]+' ; then
32 | DESCRIBE=$(git describe --match 'v*')
33 | if ! VERSION=$(echo "$DESCRIBE" | grep -oP '(?<=^v)[0-9]+\.[0-9]+\.[0-9]+') ; then
34 | fatal "Could not infer latest $BRANCH version from $DESCRIBE"
35 | fi
36 | TAGS="$VERSION latest"
37 | else
38 | VERSION="$BRANCH"
39 | TAGS="$VERSION"
40 | fi
41 | ;;
42 | esac
43 |
44 | for TAG in $TAGS ; do
45 | echo ">>> Publishing $PRODUCT $VERSION to $1/docs/$PRODUCT/$TAG"
46 | wordepress \
47 | --url "$1" --user "$2" --password "$3" \
48 | --product "$PRODUCT" --version "$VERSION" --tag "$TAG" \
49 | publish site
50 | done
51 |
--------------------------------------------------------------------------------
/tools/rebuild-image:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | # Rebuild a cached docker image if the input files have changed.
3 | # Usage: ./rebuild-image <image name> <image dir> <input files...>
4 |
5 | set -eux
6 |
7 | IMAGENAME=$1
8 | # shellcheck disable=SC2001
9 | SAVEDNAME=$(echo "$IMAGENAME" | sed "s/[\/\-]/\./g")
10 | IMAGEDIR=$2
11 | shift 2
12 |
13 | INPUTFILES=( "$@" )
14 | CACHEDIR=$HOME/docker/
15 |
16 | # Rebuild the image
17 | rebuild() {
18 | mkdir -p "$CACHEDIR"
19 | rm "$CACHEDIR/$SAVEDNAME"* || true
20 | docker build -t "$IMAGENAME" "$IMAGEDIR"
21 | docker save "$IMAGENAME:latest" | gzip - > "$CACHEDIR/$SAVEDNAME-$CIRCLE_SHA1.gz"
22 | }
23 |
24 | # Get the revision the cached image was built at
25 | cached_image_rev() {
26 | find "$CACHEDIR" -name "$SAVEDNAME-*" -type f | sed -n 's/^[^\-]*\-\([a-z0-9]*\).gz$/\1/p'
27 | }
28 |
29 | # Have there been any revisions between $1 and $2?
30 | has_changes() {
31 | local rev1=$1
32 | local rev2=$2
33 | local changes
34 | changes=$(git diff --oneline "$rev1..$rev2" -- "${INPUTFILES[@]}" | wc -l)
35 | [ "$changes" -gt 0 ]
36 | }
37 |
38 | commit_timestamp() {
39 | local rev=$1
40 | git show -s --format=%ct "$rev"
41 | }
42 |
43 | # Is the SHA1 actually present in the repo?
44 | # It could be it isn't, e.g. after a force push
45 | is_valid_commit() {
46 | local rev=$1
47 | git rev-parse --quiet --verify "$rev^{commit}" > /dev/null
48 | }
49 |
50 | cached_revision=$(cached_image_rev)
51 | if [ -z "$cached_revision" ]; then
52 | echo ">>> No cached image found; rebuilding"
53 | rebuild
54 | exit 0
55 | fi
56 |
57 | if !
is_valid_commit "$cached_revision"; then 58 | echo ">>> Git commit of cached image not found in repo; rebuilding" 59 | rebuild 60 | exit 0 61 | fi 62 | 63 | echo ">>> Found cached image rev $cached_revision" 64 | if has_changes "$cached_revision" "$CIRCLE_SHA1" ; then 65 | echo ">>> Found changes, rebuilding" 66 | rebuild 67 | exit 0 68 | fi 69 | 70 | IMAGE_TIMEOUT="$(( 3 * 24 * 60 * 60 ))" 71 | if [ "$(commit_timestamp "$cached_revision")" -lt "${IMAGE_TIMEOUT}" ]; then 72 | echo ">>> Image is more the 24hrs old; rebuilding" 73 | rebuild 74 | exit 0 75 | fi 76 | 77 | # we didn't rebuild; import cached version 78 | echo ">>> No changes found, importing cached image" 79 | zcat "$CACHEDIR/$SAVEDNAME-$cached_revision.gz" | docker load 80 | -------------------------------------------------------------------------------- /tools/runner/Makefile: -------------------------------------------------------------------------------- 1 | .PHONY: all clean 2 | 3 | all: runner 4 | 5 | runner: *.go 6 | go get -tags netgo ./$(@D) 7 | go build -ldflags "-extldflags \"-static\" -linkmode=external" -tags netgo -o $@ ./$(@D) 8 | 9 | clean: 10 | rm -rf runner 11 | go clean ./... 12 | -------------------------------------------------------------------------------- /tools/runner/runner.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "bytes" 5 | "encoding/json" 6 | "fmt" 7 | "net/http" 8 | "net/url" 9 | "os" 10 | "os/exec" 11 | "sort" 12 | "strconv" 13 | "strings" 14 | "sync" 15 | "time" 16 | 17 | "github.com/mgutz/ansi" 18 | "github.com/weaveworks/docker/pkg/mflag" 19 | ) 20 | 21 | const ( 22 | defaultSchedulerHost = "positive-cocoa-90213.appspot.com" 23 | jsonContentType = "application/json" 24 | ) 25 | 26 | var ( 27 | start = ansi.ColorCode("black+ub") 28 | fail = ansi.ColorCode("red+b") 29 | succ = ansi.ColorCode("green+b") 30 | reset = ansi.ColorCode("reset") 31 | 32 | schedulerHost = defaultSchedulerHost 33 | useScheduler = false 34 | runParallel = false 35 | verbose = false 36 | timeout = 180 // In seconds. Three minutes ought to be enough for any test 37 | 38 | consoleLock = sync.Mutex{} 39 | ) 40 | 41 | type test struct { 42 | name string 43 | hosts int 44 | } 45 | 46 | type schedule struct { 47 | Tests []string `json:"tests"` 48 | } 49 | 50 | type result struct { 51 | test 52 | errored bool 53 | hosts []string 54 | } 55 | 56 | type tests []test 57 | 58 | func (ts tests) Len() int { return len(ts) } 59 | func (ts tests) Swap(i, j int) { ts[i], ts[j] = ts[j], ts[i] } 60 | func (ts tests) Less(i, j int) bool { 61 | if ts[i].hosts != ts[j].hosts { 62 | return ts[i].hosts < ts[j].hosts 63 | } 64 | return ts[i].name < ts[j].name 65 | } 66 | 67 | func (ts *tests) pick(available int) (test, bool) { 68 | // pick the first test that fits in the available hosts 69 | for i, test := range *ts { 70 | if test.hosts <= available { 71 | *ts = append((*ts)[:i], (*ts)[i+1:]...) 
72 | return test, true 73 | } 74 | } 75 | 76 | return test{}, false 77 | } 78 | 79 | func (t test) run(hosts []string) bool { 80 | consoleLock.Lock() 81 | fmt.Printf("%s>>> Running %s on %s%s\n", start, t.name, hosts, reset) 82 | consoleLock.Unlock() 83 | 84 | var out bytes.Buffer 85 | 86 | cmd := exec.Command(t.name) 87 | cmd.Env = os.Environ() 88 | cmd.Stdout = &out 89 | cmd.Stderr = &out 90 | 91 | // replace HOSTS in env 92 | for i, env := range cmd.Env { 93 | if strings.HasPrefix(env, "HOSTS") { 94 | cmd.Env[i] = fmt.Sprintf("HOSTS=%s", strings.Join(hosts, " ")) 95 | break 96 | } 97 | } 98 | 99 | start := time.Now() 100 | var err error 101 | 102 | c := make(chan error, 1) 103 | go func() { c <- cmd.Run() }() 104 | select { 105 | case err = <-c: 106 | case <-time.After(time.Duration(timeout) * time.Second): 107 | err = fmt.Errorf("timed out") 108 | } 109 | 110 | duration := float64(time.Now().Sub(start)) / float64(time.Second) 111 | 112 | consoleLock.Lock() 113 | if err != nil { 114 | fmt.Printf("%s>>> Test %s finished after %0.1f secs with error: %v%s\n", fail, t.name, duration, err, reset) 115 | } else { 116 | fmt.Printf("%s>>> Test %s finished with success after %0.1f secs%s\n", succ, t.name, duration, reset) 117 | } 118 | if err != nil || verbose { 119 | fmt.Print(out.String()) 120 | fmt.Println() 121 | } 122 | consoleLock.Unlock() 123 | 124 | if err != nil && useScheduler { 125 | updateScheduler(t.name, duration) 126 | } 127 | 128 | return err != nil 129 | } 130 | 131 | func updateScheduler(test string, duration float64) { 132 | req := &http.Request{ 133 | Method: "POST", 134 | Host: schedulerHost, 135 | URL: &url.URL{ 136 | Opaque: fmt.Sprintf("/record/%s/%0.2f", url.QueryEscape(test), duration), 137 | Scheme: "http", 138 | Host: schedulerHost, 139 | }, 140 | Close: true, 141 | } 142 | if resp, err := http.DefaultClient.Do(req); err != nil { 143 | fmt.Printf("Error updating scheduler: %v\n", err) 144 | } else { 145 | resp.Body.Close() 146 | } 147 | } 148 | 149 | func getSchedule(tests []string) ([]string, error) { 150 | var ( 151 | project = os.Getenv("CIRCLE_PROJECT_REPONAME") 152 | buildNum = os.Getenv("CIRCLE_BUILD_NUM") 153 | testRun = project + "-integration-" + buildNum 154 | shardCount = os.Getenv("CIRCLE_NODE_TOTAL") 155 | shardID = os.Getenv("CIRCLE_NODE_INDEX") 156 | requestBody = &bytes.Buffer{} 157 | ) 158 | if err := json.NewEncoder(requestBody).Encode(schedule{tests}); err != nil { 159 | return []string{}, err 160 | } 161 | url := fmt.Sprintf("http://%s/schedule/%s/%s/%s", schedulerHost, testRun, shardCount, shardID) 162 | resp, err := http.Post(url, jsonContentType, requestBody) 163 | if err != nil { 164 | return []string{}, err 165 | } 166 | var sched schedule 167 | if err := json.NewDecoder(resp.Body).Decode(&sched); err != nil { 168 | return []string{}, err 169 | } 170 | return sched.Tests, nil 171 | } 172 | 173 | func getTests(testNames []string) (tests, error) { 174 | var err error 175 | if useScheduler { 176 | testNames, err = getSchedule(testNames) 177 | if err != nil { 178 | return tests{}, err 179 | } 180 | } 181 | tests := tests{} 182 | for _, name := range testNames { 183 | parts := strings.Split(strings.TrimSuffix(name, "_test.sh"), "_") 184 | numHosts, err := strconv.Atoi(parts[len(parts)-1]) 185 | if err != nil { 186 | numHosts = 1 187 | } 188 | tests = append(tests, test{name, numHosts}) 189 | fmt.Printf("Test %s needs %d hosts\n", name, numHosts) 190 | } 191 | return tests, nil 192 | } 193 | 194 | func summary(tests, failed tests) { 195 | if 
len(failed) > 0 { 196 | fmt.Printf("%s>>> Ran %d tests, %d failed%s\n", fail, len(tests), len(failed), reset) 197 | for _, test := range failed { 198 | fmt.Printf("%s>>> Fail %s%s\n", fail, test.name, reset) 199 | } 200 | } else { 201 | fmt.Printf("%s>>> Ran %d tests, all succeeded%s\n", succ, len(tests), reset) 202 | } 203 | } 204 | 205 | func parallel(ts tests, hosts []string) bool { 206 | testsCopy := ts 207 | sort.Sort(sort.Reverse(ts)) 208 | resultsChan := make(chan result) 209 | outstanding := 0 210 | failed := tests{} 211 | for len(ts) > 0 || outstanding > 0 { 212 | // While we have some free hosts, try and schedule 213 | // a test on them 214 | for len(hosts) > 0 { 215 | test, ok := ts.pick(len(hosts)) 216 | if !ok { 217 | break 218 | } 219 | testHosts := hosts[:test.hosts] 220 | hosts = hosts[test.hosts:] 221 | 222 | go func() { 223 | errored := test.run(testHosts) 224 | resultsChan <- result{test, errored, testHosts} 225 | }() 226 | outstanding++ 227 | } 228 | 229 | // Otherwise, wait for the test to finish and return 230 | // the hosts to the pool 231 | result := <-resultsChan 232 | hosts = append(hosts, result.hosts...) 233 | outstanding-- 234 | if result.errored { 235 | failed = append(failed, result.test) 236 | } 237 | } 238 | summary(testsCopy, failed) 239 | return len(failed) > 0 240 | } 241 | 242 | func sequential(ts tests, hosts []string) bool { 243 | failed := tests{} 244 | for _, test := range ts { 245 | if test.run(hosts) { 246 | failed = append(failed, test) 247 | } 248 | } 249 | summary(ts, failed) 250 | return len(failed) > 0 251 | } 252 | 253 | func main() { 254 | mflag.BoolVar(&useScheduler, []string{"scheduler"}, false, "Use scheduler to distribute tests across shards") 255 | mflag.BoolVar(&runParallel, []string{"parallel"}, false, "Run tests in parallel on hosts where possible") 256 | mflag.BoolVar(&verbose, []string{"v"}, false, "Print output from all tests (Also enabled via DEBUG=1)") 257 | mflag.StringVar(&schedulerHost, []string{"scheduler-host"}, defaultSchedulerHost, "Hostname of scheduler.") 258 | mflag.IntVar(&timeout, []string{"timeout"}, 180, "Max time to run one test for, in seconds") 259 | mflag.Parse() 260 | 261 | if len(os.Getenv("DEBUG")) > 0 { 262 | verbose = true 263 | } 264 | 265 | testArgs := mflag.Args() 266 | tests, err := getTests(testArgs) 267 | if err != nil { 268 | fmt.Printf("Error parsing tests: %v (%v)\n", err, testArgs) 269 | os.Exit(1) 270 | } 271 | 272 | hosts := strings.Fields(os.Getenv("HOSTS")) 273 | maxHosts := len(hosts) 274 | if maxHosts == 0 { 275 | fmt.Print("No HOSTS specified.\n") 276 | os.Exit(1) 277 | } 278 | 279 | var errored bool 280 | if runParallel { 281 | errored = parallel(tests, hosts) 282 | } else { 283 | errored = sequential(tests, hosts) 284 | } 285 | 286 | if errored { 287 | os.Exit(1) 288 | } 289 | } 290 | -------------------------------------------------------------------------------- /tools/sched: -------------------------------------------------------------------------------- 1 | #!/usr/bin/python 2 | import sys, string, json, urllib 3 | import requests 4 | import optparse 5 | 6 | def test_time(target, test_name, runtime): 7 | r = requests.post(target + "/record/%s/%f" % (urllib.quote(test_name, safe=""), runtime)) 8 | print r.text 9 | assert r.status_code == 204 10 | 11 | def test_sched(target, test_run, shard_count, shard_id): 12 | tests = json.dumps({'tests': string.split(sys.stdin.read())}) 13 | r = requests.post(target + "/schedule/%s/%d/%d" % (test_run, shard_count, shard_id), data=tests) 14 | 
assert r.status_code == 200
15 | result = r.json()
16 | for test in sorted(result['tests']):
17 | print test
18 |
19 | def usage():
20 | print "%s (--target=...) <command> <args..>" % sys.argv[0]
21 | print " time <test name> <run time>"
22 | print " sched <test run> <shard count> <shard id>"
23 |
24 | def main():
25 | parser = optparse.OptionParser()
26 | parser.add_option('--target', default="http://positive-cocoa-90213.appspot.com")
27 | options, args = parser.parse_args()
28 | if len(args) < 3:
29 | usage()
30 | sys.exit(1)
31 |
32 | if args[0] == "time":
33 | test_time(options.target, args[1], float(args[2]))
34 | elif args[0] == "sched":
35 | test_sched(options.target, args[1], int(args[2]), int(args[3]))
36 | else:
37 | usage()
38 |
39 | if __name__ == '__main__':
40 | main()
41 |
--------------------------------------------------------------------------------
/tools/scheduler/.gitignore:
--------------------------------------------------------------------------------
1 | lib
2 |
--------------------------------------------------------------------------------
/tools/scheduler/README.md:
--------------------------------------------------------------------------------
1 | To upload a newer version:
2 |
3 | ```
4 | pip install -r requirements.txt -t lib
5 | appcfg.py update .
6 | ```
7 |
--------------------------------------------------------------------------------
/tools/scheduler/app.yaml:
--------------------------------------------------------------------------------
1 | application: positive-cocoa-90213
2 | version: 1
3 | runtime: python27
4 | api_version: 1
5 | threadsafe: true
6 |
7 | handlers:
8 | - url: .*
9 | script: main.app
10 |
11 | libraries:
12 | - name: webapp2
13 | version: latest
14 | - name: ssl
15 | version: latest
16 |
--------------------------------------------------------------------------------
/tools/scheduler/appengine_config.py:
--------------------------------------------------------------------------------
1 | from google.appengine.ext import vendor
2 |
3 | vendor.add('lib')
4 |
--------------------------------------------------------------------------------
/tools/scheduler/cron.yaml:
--------------------------------------------------------------------------------
1 | cron:
2 | - description: periodic gc
3 | url: /tasks/gc
4 | schedule: every 5 minutes
5 |
--------------------------------------------------------------------------------
/tools/scheduler/main.py:
--------------------------------------------------------------------------------
1 | import collections
2 | import json
3 | import logging
4 | import operator
5 | import re
6 |
7 | import flask
8 | from oauth2client.client import GoogleCredentials
9 | from googleapiclient import discovery
10 |
11 | from google.appengine.api import urlfetch
12 | from google.appengine.ext import ndb
13 |
14 | app = flask.Flask('scheduler')
15 | app.debug = True
16 |
17 | # We use exponential moving average to record
18 | # test run times. Higher alpha discounts historic
19 | # observations faster.
20 | alpha = 0.3
21 |
22 | class Test(ndb.Model):
23 | total_run_time = ndb.FloatProperty(default=0.)
# Not total, but an EWMA
24 | total_runs = ndb.IntegerProperty(default=0)
25 |
26 | def parallelism(self):
27 | name = self.key.string_id()
28 | m = re.search('(\d+)_test.sh$', name)
29 | if m is None:
30 | return 1
31 | else:
32 | return int(m.group(1))
33 |
34 | def cost(self):
35 | p = self.parallelism()
36 | logging.info("Test %s has parallelism %d and avg run time %s", self.key.string_id(), p, self.total_run_time)
37 | return self.parallelism() * self.total_run_time
38 |
39 | class Schedule(ndb.Model):
40 | shards = ndb.JsonProperty()
41 |
42 | @app.route('/record/<test_name>/<runtime>', methods=['POST'])
43 | @ndb.transactional
44 | def record(test_name, runtime):
45 | test = Test.get_by_id(test_name)
46 | if test is None:
47 | test = Test(id=test_name)
48 | test.total_run_time = (test.total_run_time * (1-alpha)) + (float(runtime) * alpha)
49 | test.total_runs += 1
50 | test.put()
51 | return ('', 204)
52 |
53 | @app.route('/schedule/<test_run>/<int:shard_count>/<int:shard>', methods=['POST'])
54 | def schedule(test_run, shard_count, shard):
55 | # read tests from body
56 | test_names = flask.request.get_json(force=True)['tests']
57 |
58 | # first see if we have a schedule already
59 | schedule_id = "%s-%d" % (test_run, shard_count)
60 | schedule = Schedule.get_by_id(schedule_id)
61 | if schedule is not None:
62 | return flask.json.jsonify(tests=schedule.shards[str(shard)])
63 |
64 | # if not, do simple greedy algorithm
65 | test_times = ndb.get_multi(ndb.Key(Test, test_name) for test_name in test_names)
66 | def avg(test):
67 | if test is not None:
68 | return test.cost()
69 | return 1
70 | test_times = [(test_name, avg(test)) for test_name, test in zip(test_names, test_times)]
71 | test_times_dict = dict(test_times)
72 | test_times.sort(key=operator.itemgetter(1))
73 |
74 | shards = {i: [] for i in xrange(shard_count)}
75 | while test_times:
76 | test_name, time = test_times.pop()
77 |
78 | # find shortest shard and put it in that
79 | s, _ = min(((i, sum(test_times_dict[t] for t in shards[i]))
80 | for i in xrange(shard_count)), key=operator.itemgetter(1))
81 |
82 | shards[s].append(test_name)
83 |
84 | # atomically insert or retrieve existing schedule
85 | schedule = Schedule.get_or_insert(schedule_id, shards=shards)
86 | return flask.json.jsonify(tests=schedule.shards[str(shard)])
87 |
88 | NAME_RE = re.compile(r'^host(?P<host>\d+)-(?P<build>\d+)-(?P<shard>\d+)$')
89 |
90 | PROJECTS = [
91 | ('weaveworks/weave', 'positive-cocoa-90213', 'us-central1-a'),
92 | ('weaveworks/scope', 'scope-integration-tests', 'us-central1-a'),
93 | ]
94 |
95 | @app.route('/tasks/gc')
96 | def gc():
97 | # Get list of running VMs, pick build id out of VM name
98 | credentials = GoogleCredentials.get_application_default()
99 | compute = discovery.build('compute', 'v1', credentials=credentials)
100 |
101 | for repo, project, zone in PROJECTS:
102 | gc_project(compute, repo, project, zone)
103 |
104 | return "Done"
105 |
106 | def gc_project(compute, repo, project, zone):
107 | logging.info("GCing %s, %s, %s", repo, project, zone)
108 | instances = compute.instances().list(project=project, zone=zone).execute()
109 | if 'items' not in instances:
110 | return
111 |
112 | host_by_build = collections.defaultdict(list)
113 | for instance in instances['items']:
114 | matches = NAME_RE.match(instance['name'])
115 | if matches is None:
116 | continue
117 | host_by_build[int(matches.group('build'))].append(instance['name'])
118 | logging.info("Running VMs by build: %r", host_by_build)
119 |
120 | # Get list of builds, filter down to running builds
121 | result =
urlfetch.fetch('https://circleci.com/api/v1/project/%s' % repo,
122 | headers={'Accept': 'application/json'})
123 | assert result.status_code == 200
124 | builds = json.loads(result.content)
125 | running = {build['build_num'] for build in builds if not build.get('stop_time')}
126 | logging.info("Running builds: %r", running)
127 |
128 | # Stop VMs for builds that aren't running
129 | stopped = []
130 | for build, names in host_by_build.iteritems():
131 | if build in running:
132 | continue
133 | for name in names:
134 | stopped.append(name)
135 | logging.info("Stopping VM %s", name)
136 | compute.instances().delete(project=project, zone=zone, instance=name).execute()
137 |
138 | return
139 |
--------------------------------------------------------------------------------
/tools/scheduler/requirements.txt:
--------------------------------------------------------------------------------
1 | flask
2 | google-api-python-client
3 |
--------------------------------------------------------------------------------
/tools/shell-lint:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 | #
3 | # Lint all shell files in given directories with `shellcheck`.
4 | #
5 | # e.g.
6 | # $ shell-lint infra k8s
7 | #
8 | # Depends on:
9 | # - shellcheck
10 | # - files-with-type
11 | # - file >= 5.22
12 |
13 | "$(dirname "${BASH_SOURCE[0]}")/files-with-type" text/x-shellscript "$@" | xargs --no-run-if-empty shellcheck
14 |
--------------------------------------------------------------------------------
/tools/socks/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM gliderlabs/alpine
2 | MAINTAINER Weaveworks Inc
3 | WORKDIR /
4 | COPY proxy /
5 | EXPOSE 8000
6 | EXPOSE 8080
7 | ENTRYPOINT ["/proxy"]
8 |
--------------------------------------------------------------------------------
/tools/socks/Makefile:
--------------------------------------------------------------------------------
1 | .PHONY: all clean
2 |
3 | IMAGE_TAR=image.tar
4 | IMAGE_NAME=weaveworks/socksproxy
5 | PROXY_EXE=proxy
6 | NETGO_CHECK=@strings $@ | grep cgo_stub\\\.go >/dev/null || { \
7 | rm $@; \
8 | echo "\nYour go standard library was built without the 'netgo' build tag."; \
9 | echo "To fix that, run"; \
10 | echo " sudo go clean -i net"; \
11 | echo " sudo go install -tags netgo std"; \
12 | false; \
13 | }
14 |
15 | all: $(IMAGE_TAR)
16 |
17 | $(IMAGE_TAR): Dockerfile $(PROXY_EXE)
18 | docker build -t $(IMAGE_NAME) .
19 | docker save $(IMAGE_NAME):latest > $@
20 |
21 | $(PROXY_EXE): *.go
22 | go get -tags netgo ./$(@D)
23 | go build -ldflags "-extldflags \"-static\" -linkmode=external" -tags netgo -o $@ ./$(@D)
24 | $(NETGO_CHECK)
25 |
26 | clean:
27 | -docker rmi $(IMAGE_NAME)
28 | rm -rf $(PROXY_EXE) $(IMAGE_TAR)
29 | go clean ./...
30 |
--------------------------------------------------------------------------------
/tools/socks/README.md:
--------------------------------------------------------------------------------
1 | # SOCKS Proxy
2 |
3 | The challenge: you’ve built and deployed your microservices-based
4 | application on a Weave network, running on a set of VMs on EC2. Many
5 | of the services’ public APIs are reachable from the internet via an
6 | Nginx-based reverse proxy, but some of the services also expose
7 | private monitoring and management endpoints via embedded HTTP servers.
8 | How do you securely get access to these from your laptop, without
9 | exposing them to the world?
10 | 
11 | One method we’ve started using at Weaveworks is a ’90s technology: a
12 | SOCKS proxy combined with a PAC script. It’s relatively
13 | straightforward: you SSH into any of the VMs participating in the
14 | Weave network, start the SOCKS proxy in a container on the Weave
15 | network, and SSH port-forward a few local ports to the proxy. All
16 | that’s left is to configure your browser to use the proxy, and
17 | voilà: you can now access your Docker containers, via the Weave
18 | network (and with all the magic of weavedns), from your laptop’s
19 | browser!
20 | 
21 | It is perhaps worth noting that there is nothing Weave-specific about
22 | this approach - it should work with any SDN or private network.
23 | 
24 | A quick example:
25 | 
26 | ```
27 | vm1$ weave launch
28 | vm1$ eval $(weave env)
29 | vm1$ docker run -d --name nginx nginx
30 | ```
31 | 
32 | And on your laptop:
33 | 
34 | ```
35 | laptop$ git clone https://github.com/weaveworks/tools
36 | laptop$ cd tools/socks
37 | laptop$ ./connect.sh vm1
38 | Starting proxy container...
39 | Please configure your browser for proxy
40 | http://localhost:8080/proxy.pac
41 | ```
42 | 
43 | To configure your Mac to use the proxy:
44 | 
45 | 1. Open System Preferences
46 | 2. Select Network
47 | 3. Click the 'Advanced' button
48 | 4. Select the Proxies tab
49 | 5. Tick the 'Automatic Proxy Configuration' checkbox
50 | 6. Enter 'http://localhost:8080/proxy.pac' in the URL box
51 | 7. Remove `*.local` from the 'Bypass proxy settings for these Hosts & Domains' list
52 | 
53 | Now point your browser at http://nginx.weave.local/
--------------------------------------------------------------------------------
/tools/socks/connect.sh:
--------------------------------------------------------------------------------
 1 | #!/bin/bash
 2 | 
 3 | set -eu
 4 | 
 5 | if [ $# -ne 1 ]; then
 6 |     echo "Usage: $0 <host>"
 7 |     exit 1
 8 | fi
 9 | 
10 | HOST=$1
11 | 
12 | echo "Starting proxy container..."
13 | PROXY_CONTAINER=$(ssh "$HOST" weave run -d weaveworks/socksproxy)
14 | 
15 | function finish {
16 |     echo "Removing proxy container..."
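    # Note: the SC2029 suppressions below are deliberate. SC2029 warns that
    # unescaped variables in an ssh command expand on the client side; here
    # that is exactly what we want, since "$PROXY_CONTAINER" and "$PROXY_IP"
    # only exist locally.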
17 | # shellcheck disable=SC2029 18 | ssh "$HOST" docker rm -f "$PROXY_CONTAINER" 19 | } 20 | trap finish EXIT 21 | 22 | # shellcheck disable=SC2029 23 | PROXY_IP=$(ssh "$HOST" -- "docker inspect --format='{{.NetworkSettings.IPAddress}}' $PROXY_CONTAINER") 24 | echo 'Please configure your browser for proxy http://localhost:8080/proxy.pac' 25 | # shellcheck disable=SC2029 26 | ssh "-L8000:$PROXY_IP:8000" "-L8080:$PROXY_IP:8080" "$HOST" docker attach "$PROXY_CONTAINER" 27 | -------------------------------------------------------------------------------- /tools/socks/main.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "fmt" 5 | "net" 6 | "net/http" 7 | "os" 8 | "strings" 9 | "text/template" 10 | 11 | socks5 "github.com/armon/go-socks5" 12 | "github.com/weaveworks/docker/pkg/mflag" 13 | "github.com/weaveworks/weave/common/mflagext" 14 | "golang.org/x/net/context" 15 | ) 16 | 17 | type pacFileParameters struct { 18 | HostMatch string 19 | Aliases map[string]string 20 | } 21 | 22 | const ( 23 | pacfile = ` 24 | function FindProxyForURL(url, host) { 25 | if(shExpMatch(host, "{{.HostMatch}}")) { 26 | return "SOCKS5 localhost:8000"; 27 | } 28 | {{range $key, $value := .Aliases}} 29 | if (host == "{{$key}}") { 30 | return "SOCKS5 localhost:8000"; 31 | } 32 | {{end}} 33 | return "DIRECT"; 34 | } 35 | ` 36 | ) 37 | 38 | func main() { 39 | var ( 40 | as []string 41 | hostMatch string 42 | ) 43 | mflagext.ListVar(&as, []string{"a", "-alias"}, []string{}, "Specify hostname aliases in the form alias:hostname. Can be repeated.") 44 | mflag.StringVar(&hostMatch, []string{"h", "-host-match"}, "*.weave.local", "Specify main host shExpMatch expression in pacfile") 45 | mflag.Parse() 46 | 47 | var aliases = map[string]string{} 48 | for _, a := range as { 49 | parts := strings.SplitN(a, ":", 2) 50 | if len(parts) != 2 { 51 | fmt.Printf("'%s' is not a valid alias.\n", a) 52 | mflag.Usage() 53 | os.Exit(1) 54 | } 55 | aliases[parts[0]] = parts[1] 56 | } 57 | 58 | go socksProxy(aliases) 59 | 60 | t := template.Must(template.New("pacfile").Parse(pacfile)) 61 | http.HandleFunc("/proxy.pac", func(w http.ResponseWriter, r *http.Request) { 62 | w.Header().Set("Content-Type", "application/x-ns-proxy-autoconfig") 63 | t.Execute(w, pacFileParameters{hostMatch, aliases}) 64 | }) 65 | 66 | if err := http.ListenAndServe(":8080", nil); err != nil { 67 | panic(err) 68 | } 69 | } 70 | 71 | type aliasingResolver struct { 72 | aliases map[string]string 73 | socks5.NameResolver 74 | } 75 | 76 | func (r aliasingResolver) Resolve(ctx context.Context, name string) (context.Context, net.IP, error) { 77 | if alias, ok := r.aliases[name]; ok { 78 | return r.NameResolver.Resolve(ctx, alias) 79 | } 80 | return r.NameResolver.Resolve(ctx, name) 81 | } 82 | 83 | func socksProxy(aliases map[string]string) { 84 | conf := &socks5.Config{ 85 | Resolver: aliasingResolver{ 86 | aliases: aliases, 87 | NameResolver: socks5.DNSResolver{}, 88 | }, 89 | } 90 | server, err := socks5.New(conf) 91 | if err != nil { 92 | panic(err) 93 | } 94 | if err := server.ListenAndServe("tcp", ":8000"); err != nil { 95 | panic(err) 96 | } 97 | } 98 | -------------------------------------------------------------------------------- /tools/test: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -e 4 | 5 | DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" 6 | GO_TEST_ARGS=( -tags netgo -cpu 4 -timeout 8m ) 7 | SLOW= 8 | NO_GO_GET= 9 | 10 
| usage() {
11 |     echo "$0 [-slow] [-no-go-get]"
12 | }
13 | 
14 | while [ $# -gt 0 ]; do
15 |     case "$1" in
16 |         "-slow")
17 |             SLOW=true
18 |             shift 1
19 |             ;;
20 |         "-no-go-get")
21 |             NO_GO_GET=true
22 |             shift 1
23 |             ;;
24 |         *)
25 |             usage
26 |             exit 2
27 |             ;;
28 |     esac
29 | done
30 | 
31 | if [ -n "$SLOW" ] || [ -n "$CIRCLECI" ]; then
32 |     SLOW=true
33 | fi
34 | 
35 | if [ -n "$SLOW" ]; then
36 |     GO_TEST_ARGS=( "${GO_TEST_ARGS[@]}" -race -covermode=atomic )
37 | 
38 |     # shellcheck disable=SC2153
39 |     if [ -n "$COVERDIR" ] ; then
40 |         coverdir="$COVERDIR"
41 |     else
42 |         coverdir=$(mktemp -d coverage.XXXXXXXXXX)
43 |     fi
44 | 
45 |     mkdir -p "$coverdir"
46 | fi
47 | 
48 | fail=0
49 | 
50 | if [ -z "$TESTDIRS" ]; then
51 |     # NB: Relies on paths being prefixed with './'.
52 |     TESTDIRS=( $(git ls-files -- '*_test.go' | grep -vE '^(vendor|prog|experimental)/' | xargs -n1 dirname | sort -u | sed -e 's|^|./|') )
53 | else
54 |     # TESTDIRS on the right-hand side is not really an array variable,
55 |     # just a space-separated string; it is written like this to placate
56 |     # shellcheck.
57 |     TESTDIRS=( $(for d in ${TESTDIRS[*]}; do echo "$d"; done) )
58 | fi
59 | 
60 | # If running on circle, use the scheduler to work out which tests to run on which shard
61 | if [ -n "$CIRCLECI" ] && [ -z "$NO_SCHEDULER" ] && [ -x "$DIR/sched" ]; then
62 |     PREFIX=$(go list -e ./ | sed -e 's/\//-/g')
63 |     TESTDIRS=( $(echo "${TESTDIRS[@]}" | "$DIR/sched" sched "$PREFIX-$CIRCLE_BUILD_NUM" "$CIRCLE_NODE_TOTAL" "$CIRCLE_NODE_INDEX") )
64 |     echo "${TESTDIRS[@]}"
65 | fi
66 | 
67 | PACKAGE_BASE=$(go list -e ./)
68 | 
69 | # Speed up the tests by compiling and installing their dependencies first.
70 | go test -i "${GO_TEST_ARGS[@]}" "${TESTDIRS[@]}"
71 | 
72 | for dir in "${TESTDIRS[@]}"; do
73 |     if [ -z "$NO_GO_GET" ]; then
74 |         go get -t -tags netgo "$dir"
75 |     fi
76 | 
77 |     GO_TEST_ARGS_RUN=( "${GO_TEST_ARGS[@]}" )
78 |     if [ -n "$SLOW" ]; then
79 |         COVERPKGS=$( (go list "$dir"; go list -f '{{join .Deps "\n"}}' "$dir" | grep -v "vendor" | grep "^$PACKAGE_BASE/") | paste -s -d, -)
80 |         output=$(mktemp "$coverdir/unit.XXXXXXXXXX")
81 |         GO_TEST_ARGS_RUN=( "${GO_TEST_ARGS[@]}" -coverprofile="$output" -coverpkg="$COVERPKGS" )
82 |     fi
83 | 
84 |     START=$(date +%s)
85 |     if ! go test "${GO_TEST_ARGS_RUN[@]}" "$dir"; then
86 |         fail=1
87 |     fi
88 |     RUNTIME=$(( $(date +%s) - START ))
89 | 
90 |     # Report test runtime when running on circle, to help the scheduler
91 |     if [ -n "$CIRCLECI" ] && [ -z "$NO_SCHEDULER" ] && [ -x "$DIR/sched" ]; then
92 |         "$DIR/sched" time "$dir" "$RUNTIME"
93 |     fi
94 | done
95 | 
96 | if [ -n "$SLOW" ] && [ -z "$COVERDIR" ] ; then
97 |     go get github.com/weaveworks/tools/cover
98 |     cover "$coverdir"/* >profile.cov
99 |     rm -rf "$coverdir"
100 |     go tool cover -html=profile.cov -o=coverage.html
101 |     go tool cover -func=profile.cov | tail -n1
102 | fi
103 | 
104 | exit $fail
105 | 
--------------------------------------------------------------------------------
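For reference, the `sched` helper invoked by `tools/test` appears to be a thin client for the scheduler service in `tools/scheduler/main.py`. A hypothetical raw interaction with that service (the host name and test names here are invented for illustration) would look something like:

```
# Ask shard 0 of 2 for its share of the tests in run "build-1234"
$ curl -s -X POST -d '{"tests": ["100_cross_hosts_test.sh", "200_dns_test.sh"]}' \
    'https://scheduler.example.com/schedule/build-1234/2/0'
{"tests": ["100_cross_hosts_test.sh"]}

# Report a runtime of 12.3 seconds so future schedules improve
$ curl -s -X POST 'https://scheduler.example.com/record/100_cross_hosts_test.sh/12.3'
```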