├── .github ├── ISSUE_TEMPLATE.md └── PULL_REQUEST_TEMPLATE.md ├── .gitignore ├── .pluginsync.yml ├── .sync.yml ├── .travis.yml ├── CONTRIBUTING.md ├── LICENSE ├── METRICS.md ├── Makefile ├── NOTICE ├── README.md ├── collector ├── collector.go ├── collector_test.go └── utils.go ├── config └── config.go ├── container ├── cgroupfs │ ├── blkio.go │ ├── blkio_test.go │ ├── cpu.go │ ├── cpu_test.go │ ├── cpuset.go │ ├── cpuset_test.go │ ├── hugetlb.go │ ├── hugetlb_test.go │ ├── memory.go │ ├── memory_test.go │ ├── pids.go │ ├── pids_test.go │ └── utils.go ├── client.go ├── fs │ ├── fs.go │ ├── fs_mocks.go │ ├── fs_test.go │ └── types.go ├── network │ ├── network.go │ ├── network_test.go │ ├── tcp.go │ └── tcp_test.go └── statistics.go ├── examples ├── .setup.sh ├── README.md ├── configs │ └── config.yaml ├── docker-compose.yml ├── docker-file.sh ├── run-docker-file.sh ├── run-dockerception.sh └── tasks │ ├── docker-file.json │ └── docker-file.yaml ├── glide.lock ├── glide.yaml ├── main.go ├── metadata.yml ├── mocks └── mocks.go └── scripts ├── build.sh ├── common.sh ├── config └── docker-deployment.yml ├── deps.sh ├── large.sh ├── large_compose.sh ├── large_k8s.sh ├── large_tests.sh ├── pre_deploy.sh ├── test.sh └── test ├── large_spec.rb └── spec_helper.rb /.github/ISSUE_TEMPLATE.md: -------------------------------------------------------------------------------- 1 | 10 | 11 | **Snap daemon version** (use `snapteld -v`): 12 | 13 | **Environment**: 14 | - **Cloud provider or hardware configuration**: 15 | - **OS** (e.g. from /etc/os-release): 16 | - **Kernel** (e.g. `uname -a`): 17 | - **Relevant tools** (e.g. plugins used with Snap): 18 | - **Others** (e.g. deploying with Ansible): 19 | 20 | 21 | **What happened**: 22 | 23 | 24 | **What you expected to happen**: 25 | 26 | 27 | **Steps to reproduce it** (as minimally and precisely as possible): 28 | 29 | 1. 30 | 2. 31 | 3. 32 | 33 | 34 | **Anything else do we need to know** (e.g. 
issue happens only occasionally): 35 | -------------------------------------------------------------------------------- /.github/PULL_REQUEST_TEMPLATE.md: -------------------------------------------------------------------------------- 1 | 7 | Fixes # 8 | 9 | Summary of changes: 10 | - 11 | - 12 | - 13 | 14 | How to verify it: 15 | - 16 | 17 | Testing done: 18 | - 19 | 20 | A picture of a snapping turtle (not required but encouraged): 21 | - 22 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # File managed by pluginsync 2 | # 3 | # NOTE: please commit OS/Editor specific settings in your .gitignore_global 4 | # .idea 5 | # .DS_Store 6 | # 7 | # Compiled Object files, Static and Dynamic libs (Shared Objects) 8 | *.o 9 | *.a 10 | *.so 11 | 12 | # Folders 13 | _obj 14 | _test 15 | 16 | # Architecture specific extensions/prefixes 17 | *.[568vq] 18 | [568vq].out 19 | 20 | *.cgo1.go 21 | *.cgo2.c 22 | _cgo_defun.c 23 | _cgo_gotypes.go 24 | _cgo_export.* 25 | 26 | _testmain.go 27 | 28 | *.exe 29 | *.test 30 | *.prof 31 | 32 | # Output of the go coverage tool 33 | *.out 34 | profile.cov 35 | 36 | # we don't vendor godep _workspace 37 | **/Godeps/_workspace/** 38 | vendor/ 39 | 40 | # ignore build artifacts 41 | build/ 42 | -------------------------------------------------------------------------------- /.pluginsync.yml: -------------------------------------------------------------------------------- 1 | # File managed by pluginsync 2 | pluginsync_config: '0.1.14' 3 | managed_files: 4 | - .github 5 | - .github/ISSUE_TEMPLATE.md 6 | - .github/PULL_REQUEST_TEMPLATE.md 7 | - .gitignore 8 | - .pluginsync.yml 9 | - .travis.yml 10 | - CONTRIBUTING.md 11 | - LICENSE 12 | - Makefile 13 | - scripts 14 | - scripts/build.sh 15 | - scripts/common.sh 16 | - scripts/deps.sh 17 | - scripts/large.sh 18 | - scripts/pre_deploy.sh 19 | - scripts/test 20 | - scripts/test/large_spec.rb 21 | - scripts/test/spec_helper.rb 22 | - scripts/test.sh 23 | -------------------------------------------------------------------------------- /.sync.yml: -------------------------------------------------------------------------------- 1 | # NOTE: this plugin uses default settings 2 | -------------------------------------------------------------------------------- /.travis.yml: -------------------------------------------------------------------------------- 1 | # File managed by pluginsync 2 | sudo: false 3 | language: go 4 | go: 5 | - 1.7.x 6 | - 1.8.x 7 | env: 8 | global: 9 | - ORG_PATH=/home/travis/gopath/src/github.com/intelsdi-x 10 | - SNAP_PLUGIN_SOURCE=/home/travis/gopath/src/github.com/${TRAVIS_REPO_SLUG} 11 | - GLIDE_HOME="${HOME}/.glide" 12 | matrix: 13 | - TEST_TYPE=small 14 | - TEST_TYPE: build 15 | matrix: 16 | exclude: 17 | - go: 1.7.x 18 | env: TEST_TYPE=build 19 | before_install: 20 | - "[[ -d $SNAP_PLUGIN_SOURCE ]] || mkdir -p $ORG_PATH && ln -s $TRAVIS_BUILD_DIR $SNAP_PLUGIN_SOURCE" 21 | install: 22 | - cd $SNAP_PLUGIN_SOURCE 23 | - make deps 24 | script: 25 | - make check 2>&1 26 | notifications: 27 | email: false 28 | slack: 29 | secure: VkbZLIc2RH8yf3PtIAxUNPdAu3rQQ7yQx0GcK124JhbEnZGaHyK615V0rbG7HcVmYKGPdB0cXqZiLBDKGqGKb2zR1NepOe1nF03jxGSpPq8jIFeEXSJGEYGL34ScDzZZGuG6qwbjFcXiW5lqn6t8igzp7v2+URYBaZo5ktCS2xY= 30 | before_deploy: 31 | - "./scripts/pre_deploy.sh" 32 | deploy: 33 | - provider: s3 34 | access_key_id: $AWS_ACCESS_KEY_ID 35 | secret_access_key: $AWS_SECRET_ACCESS_KEY 36 | 
bucket: snap.ci.snap-telemetry.io 37 | region: us-west-2 38 | skip_cleanup: true 39 | local-dir: s3 40 | upload-dir: plugins 41 | acl: public_read 42 | on: 43 | repo: intelsdi-x/snap-plugin-collector-docker 44 | branch: master 45 | condition: $TEST_TYPE = "build" && $TRAVIS_GO_VERSION =~ ^1\.8(|\.[0-9]+)$ 46 | - provider: s3 47 | access_key_id: $AWS_ACCESS_KEY_ID 48 | secret_access_key: $AWS_SECRET_ACCESS_KEY 49 | bucket: snap.ci.snap-telemetry.io 50 | region: us-west-2 51 | skip_cleanup: true 52 | local-dir: s3 53 | upload-dir: plugins 54 | acl: public_read 55 | on: 56 | repo: intelsdi-x/snap-plugin-collector-docker 57 | tags: true 58 | condition: $TEST_TYPE = "build" && $TRAVIS_GO_VERSION =~ ^1\.8(|\.[0-9]+)$ 59 | - provider: releases 60 | api_key: $GITHUB_API_KEY 61 | file: 62 | - release/snap-plugin-collector-docker_linux_x86_64 63 | skip_cleanup: true 64 | on: 65 | repo: intelsdi-x/snap-plugin-collector-docker 66 | tags: true 67 | condition: $TEST_TYPE = "build" && $TRAVIS_GO_VERSION =~ ^1\.8(|\.[0-9]+)$ 68 | -------------------------------------------------------------------------------- /CONTRIBUTING.md: -------------------------------------------------------------------------------- 1 | # snap plugin collector docker 2 | 3 | 1. [Contributing Code](#contributing-code) 4 | 2. [Contributing Examples](#contributing-examples) 5 | 3. [Contribute Elsewhere](#contribute-elsewhere) 6 | 4. [Reporting Security Issues](#reporting-security-issues) 7 | 5. [Thank You](#thank-you) 8 | 9 | This repository is primarily **community supported**. We both appreciate and need your contribution to keep it stable. Thank you for being part of the community! We love you for it. 10 | 11 | ## Contributing Code 12 | **_IMPORTANT_**: We encourage contributions to the project from the community. We ask that you keep the following guidelines in mind when planning your contribution. 13 | 14 | * Whether your contribution is for a bug fix or a feature request, **create an [Issue](https://github.com/intelsdi-x/snap-plugin-collector-docker/issues)** and let us know what you are thinking. 15 | * **For bugs**, if you have already found a fix, feel free to submit a Pull Request referencing the Issue you created. Include the `Fixes #` syntax to link it to the issue you're addressing. 16 | * **For feature requests**, we want to improve upon the library incrementally which means small changes at a time. In order to ensure your PR can be reviewed in a timely manner, please keep PRs small, e.g. <10 files and <500 lines changed. If you think this is unrealistic, then mention that within the issue and we can discuss it. 17 | 18 | Once you're ready to contribute code back to this repo, start with these steps: 19 | 20 | * Fork the appropriate sub-projects that are affected by your change. 21 | * Clone the fork to `$GOPATH/src/github.com/intelsdi-x/`: 22 | ``` 23 | $ cd "${GOPATH}/src/github.com/intelsdi-x/" 24 | $ git clone https://github.com/intelsdi-x/snap-plugin-collector-docker.git 25 | ``` 26 | * Create a topic branch for your change and checkout that branch: 27 | ``` 28 | $ git checkout -b some-topic-branch 29 | ``` 30 | * Make your changes to the code and add tests to cover contributed code. 31 | * Validate the changes and run the test suite if one is provided. 32 | * Commit your changes and push them to your fork. 33 | * Open a pull request for the appropriate project. 34 | * Contributors will review your pull request, suggest changes, and merge it when it’s ready and/or offer feedback. 
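As a minimal sketch, the test-and-submit part of that flow might look like this (assuming the `make` targets provided by this repository's Makefile and that `origin` points at your fork; the commit message and issue number are placeholders):

```
$ make deps            # install build/test dependencies
$ make test-small      # run the small (unit) test suite
$ git commit -am "Fix <short description> (fixes #<issue>)"
$ git push origin some-topic-branch
```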
35 | 36 | If you have questions feel free to contact the [maintainers](https://github.com/intelsdi-x/snap/blob/master/docs/MAINTAINERS.md). 37 | 38 | ## Contributing Examples 39 | The most immediately helpful way you can benefit this project is by cloning the repository, adding some further examples and submitting a pull request. 40 | 41 | Have you written a blog post about how you use [Snap](http://github.com/intelsdi-x/snap) and/or this plugin? Send it to us [on Slack](http://slack.snap-telemetry.io)! 42 | 43 | ## Contribute Elsewhere 44 | This repository is one of **many** plugins in **Snap**, a powerful telemetry framework. See the full project at http://snap-telemetry.io 45 | 46 | 47 | ## Reporting Security Issues 48 | 49 | The Snap team take security very seriously. If you have any issue regarding security, 50 | please notify us by sending an email to snap-security@intel.com and not by creating a GitHub issue. 51 | We will follow up with you promptly with more information and a plan for remediation. 52 | While we are not offering a security bounty, we would love to send some Snap swag your way along with our 53 | deepest gratitude for your assistance in making Snap a more secure product. 54 | 55 | ## Thank You 56 | And **thank you!** Your contribution, through code and participation, is incredibly important to us. 57 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | 2 | Apache License 3 | Version 2.0, January 2004 4 | http://www.apache.org/licenses/ 5 | 6 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 7 | 8 | 1. Definitions. 9 | 10 | "License" shall mean the terms and conditions for use, reproduction, 11 | and distribution as defined by Sections 1 through 9 of this document. 12 | 13 | "Licensor" shall mean the copyright owner or entity authorized by 14 | the copyright owner that is granting the License. 15 | 16 | "Legal Entity" shall mean the union of the acting entity and all 17 | other entities that control, are controlled by, or are under common 18 | control with that entity. For the purposes of this definition, 19 | "control" means (i) the power, direct or indirect, to cause the 20 | direction or management of such entity, whether by contract or 21 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 22 | outstanding shares, or (iii) beneficial ownership of such entity. 23 | 24 | "You" (or "Your") shall mean an individual or Legal Entity 25 | exercising permissions granted by this License. 26 | 27 | "Source" form shall mean the preferred form for making modifications, 28 | including but not limited to software source code, documentation 29 | source, and configuration files. 30 | 31 | "Object" form shall mean any form resulting from mechanical 32 | transformation or translation of a Source form, including but 33 | not limited to compiled object code, generated documentation, 34 | and conversions to other media types. 35 | 36 | "Work" shall mean the work of authorship, whether in Source or 37 | Object form, made available under the License, as indicated by a 38 | copyright notice that is included in or attached to the work 39 | (an example is provided in the Appendix below). 
40 | 41 | "Derivative Works" shall mean any work, whether in Source or Object 42 | form, that is based on (or derived from) the Work and for which the 43 | editorial revisions, annotations, elaborations, or other modifications 44 | represent, as a whole, an original work of authorship. For the purposes 45 | of this License, Derivative Works shall not include works that remain 46 | separable from, or merely link (or bind by name) to the interfaces of, 47 | the Work and Derivative Works thereof. 48 | 49 | "Contribution" shall mean any work of authorship, including 50 | the original version of the Work and any modifications or additions 51 | to that Work or Derivative Works thereof, that is intentionally 52 | submitted to Licensor for inclusion in the Work by the copyright owner 53 | or by an individual or Legal Entity authorized to submit on behalf of 54 | the copyright owner. For the purposes of this definition, "submitted" 55 | means any form of electronic, verbal, or written communication sent 56 | to the Licensor or its representatives, including but not limited to 57 | communication on electronic mailing lists, source code control systems, 58 | and issue tracking systems that are managed by, or on behalf of, the 59 | Licensor for the purpose of discussing and improving the Work, but 60 | excluding communication that is conspicuously marked or otherwise 61 | designated in writing by the copyright owner as "Not a Contribution." 62 | 63 | "Contributor" shall mean Licensor and any individual or Legal Entity 64 | on behalf of whom a Contribution has been received by Licensor and 65 | subsequently incorporated within the Work. 66 | 67 | 2. Grant of Copyright License. Subject to the terms and conditions of 68 | this License, each Contributor hereby grants to You a perpetual, 69 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 70 | copyright license to reproduce, prepare Derivative Works of, 71 | publicly display, publicly perform, sublicense, and distribute the 72 | Work and such Derivative Works in Source or Object form. 73 | 74 | 3. Grant of Patent License. Subject to the terms and conditions of 75 | this License, each Contributor hereby grants to You a perpetual, 76 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 77 | (except as stated in this section) patent license to make, have made, 78 | use, offer to sell, sell, import, and otherwise transfer the Work, 79 | where such license applies only to those patent claims licensable 80 | by such Contributor that are necessarily infringed by their 81 | Contribution(s) alone or by combination of their Contribution(s) 82 | with the Work to which such Contribution(s) was submitted. If You 83 | institute patent litigation against any entity (including a 84 | cross-claim or counterclaim in a lawsuit) alleging that the Work 85 | or a Contribution incorporated within the Work constitutes direct 86 | or contributory patent infringement, then any patent licenses 87 | granted to You under this License for that Work shall terminate 88 | as of the date such litigation is filed. 89 | 90 | 4. Redistribution. 
You may reproduce and distribute copies of the 91 | Work or Derivative Works thereof in any medium, with or without 92 | modifications, and in Source or Object form, provided that You 93 | meet the following conditions: 94 | 95 | (a) You must give any other recipients of the Work or 96 | Derivative Works a copy of this License; and 97 | 98 | (b) You must cause any modified files to carry prominent notices 99 | stating that You changed the files; and 100 | 101 | (c) You must retain, in the Source form of any Derivative Works 102 | that You distribute, all copyright, patent, trademark, and 103 | attribution notices from the Source form of the Work, 104 | excluding those notices that do not pertain to any part of 105 | the Derivative Works; and 106 | 107 | (d) If the Work includes a "NOTICE" text file as part of its 108 | distribution, then any Derivative Works that You distribute must 109 | include a readable copy of the attribution notices contained 110 | within such NOTICE file, excluding those notices that do not 111 | pertain to any part of the Derivative Works, in at least one 112 | of the following places: within a NOTICE text file distributed 113 | as part of the Derivative Works; within the Source form or 114 | documentation, if provided along with the Derivative Works; or, 115 | within a display generated by the Derivative Works, if and 116 | wherever such third-party notices normally appear. The contents 117 | of the NOTICE file are for informational purposes only and 118 | do not modify the License. You may add Your own attribution 119 | notices within Derivative Works that You distribute, alongside 120 | or as an addendum to the NOTICE text from the Work, provided 121 | that such additional attribution notices cannot be construed 122 | as modifying the License. 123 | 124 | You may add Your own copyright statement to Your modifications and 125 | may provide additional or different license terms and conditions 126 | for use, reproduction, or distribution of Your modifications, or 127 | for any such Derivative Works as a whole, provided Your use, 128 | reproduction, and distribution of the Work otherwise complies with 129 | the conditions stated in this License. 130 | 131 | 5. Submission of Contributions. Unless You explicitly state otherwise, 132 | any Contribution intentionally submitted for inclusion in the Work 133 | by You to the Licensor shall be under the terms and conditions of 134 | this License, without any additional terms or conditions. 135 | Notwithstanding the above, nothing herein shall supersede or modify 136 | the terms of any separate license agreement you may have executed 137 | with Licensor regarding such Contributions. 138 | 139 | 6. Trademarks. This License does not grant permission to use the trade 140 | names, trademarks, service marks, or product names of the Licensor, 141 | except as required for reasonable and customary use in describing the 142 | origin of the Work and reproducing the content of the NOTICE file. 143 | 144 | 7. Disclaimer of Warranty. Unless required by applicable law or 145 | agreed to in writing, Licensor provides the Work (and each 146 | Contributor provides its Contributions) on an "AS IS" BASIS, 147 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 148 | implied, including, without limitation, any warranties or conditions 149 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 150 | PARTICULAR PURPOSE. 
You are solely responsible for determining the 151 | appropriateness of using or redistributing the Work and assume any 152 | risks associated with Your exercise of permissions under this License. 153 | 154 | 8. Limitation of Liability. In no event and under no legal theory, 155 | whether in tort (including negligence), contract, or otherwise, 156 | unless required by applicable law (such as deliberate and grossly 157 | negligent acts) or agreed to in writing, shall any Contributor be 158 | liable to You for damages, including any direct, indirect, special, 159 | incidental, or consequential damages of any character arising as a 160 | result of this License or out of the use or inability to use the 161 | Work (including but not limited to damages for loss of goodwill, 162 | work stoppage, computer failure or malfunction, or any and all 163 | other commercial damages or losses), even if such Contributor 164 | has been advised of the possibility of such damages. 165 | 166 | 9. Accepting Warranty or Additional Liability. While redistributing 167 | the Work or Derivative Works thereof, You may choose to offer, 168 | and charge a fee for, acceptance of support, warranty, indemnity, 169 | or other liability obligations and/or rights consistent with this 170 | License. However, in accepting such obligations, You may act only 171 | on Your own behalf and on Your sole responsibility, not on behalf 172 | of any other Contributor, and only if You agree to indemnify, 173 | defend, and hold each Contributor harmless for any liability 174 | incurred by, or claims asserted against, such Contributor by reason 175 | of your accepting any such warranty or additional liability. 176 | 177 | END OF TERMS AND CONDITIONS 178 | -------------------------------------------------------------------------------- /Makefile: -------------------------------------------------------------------------------- 1 | # File managed by pluginsync 2 | # http://www.apache.org/licenses/LICENSE-2.0.txt 3 | # 4 | # 5 | # Copyright 2015 Intel Corporation 6 | # 7 | # Licensed under the Apache License, Version 2.0 (the "License"); 8 | # you may not use this file except in compliance with the License. 9 | # You may obtain a copy of the License at 10 | # 11 | # http://www.apache.org/licenses/LICENSE-2.0 12 | # 13 | # Unless required by applicable law or agreed to in writing, software 14 | # distributed under the License is distributed on an "AS IS" BASIS, 15 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 16 | # See the License for the specific language governing permissions and 17 | # limitations under the License. 18 | 19 | default: 20 | $(MAKE) deps 21 | $(MAKE) all 22 | deps: 23 | bash -c "./scripts/deps.sh" 24 | test: 25 | bash -c "./scripts/test.sh $(TEST_TYPE)" 26 | test-legacy: 27 | bash -c "./scripts/test.sh legacy" 28 | test-small: 29 | bash -c "./scripts/test.sh small" 30 | test-medium: 31 | bash -c "./scripts/test.sh medium" 32 | test-large: 33 | bash -c "./scripts/test.sh large" 34 | test-all: 35 | $(MAKE) test-small 36 | $(MAKE) test-medium 37 | $(MAKE) test-large 38 | check: 39 | $(MAKE) test 40 | all: 41 | bash -c "./scripts/build.sh" 42 | -------------------------------------------------------------------------------- /NOTICE: -------------------------------------------------------------------------------- 1 | Snap 2 | Copyright 2016 Intel Corporation 3 | 4 | This product includes software developed at 5 | Intel Corporation (http://www.intel.com/). 
6 | 7 | Portions of this software were developed at 8 | Docker, Inc. (http://www.docker.com/). 9 | 10 | This software contains portions of OCI/opencontainers from https://github.com/opencontainers/runc, 11 | licensed under an Apache License 2.0 in files container/statistics.go, container/cgroupfs/blkio.go. 12 | 13 | This software contains portions of Google cAdvisor from https://github.com/google/cadvisor, 14 | licensed under an Apache License 2.0 in files container/fs/fs.go, container/fs/types.go, container/network/network.go, container/network/tcp.go. 15 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | DISCONTINUATION OF PROJECT. 2 | 3 | This project will no longer be maintained by Intel. 4 | 5 | This project has been identified as having known security escapes. 6 | 7 | Intel has ceased development and contributions including, but not limited to, maintenance, bug fixes, new releases, or updates, to this project. 8 | 9 | Intel no longer accepts patches to this project. 10 | # DISCONTINUATION OF PROJECT 11 | 12 | **This project will no longer be maintained by Intel. Intel will not provide or guarantee development of or support for this project, including but not limited to, maintenance, bug fixes, new releases or updates. Patches to this project are no longer accepted by Intel. If you have an ongoing need to use this project, are interested in independently developing it, or would like to maintain patches for the community, please create your own fork of the project.** 13 | 14 | 15 | 16 | [![Build Status](https://travis-ci.org/intelsdi-x/snap-plugin-collector-docker.svg?branch=master)](https://travis-ci.com/intelsdi-x/snap-plugin-collector-docker) 17 | # Snap collector plugin - Docker 18 | 19 | This plugin collects runtime metrics from Docker containers and its host machine. It gathers information about resource usage and performance characteristics. 20 | 21 | It's used in the [Snap framework](http://github.com/intelsdi-x/snap). 22 | 23 | 1. [Getting Started](#getting-started) 24 | * [Installation](#installation) 25 | * [Configuration and Usage](#configuration-and-usage) 26 | 2. [Documentation](#documentation) 27 | * [Collected Metrics](#collected-metrics) 28 | * [Examples](#examples) 29 | 3. [Community Support](#community-support) 30 | 4. [Contributing](#contributing) 31 | 5. [License](#license) 32 | 6. [Acknowledgements](#acknowledgements) 33 | 34 | ## Getting Started 35 | 36 | In order to use this plugin you need Docker Engine installed. Visit [Install Docker Engine](https://docs.docker.com/engine/installation/) for detailed instructions how to do it. 37 | Plugin was tested against Docker version 1.12.3. 38 | 39 | ### Operating systems 40 | * Linux/amd64 41 | * Darwin/amd64 (needs [docker-machine](https://docs.docker.com/v1.8/installation/mac/)) 42 | 43 | ### Installation 44 | #### Download the plugin binary: 45 | 46 | You can get the pre-built binaries for your OS and architecture from the plugin's [GitHub Releases](https://github.com/intelsdi-x/snap-plugin-collector-docker/releases) page. Download the plugin from the latest release and load it into `snapteld` (`/opt/snap/plugins` is the default location for Snap packages). 
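For example, on Linux/amd64 the download-and-load flow might look like the following sketch (it reuses the CI build URL from the example later in this README; substitute the asset from the GitHub Releases page if you prefer a tagged release, and make sure `snapteld` is already running):

```
$ wget http://snap.ci.snap-telemetry.io/plugins/snap-plugin-collector-docker/latest/linux/x86_64/snap-plugin-collector-docker
$ chmod 755 snap-plugin-collector-docker
$ snaptel plugin load snap-plugin-collector-docker
```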
47 | 48 | #### To build the plugin binary: 49 | Fork https://github.com/intelsdi-x/snap-plugin-collector-docker 50 | Clone the repo into `$GOPATH/src/github.com/intelsdi-x/`: 51 | 52 | ``` 53 | $ git clone https://github.com/<your-github-username>/snap-plugin-collector-docker.git 54 | ``` 55 | 56 | Build the Snap Docker plugin by running `make` within the cloned repo: 57 | ``` 58 | $ make 59 | ``` 60 | It may take a while to pull dependencies if you don't have them already. 61 | This builds the plugin in `./build/`. 62 | 63 | ### Configuration and Usage 64 | * Set up the [Snap framework](https://github.com/intelsdi-x/snap/blob/master/README.md#getting-started) 65 | * Load the plugin and create a task; see the example in [Examples](#examples). 66 | 67 | #### Configuration parameters 68 | Configuration can be provided to the plugin via the task manifest. 69 | 70 | To set the Docker Remote API endpoint and the procfs path, include the following in the **workflow** section of the task configuration file: 71 | 72 | workflow: 73 | collect: 74 | config: 75 | /intel/docker: 76 | endpoint: "<DOCKER_REMOTE_API_ENDPOINT>" 77 | procfs: "<PATH_TO_PROCFS>" 78 | 79 | where *DOCKER_REMOTE_API_ENDPOINT* is the endpoint used to communicate with the Docker daemon via the Docker Remote API, 80 | and *PATH_TO_PROCFS* is the path to the proc filesystem on the host (a filled-in example appears under [Examples](#examples) below). 81 | 82 | For more information, see the [Docker Remote API reference](https://docs.docker.com/engine/reference/api/docker_remote_api/). 83 | 84 | ## Documentation 85 | There are a number of other resources you can review to learn how to use this plugin: 86 | * [Docker documentation](https://docs.docker.com/) 87 | * [Docker runtime metrics](https://docs.docker.com/v1.9/engine/articles/runmetrics/) 88 | 89 | Note that this plugin uses the default Docker server endpoint `unix:///var/run/docker.sock` to communicate with the Docker daemon. 90 | However, adding support for custom endpoints is on the roadmap. 91 | 92 | The plugin creates a Docker client instance ready for communication with the given 93 | server endpoint. It will use the latest Remote API version available in the 94 | server. 95 | ### Collected Metrics 96 | 97 | The list of collected metrics is described in [METRICS.md](METRICS.md). 98 | 99 | ### Examples 100 | Similar to dream levels in the movie _Inception_, we have different levels of examples: 101 | * LEVEL 0: Snap running on your system (Linux only). 102 | * LEVEL 1: Snap runs in a container. 103 | * LEVEL 2: Snap runs in a docker-in-docker container. 104 | 105 | For the sake of ease-of-use, these examples are presented in reverse order.
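For reference, here is a filled-in version of the configuration fragment from [Configuration parameters](#configuration-parameters) above. The values shown (the default Docker socket and the host's `/proc`) are only an illustration and should be adjusted to your environment:

```yaml
workflow:
  collect:
    config:
      /intel/docker:
        endpoint: "unix:///var/run/docker.sock"
        procfs: "/proc"
```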
106 | 107 | #### Run example in a docker-in-docker container 108 | ``` 109 | ./examples/tasks/run-docker-file.sh 110 | ``` 111 | 112 | #### Run example in a docker container (Linux or Darwin only) 113 | ``` 114 | ./examples/tasks/run-dockerception.sh 115 | ``` 116 | 117 | #### Run example on your Linux system 118 | 119 | Check if there is some running docker container(s): 120 | 121 | ``` 122 | $ docker ps 123 | 124 | CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES 125 | 7720efd76bb8 ubuntu "/bin/bash" 35 minutes ago Up 35 minutes prickly_spence 126 | ad5221e8ae73 ubuntu "/bin/bash" 36 minutes ago Up 36 minutes suspicious_mirzakhani 127 | ``` 128 | 129 | 130 | In one terminal window, start the Snap daemon (in this case with logging set to 1 and trust disabled): 131 | ``` 132 | $ snapteld -l 1 -t 0 133 | ``` 134 | 135 | In another terminal window download and load plugins: 136 | ``` 137 | $ wget http://snap.ci.snap-telemetry.io/plugins/snap-plugin-collector-docker/latest/linux/x86_64/snap-plugin-collector-docker 138 | $ wget http://snap.ci.snap-telemetry.io/plugins/snap-plugin-publisher-file/latest/linux/x86_64/snap-plugin-publisher-file 139 | $ chmod 755 snap-plugin-* 140 | $ snaptel plugin load snap-plugin-collector-docker 141 | $ snaptel plugin load snap-plugin-publisher-file 142 | ``` 143 | 144 | You can list all of available metrics: 145 | ``` 146 | $ snaptel metric list 147 | ``` 148 | 149 | Download an [example task file](examples/tasks/docker-file.json) and load it: 150 | ``` 151 | $ curl -sfLO https://raw.githubusercontent.com/intelsdi-x/snap-plugin-collector-docker/master/examples/tasks/docker-file.json 152 | $ snaptel task create -t docker-file.json 153 | Using task manifest to create task 154 | Task created 155 | ID: 02dd7ff4-8106-47e9-8b86-70067cd0a850 156 | Name: Task-02dd7ff4-8106-47e9-8b86-70067cd0a850 157 | State: Running 158 | ``` 159 | 160 | See output from snaptel task watch 161 | 162 | (notice, that below only the fragment of task watcher output has been presented) 163 | 164 | ``` 165 | $ snaptel task watch 02dd7ff4-8106-47e9-8b86-70067cd0a850 166 | Watching Task (02dd7ff4-8106-47e9-8b86-70067cd0a850): 167 | NAMESPACE DATA TIMESTAMP 168 | /intel/docker/7720efd76bb8/cgroups/cpu_stats/cpu_usage/total 2.146646e+07 2016-06-21 12:44:09.551811277 +0200 CEST 169 | /intel/docker/7720efd76bb8/cgroups/cpu_stats/cpu_usage/kernel_mode 1e+07 2016-06-21 12:44:09.552107446 +0200 CEST 170 | /intel/docker/7720efd76bb8/cgroups/cpu_stats/cpu_usage/user_mode 0 2016-06-21 12:44:09.552146203 +0200 CEST 171 | /intel/docker/ad5221e8ae73/cgroups/cpu_stats/cpu_usage/total 2.146646e+07 2016-06-21 12:44:09.551811277 +0200 CEST 172 | /intel/docker/ad5221e8ae73/cgroups/cpu_stats/cpu_usage/kernel_mode 1e+07 2016-06-21 12:44:09.552107446 +0200 CEST 173 | /intel/docker/ad5221e8ae73/cgroups/cpu_stats/cpu_usage/user_mode 0 2016-06-21 12:44:09.552146203 +0200 CEST 174 | /intel/docker/root/cgroups/cpu_stats/cpu_usage/total 2.88984998661e+12 2016-06-21 12:44:09.551811277 +0200 CEST 175 | /intel/docker/root/cgroups/cpu_stats/cpu_usage/kernel_mode 6.38e+11 2016-06-21 12:44:09.552107446 +0200 CEST 176 | /intel/docker/root/cgroups/cpu_stats/cpu_usage/user_mode 9.4397e+11 2016-06-21 12:44:09.552146203 +0200 CEST 177 | ``` 178 | (Keys `ctrl+c` terminate task watcher) 179 | 180 | These data are published to file and stored there (in this example in `/tmp/snap-docker-file.log`). 181 | 182 | ### Roadmap 183 | There isn't a current roadmap for this plugin, but it is in active development. 
As we launch this plugin, we do not have any outstanding requirements for the next release. 184 | 185 | If you have a feature request, please add it as an [issue](https://github.com/intelsdi-x/snap-plugin-collector-docker/issues) and/or submit a [pull request](https://github.com/intelsdi-x/snap-plugin-collector-docker/pulls). 186 | 187 | ## Community Support 188 | This repository is one of **many** plugins in **snap**, a powerful telemetry framework. See the full project at http://github.com/intelsdi-x/snap. 189 | 190 | To reach out to other users, head to the [main framework](https://github.com/intelsdi-x/snap#community-support) or visit [Slack](http://slack.snap-telemetry.io). 191 | 192 | ## Contributing 193 | We love contributions! 194 | 195 | There's more than one way to give back, from examples to blogs to code updates. See our recommended process in [CONTRIBUTING.md](CONTRIBUTING.md). 196 | 197 | ## License 198 | [Snap](http://github.com/intelsdi-x/snap), along with this plugin, is an Open Source software released under the Apache 2.0 [License](LICENSE). 199 | 200 | ## Acknowledgements 201 | 202 | * Author: [Marcin Krolik](https://github.com/marcin-krolik) 203 | * Co-authors: [Izabella Raulin](https://github.com/IzabellaRaulin), [Marcin Olszewski](https://github.com/marcintao) 204 | 205 | **Thank you!** Your contribution is incredibly important to us. 206 | -------------------------------------------------------------------------------- /collector/utils.go: -------------------------------------------------------------------------------- 1 | // +build linux 2 | 3 | /* 4 | http://www.apache.org/licenses/LICENSE-2.0.txt 5 | 6 | 7 | Copyright 2017 Intel Corporation 8 | 9 | Licensed under the Apache License, Version 2.0 (the "License"); 10 | you may not use this file except in compliance with the License. 11 | You may obtain a copy of the License at 12 | 13 | http://www.apache.org/licenses/LICENSE-2.0 14 | 15 | Unless required by applicable law or agreed to in writing, software 16 | distributed under the License is distributed on an "AS IS" BASIS, 17 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 18 | See the License for the specific language governing permissions and 19 | limitations under the License. 
20 | */ 21 | 22 | package collector 23 | 24 | import ( 25 | "fmt" 26 | "strings" 27 | 28 | log "github.com/sirupsen/logrus" 29 | 30 | "github.com/intelsdi-x/snap-plugin-collector-docker/container" 31 | "github.com/intelsdi-x/snap-plugin-lib-go/v1/plugin" 32 | ) 33 | 34 | // dynamicElement is defined by its name and description 35 | type dynamicElement struct { 36 | name string 37 | description string 38 | } 39 | 40 | type nsCreator struct { 41 | dynamicElements map[string]dynamicElement 42 | } 43 | 44 | // definedDynamicElements holds expected dynamic element(s) with definition in docker metrics namespaces which occurs after the key-word 45 | var definedDynamicElements = map[string]dynamicElement{ 46 | "filesystem": {"device_name", "a name of filesystem device"}, 47 | "labels": {"label_key", "a key of container's label"}, 48 | "network": {"network_interface", "a name of network interface or 'total' for aggregate"}, 49 | "per_cpu": {"cpu_id", "an id of cpu"}, 50 | "io_service_bytes_recursive": {"device_name", "a name of block device"}, 51 | "io_serviced_recursive": {"device_name", "a name of block device"}, 52 | "io_queue_recursive": {"device_name", "a name of block device"}, 53 | "io_service_time_recursive": {"device_name", "a name of block device"}, 54 | "io_wait_time_recursive": {"device_name", "a name of block device"}, 55 | "io_merged_recursive": {"device_name", "a name of block device"}, 56 | "io_time_recursive": {"device_name", "a name of block device"}, 57 | "sectors_recursive": {"device_name", "a name of block device"}, 58 | "hugetlb_stats": {"size", "hugetlb page size"}, 59 | } 60 | 61 | func initClient(c *collector, endpoint string) error { 62 | dc, err := container.NewDockerClient(endpoint) 63 | if err != nil { 64 | return err 65 | } 66 | 67 | params, err := dc.GetDockerParams("DockerRootDir", "Driver") 68 | if err != nil { 69 | return err 70 | } 71 | 72 | c.rootDir = params["DockerRootDir"] 73 | c.driver = params["Driver"] 74 | c.client = dc 75 | 76 | log.WithFields(log.Fields{ 77 | "block": "initClient", 78 | }).Infof("Docker client initialized with storage driver %s and docker root dir %s", c.driver, c.rootDir) 79 | 80 | return nil 81 | } 82 | 83 | // createMetricNamespace returns metric namespace based on given `ns` which is used as a prefix; all dynamic elements 84 | // in the `metricName` are defined based on content of map `dynamicElements` 85 | func (creator *nsCreator) createMetricNamespace(ns plugin.Namespace, metricName string) (plugin.Namespace, error) { 86 | metricName = strings.TrimSpace(metricName) 87 | 88 | if len(metricName) == 0 { 89 | return nil, fmt.Errorf("Cannot create metric namespace: empty metric name %s", metricName) 90 | } 91 | 92 | elements := strings.Split(metricName, "/") 93 | 94 | // check if metricName contains only static elements 95 | if !strings.Contains(metricName, "*") { 96 | ns = ns.AddStaticElements(elements...) 
97 | return ns, nil 98 | } 99 | 100 | // when metric name contains dynamic element iterate over elements 101 | for index, element := range elements { 102 | if element == "*" { 103 | // the following element is dynamic 104 | dynamicElement, ok := creator.dynamicElements[elements[index-1]] 105 | // check if this dynamic element is supported (name and description are available) 106 | if !ok { 107 | return nil, fmt.Errorf("Unknown dynamic element in metric `%s` under index %d", metricName, index) 108 | } 109 | // add recognize dynamic element (define its name and description) 110 | ns = ns.AddDynamicElement(dynamicElement.name, dynamicElement.description) 111 | 112 | if len(elements)-1 == index { 113 | // in case when an asterisk is the last element, add `value` at the end of ns 114 | ns = ns.AddStaticElement("value") 115 | } 116 | } else { 117 | // the following element is static 118 | ns = ns.AddStaticElement(element) 119 | } 120 | } 121 | if len(ns) == 0 { 122 | return nil, fmt.Errorf("Cannot create metric namespace for metric %s", metricName) 123 | } 124 | return ns, nil 125 | } 126 | 127 | func getQueryGroup(ns []string) (string, error) { 128 | if ns[0] == "spec" { 129 | return ns[0], nil 130 | } 131 | 132 | for _, ne := range ns { 133 | if _, exists := getters[ne]; exists { 134 | return ne, nil 135 | } 136 | } 137 | return "", fmt.Errorf("Cannot identify query group for given namespace %s", strings.Join(ns, "/")) 138 | } 139 | 140 | func appendIfMissing(collectGroup map[string]map[string]struct{}, rid string, query string) { 141 | group, exists := collectGroup[rid] 142 | if !exists { 143 | collectGroup[rid] = map[string]struct{}{query: {}} 144 | } 145 | 146 | if _, exists := group[query]; !exists { 147 | collectGroup[rid][query] = struct{}{} 148 | } 149 | } 150 | 151 | func getDockerConfig(cfg plugin.Config) (map[string]string, error) { 152 | config := make(map[string]string) 153 | values := []string{"endpoint", "procfs"} 154 | var err error 155 | for _, v := range values { 156 | config[v], err = cfg.GetString(v) 157 | if err != nil { 158 | return config, err 159 | } 160 | } 161 | return config, nil 162 | } 163 | -------------------------------------------------------------------------------- /config/config.go: -------------------------------------------------------------------------------- 1 | /* 2 | http://www.apache.org/licenses/LICENSE-2.0.txt 3 | 4 | 5 | Copyright 2016 Intel Corporation 6 | 7 | Licensed under the Apache License, Version 2.0 (the "License"); 8 | you may not use this file except in compliance with the License. 9 | You may obtain a copy of the License at 10 | 11 | http://www.apache.org/licenses/LICENSE-2.0 12 | 13 | Unless required by applicable law or agreed to in writing, software 14 | distributed under the License is distributed on an "AS IS" BASIS, 15 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 16 | See the License for the specific language governing permissions and 17 | limitations under the License. 
18 | */ 19 | 20 | package config 21 | 22 | // DockerVersion is a version of running docker server 23 | var DockerVersion = []int{0, 0} 24 | -------------------------------------------------------------------------------- /container/cgroupfs/blkio.go: -------------------------------------------------------------------------------- 1 | // +build linux 2 | 3 | /* 4 | http://www.apache.org/licenses/LICENSE-2.0.txt 5 | 6 | 7 | Copyright 2015 Intel Corporation 8 | 9 | Licensed under the Apache License, Version 2.0 (the "License"); 10 | you may not use this file except in compliance with the License. 11 | You may obtain a copy of the License at 12 | 13 | http://www.apache.org/licenses/LICENSE-2.0 14 | 15 | Unless required by applicable law or agreed to in writing, software 16 | distributed under the License is distributed on an "AS IS" BASIS, 17 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 18 | See the License for the specific language governing permissions and 19 | limitations under the License. 20 | 21 | This file incorporates work covered by the following copyright and permission notice: 22 | Copyright 2014 Docker, Inc. 23 | Licensed under the Apache License, Version 2.0 (the "License"); you may not use 24 | this file except in compliance with the License. You may obtain a copy of the License at 25 | 26 | http://www.apache.org/licenses/LICENSE-2.0 27 | 28 | Unless required by applicable law or agreed to in writing, software 29 | distributed under the License is distributed on an "AS IS" BASIS, 30 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 31 | See the License for the specific language governing permissions and 32 | limitations under the License. 33 | */ 34 | 35 | // Package contains code from OCI/opencontainers (https://github.com/opencontainers/runc) with following: 36 | // - function getCFQStats 37 | // - function getStats 38 | // - function splitBlkioStatLine 39 | // - function getBlkioStat 40 | 41 | package cgroupfs 42 | 43 | import ( 44 | "bufio" 45 | "fmt" 46 | "os" 47 | "path/filepath" 48 | "strconv" 49 | "strings" 50 | 51 | "github.com/intelsdi-x/snap-plugin-collector-docker/container" 52 | ) 53 | 54 | // Blkio implements StatGetter interface 55 | type Blkio struct{} 56 | 57 | // GetStats reads blkio metrics from Blkio Group from blkio.* 58 | func (b *Blkio) GetStats(stats *container.Statistics, opts container.GetStatOpt) error { 59 | path, err := opts.GetStringValue("cgroup_path") 60 | if err != nil { 61 | return err 62 | } 63 | // Try to read CFQ stats available on all CFQ enabled kernels first 64 | if blkioStats, err := getBlkioStat(filepath.Join(path, "blkio.io_serviced_recursive")); err == nil && blkioStats != nil { 65 | return getCFQStats(path, stats) 66 | } 67 | return getStats(path, stats) // Use generic stats as fallback 68 | } 69 | 70 | func splitBlkioStatLine(r rune) bool { 71 | return r == ' ' || r == ':' 72 | } 73 | 74 | func getBlkioStat(path string) ([]container.BlkioStatEntry, error) { 75 | var blkioStats []container.BlkioStatEntry 76 | f, err := os.Open(path) 77 | if err != nil { 78 | if os.IsNotExist(err) { 79 | return blkioStats, nil 80 | } 81 | return nil, err 82 | } 83 | defer f.Close() 84 | 85 | sc := bufio.NewScanner(f) 86 | for sc.Scan() { 87 | // format: dev type amount 88 | fields := strings.FieldsFunc(sc.Text(), splitBlkioStatLine) 89 | if len(fields) < 3 { 90 | if len(fields) == 2 && fields[0] == "Total" { 91 | // skip total line 92 | continue 93 | } else { 94 | return nil, fmt.Errorf("Invalid 
line found while parsing %s: %s", path, sc.Text()) 95 | } 96 | } 97 | 98 | major, err := strconv.ParseUint(fields[0], 10, 64) 99 | if err != nil { 100 | return nil, err 101 | } 102 | 103 | minor, err := strconv.ParseUint(fields[1], 10, 64) 104 | if err != nil { 105 | return nil, err 106 | } 107 | 108 | op := "" 109 | index := 2 110 | if len(fields) == 4 { 111 | op = fields[2] 112 | index = 3 113 | } 114 | val, err := strconv.ParseUint(fields[index], 10, 64) 115 | if err != nil { 116 | return nil, err 117 | } 118 | blkioStats = append(blkioStats, container.BlkioStatEntry{Major: major, Minor: minor, Op: op, Value: val}) 119 | } 120 | 121 | return blkioStats, nil 122 | } 123 | 124 | func getCFQStats(path string, stats *container.Statistics) error { 125 | var blkioStats []container.BlkioStatEntry 126 | var err error 127 | 128 | if blkioStats, err = getBlkioStat(filepath.Join(path, "blkio.sectors_recursive")); err != nil { 129 | return err 130 | } 131 | stats.Cgroups.BlkioStats.SectorsRecursive = blkioStats 132 | 133 | if blkioStats, err = getBlkioStat(filepath.Join(path, "blkio.io_service_bytes_recursive")); err != nil { 134 | return err 135 | } 136 | stats.Cgroups.BlkioStats.IoServiceBytesRecursive = blkioStats 137 | 138 | if blkioStats, err = getBlkioStat(filepath.Join(path, "blkio.io_serviced_recursive")); err != nil { 139 | return err 140 | } 141 | stats.Cgroups.BlkioStats.IoServicedRecursive = blkioStats 142 | 143 | if blkioStats, err = getBlkioStat(filepath.Join(path, "blkio.io_queued_recursive")); err != nil { 144 | return err 145 | } 146 | stats.Cgroups.BlkioStats.IoQueuedRecursive = blkioStats 147 | 148 | if blkioStats, err = getBlkioStat(filepath.Join(path, "blkio.io_service_time_recursive")); err != nil { 149 | return err 150 | } 151 | stats.Cgroups.BlkioStats.IoServiceTimeRecursive = blkioStats 152 | 153 | if blkioStats, err = getBlkioStat(filepath.Join(path, "blkio.io_wait_time_recursive")); err != nil { 154 | return err 155 | } 156 | stats.Cgroups.BlkioStats.IoWaitTimeRecursive = blkioStats 157 | 158 | if blkioStats, err = getBlkioStat(filepath.Join(path, "blkio.io_merged_recursive")); err != nil { 159 | return err 160 | } 161 | stats.Cgroups.BlkioStats.IoMergedRecursive = blkioStats 162 | 163 | if blkioStats, err = getBlkioStat(filepath.Join(path, "blkio.time_recursive")); err != nil { 164 | return err 165 | } 166 | stats.Cgroups.BlkioStats.IoTimeRecursive = blkioStats 167 | 168 | return nil 169 | } 170 | 171 | func getStats(path string, stats *container.Statistics) error { 172 | var blkioStats []container.BlkioStatEntry 173 | var err error 174 | 175 | if blkioStats, err = getBlkioStat(filepath.Join(path, "blkio.throttle.io_service_bytes")); err != nil { 176 | return err 177 | } 178 | stats.Cgroups.BlkioStats.IoServiceBytesRecursive = blkioStats 179 | 180 | if blkioStats, err = getBlkioStat(filepath.Join(path, "blkio.throttle.io_serviced")); err != nil { 181 | return err 182 | } 183 | stats.Cgroups.BlkioStats.IoServicedRecursive = blkioStats 184 | 185 | return nil 186 | } 187 | -------------------------------------------------------------------------------- /container/cgroupfs/blkio_test.go: -------------------------------------------------------------------------------- 1 | // +build small 2 | 3 | /* 4 | http://www.apache.org/licenses/LICENSE-2.0.txt 5 | 6 | 7 | Copyright 2017 Intel Corporation 8 | 9 | Licensed under the Apache License, Version 2.0 (the "License"); 10 | you may not use this file except in compliance with the License. 
11 | You may obtain a copy of the License at 12 | 13 | http://www.apache.org/licenses/LICENSE-2.0 14 | 15 | Unless required by applicable law or agreed to in writing, software 16 | distributed under the License is distributed on an "AS IS" BASIS, 17 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 18 | See the License for the specific language governing permissions and 19 | limitations under the License. 20 | */ 21 | 22 | package cgroupfs 23 | 24 | import ( 25 | "io/ioutil" 26 | "os" 27 | "path/filepath" 28 | "testing" 29 | 30 | "github.com/intelsdi-x/snap-plugin-collector-docker/container" 31 | . "github.com/smartystreets/goconvey/convey" 32 | "github.com/stretchr/testify/suite" 33 | ) 34 | 35 | const ( 36 | blkioContents = `8:0 Read 100 37 | 8:0 Write 200 38 | 8:0 Sync 300 39 | 8:0 Async 500 40 | 8:0 Total 500 41 | Total 500` 42 | ) 43 | 44 | type BlkioSuite struct { 45 | suite.Suite 46 | blkioPath string 47 | } 48 | 49 | func TestBlkioSuite(t *testing.T) { 50 | suite.Run(t, &BlkioSuite{}) 51 | } 52 | 53 | func (s *BlkioSuite) SetupSuite() { 54 | s.blkioPath = "/tmp/blkio_test" 55 | err := os.Mkdir(s.blkioPath, 0700) 56 | if err != nil { 57 | s.T().Fatal(err) 58 | } 59 | s.writeFile(filepath.Join(s.blkioPath, "blkio.io_service_bytes_recursive"), []byte(blkioContents)) 60 | s.writeFile(filepath.Join(s.blkioPath, "blkio.io_serviced_recursive"), []byte(blkioContents)) 61 | s.writeFile(filepath.Join(s.blkioPath, "blkio.io_queued_recursive"), []byte(blkioContents)) 62 | s.writeFile(filepath.Join(s.blkioPath, "blkio.io_service_time_recursive"), []byte(blkioContents)) 63 | s.writeFile(filepath.Join(s.blkioPath, "blkio.io_wait_time_recursive"), []byte(blkioContents)) 64 | s.writeFile(filepath.Join(s.blkioPath, "blkio.io_merged_recursive"), []byte(blkioContents)) 65 | s.writeFile(filepath.Join(s.blkioPath, "blkio.time_recursive"), []byte(blkioContents)) 66 | } 67 | 68 | func (s *BlkioSuite) TearDownSuite() { 69 | err := os.RemoveAll(s.blkioPath) 70 | if err != nil { 71 | s.T().Fatal(err) 72 | } 73 | } 74 | 75 | func (s *BlkioSuite) TestgetBlkioStat() { 76 | testCases := []testCase{ 77 | {ExpectedMajor: 8, ExpectedMinor: 0, ExpectedValue: 100, ExpectedOp: "Read"}, 78 | {ExpectedMajor: 8, ExpectedMinor: 0, ExpectedValue: 200, ExpectedOp: "Write"}, 79 | {ExpectedMajor: 8, ExpectedMinor: 0, ExpectedValue: 300, ExpectedOp: "Sync"}, 80 | {ExpectedMajor: 8, ExpectedMinor: 0, ExpectedValue: 500, ExpectedOp: "Async"}, 81 | {ExpectedMajor: 8, ExpectedMinor: 0, ExpectedValue: 500, ExpectedOp: "Total"}, 82 | } 83 | 84 | Convey("Read blkio.io_service_bytes_recursive content", s.T(), func() { 85 | blkio, err := getBlkioStat(filepath.Join(s.blkioPath, "blkio.io_service_bytes_recursive")) 86 | So(err, ShouldBeNil) 87 | So(len(blkio), ShouldEqual, 5) 88 | for i := 0; i < len(testCases); i++ { 89 | So(blkio[i].Major, ShouldEqual, testCases[i].ExpectedMajor) 90 | So(blkio[i].Minor, ShouldEqual, testCases[i].ExpectedMinor) 91 | So(blkio[i].Value, ShouldEqual, testCases[i].ExpectedValue) 92 | So(blkio[i].Op, ShouldEqual, testCases[i].ExpectedOp) 93 | } 94 | }) 95 | } 96 | 97 | func (s *BlkioSuite) TestGetStatsPositive() { 98 | Convey("Call GetStats", s.T(), func() { 99 | blkio := Blkio{} 100 | stats := container.NewStatistics() 101 | err := blkio.GetStats(stats, container.GetStatOpt{"cgroup_path": s.blkioPath}) 102 | So(err, ShouldBeNil) 103 | So(len(stats.Cgroups.BlkioStats.IoMergedRecursive), ShouldEqual, 5) 104 | So(len(stats.Cgroups.BlkioStats.IoQueuedRecursive), ShouldEqual, 5) 105 | 
So(len(stats.Cgroups.BlkioStats.IoServiceBytesRecursive), ShouldEqual, 5) 106 | So(len(stats.Cgroups.BlkioStats.IoServicedRecursive), ShouldEqual, 5) 107 | So(len(stats.Cgroups.BlkioStats.IoServiceTimeRecursive), ShouldEqual, 5) 108 | So(len(stats.Cgroups.BlkioStats.IoTimeRecursive), ShouldEqual, 5) 109 | So(len(stats.Cgroups.BlkioStats.IoWaitTimeRecursive), ShouldEqual, 5) 110 | So(len(stats.Cgroups.BlkioStats.SectorsRecursive), ShouldEqual, 0) 111 | }) 112 | } 113 | 114 | func (s *BlkioSuite) TestGetStatsNegative() { 115 | Convey("Call GetStats", s.T(), func() { 116 | blkio := Blkio{} 117 | err := blkio.GetStats(container.NewStatistics(), container.GetStatOpt{}) 118 | So(err, ShouldNotBeNil) 119 | }) 120 | } 121 | 122 | type testCase struct { 123 | ExpectedMajor uint64 124 | ExpectedMinor uint64 125 | ExpectedValue uint64 126 | ExpectedOp string 127 | } 128 | 129 | func (s *BlkioSuite) writeFile(path string, content []byte) { 130 | err := ioutil.WriteFile(path, content, 0700) 131 | if err != nil { 132 | s.T().Fatal(err) 133 | } 134 | } 135 | -------------------------------------------------------------------------------- /container/cgroupfs/cpu.go: -------------------------------------------------------------------------------- 1 | // +build linux 2 | 3 | /* 4 | http://www.apache.org/licenses/LICENSE-2.0.txt 5 | 6 | 7 | Copyright 2015 Intel Corporation 8 | 9 | Licensed under the Apache License, Version 2.0 (the "License"); 10 | you may not use this file except in compliance with the License. 11 | You may obtain a copy of the License at 12 | 13 | http://www.apache.org/licenses/LICENSE-2.0 14 | 15 | Unless required by applicable law or agreed to in writing, software 16 | distributed under the License is distributed on an "AS IS" BASIS, 17 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 18 | See the License for the specific language governing permissions and 19 | limitations under the License. 
20 | */ 21 | 22 | package cgroupfs 23 | 24 | import ( 25 | "bufio" 26 | "fmt" 27 | "io/ioutil" 28 | "os" 29 | "path/filepath" 30 | "strconv" 31 | "strings" 32 | 33 | "github.com/intelsdi-x/snap-plugin-collector-docker/container" 34 | ) 35 | 36 | // Cpu implements StatGetter interface 37 | type Cpu struct{} 38 | 39 | // GetStats reads throttling metrics from Cpu Group from cpu.stat 40 | func (cpu *Cpu) GetStats(stats *container.Statistics, opts container.GetStatOpt) error { 41 | path, err := opts.GetStringValue("cgroup_path") 42 | if err != nil { 43 | return err 44 | } 45 | 46 | f, err := os.Open(filepath.Join(path, "cpu.stat")) 47 | if err != nil { 48 | return err 49 | } 50 | defer f.Close() 51 | 52 | scan := bufio.NewScanner(f) 53 | for scan.Scan() { 54 | param, value, err := parseEntry(scan.Text()) 55 | if err != nil { 56 | return err 57 | } 58 | 59 | switch param { 60 | case "nr_periods": 61 | stats.Cgroups.CpuStats.ThrottlingData.NrPeriods = value 62 | case "nr_throttled": 63 | stats.Cgroups.CpuStats.ThrottlingData.NrThrottled = value 64 | case "throttled_time": 65 | stats.Cgroups.CpuStats.ThrottlingData.ThrottledTime = value 66 | default: 67 | return fmt.Errorf("Unknown cpu.stat parameter: %s", param) 68 | } 69 | } 70 | 71 | return nil 72 | } 73 | 74 | // CpuAcct implements StatGetter interface 75 | type CpuAcct struct{} 76 | 77 | // GetStats reads usage metrics from Cpu Group from cpuacct.stat 78 | func (cpuacct *CpuAcct) GetStats(stats *container.Statistics, opts container.GetStatOpt) error { 79 | path, err := opts.GetStringValue("cgroup_path") 80 | if err != nil { 81 | return err 82 | } 83 | 84 | f, err := os.Open(filepath.Join(path, "cpuacct.stat")) 85 | if err != nil { 86 | return err 87 | } 88 | defer f.Close() 89 | 90 | scan := bufio.NewScanner(f) 91 | for scan.Scan() { 92 | param, value, err := parseEntry(scan.Text()) 93 | if err != nil { 94 | return err 95 | } 96 | 97 | switch param { 98 | case "user": 99 | stats.Cgroups.CpuStats.CpuUsage.UserMode = value 100 | case "system": 101 | stats.Cgroups.CpuStats.CpuUsage.KernelMode = value 102 | default: 103 | return fmt.Errorf("Unknown cpuacct.stat parameter: %s", param) 104 | } 105 | } 106 | 107 | usages, err := ioutil.ReadFile(filepath.Join(path, "cpuacct.usage_percpu")) 108 | if err != nil { 109 | return err 110 | } 111 | 112 | perCpu := []uint64{} 113 | for _, usage := range strings.Fields(string(usages)) { 114 | value, err := strconv.ParseUint(usage, 10, 64) 115 | if err != nil { 116 | return err 117 | } 118 | perCpu = append(perCpu, value) 119 | } 120 | stats.Cgroups.CpuStats.CpuUsage.PerCpu = perCpu 121 | 122 | total, err := parseIntValue(filepath.Join(path, "cpuacct.usage")) 123 | if err != nil { 124 | return err 125 | } 126 | stats.Cgroups.CpuStats.CpuUsage.Total = total 127 | 128 | return nil 129 | } 130 | 131 | // CpuShares implements StatGetter interface 132 | type CpuShares struct{} 133 | 134 | // GetStats reads shares metrics from Cpu Group from cpu.shares 135 | func (cpuShares *CpuShares) GetStats(stats *container.Statistics, opts container.GetStatOpt) error { 136 | path, err := opts.GetStringValue("cgroup_path") 137 | if err != nil { 138 | return err 139 | } 140 | 141 | shares, err := parseIntValue(filepath.Join(path, "cpu.shares")) 142 | if err != nil { 143 | return err 144 | } 145 | 146 | stats.Cgroups.CpuStats.CpuShares = shares 147 | 148 | return nil 149 | } 150 | -------------------------------------------------------------------------------- /container/cgroupfs/cpu_test.go: 
-------------------------------------------------------------------------------- 1 | // +build small 2 | 3 | /* 4 | http://www.apache.org/licenses/LICENSE-2.0.txt 5 | 6 | 7 | Copyright 2017 Intel Corporation 8 | 9 | Licensed under the Apache License, Version 2.0 (the "License"); 10 | you may not use this file except in compliance with the License. 11 | You may obtain a copy of the License at 12 | 13 | http://www.apache.org/licenses/LICENSE-2.0 14 | 15 | Unless required by applicable law or agreed to in writing, software 16 | distributed under the License is distributed on an "AS IS" BASIS, 17 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 18 | See the License for the specific language governing permissions and 19 | limitations under the License. 20 | */ 21 | 22 | package cgroupfs 23 | 24 | import ( 25 | "io/ioutil" 26 | "os" 27 | "path/filepath" 28 | "testing" 29 | 30 | . "github.com/smartystreets/goconvey/convey" 31 | "github.com/stretchr/testify/suite" 32 | 33 | "github.com/intelsdi-x/snap-plugin-collector-docker/container" 34 | ) 35 | 36 | const ( 37 | cpuStatContents = `nr_periods 11 38 | nr_throttled 22 39 | throttled_time 33 40 | ` 41 | cpuAcctStatContents = `user 11111111 42 | system 22222222 43 | ` 44 | ) 45 | 46 | type CpuSuite struct { 47 | suite.Suite 48 | cpuPath string 49 | } 50 | 51 | func (suite *CpuSuite) SetupSuite() { 52 | suite.cpuPath = "/tmp/cpu_test" 53 | err := os.Mkdir(suite.cpuPath, 0700) 54 | if err != nil { 55 | suite.T().Fatal(err) 56 | } 57 | 58 | suite.writeFile(filepath.Join(suite.cpuPath, "cpu.stat"), []byte(cpuStatContents)) 59 | suite.writeFile(filepath.Join(suite.cpuPath, "cpuacct.stat"), []byte(cpuAcctStatContents)) 60 | suite.writeFile(filepath.Join(suite.cpuPath, "cpuacct.usage"), []byte("3333333333")) 61 | suite.writeFile(filepath.Join(suite.cpuPath, "cpuacct.usage_percpu"), []byte("44444444 555555555")) 62 | suite.writeFile(filepath.Join(suite.cpuPath, "cpu.shares"), []byte("6666")) 63 | } 64 | 65 | func (suite *CpuSuite) TearDownSuite() { 66 | err := os.RemoveAll(suite.cpuPath) 67 | if err != nil { 68 | suite.T().Fatal(err) 69 | } 70 | } 71 | 72 | func TestCpuSuite(t *testing.T) { 73 | suite.Run(t, &CpuSuite{}) 74 | } 75 | 76 | func (suite *CpuSuite) TestCpuGetStats() { 77 | Convey("collecting data from cpu.stat", suite.T(), func() { 78 | stats := container.NewStatistics() 79 | opts := container.GetStatOpt{"cgroup_path": suite.cpuPath} 80 | cpu := Cpu{} 81 | err := cpu.GetStats(stats, opts) 82 | So(err, ShouldBeNil) 83 | So(stats.Cgroups.CpuStats.ThrottlingData.NrPeriods, ShouldEqual, 11) 84 | So(stats.Cgroups.CpuStats.ThrottlingData.NrThrottled, ShouldEqual, 22) 85 | So(stats.Cgroups.CpuStats.ThrottlingData.ThrottledTime, ShouldEqual, 33) 86 | 87 | }) 88 | } 89 | 90 | func (suite *CpuSuite) TestCpuAcctGetStats() { 91 | Convey("collecting data from cpuacct.stat", suite.T(), func() { 92 | stats := container.NewStatistics() 93 | opts := container.GetStatOpt{"cgroup_path": suite.cpuPath} 94 | cpu := CpuAcct{} 95 | err := cpu.GetStats(stats, opts) 96 | So(err, ShouldBeNil) 97 | So(stats.Cgroups.CpuStats.CpuUsage.UserMode, ShouldEqual, 11111111) 98 | So(stats.Cgroups.CpuStats.CpuUsage.KernelMode, ShouldEqual, 22222222) 99 | So(stats.Cgroups.CpuStats.CpuUsage.Total, ShouldEqual, 3333333333) 100 | So(stats.Cgroups.CpuStats.CpuUsage.PerCpu[0], ShouldEqual, 44444444) 101 | So(stats.Cgroups.CpuStats.CpuUsage.PerCpu[1], ShouldEqual, 555555555) 102 | }) 103 | } 104 | 105 | func (suite *CpuSuite) TestCpuSharesGetStats() { 106 | 
Convey("collecting data from cpu.shares", suite.T(), func() { 107 | stats := container.NewStatistics() 108 | opts := container.GetStatOpt{"cgroup_path": suite.cpuPath} 109 | cpu := CpuShares{} 110 | err := cpu.GetStats(stats, opts) 111 | So(err, ShouldBeNil) 112 | So(stats.Cgroups.CpuStats.CpuShares, ShouldEqual, 6666) 113 | }) 114 | } 115 | 116 | func (suite *CpuSuite) writeFile(path string, content []byte) { 117 | err := ioutil.WriteFile(path, content, 0700) 118 | if err != nil { 119 | suite.T().Fatal(err) 120 | } 121 | } 122 | -------------------------------------------------------------------------------- /container/cgroupfs/cpuset.go: -------------------------------------------------------------------------------- 1 | // +build linux 2 | 3 | /* 4 | http://www.apache.org/licenses/LICENSE-2.0.txt 5 | 6 | 7 | Copyright 2015 Intel Corporation 8 | 9 | Licensed under the Apache License, Version 2.0 (the "License"); 10 | you may not use this file except in compliance with the License. 11 | You may obtain a copy of the License at 12 | 13 | http://www.apache.org/licenses/LICENSE-2.0 14 | 15 | Unless required by applicable law or agreed to in writing, software 16 | distributed under the License is distributed on an "AS IS" BASIS, 17 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 18 | See the License for the specific language governing permissions and 19 | limitations under the License. 20 | */ 21 | 22 | package cgroupfs 23 | 24 | import ( 25 | "path/filepath" 26 | 27 | "github.com/intelsdi-x/snap-plugin-collector-docker/container" 28 | ) 29 | 30 | // CpuSet implements StatGetter interface 31 | type CpuSet struct{} 32 | 33 | // GetStats reads cpuset metrics from Cpuset Group 34 | func (cs *CpuSet) GetStats(stats *container.Statistics, opts container.GetStatOpt) error { 35 | path, err := opts.GetStringValue("cgroup_path") 36 | if err != nil { 37 | return err 38 | } 39 | 40 | cpus, err := parseStrValue(filepath.Join(path, "cpuset.cpus")) 41 | if err != nil { 42 | return err 43 | } 44 | 45 | mems, err := parseStrValue(filepath.Join(path, "cpuset.mems")) 46 | if err != nil { 47 | return err 48 | } 49 | 50 | memmig, err := parseIntValue(filepath.Join(path, "cpuset.memory_migrate")) 51 | if err != nil { 52 | return err 53 | } 54 | 55 | cpuexc, err := parseIntValue(filepath.Join(path, "cpuset.cpu_exclusive")) 56 | if err != nil { 57 | return err 58 | } 59 | 60 | memexc, err := parseIntValue(filepath.Join(path, "cpuset.mem_exclusive")) 61 | if err != nil { 62 | return err 63 | } 64 | 65 | stats.Cgroups.CpuSetStats.Cpus = cpus 66 | stats.Cgroups.CpuSetStats.Mems = mems 67 | stats.Cgroups.CpuSetStats.MemoryMigrate = memmig 68 | stats.Cgroups.CpuSetStats.CpuExclusive = cpuexc 69 | stats.Cgroups.CpuSetStats.MemoryExclusive = memexc 70 | 71 | return nil 72 | } 73 | -------------------------------------------------------------------------------- /container/cgroupfs/cpuset_test.go: -------------------------------------------------------------------------------- 1 | // +build small 2 | 3 | /* 4 | http://www.apache.org/licenses/LICENSE-2.0.txt 5 | 6 | 7 | Copyright 2017 Intel Corporation 8 | 9 | Licensed under the Apache License, Version 2.0 (the "License"); 10 | you may not use this file except in compliance with the License. 
11 | You may obtain a copy of the License at 12 | 13 | http://www.apache.org/licenses/LICENSE-2.0 14 | 15 | Unless required by applicable law or agreed to in writing, software 16 | distributed under the License is distributed on an "AS IS" BASIS, 17 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 18 | See the License for the specific language governing permissions and 19 | limitations under the License. 20 | */ 21 | 22 | package cgroupfs 23 | 24 | import ( 25 | "io/ioutil" 26 | "os" 27 | "path/filepath" 28 | "testing" 29 | 30 | . "github.com/smartystreets/goconvey/convey" 31 | "github.com/stretchr/testify/suite" 32 | 33 | "github.com/intelsdi-x/snap-plugin-collector-docker/container" 34 | ) 35 | 36 | type CpuSetSuite struct { 37 | suite.Suite 38 | cpusetPath string 39 | } 40 | 41 | func (suite *CpuSetSuite) SetupSuite() { 42 | suite.cpusetPath = "/tmp/cpuset_test" 43 | err := os.Mkdir(suite.cpusetPath, 0700) 44 | if err != nil { 45 | suite.T().Fatal(err) 46 | } 47 | 48 | suite.writeFile(filepath.Join(suite.cpusetPath, "cpuset.memory_migrate"), []byte("1")) 49 | suite.writeFile(filepath.Join(suite.cpusetPath, "cpuset.cpu_exclusive"), []byte("2")) 50 | suite.writeFile(filepath.Join(suite.cpusetPath, "cpuset.mem_exclusive"), []byte("3")) 51 | suite.writeFile(filepath.Join(suite.cpusetPath, "cpuset.mems"), []byte("4")) 52 | suite.writeFile(filepath.Join(suite.cpusetPath, "cpuset.cpus"), []byte("5")) 53 | } 54 | 55 | func (suite *CpuSetSuite) TearDownSuite() { 56 | err := os.RemoveAll(suite.cpusetPath) 57 | if err != nil { 58 | suite.T().Fatal(err) 59 | } 60 | } 61 | 62 | func TestCpuSetSuite(t *testing.T) { 63 | suite.Run(t, &CpuSetSuite{}) 64 | } 65 | 66 | func (suite *CpuSetSuite) TestCpuGetStats() { 67 | Convey("collecting data from cpuset controller", suite.T(), func() { 68 | stats := container.NewStatistics() 69 | opts := container.GetStatOpt{"cgroup_path": suite.cpusetPath} 70 | cpu := CpuSet{} 71 | err := cpu.GetStats(stats, opts) 72 | So(err, ShouldBeNil) 73 | So(stats.Cgroups.CpuSetStats.MemoryMigrate, ShouldEqual, 1) 74 | So(stats.Cgroups.CpuSetStats.CpuExclusive, ShouldEqual, 2) 75 | So(stats.Cgroups.CpuSetStats.MemoryExclusive, ShouldEqual, 3) 76 | So(stats.Cgroups.CpuSetStats.Mems, ShouldEqual, "4") 77 | So(stats.Cgroups.CpuSetStats.Cpus, ShouldEqual, "5") 78 | }) 79 | } 80 | 81 | func (suite *CpuSetSuite) writeFile(path string, content []byte) { 82 | err := ioutil.WriteFile(path, content, 0700) 83 | if err != nil { 84 | suite.T().Fatal(err) 85 | } 86 | } 87 | -------------------------------------------------------------------------------- /container/cgroupfs/hugetlb.go: -------------------------------------------------------------------------------- 1 | // +build linux 2 | 3 | /* 4 | http://www.apache.org/licenses/LICENSE-2.0.txt 5 | 6 | 7 | Copyright 2015 Intel Corporation 8 | 9 | Licensed under the Apache License, Version 2.0 (the "License"); 10 | you may not use this file except in compliance with the License. 11 | You may obtain a copy of the License at 12 | 13 | http://www.apache.org/licenses/LICENSE-2.0 14 | 15 | Unless required by applicable law or agreed to in writing, software 16 | distributed under the License is distributed on an "AS IS" BASIS, 17 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 18 | See the License for the specific language governing permissions and 19 | limitations under the License. 
20 | */ 21 | 22 | package cgroupfs 23 | 24 | import ( 25 | "io/ioutil" 26 | "path/filepath" 27 | "strings" 28 | 29 | "github.com/docker/go-units" 30 | "github.com/intelsdi-x/snap-plugin-collector-docker/container" 31 | ) 32 | 33 | var hpControlDir = "/sys/kernel/mm/hugepages" 34 | 35 | // HugeTlb implements StatGetter interface 36 | type HugeTlb struct{} 37 | 38 | // GetStats reads huge table metrics from Hugetlb Group 39 | func (h *HugeTlb) GetStats(stats *container.Statistics, opts container.GetStatOpt) error { 40 | path, err := opts.GetStringValue("cgroup_path") 41 | if err != nil { 42 | return err 43 | } 44 | 45 | hugePageSizes, err := getHugePageSize(hpControlDir) 46 | if err != nil { 47 | return err 48 | } 49 | 50 | for _, pageSize := range hugePageSizes { 51 | usage, err := parseIntValue(filepath.Join(path, strings.Join([]string{"hugetlb", pageSize, "usage_in_bytes"}, "."))) 52 | if err != nil { 53 | return err 54 | } 55 | 56 | maxUsage, err := parseIntValue(filepath.Join(path, strings.Join([]string{"hugetlb", pageSize, "max_usage_in_bytes"}, "."))) 57 | if err != nil { 58 | return err 59 | } 60 | 61 | failcnt, err := parseIntValue(filepath.Join(path, strings.Join([]string{"hugetlb", pageSize, "failcnt"}, "."))) 62 | if err != nil { 63 | return err 64 | } 65 | 66 | stats.Cgroups.HugetlbStats[pageSize] = container.HugetlbStats{ 67 | Usage: usage, 68 | MaxUsage: maxUsage, 69 | Failcnt: failcnt, 70 | } 71 | } 72 | 73 | return nil 74 | } 75 | 76 | func getHugePageSize(controlDir string) ([]string, error) { 77 | var pageSizes []string 78 | sizeList := []string{"B", "kB", "MB", "GB", "TB", "PB"} 79 | files, err := ioutil.ReadDir(controlDir) 80 | if err != nil { 81 | return pageSizes, err 82 | } 83 | for _, st := range files { 84 | nameArray := strings.Split(st.Name(), "-") 85 | pageSize, err := units.RAMInBytes(nameArray[1]) 86 | if err != nil { 87 | return nil, err 88 | } 89 | sizeString := units.CustomSize("%g%s", float64(pageSize), 1024.0, sizeList) 90 | pageSizes = append(pageSizes, sizeString) 91 | } 92 | 93 | return pageSizes, nil 94 | } 95 | -------------------------------------------------------------------------------- /container/cgroupfs/hugetlb_test.go: -------------------------------------------------------------------------------- 1 | // +build small 2 | 3 | /* 4 | http://www.apache.org/licenses/LICENSE-2.0.txt 5 | 6 | 7 | Copyright 2017 Intel Corporation 8 | 9 | Licensed under the Apache License, Version 2.0 (the "License"); 10 | you may not use this file except in compliance with the License. 11 | You may obtain a copy of the License at 12 | 13 | http://www.apache.org/licenses/LICENSE-2.0 14 | 15 | Unless required by applicable law or agreed to in writing, software 16 | distributed under the License is distributed on an "AS IS" BASIS, 17 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 18 | See the License for the specific language governing permissions and 19 | limitations under the License. 20 | */ 21 | 22 | package cgroupfs 23 | 24 | import ( 25 | "fmt" 26 | "io/ioutil" 27 | "os" 28 | "path/filepath" 29 | "testing" 30 | 31 | . 
"github.com/smartystreets/goconvey/convey" 32 | "github.com/stretchr/testify/suite" 33 | 34 | "github.com/intelsdi-x/snap-plugin-collector-docker/container" 35 | ) 36 | 37 | const ( 38 | hugetlbUsageContents = "128\n" 39 | hugetlbMaxUsageContents = "256\n" 40 | hugetlbFailcnt = "100\n" 41 | 42 | hpSize1GB = "hugepages-1048576kB" 43 | hpSize1MB = "hugepages-2048kB" 44 | ) 45 | 46 | var ( 47 | usage = "hugetlb.%s.usage_in_bytes" 48 | maxUsage = "hugetlb.%s.max_usage_in_bytes" 49 | failcnt = "hugetlb.%s.failcnt" 50 | ) 51 | 52 | type HugePagesSuite struct { 53 | suite.Suite 54 | hugepagesPath string 55 | pageSizes []string 56 | } 57 | 58 | func (suite *HugePagesSuite) SetupSuite() { 59 | suite.hugepagesPath = "/tmp/hugepages_test" 60 | hpControlDir = filepath.Join(suite.hugepagesPath, "control_dir") 61 | 62 | err := os.Mkdir(suite.hugepagesPath, 0700) 63 | if err != nil { 64 | suite.T().Fatal(err) 65 | } 66 | err = os.Mkdir(hpControlDir, 0700) 67 | if err != nil { 68 | suite.T().Fatal(err) 69 | } 70 | err = os.Mkdir(filepath.Join(suite.hugepagesPath, hpSize1MB), 0700) 71 | if err != nil { 72 | suite.T().Fatal(err) 73 | } 74 | err = os.Mkdir(filepath.Join(suite.hugepagesPath, hpSize1GB), 0700) 75 | if err != nil { 76 | suite.T().Fatal(err) 77 | } 78 | 79 | suite.pageSizes = []string{"1GB", "2MB"} 80 | for _, pageSize := range suite.pageSizes { 81 | suite.writeFile(filepath.Join(suite.hugepagesPath, fmt.Sprintf(usage, pageSize)), []byte(hugetlbUsageContents)) 82 | suite.writeFile(filepath.Join(suite.hugepagesPath, fmt.Sprintf(maxUsage, pageSize)), []byte(hugetlbMaxUsageContents)) 83 | suite.writeFile(filepath.Join(suite.hugepagesPath, fmt.Sprintf(failcnt, pageSize)), []byte(hugetlbFailcnt)) 84 | } 85 | } 86 | 87 | func (suite *HugePagesSuite) TearDownSuite() { 88 | err := os.RemoveAll(suite.hugepagesPath) 89 | if err != nil { 90 | suite.T().Fatal(err) 91 | } 92 | } 93 | 94 | func TestHugePagesSuite(t *testing.T) { 95 | suite.Run(t, &HugePagesSuite{}) 96 | } 97 | 98 | func (suite *HugePagesSuite) TestGetStats() { 99 | Convey("", suite.T(), func() { 100 | stats := container.NewStatistics() 101 | opts := container.GetStatOpt{"cgroup_path": suite.hugepagesPath} 102 | hugetlb := HugeTlb{} 103 | err := hugetlb.GetStats(stats, opts) 104 | So(err, ShouldBeNil) 105 | }) 106 | } 107 | 108 | func (suite *HugePagesSuite) writeFile(path string, content []byte) { 109 | err := ioutil.WriteFile(path, content, 0700) 110 | if err != nil { 111 | suite.T().Fatal(err) 112 | } 113 | } 114 | -------------------------------------------------------------------------------- /container/cgroupfs/memory.go: -------------------------------------------------------------------------------- 1 | // +build linux 2 | 3 | /* 4 | http://www.apache.org/licenses/LICENSE-2.0.txt 5 | 6 | 7 | Copyright 2015 Intel Corporation 8 | 9 | Licensed under the Apache License, Version 2.0 (the "License"); 10 | you may not use this file except in compliance with the License. 11 | You may obtain a copy of the License at 12 | 13 | http://www.apache.org/licenses/LICENSE-2.0 14 | 15 | Unless required by applicable law or agreed to in writing, software 16 | distributed under the License is distributed on an "AS IS" BASIS, 17 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 18 | See the License for the specific language governing permissions and 19 | limitations under the License. 
20 | */ 21 | 22 | package cgroupfs 23 | 24 | import ( 25 | "bufio" 26 | "os" 27 | "path/filepath" 28 | "strings" 29 | 30 | "github.com/intelsdi-x/snap-plugin-collector-docker/container" 31 | ) 32 | 33 | // Memory implements StatGetter interface 34 | type Memory struct{} 35 | 36 | // GetStats reads general memory metrics from Memory Group from memory.stat 37 | func (mem *Memory) GetStats(stats *container.Statistics, opts container.GetStatOpt) error { 38 | path, err := opts.GetStringValue("cgroup_path") 39 | if err != nil { 40 | return err 41 | } 42 | 43 | f, err := os.Open(filepath.Join(path, "memory.stat")) 44 | if err != nil { 45 | return err 46 | } 47 | defer f.Close() 48 | scan := bufio.NewScanner(f) 49 | for scan.Scan() { 50 | param, value, err := parseEntry(scan.Text()) 51 | if err != nil { 52 | return err 53 | } 54 | 55 | stats.Cgroups.MemoryStats.Stats[param] = value 56 | } 57 | 58 | // calculate additional stats memory:working_set based on memory_stats 59 | var workingSet uint64 60 | if totalInactiveAnon, ok := stats.Cgroups.MemoryStats.Stats["total_inactive_anon"]; ok { 61 | workingSet = stats.Cgroups.MemoryStats.Usage.Usage 62 | if workingSet < totalInactiveAnon { 63 | workingSet = 0 64 | } else { 65 | workingSet -= totalInactiveAnon 66 | } 67 | 68 | if totalInactiveFile, ok := stats.Cgroups.MemoryStats.Stats["total_inactive_file"]; ok { 69 | if workingSet < totalInactiveFile { 70 | workingSet = 0 71 | } else { 72 | workingSet -= totalInactiveFile 73 | } 74 | } 75 | } 76 | stats.Cgroups.MemoryStats.Stats["working_set"] = workingSet 77 | 78 | return nil 79 | } 80 | 81 | // MemoryCache implements StatGetter interface 82 | type MemoryCache struct{} 83 | 84 | // GetStats reads memory cache metric from Memory Group from memory.stat 85 | func (memCa *MemoryCache) GetStats(stats *container.Statistics, opts container.GetStatOpt) error { 86 | path, err := opts.GetStringValue("cgroup_path") 87 | if err != nil { 88 | return err 89 | } 90 | // if memory.stat where already collected, check for cache in map 91 | if stats.Cgroups.MemoryStats.Stats["cache"] != 0 { 92 | stats.Cgroups.MemoryStats.Cache = stats.Cgroups.MemoryStats.Stats["cache"] 93 | return nil 94 | } 95 | 96 | f, err := os.Open(filepath.Join(path, "memory.stat")) 97 | if err != nil { 98 | return err 99 | } 100 | defer f.Close() 101 | 102 | scan := bufio.NewScanner(f) 103 | for scan.Scan() { 104 | line := scan.Text() 105 | if strings.Contains(line, "cache") { 106 | _, val, err := parseEntry(line) 107 | if err != nil { 108 | return err 109 | } 110 | stats.Cgroups.MemoryStats.Cache = val 111 | break 112 | } 113 | } 114 | 115 | return nil 116 | } 117 | 118 | // MemoryUsage implements StatGetter interface 119 | type MemoryUsage struct{} 120 | 121 | // GetStats reads memory usage metrics from Memory Group from memory.usage_in_bytes, memory.failcnt, memory.max_usage_in_bytes 122 | func (memu *MemoryUsage) GetStats(stats *container.Statistics, opts container.GetStatOpt) error { 123 | path, err := opts.GetStringValue("cgroup_path") 124 | if err != nil { 125 | return err 126 | } 127 | 128 | memoryData, err := getMemoryData(path, "") 129 | if err != nil { 130 | return err 131 | } 132 | stats.Cgroups.MemoryStats.Usage = memoryData 133 | 134 | return nil 135 | } 136 | 137 | // SwapMemUsage implements StatGetter interface 138 | type SwapMemUsage struct{} 139 | 140 | // GetStats reads memory swap usage metrics from Memory Group from memory.memsw.usage_in_bytes, memory.memsw.failcnt, memory.memsw.max_usage_in_bytes 141 | func (memu *SwapMemUsage) 
GetStats(stats *container.Statistics, opts container.GetStatOpt) error { 142 | path, err := opts.GetStringValue("cgroup_path") 143 | if err != nil { 144 | return err 145 | } 146 | 147 | memoryData, err := getMemoryData(path, "memsw") 148 | if err != nil { 149 | return err 150 | } 151 | stats.Cgroups.MemoryStats.SwapUsage = memoryData 152 | 153 | return nil 154 | } 155 | 156 | // KernelMemUsage implements StatGetter interface 157 | type KernelMemUsage struct{} 158 | 159 | // GetStats reads memory kernel usage metrics from Memory Group from memory.kmem.usage_in_bytes, memory.kmem.failcnt, memory.kmem.max_usage_in_bytes 160 | func (memu *KernelMemUsage) GetStats(stats *container.Statistics, opts container.GetStatOpt) error { 161 | path, err := opts.GetStringValue("cgroup_path") 162 | if err != nil { 163 | return err 164 | } 165 | 166 | memoryData, err := getMemoryData(path, "kmem") 167 | if err != nil { 168 | return err 169 | } 170 | stats.Cgroups.MemoryStats.KernelUsage = memoryData 171 | 172 | return nil 173 | } 174 | 175 | func getMemoryData(path, name string) (container.MemoryData, error) { 176 | moduleName := "memory" 177 | if name != "" { 178 | moduleName = strings.Join([]string{"memory", name}, ".") 179 | } 180 | 181 | memoryData := container.MemoryData{} 182 | 183 | usage, err := parseIntValue(filepath.Join(path, strings.Join([]string{moduleName, "usage_in_bytes"}, "."))) 184 | if err != nil { 185 | return memoryData, err 186 | } 187 | 188 | maxUsage, err := parseIntValue(filepath.Join(path, strings.Join([]string{moduleName, "max_usage_in_bytes"}, "."))) 189 | if err != nil { 190 | return memoryData, err 191 | } 192 | 193 | failcnt, err := parseIntValue(filepath.Join(path, strings.Join([]string{moduleName, "failcnt"}, "."))) 194 | if err != nil { 195 | return memoryData, err 196 | } 197 | 198 | memoryData.Usage = usage 199 | memoryData.MaxUsage = maxUsage 200 | memoryData.Failcnt = failcnt 201 | 202 | return memoryData, nil 203 | } 204 | -------------------------------------------------------------------------------- /container/cgroupfs/memory_test.go: -------------------------------------------------------------------------------- 1 | // +build small 2 | 3 | /* 4 | http://www.apache.org/licenses/LICENSE-2.0.txt 5 | 6 | 7 | Copyright 2017 Intel Corporation 8 | 9 | Licensed under the Apache License, Version 2.0 (the "License"); 10 | you may not use this file except in compliance with the License. 11 | You may obtain a copy of the License at 12 | 13 | http://www.apache.org/licenses/LICENSE-2.0 14 | 15 | Unless required by applicable law or agreed to in writing, software 16 | distributed under the License is distributed on an "AS IS" BASIS, 17 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 18 | See the License for the specific language governing permissions and 19 | limitations under the License. 20 | */ 21 | 22 | package cgroupfs 23 | 24 | import ( 25 | "fmt" 26 | "io/ioutil" 27 | "os" 28 | "path/filepath" 29 | "testing" 30 | 31 | . 
"github.com/smartystreets/goconvey/convey" 32 | "github.com/stretchr/testify/suite" 33 | 34 | "github.com/intelsdi-x/snap-plugin-collector-docker/container" 35 | ) 36 | 37 | const ( 38 | memoryStatContent = `cache 1111 39 | rss 2222 40 | rss_huge 3333 41 | mapped_file 4444 42 | dirty 5555 43 | writeback 6666 44 | pgpgin 7777 45 | pgpgout 8888 46 | pgfault 9999 47 | pgmajfault 11111 48 | inactive_anon 22222 49 | active_anon 33333 50 | inactive_file 44444 51 | active_file 55555 52 | unevictable 66666 53 | hierarchical_memory_limit 77777 54 | total_cache 88888 55 | total_rss 99999 56 | total_rss_huge 111 57 | total_mapped_file 222 58 | total_dirty 333 59 | total_writeback 444 60 | total_pgpgin 555 61 | total_pgpgout 666 62 | total_pgfault 777 63 | total_pgmajfault 888 64 | total_inactive_anon 999 65 | total_active_anon 11 66 | total_inactive_file 22 67 | total_active_file 33 68 | total_unevictable 44 69 | ` 70 | ) 71 | 72 | type MemorySuite struct { 73 | suite.Suite 74 | memoryPath string 75 | } 76 | 77 | func (suite *MemorySuite) SetupSuite() { 78 | suite.memoryPath = "/tmp/memory_test" 79 | 80 | err := os.Mkdir(suite.memoryPath, 0700) 81 | if err != nil { 82 | suite.T().Fatal(err) 83 | } 84 | modules := []string{"", ".kmem", ".memsw"} 85 | suite.writeFile(filepath.Join(suite.memoryPath, "memory.stat"), []byte(memoryStatContent)) 86 | for _, module := range modules { 87 | suite.writeFile(filepath.Join(suite.memoryPath, fmt.Sprintf("memory%s.usage_in_bytes", module)), []byte("111")) 88 | suite.writeFile(filepath.Join(suite.memoryPath, fmt.Sprintf("memory%s.max_usage_in_bytes", module)), []byte("222")) 89 | suite.writeFile(filepath.Join(suite.memoryPath, fmt.Sprintf("memory%s.failcnt", module)), []byte("333")) 90 | } 91 | 92 | } 93 | 94 | func (suite *MemorySuite) TearDownSuite() { 95 | err := os.RemoveAll(suite.memoryPath) 96 | if err != nil { 97 | suite.T().Fatal(err) 98 | } 99 | } 100 | 101 | func TestMemorySuite(t *testing.T) { 102 | suite.Run(t, &MemorySuite{}) 103 | } 104 | 105 | func (suite *MemorySuite) TestMemoryGetStats() { 106 | Convey("", suite.T(), func() { 107 | stats := container.NewStatistics() 108 | opts := container.GetStatOpt{"cgroup_path": suite.memoryPath} 109 | memory := Memory{} 110 | err := memory.GetStats(stats, opts) 111 | So(err, ShouldBeNil) 112 | So(len(stats.Cgroups.MemoryStats.Stats), ShouldEqual, 35) 113 | So(stats.Cgroups.MemoryStats.Stats["total_mapped_file"], ShouldEqual, 222) 114 | So(stats.Cgroups.MemoryStats.Stats["inactive_anon"], ShouldEqual, 22222) 115 | So(stats.Cgroups.MemoryStats.Stats["total_active_anon"], ShouldEqual, 11) 116 | So(stats.Cgroups.MemoryStats.Stats["rss_huge"], ShouldEqual, 3333) 117 | So(stats.Cgroups.MemoryStats.Stats["total_writeback"], ShouldEqual, 444) 118 | So(stats.Cgroups.MemoryStats.Stats["dirty"], ShouldEqual, 5555) 119 | So(stats.Cgroups.MemoryStats.Stats["working_set"], ShouldEqual, 0) 120 | So(stats.Cgroups.MemoryStats.Stats["cache"], ShouldEqual, 1111) 121 | }) 122 | } 123 | 124 | func (suite *MemorySuite) TestMemoryCacheGetStats() { 125 | Convey("", suite.T(), func() { 126 | stats := container.NewStatistics() 127 | opts := container.GetStatOpt{"cgroup_path": suite.memoryPath} 128 | memory := MemoryCache{} 129 | err := memory.GetStats(stats, opts) 130 | So(err, ShouldBeNil) 131 | //So(stats.Cgroups.MemoryStats.Cache, ShouldEqual, 1111) 132 | }) 133 | } 134 | 135 | func (suite *MemorySuite) TestMemoryUsageGetStats() { 136 | Convey("", suite.T(), func() { 137 | stats := container.NewStatistics() 138 | opts := 
container.GetStatOpt{"cgroup_path": suite.memoryPath} 139 | memory := MemoryUsage{} 140 | err := memory.GetStats(stats, opts) 141 | So(err, ShouldBeNil) 142 | So(stats.Cgroups.MemoryStats.Usage.Usage, ShouldEqual, 111) 143 | So(stats.Cgroups.MemoryStats.Usage.MaxUsage, ShouldEqual, 222) 144 | So(stats.Cgroups.MemoryStats.Usage.Failcnt, ShouldEqual, 333) 145 | }) 146 | } 147 | 148 | func (suite *MemorySuite) TestMemoryKernelUsageGetStats() { 149 | Convey("", suite.T(), func() { 150 | stats := container.NewStatistics() 151 | opts := container.GetStatOpt{"cgroup_path": suite.memoryPath} 152 | memory := KernelMemUsage{} 153 | err := memory.GetStats(stats, opts) 154 | So(err, ShouldBeNil) 155 | So(stats.Cgroups.MemoryStats.KernelUsage.Usage, ShouldEqual, 111) 156 | So(stats.Cgroups.MemoryStats.KernelUsage.MaxUsage, ShouldEqual, 222) 157 | So(stats.Cgroups.MemoryStats.KernelUsage.Failcnt, ShouldEqual, 333) 158 | }) 159 | } 160 | 161 | func (suite *MemorySuite) TestMemorySwapUsageGetStats() { 162 | Convey("", suite.T(), func() { 163 | stats := container.NewStatistics() 164 | opts := container.GetStatOpt{"cgroup_path": suite.memoryPath} 165 | memory := SwapMemUsage{} 166 | err := memory.GetStats(stats, opts) 167 | So(err, ShouldBeNil) 168 | So(stats.Cgroups.MemoryStats.SwapUsage.Usage, ShouldEqual, 111) 169 | So(stats.Cgroups.MemoryStats.SwapUsage.MaxUsage, ShouldEqual, 222) 170 | So(stats.Cgroups.MemoryStats.SwapUsage.Failcnt, ShouldEqual, 333) 171 | }) 172 | } 173 | 174 | func (suite *MemorySuite) writeFile(path string, content []byte) { 175 | err := ioutil.WriteFile(path, content, 0700) 176 | if err != nil { 177 | suite.T().Fatal(err) 178 | } 179 | } 180 | -------------------------------------------------------------------------------- /container/cgroupfs/pids.go: -------------------------------------------------------------------------------- 1 | // +build linux 2 | 3 | /* 4 | http://www.apache.org/licenses/LICENSE-2.0.txt 5 | 6 | 7 | Copyright 2015 Intel Corporation 8 | 9 | Licensed under the Apache License, Version 2.0 (the "License"); 10 | you may not use this file except in compliance with the License. 11 | You may obtain a copy of the License at 12 | 13 | http://www.apache.org/licenses/LICENSE-2.0 14 | 15 | Unless required by applicable law or agreed to in writing, software 16 | distributed under the License is distributed on an "AS IS" BASIS, 17 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 18 | See the License for the specific language governing permissions and 19 | limitations under the License. 
20 | */ 21 | 22 | package cgroupfs 23 | 24 | import ( 25 | "path/filepath" 26 | "strconv" 27 | "strings" 28 | 29 | "github.com/intelsdi-x/snap-plugin-collector-docker/container" 30 | ) 31 | 32 | // Pids implements StatGetter interface 33 | type Pids struct{} 34 | 35 | // GetStats reads pids metrics from Pids Group 36 | func (p *Pids) GetStats(stats *container.Statistics, opts container.GetStatOpt) error { 37 | path, err := opts.GetStringValue("cgroup_path") 38 | if err != nil { 39 | return err 40 | } 41 | 42 | current, err := parseIntValue(filepath.Join(path, "pids.current")) 43 | if err != nil { 44 | return err 45 | } 46 | 47 | limit, err := parseStrValue(filepath.Join(path, "pids.max")) 48 | if err != nil { 49 | return err 50 | } 51 | 52 | stats.Cgroups.PidsStats.Current = current 53 | 54 | var max uint64 55 | if limit != "max" { 56 | max, err = strconv.ParseUint(strings.TrimSpace(string(limit)), 10, 64) 57 | if err != nil { 58 | return err 59 | } 60 | } 61 | stats.Cgroups.PidsStats.Limit = max 62 | 63 | return nil 64 | } 65 | -------------------------------------------------------------------------------- /container/cgroupfs/pids_test.go: -------------------------------------------------------------------------------- 1 | // +build small 2 | 3 | /* 4 | http://www.apache.org/licenses/LICENSE-2.0.txt 5 | 6 | 7 | Copyright 2017 Intel Corporation 8 | 9 | Licensed under the Apache License, Version 2.0 (the "License"); 10 | you may not use this file except in compliance with the License. 11 | You may obtain a copy of the License at 12 | 13 | http://www.apache.org/licenses/LICENSE-2.0 14 | 15 | Unless required by applicable law or agreed to in writing, software 16 | distributed under the License is distributed on an "AS IS" BASIS, 17 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 18 | See the License for the specific language governing permissions and 19 | limitations under the License. 20 | */ 21 | 22 | package cgroupfs 23 | 24 | import ( 25 | "io/ioutil" 26 | "os" 27 | "path/filepath" 28 | "testing" 29 | 30 | . 
"github.com/smartystreets/goconvey/convey" 31 | "github.com/stretchr/testify/suite" 32 | 33 | "github.com/intelsdi-x/snap-plugin-collector-docker/container" 34 | ) 35 | 36 | type PidsSuite struct { 37 | suite.Suite 38 | pidsPath string 39 | } 40 | 41 | func (suite *PidsSuite) SetupSuite() { 42 | suite.pidsPath = "/tmp/pids_test" 43 | err := os.Mkdir(suite.pidsPath, 0700) 44 | if err != nil { 45 | suite.T().Fatal(err) 46 | } 47 | 48 | suite.writeFile(filepath.Join(suite.pidsPath, "pids.current"), []byte("1")) 49 | suite.writeFile(filepath.Join(suite.pidsPath, "pids.max"), []byte("2")) 50 | 51 | } 52 | 53 | func (suite *PidsSuite) TearDownSuite() { 54 | err := os.RemoveAll(suite.pidsPath) 55 | if err != nil { 56 | suite.T().Fatal(err) 57 | } 58 | } 59 | 60 | func TestPidsSuite(t *testing.T) { 61 | suite.Run(t, &PidsSuite{}) 62 | } 63 | 64 | func (suite *PidsSuite) TestPidsGetStats() { 65 | Convey("collecting data from cpuset controller", suite.T(), func() { 66 | stats := container.NewStatistics() 67 | opts := container.GetStatOpt{"cgroup_path": suite.pidsPath} 68 | pids := Pids{} 69 | err := pids.GetStats(stats, opts) 70 | So(err, ShouldBeNil) 71 | So(stats.Cgroups.PidsStats.Current, ShouldEqual, 1) 72 | So(stats.Cgroups.PidsStats.Limit, ShouldEqual, 2) 73 | }) 74 | } 75 | 76 | func (suite *PidsSuite) writeFile(path string, content []byte) { 77 | err := ioutil.WriteFile(path, content, 0700) 78 | if err != nil { 79 | suite.T().Fatal(err) 80 | } 81 | } 82 | -------------------------------------------------------------------------------- /container/cgroupfs/utils.go: -------------------------------------------------------------------------------- 1 | // +build linux 2 | 3 | /* 4 | http://www.apache.org/licenses/LICENSE-2.0.txt 5 | 6 | 7 | Copyright 2015 Intel Corporation 8 | 9 | Licensed under the Apache License, Version 2.0 (the "License"); 10 | you may not use this file except in compliance with the License. 11 | You may obtain a copy of the License at 12 | 13 | http://www.apache.org/licenses/LICENSE-2.0 14 | 15 | Unless required by applicable law or agreed to in writing, software 16 | distributed under the License is distributed on an "AS IS" BASIS, 17 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 18 | See the License for the specific language governing permissions and 19 | limitations under the License. 
20 | */ 21 | 22 | package cgroupfs 23 | 24 | import ( 25 | "fmt" 26 | "io/ioutil" 27 | "strconv" 28 | "strings" 29 | ) 30 | 31 | func parseEntry(line string) (name string, value uint64, err error) { 32 | fields := strings.Fields(line) 33 | if len(fields) != 2 { 34 | return name, value, fmt.Errorf("Invalid format: %s", line) 35 | } 36 | 37 | value, err = strconv.ParseUint(fields[1], 10, 64) 38 | if err != nil { 39 | return name, value, err 40 | } 41 | 42 | return fields[0], value, nil 43 | } 44 | 45 | func parseIntValue(file string) (uint64, error) { 46 | raw, err := ioutil.ReadFile(file) 47 | if err != nil { 48 | return 0, err 49 | } 50 | 51 | return strconv.ParseUint(strings.TrimSpace(string(raw)), 10, 64) 52 | 53 | } 54 | 55 | func parseStrValue(file string) (string, error) { 56 | raw, err := ioutil.ReadFile(file) 57 | if err != nil { 58 | return "", err 59 | } 60 | 61 | return strings.TrimSpace(string(raw)), nil 62 | 63 | } 64 | -------------------------------------------------------------------------------- /container/client.go: -------------------------------------------------------------------------------- 1 | // +build linux 2 | 3 | /* 4 | http://www.apache.org/licenses/LICENSE-2.0.txt 5 | 6 | 7 | Copyright 2017 Intel Corporation 8 | 9 | Licensed under the Apache License, Version 2.0 (the "License"); 10 | you may not use this file except in compliance with the License. 11 | You may obtain a copy of the License at 12 | 13 | http://www.apache.org/licenses/LICENSE-2.0 14 | 15 | Unless required by applicable law or agreed to in writing, software 16 | distributed under the License is distributed on an "AS IS" BASIS, 17 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 18 | See the License for the specific language governing permissions and 19 | limitations under the License. 20 | */ 21 | 22 | package container 23 | 24 | import ( 25 | "bufio" 26 | "fmt" 27 | "os" 28 | "path/filepath" 29 | "strconv" 30 | "strings" 31 | "sync" 32 | "time" 33 | 34 | "github.com/fsouza/go-dockerclient" 35 | "github.com/intelsdi-x/snap-plugin-collector-docker/config" 36 | log "github.com/sirupsen/logrus" 37 | ) 38 | 39 | const ( 40 | dockerVersionKey string = "Version" 41 | ) 42 | 43 | // DockerClientInterface provides methods i.a. for interaction with the docker API. 
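// A minimal usage sketch, not part of the original source; the endpoint and the
// container ID are illustrative only (the endpoint matches the
// `unix:///var/run/docker.sock` default referenced in the comments below):
//
//	client, err := NewDockerClient("unix:///var/run/docker.sock")
//	if err != nil {
//		// the docker daemon could not be reached or its version could not be read
//	}
//	containers, err := client.ListContainersAsMap()
//	info, err := client.InspectContainer("27fa0900fe22")
//
// DockerClient (defined below) is the concrete implementation of this interface.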
44 | type DockerClientInterface interface { 45 | ListContainersAsMap() (map[string]*ContainerData, error) 46 | InspectContainer(string) (*docker.Container, error) 47 | FindCgroupMountpoint(string, string) (string, error) 48 | FindControllerMountpoint(string, string, string) (string, error) 49 | GetDockerParams(...string) (map[string]string, error) 50 | } 51 | 52 | // DockerClient holds a go-dockerclient instance ready for communication with the server endpoint `unix:///var/run/docker.sock`, 53 | // and a cache instance used to store the output of docker container inspect (to avoid executing the inspect request multiple times, it is called only once per container) 54 | type DockerClient struct { 55 | cl *docker.Client 56 | inspectCache map[string]*docker.Container 57 | inspectMutex sync.Mutex 58 | } 59 | 60 | type deviceInfo struct { 61 | device string 62 | major string 63 | minor string 64 | } 65 | 66 | // NewDockerClient returns a DockerClient instance ready for communication with the server endpoint `unix:///var/run/docker.sock` 67 | func NewDockerClient(endpoint string) (*DockerClient, error) { 68 | client, err := docker.NewClient(endpoint) 69 | if err != nil { 70 | return nil, fmt.Errorf("Cannot initialize docker client instance with the given endpoint `%s`, err=%v", endpoint, err) 71 | } 72 | 73 | err = client.Ping() 74 | if err != nil { 75 | return nil, err 76 | } 77 | 78 | dc := &DockerClient{ 79 | cl: client, 80 | inspectCache: map[string]*docker.Container{}, 81 | } 82 | 83 | config.DockerVersion, err = dc.version() 84 | if err != nil { 85 | return nil, err 86 | } 87 | 88 | return dc, nil 89 | } 90 | 91 | // InspectContainer returns detailed information about a running container 92 | func (dc *DockerClient) InspectContainer(id string) (*docker.Container, error) { 93 | dc.inspectMutex.Lock() 94 | defer dc.inspectMutex.Unlock() 95 | 96 | // check if the inspect info is already stored in inspectCache 97 | if info, haveInfo := dc.inspectCache[id]; haveInfo { 98 | return info, nil 99 | } 100 | 101 | info, err := dc.cl.InspectContainer(id) 102 | if err != nil { 103 | return nil, err 104 | } 105 | dc.inspectCache[id] = info 106 | 107 | return info, nil 108 | } 109 | 110 | // GetDockerParams returns a map of the requested parameter/value pairs from the running docker engine 111 | func (dc *DockerClient) GetDockerParams(params ...string) (map[string]string, error) { 112 | env, err := dc.cl.Info() 113 | if err != nil { 114 | return nil, err 115 | } 116 | 117 | vals := make(map[string]string, len(params)) 118 | 119 | for _, param := range params { 120 | if !env.Exists(param) { 121 | return nil, fmt.Errorf("%s not found", param) 122 | } 123 | vals[param] = env.Get(param) 124 | } 125 | 126 | return vals, nil 127 | } 128 | 129 | // GetShortID returns short container ID (12 chars) 130 | func GetShortID(dockerID string) (string, error) { 131 | if dockerID == "root" { 132 | return dockerID, nil 133 | } 134 | 135 | if len(dockerID) < 12 { 136 | return "", fmt.Errorf("Docker id %v is too short (the length of id should equal at least 12)", dockerID) 137 | } 138 | 139 | return dockerID[:12], nil 140 | } 141 | 142 | // ListContainersAsMap returns a list of all available docker containers and basic information about them (status, uptime, etc.)
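// The returned map is keyed by the short (12-character) container ID obtained via
// GetShortID; an extra "root" entry (ID "/") is always appended in addition to the
// running containers.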
143 | func (dc *DockerClient) ListContainersAsMap() (map[string]*ContainerData, error) { 144 | containers := make(map[string]*ContainerData) 145 | 146 | containerList, err := dc.cl.ListContainers(docker.ListContainersOptions{}) 147 | 148 | if err != nil { 149 | return nil, err 150 | } 151 | 152 | for _, c := range containerList { 153 | shortID, err := GetShortID(c.ID) 154 | if err != nil { 155 | return nil, err 156 | } 157 | 158 | spec := Specification{ 159 | Status: c.Status, 160 | Created: time.Unix(c.Created, 0).Format("2006-01-02T15:04:05Z07:00"), 161 | Image: c.Image, 162 | SizeRw: c.SizeRw, 163 | SizeRootFs: c.SizeRootFs, 164 | Labels: c.Labels, 165 | } 166 | 167 | containerData := ContainerData{ 168 | ID: c.ID, 169 | Specification: spec, 170 | Stats: NewStatistics(), 171 | } 172 | 173 | containers[shortID] = &containerData 174 | } 175 | 176 | if len(containers) == 0 { 177 | log.WithFields(log.Fields{ 178 | "block": "client", 179 | "function": "ListContainersAsMap", 180 | }).Warnf("no running containers on host") 181 | } 182 | 183 | containers["root"] = &ContainerData{ID: "/", Stats: NewStatistics()} 184 | 185 | return containers, nil 186 | } 187 | 188 | // FindCgroupMountpoint returns cgroup mountpoint of a given subsystem 189 | func (dc *DockerClient) FindCgroupMountpoint(procfs string, subsystem string) (string, error) { 190 | f, err := os.Open(filepath.Join(procfs, "self/mountinfo")) 191 | if err != nil { 192 | return "", err 193 | } 194 | defer f.Close() 195 | 196 | scanner := bufio.NewScanner(f) 197 | for scanner.Scan() { 198 | txt := scanner.Text() 199 | fields := strings.Fields(txt) 200 | for _, opt := range strings.Split(fields[len(fields)-1], ",") { 201 | if opt == subsystem { 202 | return fields[4], nil 203 | } 204 | } 205 | } 206 | if err := scanner.Err(); err != nil { 207 | return "", err 208 | } 209 | 210 | return "", fmt.Errorf("Cgroup {%s} mountpoint not found", subsystem) 211 | } 212 | 213 | // FindControllerMountpoint returns mountpoints of a given controller and container PID 214 | func (dc *DockerClient) FindControllerMountpoint(subsystem, pid, procfs string) (string, error) { 215 | f, err := os.Open(filepath.Join(procfs, pid, "mountinfo")) 216 | if err != nil { 217 | return "", err 218 | } 219 | defer f.Close() 220 | 221 | scanner := bufio.NewScanner(f) 222 | 223 | for scanner.Scan() { 224 | txt := scanner.Text() 225 | fields := strings.Fields(txt) 226 | for _, opt := range strings.Split(fields[len(fields)-1], ",") { 227 | if opt == subsystem { 228 | return filepath.Join(filepath.Dir(fields[4]), subsystem, fields[3]), nil 229 | } 230 | } 231 | } 232 | if err := scanner.Err(); err != nil { 233 | return "", err 234 | } 235 | 236 | return "", fmt.Errorf("can't find mountpoint for controller {%s} for container pid {%s}", subsystem, pid) 237 | 238 | } 239 | 240 | // version returns version of docker engine 241 | func (dc *DockerClient) version() (version []int, _ error) { 242 | version = []int{0, 0} 243 | env, err := dc.cl.Version() 244 | if err != nil { 245 | return version, err 246 | } 247 | parseInt := func(str string, defVal int) int { 248 | val, err := strconv.ParseInt(str, 10, 64) 249 | if err != nil { 250 | return defVal 251 | } 252 | return int(val) 253 | } 254 | 255 | for _, kv := range *env { 256 | kvs := strings.Split(kv, "=") 257 | if len(kvs) < 2 { 258 | return nil, fmt.Errorf("Cannot retrive the version of docker engine, is `%v`, expected e.g.`Version = 1.10`", kv) 259 | } 260 | 261 | if kvs[0] != dockerVersionKey { 262 | continue 263 | } 264 | 265 | 
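// kvs[1] holds the reported engine version, e.g. `1.10`; only the major and
// minor segments are kept, so that example yields []int{1, 10}.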
versionSplit := strings.Split(kvs[1], ".") 266 | if len(versionSplit) < 2 { 267 | return nil, fmt.Errorf("Invalid format of docker engine version, is `%v`, expected e.g. `1.10", kvs[1]) 268 | } 269 | version := []int{parseInt(versionSplit[0], 0), parseInt(versionSplit[1], 0)} 270 | return version, nil 271 | } 272 | return version, nil 273 | } 274 | -------------------------------------------------------------------------------- /container/fs/fs_mocks.go: -------------------------------------------------------------------------------- 1 | /* 2 | http://www.apache.org/licenses/LICENSE-2.0.txt 3 | 4 | 5 | Copyright 2017 Intel Corporation 6 | 7 | Licensed under the Apache License, Version 2.0 (the "License"); 8 | you may not use this file except in compliance with the License. 9 | You may obtain a copy of the License at 10 | 11 | http://www.apache.org/licenses/LICENSE-2.0 12 | 13 | Unless required by applicable law or agreed to in writing, software 14 | distributed under the License is distributed on an "AS IS" BASIS, 15 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 16 | See the License for the specific language governing permissions and 17 | limitations under the License. 18 | 19 | This file incorporates work covered by the following copyright and permission notice: 20 | Copyright 2014 Google Inc. All Rights Reserved. 21 | Licensed under the Apache License, Version 2.0 (the "License"); you may not use 22 | this file except in compliance with the License. You may obtain a copy of the License at 23 | 24 | http://www.apache.org/licenses/LICENSE-2.0 25 | 26 | Unless required by applicable law or agreed to in writing, software 27 | distributed under the License is distributed on an "AS IS" BASIS, 28 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 29 | See the License for the specific language governing permissions and 30 | limitations under the License. 
31 | */ 32 | package fs 33 | 34 | import ( 35 | "github.com/moby/moby/pkg/mount" 36 | ) 37 | 38 | var mockedMounts = []*mount.Info{ 39 | { 40 | ID: 19, 41 | Parent: 25, 42 | Major: 0, 43 | Minor: 18, 44 | Root: "/", 45 | Mountpoint: "/sys", 46 | Opts: "rw,nosuid,nodev,noexec,relatime", 47 | Optional: "shared:7", 48 | Fstype: "sysfs", 49 | Source: "sysfs", 50 | VfsOpts: "rw", 51 | }, 52 | { 53 | ID: 20, 54 | Parent: 25, 55 | Major: 0, 56 | Minor: 4, 57 | Root: "/", 58 | Mountpoint: "/proc", 59 | Opts: "rw,nosuid,nodev,noexec,realtime", 60 | Optional: "shared:12", 61 | Fstype: "proc", 62 | Source: "proc", 63 | VfsOpts: "rw", 64 | }, 65 | { 66 | ID: 21, 67 | Parent: 25, 68 | Major: 0, 69 | Minor: 6, 70 | Root: "/", 71 | Mountpoint: "/dev", 72 | Opts: "rw,nosuid,relatime", 73 | Optional: "shared:2", 74 | Fstype: "devtmpfs", 75 | Source: "udev", 76 | VfsOpts: "rw,size=16392172k,nr_inodes=4098043,mode=755", 77 | }, 78 | { 79 | ID: 22, 80 | Parent: 21, 81 | Major: 0, 82 | Minor: 14, 83 | Root: "/", 84 | Mountpoint: "/dev/pts", 85 | Opts: "rw,nosuid,noexec,relatime", 86 | Optional: "shared:3", 87 | Fstype: "devpts", 88 | Source: "devpts", 89 | VfsOpts: "rw,gid=5,mode=620,ptmxmode=000", 90 | }, 91 | { 92 | ID: 23, 93 | Parent: 25, 94 | Major: 0, 95 | Minor: 19, 96 | Root: "/", 97 | Mountpoint: "/run", 98 | Opts: "rw,nosuid,noexec,relatime", 99 | Optional: "shared:5", 100 | Fstype: "tmpfs", 101 | Source: "tmpfs", 102 | VfsOpts: "rw,size=3282484k,mode=755", 103 | }, 104 | { 105 | ID: 25, 106 | Parent: 0, 107 | Major: 8, 108 | Minor: 1, 109 | Root: "/", 110 | Mountpoint: "/", 111 | Opts: "rw,relatime", 112 | Optional: "shared:1", 113 | Fstype: "ext4", 114 | Source: "/dev/sda1", 115 | VfsOpts: "rw,errors=remount-ro,data=ordered", 116 | }, 117 | { 118 | ID: 296, 119 | Parent: 23, 120 | Major: 0, 121 | Minor: 69, 122 | Root: "/", 123 | Mountpoint: "/run/cgmanager/fs", 124 | Opts: "rw,relatime", 125 | Optional: "shared:155", 126 | Fstype: "tmpfs", 127 | Source: "cgmfs", 128 | VfsOpts: "rw,size=100k,mode=755", 129 | }, 130 | { 131 | ID: 142, 132 | Parent: 25, 133 | Major: 8, 134 | Minor: 1, 135 | Root: "/tmp/var/lib/docker/aufs", 136 | Mountpoint: "/tmp/var/lib/docker/aufs", 137 | Opts: "rw,relatime", 138 | Fstype: "ext4", 139 | Source: "/dev/sda1", 140 | VfsOpts: "rw,errors=remount-ro,data=ordered", 141 | }, 142 | { 143 | ID: 152, 144 | Parent: 142, 145 | Major: 0, 146 | Minor: 42, 147 | Root: "/", 148 | Mountpoint: "/tmp/aufs/diff/27fa0900fe22", 149 | Opts: "rw,relatime", 150 | Fstype: "ext4", 151 | Source: "/dev/sda1", 152 | VfsOpts: "rw,si=dd417b17e9d4a58b,dio,dirperm1", 153 | }, 154 | { 155 | ID: 153, 156 | Parent: 25, 157 | Major: 0, 158 | Minor: 43, 159 | Root: "/", 160 | Mountpoint: "/tmp/var/lib/docker/containers/27fa0900fe22/shm", 161 | Opts: "rw,nosuid,nodev,noexec,relatime", 162 | Optional: "shared:129", 163 | Fstype: "tmpfs", 164 | Source: "shm", 165 | VfsOpts: "rw,size=65536k", 166 | }, 167 | { 168 | ID: 159, 169 | Parent: 25, 170 | Major: 0, 171 | Minor: 44, 172 | Root: "/", 173 | Mountpoint: "/tmp/var/lib/docker/containers/27fa0900fe22/zfs", 174 | Opts: "rw,nosuid,nodev,noexec,relatime", 175 | Optional: "shared:129", 176 | Fstype: "zfs", 177 | Source: "snapzfs", 178 | VfsOpts: "rw,size=65536k", 179 | }, 180 | } 181 | -------------------------------------------------------------------------------- /container/fs/fs_test.go: -------------------------------------------------------------------------------- 1 | // +build small 2 | 3 | /* 4 | http://www.apache.org/licenses/LICENSE-2.0.txt 5 | 6 | 
7 | Copyright 2017 Intel Corporation 8 | 9 | Licensed under the Apache License, Version 2.0 (the "License"); 10 | you may not use this file except in compliance with the License. 11 | You may obtain a copy of the License at 12 | 13 | http://www.apache.org/licenses/LICENSE-2.0 14 | 15 | Unless required by applicable law or agreed to in writing, software 16 | distributed under the License is distributed on an "AS IS" BASIS, 17 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 18 | See the License for the specific language governing permissions and 19 | limitations under the License. 20 | 21 | This file incorporates work covered by the following copyright and permission notice: 22 | Copyright 2014 Google Inc. All Rights Reserved. 23 | Licensed under the Apache License, Version 2.0 (the "License"); you may not use 24 | this file except in compliance with the License. You may obtain a copy of the License at 25 | 26 | http://www.apache.org/licenses/LICENSE-2.0 27 | 28 | Unless required by applicable law or agreed to in writing, software 29 | distributed under the License is distributed on an "AS IS" BASIS, 30 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 31 | See the License for the specific language governing permissions and 32 | limitations under the License. 33 | */ 34 | package fs 35 | 36 | import ( 37 | "io/ioutil" 38 | "os" 39 | "path/filepath" 40 | "syscall" 41 | "testing" 42 | 43 | "github.com/moby/moby/pkg/mount" 44 | . "github.com/smartystreets/goconvey/convey" 45 | "github.com/stretchr/testify/suite" 46 | 47 | "github.com/intelsdi-x/snap-plugin-collector-docker/container" 48 | ) 49 | 50 | const ( 51 | procContents = `8 1 sdd2 40 0 280 223 7 0 22 108 0 330 330` 52 | aufsContents = `0 42 sdd2 40 0 280 223 7 0 22 108 0 330 330` 53 | overlayContents = `8 1 sdd2 40 0 280 223 7 0 22 108 0 330 330` 54 | usageContent = `6379736 /tmp/usage` 55 | usageContentInvalid = `six /tmp/usage` 56 | invalidContents = `8 1 sdd2 40 0 280 223 7 0 108 0 330 330` 57 | invalidContentsParse = `a 1 sdd2 40 0 280 223 7 0 22 108 0 330 330` 58 | ) 59 | 60 | type FsSuite struct { 61 | suite.Suite 62 | fsPath string 63 | } 64 | 65 | func TestFsSuite(t *testing.T) { 66 | suite.Run(t, &FsSuite{}) 67 | } 68 | 69 | func (s *FsSuite) SetupSuite() { 70 | err := os.MkdirAll("/tmp/aufs/diff/27fa0900fe22", 0700) 71 | if err != nil { 72 | s.T().Fatal(err) 73 | } 74 | err = os.MkdirAll("/tmp/overlay/27fa0900fe22", 0700) 75 | if err != nil { 76 | s.T().Fatal(err) 77 | } 78 | err = os.Mkdir("/tmp/overlay/root", 0700) 79 | if err != nil { 80 | s.T().Fatal(err) 81 | } 82 | err = os.Mkdir("/tmp/aufs/root", 0700) 83 | if err != nil { 84 | s.T().Fatal(err) 85 | } 86 | err = os.Mkdir("/tmp/proc", 0700) 87 | if err != nil { 88 | s.T().Fatal(err) 89 | } 90 | err = os.MkdirAll("/tmp/var/lib/docker/containers/27fa0900fe22", 0700) 91 | if err != nil { 92 | s.T().Fatal(err) 93 | } 94 | err = os.MkdirAll("/tmp/var/lib/docker/aufs/diff", 0700) 95 | if err != nil { 96 | s.T().Fatal(err) 97 | } 98 | err = os.MkdirAll("/tmp/var/lib/docker/overlay", 0700) 99 | if err != nil { 100 | s.T().Fatal(err) 101 | } 102 | err = os.MkdirAll("/tmp/var/lib/docker/zfs", 0700) 103 | if err != nil { 104 | s.T().Fatal(err) 105 | } 106 | err = os.MkdirAll("/tmp/var/lib/docker/zfs", 0700) 107 | if err != nil { 108 | s.T().Fatal(err) 109 | } 110 | err = os.MkdirAll("/tmp/test/dev/sdf100", 0700) 111 | if err != nil { 112 | s.T().Fatal(err) 113 | } 114 | s.writeFile(filepath.Join("/tmp/test", "usage"), 
[]byte(usageContent)) 115 | s.writeFile(filepath.Join("/tmp/test", "usage_invalid_parse"), []byte(usageContentInvalid)) 116 | s.writeFile(filepath.Join("/tmp/proc", "diskstats"), []byte(procContents)) 117 | s.writeFile(filepath.Join("/tmp/proc", "diskstat_invalid_1"), []byte(invalidContents)) 118 | s.writeFile(filepath.Join("/tmp/proc", "diskstat_invalid_2"), []byte(invalidContentsParse)) 119 | s.writeFile(filepath.Join("/tmp/overlay/27fa0900fe22", "diskstats"), []byte(overlayContents)) 120 | s.writeFile(filepath.Join("/tmp/aufs/diff/27fa0900fe22", "diskstats"), []byte(aufsContents)) 121 | buf := new(syscall.Stat_t) 122 | err = syscall.Stat("/tmp/test", buf) 123 | if err != nil { 124 | s.T().Fatal(err) 125 | } 126 | mockValidMount := mount.Info{ 127 | ID: 250, 128 | Parent: 0, 129 | Major: int(major(buf.Dev)), 130 | Minor: int(minor(buf.Dev)), 131 | Root: "/tmp/test", 132 | Mountpoint: "/", 133 | Opts: "rw,relatime", 134 | Optional: "shared:1", 135 | Fstype: "ext4", 136 | Source: "/tmp/test/dev/sdf100", 137 | VfsOpts: "rw,errors=remount-ro,data=ordered", 138 | } 139 | mockedMounts = append(mockedMounts, &mockValidMount) 140 | } 141 | 142 | func (s *FsSuite) TearDownSuite() { 143 | err := os.RemoveAll("/tmp/aufs/") 144 | if err != nil { 145 | s.T().Fatal(err) 146 | } 147 | err = os.RemoveAll("/tmp/overlay/") 148 | if err != nil { 149 | s.T().Fatal(err) 150 | } 151 | err = os.RemoveAll("/tmp/proc/") 152 | if err != nil { 153 | s.T().Fatal(err) 154 | } 155 | err = os.RemoveAll("/tmp/var/") 156 | if err != nil { 157 | s.T().Fatal(err) 158 | } 159 | err = os.RemoveAll("/tmp/test/") 160 | if err != nil { 161 | s.T().Fatal(err) 162 | } 163 | } 164 | 165 | func (s *FsSuite) TestFS() { 166 | Convey("Check usage parser valid options", s.T(), func() { 167 | usage, err := diskUsage("cat", []string{"/tmp/test/usage"}) 168 | So(usage, ShouldEqual, 6379736) 169 | So(err, ShouldBeNil) 170 | }) 171 | Convey("Check usage parser invalid options", s.T(), func() { 172 | usage, err := diskUsage("cat", []string{"/tmp/test/usage_invalid"}) 173 | So(usage, ShouldEqual, 0) 174 | So(err, ShouldNotBeNil) 175 | }) 176 | Convey("Check usage parser invalid data", s.T(), func() { 177 | usage, err := diskUsage("cat", []string{"/tmp/test/usage_invalid_parse"}) 178 | So(usage, ShouldEqual, 0) 179 | So(err, ShouldNotBeNil) 180 | }) 181 | Convey("Check getDiskStatsMap", s.T(), func() { 182 | dsm, err := getDiskStatsMap("/tmp/proc/diskstats") 183 | So(err, ShouldBeNil) 184 | So(dsm[DeviceId{Major: 8, Minor: 1}].ReadsCompleted, ShouldEqual, 40) 185 | So(dsm[DeviceId{Major: 8, Minor: 1}].ReadsMerged, ShouldEqual, 0) 186 | So(dsm[DeviceId{Major: 8, Minor: 1}].SectorsRead, ShouldEqual, 280) 187 | So(dsm[DeviceId{Major: 8, Minor: 1}].ReadTime, ShouldEqual, 223) 188 | So(dsm[DeviceId{Major: 8, Minor: 1}].WritesCompleted, ShouldEqual, 7) 189 | So(dsm[DeviceId{Major: 8, Minor: 1}].WritesMerged, ShouldEqual, 0) 190 | So(dsm[DeviceId{Major: 8, Minor: 1}].SectorsWritten, ShouldEqual, 22) 191 | So(dsm[DeviceId{Major: 8, Minor: 1}].WriteTime, ShouldEqual, 108) 192 | So(dsm[DeviceId{Major: 8, Minor: 1}].IoInProgress, ShouldEqual, 0) 193 | So(dsm[DeviceId{Major: 8, Minor: 1}].IoTime, ShouldEqual, 330) 194 | So(dsm[DeviceId{Major: 8, Minor: 1}].WeightedIoTime, ShouldEqual, 330) 195 | }) 196 | Convey("Check getDiskStatsMap", s.T(), func() { 197 | _, err := getDiskStatsMap("/tmp/proc/diskstat") 198 | So(err, ShouldBeNil) 199 | }) 200 | Convey("Check getDiskStatsMap invalid file content (too short)", s.T(), func() { 201 | dsm, err := 
getDiskStatsMap("/tmp/proc/diskstat_invalid_1") 202 | So(dsm, ShouldBeNil) 203 | So(err, ShouldNotBeNil) 204 | }) 205 | Convey("Check getDiskStatsMap invalid file content (non numeric value)", s.T(), func() { 206 | dsm, err := getDiskStatsMap("/tmp/proc/diskstat_invalid_2") 207 | So(dsm, ShouldBeNil) 208 | So(err, ShouldNotBeNil) 209 | }) 210 | Convey("FS tests", s.T(), func() { 211 | fsInfo, err := newFsInfo("test") 212 | So(fsInfo, ShouldNotBeNil) 213 | So(err, ShouldBeNil) 214 | Convey("GetDirFsDevice", func() { 215 | Convey("Valid path", func() { 216 | dir, err := fsInfo.GetDirFsDevice("/tmp/test") 217 | So(dir, ShouldNotBeNil) 218 | So(err, ShouldBeNil) 219 | }) 220 | Convey("Invalid path", func() { 221 | dir, err := fsInfo.GetDirFsDevice("/invalid") 222 | So(dir, ShouldBeNil) 223 | So(err, ShouldNotBeNil) 224 | }) 225 | }) 226 | Convey("GetGlobalFsInfo", func() { 227 | Convey("Valid path", func() { 228 | _, err := fsInfo.GetGlobalFsInfo("/tmp/proc/") 229 | So(err, ShouldBeNil) 230 | }) 231 | }) 232 | 233 | }) 234 | usage := make(map[string]uint64) 235 | du := DiskUsageCollector{DiskUsage: usage} 236 | Convey("GetStats tests", s.T(), func() { 237 | du.DiskUsage["/tmp"] = 2000 238 | du.DiskUsage["/tmp/aufs/diff/27fa0900fe22"] = 250 239 | du.DiskUsage["/tmp/overlay/27fa0900fe22"] = 929 240 | Convey("Check invalid root dir", func() { 241 | stats := container.NewStatistics() 242 | err := du.GetStats(stats, container.GetStatOpt{"container_id": "27fa0900fe22", "container_drv": "aufs", "procfs": "/tmp/proc", "root_dir": "/invalid_dir"}) 243 | // Even if root_dir is invalid there is no error returned. 244 | // But there is logged message started with: `Os.Stat failed` 245 | So(err, ShouldBeNil) 246 | }) 247 | Convey("Check valid aufs driver", func() { 248 | stats := container.NewStatistics() 249 | err := du.GetStats(stats, container.GetStatOpt{"container_id": "27fa0900fe22", "container_drv": "aufs", "procfs": "/tmp/proc", "root_dir": "/tmp"}) 250 | So(err, ShouldBeNil) 251 | }) 252 | Convey("Check valid overlay driver", func() { 253 | stats := container.NewStatistics() 254 | err := du.GetStats(stats, container.GetStatOpt{"container_id": "27fa0900fe22", "container_drv": "overlay", "procfs": "/tmp/proc", "root_dir": "/tmp"}) 255 | So(err, ShouldBeNil) 256 | }) 257 | Convey("Check invalid driver", func() { 258 | stats := container.NewStatistics() 259 | err := du.GetStats(stats, container.GetStatOpt{"container_id": "27fa0900fe22", "container_drv": "ext", "procfs": "/tmp/proc", "root_dir": "/tmp"}) 260 | So(err, ShouldNotBeNil) 261 | }) 262 | Convey("Check root valid aufs driver", func() { 263 | stats := container.NewStatistics() 264 | err := du.GetStats(stats, container.GetStatOpt{"container_id": "root", "container_drv": "aufs", "procfs": "/tmp/proc", "root_dir": "/tmp"}) 265 | So(err, ShouldBeNil) 266 | }) 267 | Convey("Check root valid overlay driver", func() { 268 | stats := container.NewStatistics() 269 | err := du.GetStats(stats, container.GetStatOpt{"container_id": "root", "container_drv": "overlay", "procfs": "/tmp/proc", "root_dir": "/tmp"}) 270 | So(err, ShouldBeNil) 271 | }) 272 | }) 273 | } 274 | 275 | func (s *FsSuite) writeFile(path string, content []byte) { 276 | err := ioutil.WriteFile(path, content, 0700) 277 | if err != nil { 278 | s.T().Fatal(err) 279 | } 280 | } 281 | -------------------------------------------------------------------------------- /container/fs/types.go: -------------------------------------------------------------------------------- 1 | /* 2 | 
http://www.apache.org/licenses/LICENSE-2.0.txt 3 | 4 | 5 | Copyright 2016 Intel Corporation 6 | 7 | Licensed under the Apache License, Version 2.0 (the "License"); 8 | you may not use this file except in compliance with the License. 9 | You may obtain a copy of the License at 10 | 11 | http://www.apache.org/licenses/LICENSE-2.0 12 | 13 | Unless required by applicable law or agreed to in writing, software 14 | distributed under the License is distributed on an "AS IS" BASIS, 15 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 16 | See the License for the specific language governing permissions and 17 | limitations under the License. 18 | 19 | This file incorporates work covered by the following copyright and permission notice: 20 | Copyright 2014 Google Inc. All Rights Reserved. 21 | Licensed under the Apache License, Version 2.0 (the "License"); you may not use 22 | this file except in compliance with the License. You may obtain a copy of the License at 23 | 24 | http://www.apache.org/licenses/LICENSE-2.0 25 | 26 | Unless required by applicable law or agreed to in writing, software 27 | distributed under the License is distributed on an "AS IS" BASIS, 28 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 29 | See the License for the specific language governing permissions and 30 | limitations under the License. 31 | */ 32 | 33 | // Package contains code from Google cAdvisor (https://github.com/google/cadvisor) with following: 34 | // - structures holding information about disk usage 35 | 36 | package fs 37 | 38 | import "time" 39 | 40 | const ( 41 | ZFS FsType = "zfs" 42 | DeviceMapper FsType = "devicemapper" 43 | VFS FsType = "vfs" 44 | ) 45 | 46 | // Identifies device in system 47 | type DeviceId struct { 48 | Major uint 49 | Minor uint 50 | } 51 | 52 | // DeviceInfo holds device name and major and minor numbers 53 | type DeviceInfo struct { 54 | Device string 55 | Major uint 56 | Minor uint 57 | } 58 | 59 | // FsType is a docker filesystem type, supported: zfs, vfs and devicemapper 60 | type FsType string 61 | 62 | func (ft FsType) String() string { 63 | return string(ft) 64 | } 65 | 66 | // Fs holds information about device (name, minor, major), type, capacity, etc. 67 | type Fs struct { 68 | DeviceInfo 69 | Type FsType 70 | Capacity uint64 71 | Free uint64 72 | Available uint64 73 | Inodes uint64 74 | InodesFree uint64 75 | DiskStats DiskStats 76 | } 77 | 78 | // DiskStats holds disk statistics 79 | type DiskStats struct { 80 | ReadsCompleted uint64 81 | ReadsMerged uint64 82 | SectorsRead uint64 83 | ReadTime uint64 84 | WritesCompleted uint64 85 | WritesMerged uint64 86 | SectorsWritten uint64 87 | WriteTime uint64 88 | IoInProgress uint64 89 | IoTime uint64 90 | WeightedIoTime uint64 91 | } 92 | 93 | // FsInfo specifies methods to get filesystem information and statistics 94 | type FsInfo interface { 95 | // Returns capacity and free space, in bytes, of all the ext2, ext3, ext4 filesystems on the host. 96 | GetGlobalFsInfo(procfs string) ([]Fs, error) 97 | 98 | // Returns capacity and free space, in bytes, of the set of mounts passed. 99 | GetFsInfoForPath(mountSet map[string]struct{}, procfs string) ([]Fs, error) 100 | 101 | // Returns number of bytes occupied by 'dir'. 102 | GetDirUsage(dir string, timeout time.Duration) (uint64, error) 103 | 104 | // Returns the block device info of the filesystem on which 'dir' resides. 
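	// The result carries the device node name together with its major and minor
	// numbers (see DeviceInfo above). A hypothetical call, with an illustrative
	// path only:
	//
	//	dev, err := fsInfo.GetDirFsDevice("/var/lib/docker")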
105 | GetDirFsDevice(dir string) (*DeviceInfo, error) 106 | } 107 | -------------------------------------------------------------------------------- /container/network/network.go: -------------------------------------------------------------------------------- 1 | /* 2 | http://www.apache.org/licenses/LICENSE-2.0.txt 3 | 4 | 5 | Copyright 2016 Intel Corporation 6 | 7 | Licensed under the Apache License, Version 2.0 (the "License"); 8 | you may not use this file except in compliance with the License. 9 | You may obtain a copy of the License at 10 | 11 | http://www.apache.org/licenses/LICENSE-2.0 12 | 13 | Unless required by applicable law or agreed to in writing, software 14 | distributed under the License is distributed on an "AS IS" BASIS, 15 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 16 | See the License for the specific language governing permissions and 17 | limitations under the License. 18 | 19 | This file incorporates work covered by the following copyright and permission notice: 20 | Copyright 2014 Google Inc. All Rights Reserved. 21 | Licensed under the Apache License, Version 2.0 (the "License"); you may not use 22 | this file except in compliance with the License. You may obtain a copy of the License at 23 | 24 | http://www.apache.org/licenses/LICENSE-2.0 25 | 26 | Unless required by applicable law or agreed to in writing, software 27 | distributed under the License is distributed on an "AS IS" BASIS, 28 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 29 | See the License for the specific language governing permissions and 30 | limitations under the License. 31 | */ 32 | 33 | // Package contains code from Google cAdvisor (https://github.com/google/cadvisor) with the following: 34 | // - functions collecting network statistics 35 | 36 | // Package network provides network statistics 37 | package network 38 | 39 | import ( 40 | "bufio" 41 | "errors" 42 | "fmt" 43 | "io/ioutil" 44 | "os" 45 | "path/filepath" 46 | "strconv" 47 | "strings" 48 | 49 | "github.com/intelsdi-x/snap-plugin-collector-docker/container" 50 | utils "github.com/intelsdi-x/snap-plugin-utilities/ns" 51 | 52 | log "github.com/sirupsen/logrus" 53 | ) 54 | 55 | const ( 56 | // expected number of fields in a line of the net stats file 57 | numberOfFields = 17 58 | // 4 types of stats are expected: (Rx/Tx) packets, bytes, dropped, errors 59 | numberOfStatsType = 4 60 | indexOfRxStats = 1 61 | indexOfTxStats = 9 62 | ) 63 | 64 | var ( 65 | // networkInterfacesDir points to network devices and their stats (declared as a var for mocking purposes) 66 | networkInterfacesDir = "/sys/class/net" 67 | 68 | // networkMetrics is a list of available network metrics (rx_bytes, tx_bytes, etc.)
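// It is derived from the composition tags of container.NetworkInterface, so it is expected to
// contain the counters asserted in network_test.go: rx_bytes, rx_packets, rx_errors, rx_dropped,
// tx_bytes, tx_packets, tx_errors, tx_dropped (a "name" entry, if present, is skipped when the
// per-metric statistics files are read).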
69 | networkMetrics = getListOfNetworkMetrics() 70 | ) 71 | 72 | func getListOfNetworkMetrics() []string { 73 | metrics := []string{} 74 | utils.FromCompositionTags(container.NetworkInterface{}, "", &metrics) 75 | return metrics 76 | } 77 | 78 | type Network struct{} 79 | 80 | func (n *Network) GetStats(stats *container.Statistics, opts container.GetStatOpt) error { 81 | pid, err := opts.GetIntValue("pid") 82 | if err != nil { 83 | return err 84 | } 85 | 86 | isHost, err := opts.GetBoolValue("is_host") 87 | if err != nil { 88 | return err 89 | } 90 | 91 | procfs, err := opts.GetStringValue("procfs") 92 | if err != nil { 93 | return err 94 | } 95 | 96 | if !isHost { 97 | path := filepath.Join(procfs, strconv.Itoa(pid)) 98 | stats.Network, err = NetworkStatsFromProc(path) 99 | if err != nil { 100 | // only log error message 101 | log.WithFields(log.Fields{ 102 | "module": "network", 103 | "block": "GetStats", 104 | }).Errorf("Unable to get network stats, pid %d: %s", pid, err) 105 | } 106 | 107 | } else { 108 | stats.Network, err = NetworkStatsFromRoot() 109 | if err != nil { 110 | // only log error message 111 | log.WithFields(log.Fields{ 112 | "module": "network", 113 | "block": "GetStats", 114 | }).Errorf("Unable to get network stats for host: %s", err) 115 | } 116 | } 117 | 118 | return nil 119 | } 120 | 121 | // NetworkStatsFromProc returns network statistics (e.g. tx_bytes, rx_bytes, etc.) per each interface and aggregated in total 122 | // for a given path combined from the given rootFs and the PID of the docker process, i.e. `{rootFs}/{pid}/net/dev` 123 | func NetworkStatsFromProc(path string) ([]container.NetworkInterface, error) { 124 | netStatsFile := filepath.Join(path, "/net/dev") 125 | ifaceStats, err := scanInterfaceStats(netStatsFile) 126 | if err != nil { 127 | return nil, fmt.Errorf("couldn't read network stats: %v", err) 128 | } 129 | 130 | if len(ifaceStats) == 0 { 131 | return nil, errors.New("No network interface found") 132 | } 133 | 134 | return totalNetworkStats(ifaceStats), nil 135 | } 136 | 137 | // NetworkStatsFromRoot returns network statistics (e.g. tx_bytes, rx_bytes, etc.)
per each interface and aggregated in total 138 | // for root (a docker host) 139 | func NetworkStatsFromRoot() (ifaceStats []container.NetworkInterface, _ error) { 140 | devNames, err := listRootNetworkDevices() 141 | if err != nil { 142 | return nil, err 143 | } 144 | ifaceStats = []container.NetworkInterface{} 145 | for _, name := range devNames { 146 | if isIgnoredDevice(name) { 147 | continue 148 | } 149 | if stats, err := interfaceStatsFromDir(name); err != nil { 150 | return nil, err 151 | } else { 152 | ifaceStats = append(ifaceStats, *stats) 153 | } 154 | } 155 | return totalNetworkStats(ifaceStats), nil 156 | } 157 | 158 | // totalNetworkStats calculates summary of network stats (sum over all net interfaces) and returns 159 | func totalNetworkStats(ifaceStats []container.NetworkInterface) (ifaceStatsInTotal []container.NetworkInterface) { 160 | total := container.NetworkInterface{ 161 | Name: "total", 162 | } 163 | 164 | for _, iface := range ifaceStats { 165 | total.RxBytes += iface.RxBytes 166 | total.RxPackets += iface.RxPackets 167 | total.RxDropped += iface.RxDropped 168 | total.RxErrors += iface.RxErrors 169 | total.TxBytes += iface.TxBytes 170 | total.TxPackets += iface.TxPackets 171 | total.TxDropped += iface.TxDropped 172 | total.TxErrors += iface.TxErrors 173 | } 174 | 175 | return append(ifaceStats, total) 176 | } 177 | 178 | func listRootNetworkDevices() (devNames []string, _ error) { 179 | entries, err := ioutil.ReadDir(networkInterfacesDir) 180 | if err != nil { 181 | return nil, err 182 | } 183 | devNames = []string{} 184 | for _, e := range entries { 185 | if e.Mode()&os.ModeSymlink == os.ModeSymlink { 186 | e, err = os.Stat(filepath.Join(networkInterfacesDir, e.Name())) 187 | if err != nil || !e.IsDir() { 188 | continue 189 | } 190 | devNames = append(devNames, e.Name()) 191 | } else if e.IsDir() { 192 | devNames = append(devNames, e.Name()) 193 | } 194 | } 195 | return devNames, nil 196 | } 197 | 198 | func interfaceStatsFromDir(ifaceName string) (*container.NetworkInterface, error) { 199 | stats := container.NetworkInterface{Name: ifaceName} 200 | statsValues := map[string]uint64{} 201 | for _, metric := range networkMetrics { 202 | if metric == "name" { 203 | continue 204 | } 205 | val, err := readUintFromFile(filepath.Join(networkInterfacesDir, ifaceName, "statistics", metric), 64) 206 | if err != nil { 207 | return nil, fmt.Errorf("couldn't read interface statistics %s/%s: %v", ifaceName, metric, err) 208 | } 209 | statsValues[metric] = val 210 | } 211 | setIfaceStatsFromMap(&stats, statsValues) 212 | return &stats, nil 213 | } 214 | 215 | func setIfaceStatsFromMap(stats *container.NetworkInterface, values map[string]uint64) { 216 | stats.RxBytes = values["rx_bytes"] 217 | stats.RxErrors = values["rx_errors"] 218 | stats.RxPackets = values["rx_packets"] 219 | stats.RxDropped = values["rx_dropped"] 220 | stats.TxBytes = values["tx_bytes"] 221 | stats.TxErrors = values["tx_errors"] 222 | stats.TxPackets = values["tx_packets"] 223 | stats.TxDropped = values["tx_dropped"] 224 | } 225 | 226 | func isIgnoredDevice(ifName string) bool { 227 | ignoredDevicePrefixes := []string{"lo", "veth", "docker"} 228 | for _, prefix := range ignoredDevicePrefixes { 229 | if strings.HasPrefix(strings.ToLower(ifName), prefix) { 230 | return true 231 | } 232 | } 233 | return false 234 | } 235 | 236 | func scanInterfaceStats(netStatsFile string) ([]container.NetworkInterface, error) { 237 | file, err := os.Open(netStatsFile) 238 | if err != nil { 239 | return nil, fmt.Errorf("failure 
opening %s: %v", netStatsFile, err) 240 | } 241 | defer file.Close() 242 | 243 | scanner := bufio.NewScanner(file) 244 | 245 | // Discard header lines 246 | for i := 0; i < 2; i++ { 247 | if b := scanner.Scan(); !b { 248 | return nil, scanner.Err() 249 | } 250 | } 251 | 252 | stats := []container.NetworkInterface{} 253 | for scanner.Scan() { 254 | line := scanner.Text() 255 | line = strings.Replace(line, ":", "", -1) 256 | 257 | fields := strings.Fields(line) 258 | // If the format of the line is invalid then don't trust any of the stats 259 | // in this file. 260 | if len(fields) != numberOfFields { 261 | return nil, fmt.Errorf("invalid interface stats line: %v", line) 262 | } 263 | 264 | devName := fields[0] 265 | 266 | if isIgnoredDevice(devName) { 267 | continue 268 | } 269 | 270 | i := container.NetworkInterface{ 271 | Name: devName, 272 | } 273 | 274 | // take fields [1:5] for rx stats and [9:13] for tx stats 275 | rxStatsFields := fields[indexOfRxStats : indexOfRxStats+numberOfStatsType] 276 | txStatsFields := fields[indexOfTxStats : indexOfTxStats+numberOfStatsType] 277 | 278 | statFields := append(rxStatsFields, txStatsFields...) 279 | statPointers := []*uint64{ 280 | &i.RxBytes, &i.RxPackets, &i.RxErrors, &i.RxDropped, 281 | &i.TxBytes, &i.TxPackets, &i.TxErrors, &i.TxDropped, 282 | } 283 | 284 | err := setInterfaceStatValues(statFields, statPointers) 285 | if err != nil { 286 | return nil, fmt.Errorf("cannot parse interface stats (%v): %v", err, line) 287 | } 288 | 289 | stats = append(stats, i) 290 | } 291 | 292 | return stats, nil 293 | } 294 | 295 | func setInterfaceStatValues(fields []string, pointers []*uint64) error { 296 | for i, v := range fields { 297 | val, err := strconv.ParseUint(v, 10, 64) 298 | if err != nil { 299 | return err 300 | } 301 | *pointers[i] = val 302 | } 303 | return nil 304 | } 305 | 306 | func readUintFromFile(path string, bits int) (uint64, error) { 307 | valb, err := ioutil.ReadFile(path) 308 | if err != nil { 309 | return 0, err 310 | } 311 | 312 | return strconv.ParseUint(strings.TrimSpace(string(valb)), 10, bits) 313 | } 314 | -------------------------------------------------------------------------------- /container/network/network_test.go: -------------------------------------------------------------------------------- 1 | // +build small 2 | 3 | /* 4 | http://www.apache.org/licenses/LICENSE-2.0.txt 5 | 6 | 7 | Copyright 2016 Intel Corporation 8 | 9 | Licensed under the Apache License, Version 2.0 (the "License"); 10 | you may not use this file except in compliance with the License. 11 | You may obtain a copy of the License at 12 | 13 | http://www.apache.org/licenses/LICENSE-2.0 14 | 15 | Unless required by applicable law or agreed to in writing, software 16 | distributed under the License is distributed on an "AS IS" BASIS, 17 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 18 | See the License for the specific language governing permissions and 19 | limitations under the License. 20 | */ 21 | 22 | package network 23 | 24 | import ( 25 | "fmt" 26 | "os" 27 | "path/filepath" 28 | "strconv" 29 | "testing" 30 | 31 | . 
"github.com/smartystreets/goconvey/convey" 32 | 33 | "github.com/intelsdi-x/snap-plugin-collector-docker/container" 34 | ) 35 | 36 | const ( 37 | mockNetworkInterfacesDir = "/tmp/mock_sys_class_net" 38 | mockProcfsDir = "/tmp/mock_proc" 39 | ) 40 | 41 | func TestGetListOfNetworkMetrics(t *testing.T) { 42 | 43 | Convey("List of available network metrics", t, func() { 44 | networkMetrics := getListOfNetworkMetrics() 45 | So(len(networkMetrics), ShouldBeGreaterThan, 0) 46 | 47 | Convey("confirm availability of TX metrics", func() { 48 | So(networkMetrics, ShouldContain, "tx_bytes") 49 | So(networkMetrics, ShouldContain, "tx_packets") 50 | So(networkMetrics, ShouldContain, "tx_dropped") 51 | So(networkMetrics, ShouldContain, "tx_errors") 52 | }) 53 | 54 | Convey("confirm availability of RX metrics", func() { 55 | So(networkMetrics, ShouldContain, "rx_bytes") 56 | So(networkMetrics, ShouldContain, "rx_packets") 57 | So(networkMetrics, ShouldContain, "rx_dropped") 58 | So(networkMetrics, ShouldContain, "rx_errors") 59 | }) 60 | 61 | }) 62 | } 63 | 64 | func TestIgnoringDevice(t *testing.T) { 65 | 66 | Convey("Validate devices expected as be ignored", t, func() { 67 | // device with name started with `lo`, `veth` or `docker` should be ignored 68 | So(isIgnoredDevice("lo"), ShouldBeTrue) 69 | So(isIgnoredDevice("docker0"), ShouldBeTrue) 70 | So(isIgnoredDevice("veth0123456"), ShouldBeTrue) 71 | }) 72 | 73 | Convey("Validate devices expected as NOT be ignored", t, func() { 74 | So(isIgnoredDevice("eth0"), ShouldBeFalse) 75 | So(isIgnoredDevice("eno1"), ShouldBeFalse) 76 | So(isIgnoredDevice("enp2s0"), ShouldBeFalse) 77 | }) 78 | } 79 | 80 | func TestListRootNetworkDevices(t *testing.T) { 81 | 82 | Convey("List root network devices", t, func() { 83 | 84 | So(func() { listRootNetworkDevices() }, ShouldNotPanic) 85 | 86 | Convey("return an error when network interface directory is invalid", func() { 87 | // set invalid networkInterfaceDir (the path does not exist) 88 | networkInterfacesDir = "/tmp/invalid/path/to/network/devices" 89 | devs, err := listRootNetworkDevices() 90 | So(devs, ShouldBeNil) 91 | So(err, ShouldNotBeNil) 92 | }) 93 | 94 | Convey("return an empty list when there is no device entry", func() { 95 | networkInterfacesDir = mockNetworkInterfacesDir 96 | createMockDeviceEntries([]string{}) 97 | defer deleteMockFiles() 98 | devs, err := listRootNetworkDevices() 99 | So(err, ShouldBeNil) 100 | So(devs, ShouldBeEmpty) 101 | }) 102 | 103 | Convey("return not empty list when devices entries are available", func() { 104 | networkInterfacesDir = mockNetworkInterfacesDir 105 | mockNetworkDevices := []string{"lo", "docker0", "veth0", "eno1", "eth0", "enp2s0"} 106 | createMockDeviceEntries(mockNetworkDevices) 107 | defer deleteMockFiles() 108 | devs, err := listRootNetworkDevices() 109 | So(err, ShouldBeNil) 110 | So(devs, ShouldNotBeEmpty) 111 | So(len(devs), ShouldEqual, len(mockNetworkDevices)) 112 | }) 113 | 114 | }) 115 | } 116 | 117 | func TestTotalNetworkStats(t *testing.T) { 118 | 119 | Convey("Append `total` to network stats as statistics in total", t, func() { 120 | 121 | Convey("when there is no network interface", func() { 122 | ifaceStatsInTotal := totalNetworkStats([]container.NetworkInterface{}) 123 | 124 | Convey("total stats should be appended", func() { 125 | So(ifaceStatsInTotal, ShouldNotBeNil) 126 | So(len(ifaceStatsInTotal), ShouldEqual, 1) 127 | So(ifaceStatsInTotal[0].Name, ShouldEqual, "total") 128 | 129 | Convey("values of total stats are expected to equal zero", func() 
{ 130 | So(ifaceStatsInTotal[0].RxBytes, ShouldBeZeroValue) 131 | So(ifaceStatsInTotal[0].RxPackets, ShouldBeZeroValue) 132 | So(ifaceStatsInTotal[0].RxErrors, ShouldBeZeroValue) 133 | So(ifaceStatsInTotal[0].RxDropped, ShouldBeZeroValue) 134 | 135 | So(ifaceStatsInTotal[0].TxBytes, ShouldBeZeroValue) 136 | So(ifaceStatsInTotal[0].TxPackets, ShouldBeZeroValue) 137 | So(ifaceStatsInTotal[0].TxErrors, ShouldBeZeroValue) 138 | So(ifaceStatsInTotal[0].TxDropped, ShouldBeZeroValue) 139 | }) 140 | }) 141 | 142 | }) 143 | 144 | Convey("calulate total stats based on network interface stats", func() { 145 | // mock network stats per interface 146 | mockIfaceStats := []container.NetworkInterface{ 147 | container.NetworkInterface{ 148 | Name: "mockNetInterface1", 149 | RxBytes: 1, 150 | RxPackets: 1, 151 | RxErrors: 1, 152 | RxDropped: 1, 153 | TxBytes: 1, 154 | TxPackets: 1, 155 | TxErrors: 1, 156 | TxDropped: 1, 157 | }, 158 | 159 | container.NetworkInterface{ 160 | Name: "mockNetInterface2", 161 | RxBytes: 1, 162 | RxPackets: 1, 163 | RxErrors: 1, 164 | RxDropped: 1, 165 | TxBytes: 1, 166 | TxPackets: 1, 167 | TxErrors: 1, 168 | TxDropped: 1, 169 | }, 170 | } 171 | 172 | ifaceStatsInTotal := totalNetworkStats(mockIfaceStats) 173 | 174 | Convey("total stats should be appended", func() { 175 | So(ifaceStatsInTotal, ShouldNotBeNil) 176 | So(len(ifaceStatsInTotal), ShouldEqual, len(mockIfaceStats)+1) 177 | 178 | for _, ifaceStats := range ifaceStatsInTotal { 179 | if ifaceStats.Name == "total" { 180 | Convey("validate values of total stats", func() { 181 | // there are two mockNetInterfaces with values equal `1` for each metric, 182 | // so stats in total should equal `2` 183 | So(ifaceStats.RxBytes, ShouldEqual, 2) 184 | So(ifaceStats.RxPackets, ShouldEqual, 2) 185 | So(ifaceStats.RxErrors, ShouldEqual, 2) 186 | So(ifaceStats.RxDropped, ShouldEqual, 2) 187 | 188 | So(ifaceStats.TxBytes, ShouldEqual, 2) 189 | So(ifaceStats.TxPackets, ShouldEqual, 2) 190 | So(ifaceStats.TxErrors, ShouldEqual, 2) 191 | So(ifaceStats.TxDropped, ShouldEqual, 2) 192 | }) 193 | continue 194 | } 195 | } 196 | 197 | }) 198 | 199 | }) 200 | 201 | }) 202 | 203 | } 204 | 205 | func TestInterfaceStatsFromDir(t *testing.T) { 206 | defer deleteMockFiles() 207 | networkInterfacesDir = mockNetworkInterfacesDir 208 | mockNetworkDevices := []string{"eno1", "eth0", "enp2s0"} 209 | mockStatsContent := []byte(`1234`) 210 | 211 | Convey("Get interface stats from networkInterfacesDir", t, func() { 212 | 213 | Convey("create statistics for mock devices", func() { 214 | err := createMockDeviceStatistics(mockNetworkDevices, mockStatsContent) 215 | So(err, ShouldBeNil) 216 | }) 217 | 218 | Convey("successful retrieving statistics for available devices", func() { 219 | for _, device := range mockNetworkDevices { 220 | stats, err := interfaceStatsFromDir(device) 221 | So(err, ShouldBeNil) 222 | So(stats, ShouldNotBeNil) 223 | So(stats.RxBytes, ShouldEqual, 1234) 224 | So(stats.TxBytes, ShouldEqual, 1234) 225 | } 226 | }) 227 | 228 | Convey("return an error when requested device is not available", func() { 229 | stats, err := interfaceStatsFromDir("invalid_device") 230 | So(err, ShouldNotBeNil) 231 | So(stats, ShouldBeNil) 232 | }) 233 | 234 | }) 235 | } 236 | 237 | func TestNetworkStatsFromRoot(t *testing.T) { 238 | defer deleteMockFiles() 239 | 240 | networkInterfacesDir = mockNetworkInterfacesDir 241 | mockNetworkDevices := []string{"eno1", "eth0", "enp2s0"} 242 | mockNetworkDevicesIgnored := []string{"lo", "docker0", "veth0"} 243 | 
mockStatsContent := []byte(`1234`) 244 | 245 | Convey("Get network stats from root", t, func() { 246 | 247 | Convey("create statistics for mock devices", func() { 248 | err := createMockDeviceStatistics(append(mockNetworkDevices, mockNetworkDevicesIgnored...), mockStatsContent) 249 | So(err, ShouldBeNil) 250 | }) 251 | 252 | Convey("successful retrieving statistics for available devices", func() { 253 | stats, err := NetworkStatsFromRoot() 254 | So(err, ShouldBeNil) 255 | So(stats, ShouldNotBeEmpty) 256 | // 4 stats should be returned: for `eno1`, `eth0`, `enp2s0` and `total` 257 | So(len(stats), ShouldEqual, len(mockNetworkDevices)+1) 258 | }) 259 | 260 | Convey("return an error when there is no available device", func() { 261 | deleteMockFiles() 262 | stats, err := NetworkStatsFromRoot() 263 | So(err, ShouldNotBeNil) 264 | So(stats, ShouldBeEmpty) 265 | }) 266 | 267 | Convey("return an error when statistics file is not available in device entry path", func() { 268 | deleteMockFiles() 269 | createMockDeviceEntries(mockNetworkDevices) 270 | stats, err := NetworkStatsFromRoot() 271 | So(err, ShouldNotBeNil) 272 | So(stats, ShouldBeEmpty) 273 | }) 274 | 275 | }) 276 | } 277 | 278 | func TestNetworkStatsFromProc(t *testing.T) { 279 | defer deleteMockFiles() 280 | 281 | // docker container's process ID points to its network stats in /proc/{pid}/net/dev 282 | mockPids := []int{1234, 5678, 91011} 283 | mockDevContent := []byte(`Inter-| Receive | Transmit 284 | face |bytes packets errs drop fifo frame compressed multicast|bytes packets errs drop fifo colls carrier compressed 285 | eth0: 424999 4499 0 0 0 0 0 0 648 8 0 0 0 0 0 0 286 | lo: 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0`) 287 | mockDevContentLoopback := []byte(`Inter-| Receive | Transmit 288 | face |bytes packets errs drop fifo frame compressed multicast|bytes packets errs drop fifo colls carrier compressed 289 | lo: 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0`) 290 | 291 | Convey("Get network stats from root", t, func() { 292 | 293 | Convey("create statistics for mock devices", func() { 294 | err := createMockProcfsNetDev(mockPids, mockDevContent) 295 | So(err, ShouldBeNil) 296 | }) 297 | 298 | Convey("successful retrieving statistics for available devices", func() { 299 | for _, pid := range mockPids { 300 | path := filepath.Join(mockProcfsDir, strconv.Itoa(pid)) 301 | stats, err := NetworkStatsFromProc(path) 302 | So(err, ShouldBeNil) 303 | So(stats, ShouldNotBeEmpty) 304 | // stats should be returned: for `eth0` and `total`; `lo` should be ignored 305 | So(len(stats), ShouldEqual, 2) 306 | } 307 | 308 | }) 309 | 310 | Convey("return an error when the given PID does not exist", func() { 311 | path := filepath.Join(mockProcfsDir, strconv.Itoa(0)) 312 | stats, err := NetworkStatsFromProc(path) 313 | So(err, ShouldNotBeNil) 314 | So(stats, ShouldBeEmpty) 315 | }) 316 | 317 | Convey("return an error when no network interface found", func() { 318 | mockPid := 1 319 | Convey("create net/dev which contains only loopback", func() { 320 | err := createMockProcfsNetDev([]int{mockPid}, mockDevContentLoopback) 321 | So(err, ShouldBeNil) 322 | }) 323 | path := filepath.Join(mockProcfsDir, strconv.Itoa(mockPid)) 324 | stats, err := NetworkStatsFromProc(path) 325 | So(err, ShouldNotBeNil) 326 | So(stats, ShouldBeEmpty) 327 | So(err.Error(), ShouldEqual, "No network interface found") 328 | }) 329 | }) 330 | } 331 | 332 | // createMockDeviceStatistics creates for the given devices' names statistics file with given content 333 | // under the following path: 
/mockNetworkInterfacesDir/{device}/statistics 334 | func createMockDeviceStatistics(devices []string, content []byte) error { 335 | deleteMockFiles() 336 | for _, device := range devices { 337 | pathToDeviceStats := filepath.Join(mockNetworkInterfacesDir, device, "statistics") 338 | if err := os.MkdirAll(pathToDeviceStats, os.ModePerm); err != nil { 339 | return err 340 | } 341 | 342 | for _, statName := range networkMetrics { 343 | if err := createFile(pathToDeviceStats, statName, content); err != nil { 344 | return err 345 | } 346 | } 347 | } 348 | 349 | return nil 350 | } 351 | 352 | // createMockProcfsNetDev creates for the given process IDs net/dev statistics with given content 353 | // under the following path: /mockProcfsDir/{pid}/net/dev 354 | func createMockProcfsNetDev(pids []int, content []byte) error { 355 | deleteMockFiles() 356 | for _, pid := range pids { 357 | pathToProcessNetDev := filepath.Join(mockProcfsDir, fmt.Sprintf("%d", pid), "net") 358 | if err := os.MkdirAll(pathToProcessNetDev, os.ModePerm); err != nil { 359 | return err 360 | } 361 | 362 | if err := createFile(pathToProcessNetDev, "dev", content); err != nil { 363 | return err 364 | } 365 | } 366 | 367 | return nil 368 | } 369 | 370 | // createMockDeviceEntries creates folder named as device for the given devices 371 | // under the following path: /mockNetworkInterfacesDir/{device} 372 | func createMockDeviceEntries(devices []string) error { 373 | deleteMockFiles() 374 | 375 | if err := os.MkdirAll(mockNetworkInterfacesDir, os.ModePerm); err != nil { 376 | return err 377 | } 378 | 379 | for _, device := range devices { 380 | devEntry := filepath.Join(mockNetworkInterfacesDir, device) 381 | if err := os.Mkdir(devEntry, os.ModePerm); err != nil { 382 | return err 383 | } 384 | } 385 | 386 | return nil 387 | } 388 | 389 | // createMockProcfsNetTCP creates for the given process IDs net/tcp and net/tcp6 statistics with given content 390 | // under the following path: /mockProcfsDir/{pid}/net/tcp and /mockProcfsDir/{pid}/net/tcp6 391 | func createMockProcfsNetTCP(pids []int, content []byte) error { 392 | deleteMockFiles() 393 | for _, pid := range pids { 394 | pathToProcessNetDev := filepath.Join(mockProcfsDir, fmt.Sprintf("%d", pid), "net") 395 | if err := os.MkdirAll(pathToProcessNetDev, os.ModePerm); err != nil { 396 | return err 397 | } 398 | 399 | // create tcp file 400 | if err := createFile(pathToProcessNetDev, "tcp", content); err != nil { 401 | return err 402 | } 403 | 404 | // create tcp6 file 405 | if err := createFile(pathToProcessNetDev, "tcp6", content); err != nil { 406 | return err 407 | } 408 | } 409 | 410 | return nil 411 | } 412 | 413 | // deleteMockFiles removes mock files 414 | func deleteMockFiles() { 415 | os.RemoveAll(mockNetworkInterfacesDir) 416 | os.RemoveAll(mockProcfsDir) 417 | } 418 | 419 | // createFile creates file and writes to it a given content 420 | func createFile(path string, name string, content []byte) error { 421 | // create file in a given path 422 | f, err := os.Create(filepath.Join(path, name)) 423 | if err == nil { 424 | // when file was created successfully, write a content to it 425 | _, err = f.Write(content) 426 | } 427 | 428 | return err 429 | } 430 | -------------------------------------------------------------------------------- /container/network/tcp.go: -------------------------------------------------------------------------------- 1 | /* 2 | http://www.apache.org/licenses/LICENSE-2.0.txt 3 | 4 | 5 | Copyright 2016 Intel Corporation 6 | 7 | Licensed under the 
Apache License, Version 2.0 (the "License"); 8 | you may not use this file except in compliance with the License. 9 | You may obtain a copy of the License at 10 | 11 | http://www.apache.org/licenses/LICENSE-2.0 12 | 13 | Unless required by applicable law or agreed to in writing, software 14 | distributed under the License is distributed on an "AS IS" BASIS, 15 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 16 | See the License for the specific language governing permissions and 17 | limitations under the License. 18 | 19 | This file incorporates work covered by the following copyright and permission notice: 20 | Copyright 2014 Google Inc. All Rights Reserved. 21 | Licensed under the Apache License, Version 2.0 (the "License"); you may not use 22 | this file except in compliance with the License. You may obtain a copy of the License at 23 | 24 | http://www.apache.org/licenses/LICENSE-2.0 25 | 26 | Unless required by applicable law or agreed to in writing, software 27 | distributed under the License is distributed on an "AS IS" BASIS, 28 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 29 | See the License for the specific language governing permissions and 30 | limitations under the License. 31 | */ 32 | 33 | // Package contains code from Google Cadvisor (https://github.com/google/cadvisor) with following: 34 | // - functions collecting network statistics 35 | 36 | // Package network provides network Statistics (included TCP and TCP6 stats) 37 | package network 38 | 39 | import ( 40 | "bufio" 41 | "fmt" 42 | "io/ioutil" 43 | "path/filepath" 44 | "strconv" 45 | "strings" 46 | 47 | "github.com/intelsdi-x/snap-plugin-collector-docker/container" 48 | 49 | log "github.com/sirupsen/logrus" 50 | ) 51 | 52 | type Tcp struct { 53 | StatsFile string 54 | } 55 | 56 | func (tcp *Tcp) GetStats(stats *container.Statistics, opts container.GetStatOpt) error { 57 | pid, err := opts.GetIntValue("pid") 58 | if err != nil { 59 | return err 60 | } 61 | 62 | isHost, err := opts.GetBoolValue("is_host") 63 | if err != nil { 64 | return err 65 | } 66 | 67 | procfs, err := opts.GetStringValue("procfs") 68 | if err != nil { 69 | return err 70 | } 71 | 72 | if !isHost { 73 | path := filepath.Join(procfs, strconv.Itoa(pid), tcp.StatsFile) 74 | 75 | switch tcp.StatsFile { 76 | case "net/tcp": 77 | stats.Connection.Tcp, err = tcpStatsFromProc(path) 78 | case "net/tcp6": 79 | stats.Connection.Tcp6, err = tcpStatsFromProc(path) 80 | default: 81 | log.WithFields(log.Fields{ 82 | "module": "network", 83 | "block": "GetStats", 84 | }).Errorf("Unknown tcp stats file %s", tcp.StatsFile) 85 | return fmt.Errorf("Unknown tcp stats file %s", tcp.StatsFile) 86 | } 87 | 88 | if err != nil { 89 | // only log error message 90 | log.WithFields(log.Fields{ 91 | "module": "network", 92 | "block": "GetStats", 93 | }).Errorf("Unable to get network stats, pid %d, stats file %s: %s", pid, tcp.StatsFile, err) 94 | } 95 | 96 | } 97 | 98 | return nil 99 | } 100 | 101 | func tcpStatsFromProc(tcpStatsFile string) (container.TcpStat, error) { 102 | tcpStats, err := scanTcpStats(tcpStatsFile) 103 | if err != nil { 104 | return tcpStats, fmt.Errorf("Cannot obtain tcp stats: %v", err) 105 | } 106 | 107 | return tcpStats, nil 108 | } 109 | 110 | func scanTcpStats(tcpStatsFile string) (container.TcpStat, error) { 111 | var stats container.TcpStat 112 | 113 | data, err := ioutil.ReadFile(tcpStatsFile) 114 | if err != nil { 115 | return stats, fmt.Errorf("Cannot open %s: %v", tcpStatsFile, err) 116 | } 
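// tcpStateMap counts connections per TCP state code, as reported in the `st` column of
// /proc/{pid}/net/tcp and /proc/{pid}/net/tcp6. A hypothetical line (values are
// illustrative only, see the format comment further below) looks like:
//
//   0: 0100007F:0016 00000000:0000 0A 00000000:00000000 00:00000000 00000000 0 0 1 ...
//
// where the 4th field ("0A", i.e. LISTEN, in this made-up example) is the state counted below.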
117 | 118 | tcpStateMap := map[string]uint64{ 119 | "01": 0, //ESTABLISHED 120 | "02": 0, //SYN_SENT 121 | "03": 0, //SYN_RECV 122 | "04": 0, //FIN_WAIT1 123 | "05": 0, //FIN_WAIT2 124 | "06": 0, //TIME_WAIT 125 | "07": 0, //CLOSE 126 | "08": 0, //CLOSE_WAIT 127 | "09": 0, //LAST_ACK 128 | "0A": 0, //LISTEN 129 | "0B": 0, //CLOSING 130 | } 131 | 132 | reader := strings.NewReader(string(data)) 133 | scanner := bufio.NewScanner(reader) 134 | 135 | scanner.Split(bufio.ScanLines) 136 | 137 | // Discard header line 138 | if b := scanner.Scan(); !b { 139 | return stats, scanner.Err() 140 | } 141 | 142 | for scanner.Scan() { 143 | line := scanner.Text() 144 | state := strings.Fields(line) 145 | 146 | if len(state) < 4 { 147 | return stats, fmt.Errorf("invalid format of TCP stats file %s: %v", tcpStatsFile, line) 148 | } 149 | 150 | // TCP state is the 4th field. 151 | // Format: sl local_address rem_address st tx_queue rx_queue tr tm->when retrnsmt uid timeout inode 152 | tcpState := state[3] 153 | _, ok := tcpStateMap[tcpState] 154 | if !ok { 155 | return stats, fmt.Errorf("invalid TCP stats line: %v", line) 156 | } 157 | tcpStateMap[tcpState]++ 158 | } 159 | 160 | stats = container.TcpStat{ 161 | Established: tcpStateMap["01"], 162 | SynSent: tcpStateMap["02"], 163 | SynRecv: tcpStateMap["03"], 164 | FinWait1: tcpStateMap["04"], 165 | FinWait2: tcpStateMap["05"], 166 | TimeWait: tcpStateMap["06"], 167 | Close: tcpStateMap["07"], 168 | CloseWait: tcpStateMap["08"], 169 | LastAck: tcpStateMap["09"], 170 | Listen: tcpStateMap["0A"], 171 | Closing: tcpStateMap["0B"], 172 | } 173 | 174 | return stats, nil 175 | } 176 | -------------------------------------------------------------------------------- /container/network/tcp_test.go: -------------------------------------------------------------------------------- 1 | // +build small 2 | 3 | /* 4 | http://www.apache.org/licenses/LICENSE-2.0.txt 5 | 6 | 7 | Copyright 2016 Intel Corporation 8 | 9 | Licensed under the Apache License, Version 2.0 (the "License"); 10 | you may not use this file except in compliance with the License. 11 | You may obtain a copy of the License at 12 | 13 | http://www.apache.org/licenses/LICENSE-2.0 14 | 15 | Unless required by applicable law or agreed to in writing, software 16 | distributed under the License is distributed on an "AS IS" BASIS, 17 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 18 | See the License for the specific language governing permissions and 19 | limitations under the License. 20 | */ 21 | 22 | package network 23 | 24 | import ( 25 | "path/filepath" 26 | "strconv" 27 | "testing" 28 | 29 | . 
"github.com/smartystreets/goconvey/convey" 30 | ) 31 | 32 | // docker container's process ID points to its tcp stats in /proc/{pid}/net/tcp 33 | var mockPids = []int{1234, 5678, 91011} 34 | 35 | //mockTcpContent := []byte(`ivnalidsl local_address rem_address st tx_queue rx_queue tr tm->when retrnsmt uid timeout inode`) 36 | 37 | var mockTcpContent = []byte(`sl local_address rem_address st tx_queue rx_queue tr tm->when retrnsmt uid timeout inode 38 | 0: 0100007F:F76E 00000000:0000 0A 00000000:00000000 00:00000000 00000000 0 0 2638360 1 ffff8805af412800 100 0 0 10 0 39 | 1: 00000000:1771 00000000:0000 0A 00000000:00000000 00:00000000 00000000 0 0 24271 1 ffff8807e0d9a800 100 0 0 10 0 40 | 2: 0101007F:0035 00000000:0000 0A 00000000:00000000 00:00000000 00000000 0 0 16914 1 ffff8807f79d0800 100 0 0 10 0 41 | 3: 00000000:0016 00000000:0000 0A 00000000:00000000 00:00000000 00000000 0 0 9574802 1 ffff8807df908800 100 0 0 10 0 42 | 4: 0100007F:0277 00000000:0000 0A 00000000:00000000 00:00000000 00000000 0 0 10216612 1 ffff8800a959c000 100 0 0 10 0 43 | 5: 0100007F:1B1E 00000000:0000 0A 00000000:00000000 00:00000000 00000000 0 0 2646436 1 ffff8807e0d9d000 100 0 0 10 0 44 | 6: 0100007F:B1FE 00000000:0000 0A 00000000:00000000 00:00000000 00000000 0 0 19907 1 ffff8807e0fc0000 100 0 0 10 0 45 | 7: 1E7E5B0A:8CE4 315D1332:01BB 01 00000000:00000000 00:00000000 00000000 0 0 5118203 1 ffff8806a1817800 20 0 0 10 -1`) 46 | 47 | func TestTcpStatsFromProc(t *testing.T) { 48 | defer deleteMockFiles() 49 | 50 | Convey("Get TCP/TCP6 stats from procfs", t, func() { 51 | 52 | Convey("create statistics for mock devices", func() { 53 | err := createMockProcfsNetTCP(mockPids, mockTcpContent) 54 | So(err, ShouldBeNil) 55 | }) 56 | 57 | Convey("successful retrieving TCP statistics", func() { 58 | for _, pid := range mockPids { 59 | path := filepath.Join(mockProcfsDir, strconv.Itoa(pid), "net/tcp") 60 | tcpStats, err := tcpStatsFromProc(path) 61 | So(err, ShouldBeNil) 62 | So(tcpStats, ShouldNotBeEmpty) 63 | So(tcpStats.Established, ShouldEqual, 1) 64 | So(tcpStats.Listen, ShouldEqual, 7) 65 | } 66 | 67 | }) 68 | 69 | Convey("successful retrieving TCP6 statistics", func() { 70 | for _, pid := range mockPids { 71 | path := filepath.Join(mockProcfsDir, strconv.Itoa(pid), "net/tcp6") 72 | tcpStats, err := tcpStatsFromProc(path) 73 | So(err, ShouldBeNil) 74 | So(tcpStats, ShouldNotBeEmpty) 75 | So(tcpStats.Established, ShouldEqual, 1) 76 | So(tcpStats.Listen, ShouldEqual, 7) 77 | } 78 | 79 | }) 80 | 81 | Convey("return an error when the given PID does not exist", func() { 82 | path := filepath.Join(mockProcfsDir, strconv.Itoa(0), "net/tcp") 83 | tcpStats, err := tcpStatsFromProc(path) 84 | So(err, ShouldNotBeNil) 85 | So(tcpStats, ShouldBeZeroValue) 86 | }) 87 | 88 | Convey("return an error when content is invalid", func() { 89 | mockPid := 1 90 | err := createMockProcfsNetTCP([]int{mockPid}, []byte(`invalid`)) 91 | So(err, ShouldBeNil) 92 | path := filepath.Join(mockProcfsDir, strconv.Itoa(mockPid), "net/tcp") 93 | tcpStats, err := tcpStatsFromProc(path) 94 | //So(err, ShouldNotBeNil) 95 | So(tcpStats, ShouldBeZeroValue) 96 | }) 97 | 98 | Convey("return an error when content is empty", func() { 99 | mockPid := 2 100 | err := createMockProcfsNetTCP([]int{mockPid}, []byte(``)) 101 | So(err, ShouldBeNil) 102 | path := filepath.Join(mockProcfsDir, strconv.Itoa(mockPid), "net/tcp") 103 | tcpStats, err := tcpStatsFromProc(path) 104 | So(tcpStats, ShouldBeZeroValue) 105 | }) 106 | 107 | }) 108 | } 109 | 
-------------------------------------------------------------------------------- /container/statistics.go: -------------------------------------------------------------------------------- 1 | // +build linux 2 | 3 | /* 4 | http://www.apache.org/licenses/LICENSE-2.0.txt 5 | 6 | 7 | Copyright 2015 Intel Corporation 8 | 9 | Licensed under the Apache License, Version 2.0 (the "License"); 10 | you may not use this file except in compliance with the License. 11 | You may obtain a copy of the License at 12 | 13 | http://www.apache.org/licenses/LICENSE-2.0 14 | 15 | Unless required by applicable law or agreed to in writing, software 16 | distributed under the License is distributed on an "AS IS" BASIS, 17 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 18 | See the License for the specific language governing permissions and 19 | limitations under the License. 20 | 21 | This file incorporates work covered by the following copyright and permission notice: 22 | Copyright 2014 Docker, Inc. 23 | Licensed under the Apache License, Version 2.0 (the "License"); you may not use 24 | this file except in compliance with the License. You may obtain a copy of the License at 25 | 26 | http://www.apache.org/licenses/LICENSE-2.0 27 | 28 | Unless required by applicable law or agreed to in writing, software 29 | distributed under the License is distributed on an "AS IS" BASIS, 30 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 31 | See the License for the specific language governing permissions and 32 | limitations under the License. 33 | */ 34 | 35 | // Package contains code from OCI/opencontainers (https://github.com/opencontainers/runc) with the following: 36 | // - structure Statistics and its compositions 37 | 38 | package container 39 | 40 | import "fmt" 41 | 42 | type GetStatOpt map[string]interface{} 43 | 44 | func (opt GetStatOpt) GetStringValue(key string) (string, error) { 45 | val, exists := opt[key] 46 | if exists { 47 | if res, ok := val.(string); ok { 48 | return res, nil 49 | } 50 | return "", fmt.Errorf("value %v does not seem to be of string type", val) 51 | 52 | } 53 | 54 | return "", fmt.Errorf("could not find value for key %s", key) 55 | } 56 | 57 | func (opt GetStatOpt) GetIntValue(key string) (int, error) { 58 | val, exists := opt[key] 59 | if exists { 60 | if res, ok := val.(int); ok { 61 | return res, nil 62 | } 63 | return 0, fmt.Errorf("value %v does not seem to be of integer type", val) 64 | } 65 | 66 | return 0, fmt.Errorf("could not find value for key %s", key) 67 | } 68 | 69 | func (opt GetStatOpt) GetBoolValue(key string) (bool, error) { 70 | val, exists := opt[key] 71 | if exists { 72 | if res, ok := val.(bool); ok { return res, nil } 73 | return false, fmt.Errorf("value %v does not seem to be of bool type", val) 74 | } 75 | return false, fmt.Errorf("could not find value for key %s", key) 76 | } 77 | 78 | type StatGetter interface { 79 | GetStats(*Statistics, GetStatOpt) error 80 | } 81 | 82 | type ContainerData struct { 83 | ID string `json:"-"` 84 | // Basic info about the container (status, creation time, image name, etc.) 85 | Specification Specification `json:"spec,omitempty"` 86 | 87 | // Container's statistics (cpu usage, memory usage, network stats, etc.)
88 | Stats *Statistics `json:"stats,omitempty"` 89 | } 90 | 91 | type Statistics struct { 92 | Cgroups *Cgroups `json:"cgroups,omitempty"` 93 | Network []NetworkInterface `json:"network,omitempty"` 94 | Connection TcpInterface `json:"connection,omitempty"` 95 | Filesystem map[string]FilesystemInterface `json:"filesystem,omitempty"` 96 | } 97 | 98 | // Specification holds docker container specification 99 | type Specification struct { 100 | Status string `json:"status,omitempty"` 101 | Created string `json:"creation_time,omitempty"` 102 | Image string `json:"image_name,omitempty"` 103 | SizeRw int64 `json:"size_rw,omitempty"` 104 | SizeRootFs int64 `json:"size_root_fs,omitempty"` 105 | Labels map[string]string `json:"labels,omitempty"` 106 | } 107 | 108 | type Cgroups struct { 109 | CpuStats CpuStats `json:"cpu_stats,omitempty"` 110 | MemoryStats MemoryStats `json:"memory_stats,omitempty"` 111 | BlkioStats BlkioStats `json:"blkio_stats,omitempty"` 112 | HugetlbStats map[string]HugetlbStats `json:"hugetlb_stats,omitempty"` 113 | PidsStats PidsStats `json:"pids_stats,omitempty"` 114 | CpuSetStats CpuSetStats `json:"cpuset_stats,omitempty"` 115 | } 116 | 117 | type CpuStats struct { 118 | CpuUsage CpuUsage `json:"cpu_usage,omitempty"` 119 | ThrottlingData ThrottlingData `json:"throttling_data,omitempty"` 120 | CpuShares uint64 `json:"cpu_shares,omitempty"` 121 | } 122 | 123 | type CpuUsage struct { 124 | Total uint64 `json:"total,omitempty"` 125 | UserMode uint64 `json:"user_mode,omitempty"` 126 | KernelMode uint64 `json:"kernel_mode,omitempty"` 127 | PerCpu []uint64 `json:"per_cpu,omitempty"` 128 | } 129 | 130 | type ThrottlingData struct { 131 | NrPeriods uint64 `json:"nr_periods,omitempty"` 132 | NrThrottled uint64 `json:"nr_throttled,omitempty"` 133 | ThrottledTime uint64 `json:"throttled_time,omitempty"` 134 | } 135 | 136 | type MemoryStats struct { 137 | Cache uint64 `json:"cache,omitempty"` 138 | Usage MemoryData `json:"usage,omitempty"` 139 | SwapUsage MemoryData `json:"swap_usage,omitempty"` 140 | KernelUsage MemoryData `json:"kernel_usage,omitempty"` 141 | Stats map[string]uint64 `json:"statistics,omitempty"` 142 | } 143 | 144 | type MemoryData struct { 145 | Usage uint64 `json:"usage,omitempty"` 146 | MaxUsage uint64 `json:"max_usage,omitempty"` 147 | Failcnt uint64 `json:"failcnt,omitempty"` 148 | } 149 | 150 | type BlkioStats struct { 151 | // number of bytes transferred to and from the block device 152 | IoServiceBytesRecursive []BlkioStatEntry `json:"io_service_bytes_recursive,omitempty"` 153 | IoServicedRecursive []BlkioStatEntry `json:"io_serviced_recursive,omitempty"` 154 | IoQueuedRecursive []BlkioStatEntry `json:"io_queue_recursive,omitempty"` 155 | IoServiceTimeRecursive []BlkioStatEntry `json:"io_service_time_recursive,omitempty"` 156 | IoWaitTimeRecursive []BlkioStatEntry `json:"io_wait_time_recursive,omitempty"` 157 | IoMergedRecursive []BlkioStatEntry `json:"io_merged_recursive,omitempty"` 158 | IoTimeRecursive []BlkioStatEntry `json:"io_time_recursive,omitempty"` 159 | SectorsRecursive []BlkioStatEntry `json:"sectors_recursive,omitempty"` 160 | } 161 | 162 | type BlkioStatEntry struct { 163 | Major uint64 `json:"major,omitempty"` 164 | Minor uint64 `json:"minor,omitempty"` 165 | Op string `json:"op,omitempty"` 166 | Value uint64 `json:"value,omitempty"` 167 | } 168 | 169 | type HugetlbStats struct { 170 | // current res_counter usage for hugetlb 171 | Usage uint64 `json:"usage,omitempty"` 172 | // maximum usage ever recorded.
173 | MaxUsage uint64 `json:"max_usage,omitempty"` 174 | // number of times hugetlb usage allocation failed. 175 | Failcnt uint64 `json:"failcnt,omitempty"` 176 | } 177 | 178 | type PidsStats struct { 179 | Current uint64 `json:"current,omitempty"` 180 | Limit uint64 `json:"limit,omitempty"` 181 | } 182 | 183 | // CpuSetStats stores information regarding subsystem assignment of individual CPUs and memory nodes 184 | type CpuSetStats struct { 185 | Cpus string `json:"cpus,omitempty"` 186 | Mems string `json:"mems,omitempty"` 187 | MemoryMigrate uint64 `json:"memory_migrate,omitempty"` 188 | CpuExclusive uint64 `json:"cpu_exclusive,omitempty"` 189 | MemoryExclusive uint64 `json:"memory_exclusive,omitempty"` 190 | } 191 | 192 | // NetworkInterface holds name of network interface and its statistics (rx_bytes, tx_bytes, etc.) 193 | type NetworkInterface struct { 194 | // Name is the name of the network interface. 195 | Name string `json:"-"` 196 | 197 | RxBytes uint64 `json:"rx_bytes,omitempty"` 198 | RxPackets uint64 `json:"rx_packets,omitempty"` 199 | RxErrors uint64 `json:"rx_errors,omitempty"` 200 | RxDropped uint64 `json:"rx_dropped,omitempty"` 201 | TxBytes uint64 `json:"tx_bytes,omitempty"` 202 | TxPackets uint64 `json:"tx_packets,omitempty"` 203 | TxErrors uint64 `json:"tx_errors,omitempty"` 204 | TxDropped uint64 `json:"tx_dropped,omitempty"` 205 | } 206 | 207 | // FilesystemInterface holds statistics about filesystem device, capacity, usage, etc. 208 | type FilesystemInterface struct { 209 | // The block device name associated with the filesystem 210 | Device string `json:"device_name,omitempty"` 211 | 212 | // Type of the filesystem 213 | Type string `json:"type,omitempty"` 214 | 215 | // Number of bytes that can be consumed on this filesystem 216 | Limit uint64 `json:"capacity,omitempty"` 217 | 218 | // Number of bytes that are consumed on this filesystem 219 | Usage uint64 `json:"usage,omitempty"` 220 | 221 | // Base Usage that is consumed by the container's writable layer 222 | BaseUsage uint64 `json:"base_usage,omitempty"` 223 | 224 | // Number of bytes available for non-root user 225 | Available uint64 `json:"available,omitempty"` 226 | 227 | // Number of available Inodes 228 | InodesFree uint64 `json:"inodes_free,omitempty"` 229 | 230 | // This is the total number of reads completed successfully 231 | ReadsCompleted uint64 `json:"reads_completed,omitempty"` 232 | 233 | // This is the total number of reads merged successfully.
This field lets you know how often this was done 234 | ReadsMerged uint64 `json:"reads_merged,omitempty"` 235 | 236 | // This is the total number of sectors read successfully 237 | SectorsRead uint64 `json:"sectors_read,omitempty"` 238 | 239 | // This is the total number of milliseconds spent reading 240 | ReadTime uint64 `json:"read_time,omitempty"` 241 | 242 | // This is the total number of writes completed successfully 243 | WritesCompleted uint64 `json:"writes_completed,omitempty"` 244 | 245 | // This is the total number of writes merged successfully 246 | WritesMerged uint64 `json:"writes_merged,omitempty"` 247 | 248 | // This is the total number of sectors written successfully 249 | SectorsWritten uint64 `json:"sectors_written,omitempty"` 250 | 251 | // This is the total number of milliseconds spent writing 252 | WriteTime uint64 `json:"write_time,omitempty"` 253 | 254 | // Number of I/Os currently in progress 255 | IoInProgress uint64 `json:"io_in_progress,omitempty"` 256 | 257 | // Number of milliseconds spent doing I/Os 258 | IoTime uint64 `json:"io_time,omitempty"` 259 | 260 | // weighted number of milliseconds spent doing I/Os 261 | // This field is incremented at each I/O start, I/O completion, I/O 262 | // merge, or read of these stats by the number of I/Os in progress 263 | // (field 9) times the number of milliseconds spent doing I/O since the 264 | // last update of this field. This can provide an easy measure of both 265 | // I/O completion time and the backlog that may be accumulating. 266 | WeightedIoTime uint64 `json:"weighted_io_time,omitempty"` 267 | } 268 | 269 | // TcpInterface holds tcp and tcp6 statistics 270 | type TcpInterface struct { 271 | Tcp TcpStat `json:"tcp,omitempty"` // TCP connection stats (Established, Listen, etc.) 272 | Tcp6 TcpStat `json:"tcp6,omitempty"` // TCP6 connection stats (Established, Listen, etc.) 
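// Marshalled to JSON this is expected to look like (values are illustrative only):
//   "connection": {"tcp": {"established": 2, "listen": 1}, "tcp6": {"listen": 1}}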
273 | } 274 | 275 | // TcpStat holds statistics about the count of connections in different states 276 | type TcpStat struct { 277 | //Count of TCP connections in state "Established" 278 | Established uint64 `json:"established,omitempty"` 279 | //Count of TCP connections in state "Syn_Sent" 280 | SynSent uint64 `json:"syn_sent,omitempty"` 281 | //Count of TCP connections in state "Syn_Recv" 282 | SynRecv uint64 `json:"syn_recv,omitempty"` 283 | //Count of TCP connections in state "Fin_Wait1" 284 | FinWait1 uint64 `json:"fin_wait1,omitempty"` 285 | //Count of TCP connections in state "Fin_Wait2" 286 | FinWait2 uint64 `json:"fin_wait2,omitempty"` 287 | //Count of TCP connections in state "Time_Wait" 288 | TimeWait uint64 `json:"time_wait,omitempty"` 289 | //Count of TCP connections in state "Close" 290 | Close uint64 `json:"close,omitempty"` 291 | //Count of TCP connections in state "Close_Wait" 292 | CloseWait uint64 `json:"close_wait,omitempty"` 293 | //Count of TCP connections in state "Last_Ack" 294 | LastAck uint64 `json:"last_ack,omitempty"` 295 | //Count of TCP connections in state "Listen" 296 | Listen uint64 `json:"listen,omitempty"` 297 | //Count of TCP connections in state "Closing" 298 | Closing uint64 `json:"closing,omitempty"` 299 | } 300 | 301 | // NewStatistics returns a pointer to an initialized Statistics 302 | func NewStatistics() *Statistics { 303 | return &Statistics{ 304 | Network: []NetworkInterface{}, 305 | Cgroups: newCgroupsStats(), 306 | Connection: TcpInterface{ 307 | Tcp: TcpStat{}, 308 | Tcp6: TcpStat{}, 309 | }, 310 | Filesystem: map[string]FilesystemInterface{}, 311 | } 312 | } 313 | 314 | func newCgroupsStats() *Cgroups { 315 | cgroups := Cgroups{ 316 | MemoryStats: MemoryStats{Stats: make(map[string]uint64)}, 317 | HugetlbStats: make(map[string]HugetlbStats), 318 | } 319 | for _, memstatName := range listOfMemoryStats { 320 | cgroups.MemoryStats.Stats[memstatName] = 0 321 | } 322 | return &cgroups 323 | } 324 | 325 | var listOfMemoryStats = []string{ 326 | "active_anon", "active_file", "inactive_anon", "inactive_file", "cache", "dirty", "swap", 327 | "hierarchical_memory_limit", "hierarchical_memsw_limit", "mapped_file", "pgfault", "pgmajfault", "pgpgin", 328 | "pgpgout", "rss", "rss_huge", "total_active_anon", "total_active_file", "total_cache", "total_dirty", 329 | "total_inactive_anon", "total_inactive_file", "total_mapped_file", "total_pgfault", "total_pgmajfault", 330 | "total_pgpgin", "total_pgpgout", "total_rss", "total_rss_huge", "total_swap", "total_unevictable", 331 | "total_writeback", "unevictable", "working_set", "writeback", 332 | } 333 | -------------------------------------------------------------------------------- /examples/.setup.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -e 4 | set -u 5 | set -o pipefail 6 | 7 | __dir="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" 8 | 9 | # check for dependencies 10 | EXIT_ON_ERROR=0 11 | command -v docker >/dev/null 2>&1 || { echo >&2 "Error: docker needs to be installed."; EXIT_ON_ERROR=1; } 12 | command -v docker-compose >/dev/null 2>&1 || { echo >&2 "Error: docker-compose needs to be installed."; EXIT_ON_ERROR=1; } 13 | docker version >/dev/null 2>&1 || { echo >&2 "Error: docker needs to be configured."; EXIT_ON_ERROR=1; } 14 | if [[ $EXIT_ON_ERROR -gt 0 ]]; then 15 | exit 1 16 | fi 17 | 18 | # docker compose the Snap container 19 | (cd $__dir && docker-compose up -d) 20 | 21 | # clean up containers on exit 22 | function finish { 23 | (cd
$__dir && docker-compose down) 24 | } 25 | trap finish EXIT INT TERM 26 | -------------------------------------------------------------------------------- /examples/README.md: -------------------------------------------------------------------------------- 1 | # Example tasks 2 | 3 | [This](tasks/docker-file.json) example task will publish metrics to **file** 4 | from the mock plugin. 5 | 6 | ## Running the example 7 | 8 | ### Requirements 9 | * `docker` and `docker-compose` are **installed** and **configured** 10 | 11 | ### Collecting from docker in docker 12 | Run the script `./run-docker-file.sh`. 13 | 14 | ### Collecting from your docker 15 | Run the script `./run-dockerception.sh`. 16 | 17 | ## Files 18 | 19 | - [run-docker-file.sh](run-docker-file.sh) 20 | - This script launchs docker in docker 21 | - [run-dockerception.sh](run-dockerception.sh) 22 | - This script runs the example inside the Snap container 23 | - [tasks/docker-file.json](tasks/docker-file.json) 24 | - Snap task definition 25 | - [docker-compose.yml](docker-compose.yml) 26 | - A docker compose file which defines the "docker" container. 27 | - [docker-file.sh](docker-file.sh) 28 | - Downloads `snapteld`, `snaptel`, `snap-plugin-publisher-file`, 29 | `snap-plugin-collector-docker` and starts the task 30 | [tasks/docker-file.json](tasks/docker-file.json). 31 | - [.setup.sh](.setup.sh) 32 | - Verifies dependencies and starts the containers. It's called 33 | by [run-docker-file.sh](run-docker-file.sh). -------------------------------------------------------------------------------- /examples/configs/config.yaml: -------------------------------------------------------------------------------- 1 | control: 2 | plugins: 3 | collector: 4 | docker: 5 | endpoint: "unix:///var/run/docker.sock" 6 | -------------------------------------------------------------------------------- /examples/docker-compose.yml: -------------------------------------------------------------------------------- 1 | version: '2' 2 | services: 3 | docker: 4 | container_name: docker 5 | image: docker:dind 6 | environment: 7 | - SNAP_VERSION=latest 8 | - PLUGIN_SRC=${PLUGIN_DEST} 9 | volumes: 10 | - ${PLUGIN_SRC}:${PLUGIN_DEST} 11 | - /etc/resolv.conf:/etc/resolv.conf 12 | privileged: true -------------------------------------------------------------------------------- /examples/docker-file.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -e 4 | set -u 5 | set -o pipefail 6 | 7 | # get the directory the script exists in 8 | __dir="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" 9 | 10 | # source the common bash script 11 | . 
"${__dir}/../scripts/common.sh" 12 | 13 | # ensure PLUGIN_PATH is set 14 | TMPDIR=${TMPDIR:-"/tmp"} 15 | PLUGIN_PATH=${PLUGIN_PATH:-"${TMPDIR}/snap/plugins"} 16 | mkdir -p $PLUGIN_PATH 17 | 18 | init_snap 19 | snapteld -t ${SNAP_TRUST_LEVEL} -l ${SNAP_LOG_LEVEL} & 20 | 21 | _info "Get latest plugins" 22 | (cd $PLUGIN_PATH && curl -sfLSO http://snap.ci.snap-telemetry.io/plugins/snap-plugin-publisher-file/master/latest/linux/x86_64/snap-plugin-publisher-file && chmod 755 snap-plugin-publisher-file) 23 | (cd $PLUGIN_PATH && curl -sfLSO http://snap.ci.snap-telemetry.io/plugins/snap-plugin-collector-docker/latest_build/linux/x86_64/snap-plugin-collector-docker && chmod 755 snap-plugin-collector-docker) 24 | 25 | SNAP_FLAG=0 26 | 27 | # this block will wait check if snaptel and snapteld are loaded before the plugins are loaded and the task is started 28 | for i in `seq 1 5`; do 29 | if [[ -f /usr/local/bin/snaptel && -f /usr/local/sbin/snapteld ]]; 30 | then 31 | 32 | _info "loading plugins" 33 | snaptel plugin load "${PLUGIN_PATH}/snap-plugin-publisher-file" 34 | snaptel plugin load "${PLUGIN_PATH}/snap-plugin-collector-docker" 35 | 36 | _info "creating and starting a task" 37 | snaptel task create -t "${__dir}/tasks/docker-file.json" 38 | 39 | SNAP_FLAG=1 40 | 41 | break 42 | fi 43 | 44 | _info "snaptel and/or snapteld are unavailable, sleeping for 3 seconds" 45 | sleep 3 46 | done 47 | 48 | 49 | # check if snaptel/snapteld have loaded 50 | if [ $SNAP_FLAG -eq 0 ] 51 | then 52 | echo "Could not load snaptel or snapteld" 53 | exit 1 54 | fi 55 | -------------------------------------------------------------------------------- /examples/run-docker-file.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -e 4 | set -u 5 | set -o pipefail 6 | 7 | # get the directory the script exists in 8 | __dir="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" 9 | __proj_dir="$(cd $__dir && cd ../ && pwd)" 10 | __proj_name="$(basename $__proj_dir)" 11 | 12 | export PLUGIN_SRC="${__proj_dir}" 13 | export PLUGIN_DEST="/${__proj_name}" 14 | 15 | # source the common bash script 16 | . "${__proj_dir}/scripts/common.sh" 17 | 18 | # verifies dependencies and starts bind 19 | . 
"${__proj_dir}/examples/.setup.sh" 20 | 21 | # dockerception.sh will create the Snap container and run the $RUN_SCRIPT 22 | cd "${__proj_dir}/examples" && docker-compose exec docker sh -c "${PLUGIN_DEST}/examples/run-dockerception.sh" -------------------------------------------------------------------------------- /examples/run-dockerception.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | set -e 4 | set -u 5 | set -o pipefail 6 | 7 | # PROCFS_MOUNT is the path where /proc will be mounted on the Snap container 8 | PROCFS_MOUNT="${PROCFS_MOUNT:-"/proc_host"}" 9 | SNAP_VERSION="${SNAP_VERSION:-"latest"}" 10 | 11 | # define the default plugin folder locations 12 | PLUGIN_SRC="${PLUGIN_SRC:-"$(cd "$(dirname "$0")"/../ && pwd)"}" 13 | PLUGIN_DEST="${PLUGIN_DEST:-$PLUGIN_SRC}" 14 | 15 | # docker-file.sh will download plugins, starts snap, load plugins and start a task 16 | DEFAULT_SCRIPT="${PLUGIN_DEST}/examples/docker-file.sh && printf \"\n\nhint: type 'snaptel task list'\ntype 'exit' when your done\n\n\" && bash" 17 | RUN_SCRIPT="export SNAP_VERSION=${SNAP_VERSION} && export PROCFS_MOUNT=${PROCFS_MOUNT} && ${RUN_SCRIPT:-$DEFAULT_SCRIPT}" 18 | 19 | 20 | docker run -i --name dockerception -v /proc:${PROCFS_MOUNT} -v /var/lib/docker:/var/lib/docker -v /sys/fs/cgroup:/sys/fs/cgroup -v /var/run/docker.sock:/var/run/docker.sock -v ${PLUGIN_SRC}:${PLUGIN_DEST} intelsdi/snap:alpine_test bash -c "$RUN_SCRIPT" 21 | docker rm dockerception -------------------------------------------------------------------------------- /examples/tasks/docker-file.json: -------------------------------------------------------------------------------- 1 | { 2 | "version": 1, 3 | "schedule": { 4 | "type": "simple", 5 | "interval": "5s" 6 | }, 7 | "workflow": { 8 | "collect": { 9 | "metrics": { 10 | "/intel/docker/*/spec/*": {}, 11 | "/intel/docker/*/stats/cgroups/cpu_stats/*": {}, 12 | "/intel/docker/*/stats/cgroups/memory_stats/*": {} 13 | }, 14 | "config": {}, 15 | "publish": [ 16 | { 17 | "plugin_name": "file", 18 | "config": { 19 | "file": "/tmp/snap-docker-file.log" 20 | } 21 | } 22 | ] 23 | } 24 | } 25 | } 26 | -------------------------------------------------------------------------------- /examples/tasks/docker-file.yaml: -------------------------------------------------------------------------------- 1 | max-failures: 10 2 | schedule: 3 | interval: 5s 4 | type: simple 5 | version: 1 6 | workflow: 7 | collect: 8 | config: 9 | /intel/docker: 10 | endpoint: tcp://192.168.0.114:2375 11 | metrics: 12 | /intel/docker/*/spec/*: {} 13 | /intel/docker/*/stats/cgroups/cpu_stats/*: {} 14 | /intel/docker/*/stats/cgroups/memory_stats/*: {} 15 | publish: 16 | - 17 | config: 18 | file: /tmp/snap-docker-file.log 19 | plugin_name: file 20 | -------------------------------------------------------------------------------- /glide.lock: -------------------------------------------------------------------------------- 1 | hash: e4767bb4ab4e95246fcf537fe41cb7d7954d6a944c17949349ed83bc3a6d971f 2 | updated: 2017-11-02T05:51:48.080910199+08:00 3 | imports: 4 | - name: github.com/Azure/go-ansiterm 5 | version: d6e3b3328b783f23731bc4d058875b0371ff8109 6 | subpackages: 7 | - winterm 8 | - name: github.com/containerd/continuity 9 | version: 1bed1ecb1dc42d8f4d2ac8c23e5cac64749e82c9 10 | subpackages: 11 | - pathdriver 12 | - name: github.com/davecgh/go-spew 13 | version: 6d212800a42e8ab5c146b8ace3490ee17e5225f9 14 | subpackages: 15 | - spew 16 | - name: github.com/docker/docker 17 | version: 
ba99c19b593bdb9e7b90793681fe89b0a91781ed 18 | subpackages: 19 | - api/types 20 | - api/types/blkiodev 21 | - api/types/container 22 | - api/types/filters 23 | - api/types/mount 24 | - api/types/network 25 | - api/types/registry 26 | - api/types/strslice 27 | - api/types/swarm 28 | - api/types/swarm/runtime 29 | - api/types/versions 30 | - opts 31 | - pkg/archive 32 | - pkg/fileutils 33 | - pkg/homedir 34 | - pkg/idtools 35 | - pkg/ioutils 36 | - pkg/jsonmessage 37 | - pkg/longpath 38 | - pkg/mount 39 | - pkg/pools 40 | - pkg/stdcopy 41 | - pkg/system 42 | - pkg/term 43 | - pkg/term/windows 44 | - name: github.com/docker/go-connections 45 | version: 3ede32e2033de7505e6500d6c868c2b9ed9f169d 46 | subpackages: 47 | - nat 48 | - name: github.com/docker/go-units 49 | version: 0dadbb0345b35ec7ef35e228dabb8de89a65bf52 50 | - name: github.com/fsouza/go-dockerclient 51 | version: 412c004d923b7b89701e7a1632de83f843657a03 52 | - name: github.com/gogo/protobuf 53 | version: 117892bf1866fbaa2318c03e50e40564c8845457 54 | subpackages: 55 | - proto 56 | - name: github.com/golang/protobuf 57 | version: 748d386b5c1ea99658fd69fe9f03991ce86a90c1 58 | subpackages: 59 | - proto 60 | - ptypes 61 | - ptypes/any 62 | - ptypes/duration 63 | - ptypes/timestamp 64 | - name: github.com/intelsdi-x/snap-plugin-lib-go 65 | version: 69934c200c23811291535a804852ff2231bf85f0 66 | subpackages: 67 | - v1/plugin 68 | - v1/plugin/rpc 69 | - name: github.com/intelsdi-x/snap-plugin-utilities 70 | version: 53c6d26990688f3f277511f5501518167f4cb798 71 | subpackages: 72 | - ns 73 | - str 74 | - name: github.com/julienschmidt/httprouter 75 | version: 8c199fb6259ffc1af525cc3ad52ee60ba8359669 76 | - name: github.com/Microsoft/go-winio 77 | version: 78439966b38d69bf38227fbf57ac8a6fee70f69a 78 | - name: github.com/mistifyio/go-zfs 79 | version: cdc0f941c4d0e0e94d85348285568d921891e138 80 | - name: github.com/moby/moby 81 | version: 092cba3727bb9b4a2f0e922cd6c0f93ea270e363 82 | subpackages: 83 | - pkg/mount 84 | - name: github.com/Nvveen/Gotty 85 | version: cd527374f1e5bff4938207604a14f2e38a9cf512 86 | - name: github.com/oleiade/reflections 87 | version: 632977f98cd34d217c4b57d0840ec188b3d3dcaf 88 | - name: github.com/opencontainers/go-digest 89 | version: 279bed98673dd5bef374d3b6e4b09e2af76183bf 90 | - name: github.com/opencontainers/image-spec 91 | version: 7c889fafd04a893f5c5f50b7ab9963d5d64e5242 92 | subpackages: 93 | - specs-go 94 | - specs-go/v1 95 | - name: github.com/opencontainers/runc 96 | version: 8211e8a2668241108ea9a71d9855a0c1bef6bf63 97 | subpackages: 98 | - libcontainer/system 99 | - libcontainer/user 100 | - name: github.com/pkg/errors 101 | version: 645ef00459ed84a119197bfb8d8205042c6df63d 102 | - name: github.com/pmezard/go-difflib 103 | version: d8ed2627bdf02c080bf22230dbb337003b7aba2d 104 | subpackages: 105 | - difflib 106 | - name: github.com/sirupsen/logrus 107 | version: f006c2ac4710855cf0f916dd6b77acf6b048dc6e 108 | - name: github.com/stretchr/objx 109 | version: cbeaeb16a013161a98496fad62933b1d21786672 110 | - name: github.com/stretchr/testify 111 | version: 69483b4bd14f5845b5a1e55bca19e954e827f1d0 112 | subpackages: 113 | - assert 114 | - mock 115 | - require 116 | - suite 117 | - name: github.com/urfave/cli 118 | version: cfb38830724cc34fedffe9a2a29fb54fa9169cd1 119 | - name: golang.org/x/crypto 120 | version: aedad9a179ec1ea11b7064c57cbc6dc30d7724ec 121 | subpackages: 122 | - ssh/terminal 123 | - name: golang.org/x/net 124 | version: f5079bd7f6f74e23c4d65efa0f4ce14cbd6a3c0f 125 | subpackages: 126 | - context 127 
| - context/ctxhttp 128 | - http2 129 | - http2/hpack 130 | - idna 131 | - internal/timeseries 132 | - lex/httplex 133 | - trace 134 | - name: golang.org/x/sys 135 | version: c8bc69bc2db9c57ccf979550bc69655df5039a8a 136 | subpackages: 137 | - unix 138 | - windows 139 | - name: golang.org/x/text 140 | version: 6eab0e8f74e86c598ec3b6fad4888e0c11482d48 141 | subpackages: 142 | - secure/bidirule 143 | - transform 144 | - unicode/bidi 145 | - unicode/norm 146 | - name: google.golang.org/genproto 147 | version: f676e0f3ac6395ff1a529ae59a6670878a8371a6 148 | subpackages: 149 | - googleapis/rpc/status 150 | - name: google.golang.org/grpc 151 | version: 61d37c5d657a47e4404fd6823bd598341a2595de 152 | subpackages: 153 | - balancer 154 | - codes 155 | - connectivity 156 | - credentials 157 | - grpclb/grpc_lb_v1/messages 158 | - grpclog 159 | - internal 160 | - keepalive 161 | - metadata 162 | - naming 163 | - peer 164 | - resolver 165 | - stats 166 | - status 167 | - tap 168 | - transport 169 | testImports: 170 | - name: github.com/gopherjs/gopherjs 171 | version: 4b53e1bddba0e2f734514aeb6c02db652f4c6fe8 172 | subpackages: 173 | - js 174 | - name: github.com/jtolds/gls 175 | version: 8ddce2a84170772b95dd5d576c48d517b22cac63 176 | - name: github.com/smartystreets/assertions 177 | version: 4ea54c1f28ad3ae597e76607dea3871fa177e263 178 | subpackages: 179 | - internal/go-render/render 180 | - internal/oglematchers 181 | - name: github.com/smartystreets/goconvey 182 | version: 9e8dc3f972df6c8fcc0375ef492c24d0bb204857 183 | subpackages: 184 | - convey 185 | - convey/gotest 186 | - convey/reporting 187 | -------------------------------------------------------------------------------- /glide.yaml: -------------------------------------------------------------------------------- 1 | package: github.com/intelsdi-x/snap-plugin-collector-docker 2 | import: 3 | - package: github.com/sirupsen/logrus 4 | version: ^1.0.2 5 | - package: github.com/docker/go-units 6 | version: ^0.3.1 7 | - package: github.com/fsouza/go-dockerclient 8 | - package: github.com/intelsdi-x/snap-plugin-lib-go 9 | subpackages: 10 | - v1/plugin 11 | - package: github.com/intelsdi-x/snap-plugin-utilities 12 | subpackages: 13 | - ns 14 | - package: github.com/mistifyio/go-zfs 15 | version: ^2.1.1 16 | - package: github.com/moby/moby 17 | version: ^1.13.0-rc4 18 | subpackages: 19 | - pkg/mount 20 | - package: github.com/stretchr/testify 21 | version: ^1.1.4 22 | subpackages: 23 | - mock 24 | testImport: 25 | - package: github.com/smartystreets/goconvey 26 | version: ^1.6.2 27 | subpackages: 28 | - convey 29 | -------------------------------------------------------------------------------- /main.go: -------------------------------------------------------------------------------- 1 | // +build linux 2 | 3 | /* 4 | http://www.apache.org/licenses/LICENSE-2.0.txt 5 | 6 | 7 | Copyright 2015 Intel Corporation 8 | 9 | Licensed under the Apache License, Version 2.0 (the "License"); 10 | you may not use this file except in compliance with the License. 11 | You may obtain a copy of the License at 12 | 13 | http://www.apache.org/licenses/LICENSE-2.0 14 | 15 | Unless required by applicable law or agreed to in writing, software 16 | distributed under the License is distributed on an "AS IS" BASIS, 17 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 18 | See the License for the specific language governing permissions and 19 | limitations under the License. 
20 | */ 21 | 22 | package main 23 | 24 | import ( 25 | "github.com/intelsdi-x/snap-plugin-lib-go/v1/plugin" 26 | 27 | "github.com/intelsdi-x/snap-plugin-collector-docker/collector" 28 | ) 29 | 30 | func main() { 31 | 32 | plugin.StartCollector(collector.New(), collector.PLUGIN_NAME, collector.PLUGIN_VERSION) 33 | } 34 | -------------------------------------------------------------------------------- /metadata.yml: -------------------------------------------------------------------------------- 1 | name: docker 2 | type: collector 3 | maintainer: core 4 | license: Apache-2.0 5 | description: "Collects Docker container runtime metrics." 6 | badge: 7 | - "[![Build Status](https://travis-ci.org/intelsdi-x/snap-plugin-collector-docker.svg?branch=master)](https://travis-ci.org/intelsdi-x/snap-plugin-collector-docker)" 8 | ci: 9 | - https://travis-ci.org/intelsdi-x/snap-plugin-collector-docker 10 | -------------------------------------------------------------------------------- /mocks/mocks.go: -------------------------------------------------------------------------------- 1 | /* 2 | http://www.apache.org/licenses/LICENSE-2.0.txt 3 | 4 | 5 | Copyright 2015 Intel Corporation 6 | 7 | Licensed under the Apache License, Version 2.0 (the "License"); 8 | you may not use this file except in compliance with the License. 9 | You may obtain a copy of the License at 10 | 11 | http://www.apache.org/licenses/LICENSE-2.0 12 | 13 | Unless required by applicable law or agreed to in writing, software 14 | distributed under the License is distributed on an "AS IS" BASIS, 15 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 16 | See the License for the specific language governing permissions and 17 | limitations under the License. 18 | */ 19 | 20 | package mocks 21 | 22 | import ( 23 | "github.com/fsouza/go-dockerclient" 24 | "github.com/stretchr/testify/mock" 25 | 26 | "github.com/intelsdi-x/snap-plugin-collector-docker/container" 27 | ) 28 | 29 | type ClientMock struct { 30 | mock.Mock 31 | } 32 | 33 | func (cm *ClientMock) FindCgroupMountpoint(procfs string, subsystem string) (string, error) { 34 | ret := cm.Called(procfs, subsystem) 35 | 36 | return ret.String(0), ret.Error(1) 37 | } 38 | 39 | func (cm *ClientMock) FindControllerMountpoint(cgroupPath, pid, procfs string) (string, error) { 40 | ret := cm.Called(cgroupPath, pid, procfs) 41 | return ret.String(0), ret.Error(1) 42 | } 43 | 44 | func (cm *ClientMock) NewDockerClient() (*container.DockerClient, error) { 45 | args := cm.Called() 46 | 47 | var r0 *container.DockerClient 48 | if args.Get(0) != nil { 49 | r0 = args.Get(0).(*container.DockerClient) 50 | } 51 | return r0, args.Error(1) 52 | } 53 | 54 | func (cm *ClientMock) ListContainersAsMap() (map[string]*container.ContainerData, error) { 55 | args := cm.Called() 56 | 57 | var r0 map[string]*container.ContainerData 58 | if args.Get(0) != nil { 59 | r0 = args.Get(0).(map[string]*container.ContainerData) 60 | } 61 | return r0, args.Error(1) 62 | } 63 | 64 | func (cm *ClientMock) InspectContainer(string) (*docker.Container, error) { 65 | args := cm.Called() 66 | 67 | var r0 *docker.Container 68 | 69 | if args.Get(0) != nil { 70 | r0 = args.Get(0).(*docker.Container) 71 | } 72 | 73 | return r0, args.Error(1) 74 | } 75 | 76 | func (cm *ClientMock) GetDockerParams(params ...string) (map[string]string, error) { 77 | ret := cm.Called(params) 78 | return ret.Get(0).(map[string]string), ret.Error(1) 79 | } 80 | 81 | var MockGetters map[string]container.StatGetter = 
map[string]container.StatGetter{ 82 | "cpu_usage": &MockCpuAcct{}, 83 | "cache": &MockMemCache{}, 84 | "usage": &MockMemUsage{}, 85 | "statistics": &MockMemStats{}, 86 | "network": &MockNet{}, 87 | "tcp": &MockTcp{}, 88 | "tcp6": &MockTcp{}, 89 | } 90 | 91 | type MockCpuAcct struct{} 92 | 93 | func (m *MockCpuAcct) GetStats(stats *container.Statistics, opts container.GetStatOpt) error { 94 | stats.Cgroups.CpuStats.CpuUsage.PerCpu = []uint64{1111, 2222, 3333, 4444} 95 | return nil 96 | } 97 | 98 | type MockMemCache struct{} 99 | 100 | func (m *MockMemCache) GetStats(stats *container.Statistics, opts container.GetStatOpt) error { 101 | stats.Cgroups.MemoryStats.Cache = 1111 102 | return nil 103 | } 104 | 105 | type MockMemUsage struct{} 106 | 107 | func (m *MockMemUsage) GetStats(stats *container.Statistics, opts container.GetStatOpt) error { 108 | stats.Cgroups.MemoryStats.Usage.Failcnt = 1111 109 | stats.Cgroups.MemoryStats.Usage.Usage = 2222 110 | stats.Cgroups.MemoryStats.Usage.MaxUsage = 3333 111 | return nil 112 | } 113 | 114 | type MockMemStats struct{} 115 | 116 | func (m *MockMemStats) GetStats(stats *container.Statistics, opts container.GetStatOpt) error { 117 | stats.Cgroups.MemoryStats.Stats = map[string]uint64{"pgpgin": 11111} 118 | return nil 119 | } 120 | 121 | type MockNet struct{} 122 | 123 | func (m *MockNet) GetStats(stats *container.Statistics, opts container.GetStatOpt) error { 124 | stats.Network = []container.NetworkInterface{{Name: "eth0", TxBytes: 1111, RxBytes: 2222}} 125 | return nil 126 | } 127 | 128 | type MockTcp struct{} 129 | 130 | func (m *MockTcp) GetStats(stats *container.Statistics, opts container.GetStatOpt) error { 131 | stats.Connection.Tcp.Established = 1111 132 | return nil 133 | } 134 | -------------------------------------------------------------------------------- /scripts/build.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | # File managed by pluginsync 3 | 4 | # http://www.apache.org/licenses/LICENSE-2.0.txt 5 | # 6 | # 7 | # Copyright 2016 Intel Corporation 8 | # 9 | # Licensed under the Apache License, Version 2.0 (the "License"); 10 | # you may not use this file except in compliance with the License. 11 | # You may obtain a copy of the License at 12 | # 13 | # http://www.apache.org/licenses/LICENSE-2.0 14 | # 15 | # Unless required by applicable law or agreed to in writing, software 16 | # distributed under the License is distributed on an "AS IS" BASIS, 17 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 18 | # See the License for the specific language governing permissions and 19 | # limitations under the License. 20 | 21 | set -e 22 | set -u 23 | set -o pipefail 24 | 25 | __dir="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" 26 | __proj_dir="$(dirname "$__dir")" 27 | 28 | # shellcheck source=scripts/common.sh 29 | . "${__dir}/common.sh" 30 | 31 | plugin_name=${__proj_dir##*/} 32 | build_dir="${__proj_dir}/build" 33 | go_build=(go build -ldflags "-w") 34 | 35 | _info "project path: ${__proj_dir}" 36 | _info "plugin name: ${plugin_name}" 37 | 38 | export CGO_ENABLED=0 39 | 40 | # rebuild binaries: 41 | _debug "removing: ${build_dir:?}/*" 42 | rm -rf "${build_dir:?}/"* 43 | 44 | _info "building plugin: ${plugin_name}" 45 | export GOOS=linux 46 | export GOARCH=amd64 47 | mkdir -p "${build_dir}/${GOOS}/x86_64" 48 | "${go_build[@]}" -o "${build_dir}/${GOOS}/x86_64/${plugin_name}" . 
|| exit 1 49 | -------------------------------------------------------------------------------- /scripts/common.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | # File managed by pluginsync 3 | 4 | # http://www.apache.org/licenses/LICENSE-2.0.txt 5 | # 6 | # 7 | # Copyright 2016 Intel Corporation 8 | # 9 | # Licensed under the Apache License, Version 2.0 (the "License"); 10 | # you may not use this file except in compliance with the License. 11 | # You may obtain a copy of the License at 12 | # 13 | # http://www.apache.org/licenses/LICENSE-2.0 14 | # 15 | # Unless required by applicable law or agreed to in writing, software 16 | # distributed under the License is distributed on an "AS IS" BASIS, 17 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 18 | # See the License for the specific language governing permissions and 19 | # limitations under the License. 20 | 21 | set -e 22 | set -u 23 | set -o pipefail 24 | 25 | LOG_LEVEL="${LOG_LEVEL:-6}" 26 | NO_COLOR="${NO_COLOR:-}" 27 | NO_GO_TEST=${NO_GO_TEST:-'-not -path "./.*" -not -path "*/_*" -not -path "./Godeps/*" -not -path "./vendor/*"'} 28 | 29 | trap_exitcode() { 30 | exit $? 31 | } 32 | 33 | trap trap_exitcode SIGINT 34 | 35 | _fmt () { 36 | local color_debug="\x1b[35m" 37 | local color_info="\x1b[32m" 38 | local color_notice="\x1b[34m" 39 | local color_warning="\x1b[33m" 40 | local color_error="\x1b[31m" 41 | local colorvar=color_$1 42 | 43 | local color="${!colorvar:-$color_error}" 44 | local color_reset="\x1b[0m" 45 | if [ "${NO_COLOR}" = "true" ] || [[ "${TERM:-}" != "xterm"* ]] || [ -t 1 ]; then 46 | # Don't use colors on pipes or non-recognized terminals 47 | color=""; color_reset="" 48 | fi 49 | echo -e "$(date -u +"%Y-%m-%d %H:%M:%S UTC") ${color}$(printf "[%9s]" "${1}")${color_reset}"; 50 | } 51 | 52 | _debug () { [ "${LOG_LEVEL}" -ge 7 ] && echo "$(_fmt debug) ${*}" 1>&2 || true; } 53 | _info () { [ "${LOG_LEVEL}" -ge 6 ] && echo "$(_fmt info) ${*}" 1>&2 || true; } 54 | _notice () { [ "${LOG_LEVEL}" -ge 5 ] && echo "$(_fmt notice) ${*}" 1>&2 || true; } 55 | _warning () { [ "${LOG_LEVEL}" -ge 4 ] && echo "$(_fmt warning) ${*}" 1>&2 || true; } 56 | _error () { [ "${LOG_LEVEL}" -ge 3 ] && echo "$(_fmt error) ${*}" 1>&2 || true; exit 1; } 57 | 58 | _test_files() { 59 | local test_files=$(sh -c "find . -type f -name '*.go' ${NO_GO_TEST} -print") 60 | _debug "go source files ${test_files}" 61 | echo "${test_files}" 62 | } 63 | 64 | _test_dirs() { 65 | local test_dirs=$(sh -c "find . -type f -name '*.go' ${NO_GO_TEST} -print0" | xargs -0 -n1 dirname | sort -u) 66 | _debug "go code directories ${test_dirs}" 67 | echo "${test_dirs}" 68 | } 69 | 70 | _go_get() { 71 | local _url=$1 72 | local _util 73 | 74 | _util=$(basename "${_url}") 75 | 76 | type -p "${_util}" > /dev/null || go get "${_url}" && _debug "go get ${_util} ${_url}" 77 | } 78 | 79 | _gofmt() { 80 | test -z "$(gofmt -l -d $(_test_files) | tee /dev/stderr)" 81 | } 82 | 83 | _goimports() { 84 | _go_get golang.org/x/tools/cmd/goimports 85 | test -z "$(goimports -l -d $(_test_files) | tee /dev/stderr)" 86 | } 87 | 88 | _golint() { 89 | _go_get github.com/golang/lint/golint 90 | golint ./... 91 | } 92 | 93 | _go_vet() { 94 | go vet $(_test_dirs) 95 | } 96 | 97 | _go_race() { 98 | go test -race ./... 
99 | } 100 | 101 | _go_test() { 102 | _info "running test type: ${TEST_TYPE}" 103 | # Standard go tooling behavior is to ignore dirs with leading underscores 104 | for dir in $(_test_dirs); 105 | do 106 | if [[ -z ${go_cover+x} ]]; then 107 | _debug "running go test with cover in ${dir}" 108 | go test -v --tags="${TEST_TYPE}" -covermode=count -coverprofile="${dir}/profile.tmp" "${dir}" 109 | if [ -f "${dir}/profile.tmp" ]; then 110 | tail -n +2 "${dir}/profile.tmp" >> profile.cov 111 | rm "${dir}/profile.tmp" 112 | fi 113 | else 114 | _debug "running go test without cover in ${dir}" 115 | go test -v --tags="${TEST_TYPE}" "${dir}" 116 | fi 117 | done 118 | } 119 | 120 | _go_cover() { 121 | go tool cover -func profile.cov 122 | } 123 | -------------------------------------------------------------------------------- /scripts/config/docker-deployment.yml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: extensions/v1beta1 3 | kind: Deployment 4 | metadata: 5 | name: docker-deployment 6 | spec: 7 | replicas: 1 8 | template: 9 | metadata: 10 | labels: 11 | app: docker 12 | spec: 13 | containers: 14 | - name: main 15 | image: docker:dind 16 | env: 17 | - name: SNAP_VERSION 18 | value: "latest" 19 | imagePullPolicy: "IfNotPresent" 20 | securityContext: 21 | privileged: true -------------------------------------------------------------------------------- /scripts/deps.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | # File managed by pluginsync 3 | 4 | # http://www.apache.org/licenses/LICENSE-2.0.txt 5 | # 6 | # 7 | # Copyright 2016 Intel Corporation 8 | # 9 | # Licensed under the Apache License, Version 2.0 (the "License"); 10 | # you may not use this file except in compliance with the License. 11 | # You may obtain a copy of the License at 12 | # 13 | # http://www.apache.org/licenses/LICENSE-2.0 14 | # 15 | # Unless required by applicable law or agreed to in writing, software 16 | # distributed under the License is distributed on an "AS IS" BASIS, 17 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 18 | # See the License for the specific language governing permissions and 19 | # limitations under the License. 20 | 21 | set -e 22 | set -u 23 | set -o pipefail 24 | 25 | __dir="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" 26 | __proj_dir="$(dirname "$__dir")" 27 | 28 | # shellcheck source=scripts/common.sh 29 | .
"${__dir}/common.sh" 30 | 31 | detect_go_dep() { 32 | [[ -f "${__proj_dir}/Godeps/Godeps.json" ]] && _dep='godep' 33 | [[ -f "${__proj_dir}/glide.yaml" ]] && _dep='glide' 34 | [[ -f "${__proj_dir}/vendor/vendor.json" ]] && _dep='govendor' 35 | _info "golang dependency tool: ${_dep}" 36 | echo "${_dep}" 37 | } 38 | 39 | install_go_dep() { 40 | local _dep=${_dep:=$(_detect_dep)} 41 | _info "ensuring ${_dep} is available" 42 | case $_dep in 43 | godep) 44 | _go_get github.com/tools/godep 45 | ;; 46 | glide) 47 | _go_get github.com/Masterminds/glide 48 | ;; 49 | govendor) 50 | _go_get github.com/kardianos/govendor 51 | ;; 52 | esac 53 | } 54 | 55 | restore_go_dep() { 56 | local _dep=${_dep:=$(_detect_dep)} 57 | _info "restoring dependency with ${_dep}" 58 | case $_dep in 59 | godep) 60 | (cd "${__proj_dir}" && godep restore) 61 | ;; 62 | glide) 63 | (cd "${__proj_dir}" && glide install) 64 | ;; 65 | govendor) 66 | (cd "${__proj_dir}" && govendor sync) 67 | ;; 68 | esac 69 | } 70 | 71 | _dep=$(detect_go_dep) 72 | install_go_dep 73 | restore_go_dep 74 | -------------------------------------------------------------------------------- /scripts/large.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | # File managed by pluginsync 3 | 4 | set -e 5 | set -u 6 | set -o pipefail 7 | 8 | __dir="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" 9 | __proj_dir="$(dirname "$__dir")" 10 | __proj_name="$(basename "$__proj_dir")" 11 | 12 | # shellcheck source=scripts/common.sh 13 | . "${__dir}/common.sh" 14 | 15 | _verify_docker() { 16 | type -p docker > /dev/null 2>&1 || _error "docker needs to be installed" 17 | type -p docker-compose > /dev/null 2>&1 || _error "docker-compose needs to be installed" 18 | docker version >/dev/null 2>&1 || _error "docker needs to be configured/running" 19 | } 20 | 21 | _verify_docker 22 | 23 | [[ -f "${__proj_dir}/build/linux/x86_64/${__proj_name}" ]] || (cd "${__proj_dir}" && make) 24 | 25 | SNAP_VERSION=${SNAP_VERSION:-"latest"} 26 | OS=${OS:-"alpine"} 27 | PLUGIN_PATH=${PLUGIN_PATH:-"${__proj_dir}"} 28 | DEMO=${DEMO:-"false"} 29 | TASK=${TASK:-""} 30 | 31 | if [[ ${DEBUG:-} == "true" ]]; then 32 | cmd="cd /plugin/scripts && rescue rspec ./test/*_spec.rb" 33 | else 34 | cmd="cd /plugin/scripts && rspec ./test/*_spec.rb" 35 | fi 36 | 37 | _info "running large test" 38 | docker run -v /var/run/docker.sock:/var/run/docker.sock -v "${__proj_dir}":/plugin -e DEMO="${DEMO}" -e TASK="${TASK}" -e PLUGIN_PATH="${PLUGIN_PATH}" -e SNAP_VERSION="${SNAP_VERSION}" -e OS="${OS}" -ti intelsdi/serverspec:alpine /bin/sh -c "${cmd}" 39 | -------------------------------------------------------------------------------- /scripts/large_compose.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | #http://www.apache.org/licenses/LICENSE-2.0.txt 4 | # 5 | # 6 | #Copyright 2016 Intel Corporation 7 | # 8 | #Licensed under the Apache License, Version 2.0 (the "License"); 9 | #you may not use this file except in compliance with the License. 10 | #You may obtain a copy of the License at 11 | # 12 | # http://www.apache.org/licenses/LICENSE-2.0 13 | # 14 | #Unless required by applicable law or agreed to in writing, software 15 | #distributed under the License is distributed on an "AS IS" BASIS, 16 | #WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 17 | #See the License for the specific language governing permissions and 18 | #limitations under the License. 
19 | 20 | set -e 21 | set -u 22 | set -o pipefail 23 | 24 | __dir="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" 25 | __proj_dir="$(dirname "$__dir")" 26 | __proj_name="$(basename $__proj_dir)" 27 | 28 | . "${__dir}/common.sh" 29 | 30 | # NOTE: these variables control the docker-compose environment. 31 | export PLUGIN_SRC="${__proj_dir}" 32 | export PROJ_NAME="${__proj_name}" 33 | export LOG_LEVEL="${LOG_LEVEL:-"7"}" 34 | export PLUGIN_DEST="/${__proj_name}" 35 | 36 | TEST_TYPE="${TEST_TYPE:-"large"}" 37 | 38 | docker_folder="${__proj_dir}/examples/tasks" 39 | 40 | _docker_project () { 41 | (cd "${docker_folder}" && "$@") 42 | } 43 | 44 | _info "docker folder: $docker_folder" 45 | 46 | _debug "starting docker-compose containers" 47 | _docker_project docker-compose up -d 48 | _debug "running test: ${TEST_TYPE}" 49 | 50 | set +e 51 | 52 | RUN_TESTS="\"${PLUGIN_DEST}/scripts/large_tests.sh\"" 53 | _docker_project docker-compose exec docker sh -c "export LOG_LEVEL=$LOG_LEVEL; export RUN_SCRIPT=$RUN_TESTS ; /${__proj_name}/examples/run-dockerception.sh" 54 | test_res=$? 55 | set -e 56 | echo "exit code from large_compose $test_res" 57 | _debug "stopping and removing containers" 58 | _docker_project docker-compose down 59 | 60 | exit $test_res 61 | -------------------------------------------------------------------------------- /scripts/large_k8s.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash -x 2 | 3 | #http://www.apache.org/licenses/LICENSE-2.0.txt 4 | # 5 | # 6 | #Copyright 2015 Intel Corporation 7 | # 8 | #Licensed under the Apache License, Version 2.0 (the "License"); 9 | #you may not use this file except in compliance with the License. 10 | #You may obtain a copy of the License at 11 | # 12 | # http://www.apache.org/licenses/LICENSE-2.0 13 | # 14 | #Unless required by applicable law or agreed to in writing, software 15 | #distributed under the License is distributed on an "AS IS" BASIS, 16 | #WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 17 | #See the License for the specific language governing permissions and 18 | #limitations under the License. 19 | 20 | set -e 21 | set -u 22 | set -o pipefail 23 | 24 | __dir="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" 25 | __proj_dir="$(dirname "$__dir")" 26 | __proj_name="$(basename $__proj_dir)" 27 | __deployment_file="$__dir/config/docker-deployment.yml" 28 | __deployment_name="docker-deployment" 29 | 30 | . "${__dir}/common.sh" 31 | 32 | _debug "__dir ${__dir}" 33 | _debug "__proj_dir ${__proj_dir}" 34 | _debug "__proj_name ${__proj_name}" 35 | 36 | _debug "start k8s deployment $__deployment_file" 37 | kubectl create -f $__deployment_file 38 | while ! [ "$(kubectl get po --no-headers | grep $__deployment_name | grep Running | awk '{print $2}')" = "1/1" ]; do 39 | kubectl get po --no-headers | grep $__deployment_name | grep CrashLoopBackOff && echo 'container failed' && exit 1 40 | echo 'waiting for pod to come up' 41 | sleep 5 42 | done 43 | _debug "copying the src into the runner" 44 | kubectl exec $(kubectl get po --no-headers | grep $__deployment_name | grep Running | awk '{print $1}') -c main -i -- mkdir /src 45 | tar c .
| kubectl exec $(kubectl get po --no-headers | grep $__deployment_name | grep Running | awk '{print $1}') -c main -i -- tar -x -C /src 46 | 47 | set +e 48 | _debug "running tests through the runner" 49 | kubectl exec $(kubectl get po --no-headers | grep $__deployment_name | grep Running | awk '{print $1}') -c main -i -- /src/examples/run-dockerception.sh 50 | test_res=$? 51 | set -e 52 | _debug "exit code $test_res" 53 | _debug "removing k8s deployment" 54 | kubectl delete -f $__deployment_file 55 | exit $test_res 56 | 57 | -------------------------------------------------------------------------------- /scripts/large_tests.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | #http://www.apache.org/licenses/LICENSE-2.0.txt 4 | # 5 | # 6 | #Copyright 2016 Intel Corporation 7 | # 8 | #Licensed under the Apache License, Version 2.0 (the "License"); 9 | #you may not use this file except in compliance with the License. 10 | #You may obtain a copy of the License at 11 | # 12 | # http://www.apache.org/licenses/LICENSE-2.0 13 | # 14 | #Unless required by applicable law or agreed to in writing, software 15 | #distributed under the License is distributed on an "AS IS" BASIS, 16 | #WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 17 | #See the License for the specific language governing permissions and 18 | #limitations under the License. 19 | 20 | set -e 21 | set -u 22 | set -o pipefail 23 | 24 | __dir="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" 25 | __proj_dir="$(dirname "$__dir")" 26 | 27 | . "${__dir}/common.sh" 28 | 29 | _info "running the example ${__proj_dir}/examples/docker-file.sh" 30 | export PLUGIN_PATH="/etc/snap/path" 31 | source "${__proj_dir}/examples/docker-file.sh" 32 | 33 | _debug "sleeping for 20 seconds so the task can do some work" 34 | sleep 20 35 | 36 | # begin assertions 37 | return_code=0 38 | echo -n "[task is running] " 39 | task_list=$(snaptel task list | tail -1) 40 | if echo $task_list | grep -q Running; then 41 | echo "ok" 42 | else 43 | echo "not ok" 44 | return_code=1 45 | fi 46 | 47 | echo -n "[task is hitting] " 48 | if [ $(echo $task_list | awk '{print $4}') -gt 0 ]; then 49 | echo "ok" 50 | else 51 | _debug $task_list 52 | echo "not ok" 53 | return_code=1 54 | fi 55 | 56 | echo -n "[task has no errors] " 57 | if [ $(echo $task_list | awk '{print $6}') -eq 0 ]; then 58 | echo "ok" 59 | else 60 | echo "not ok" 61 | return_code=1 62 | fi 63 | 64 | exit $return_code 65 | -------------------------------------------------------------------------------- /scripts/pre_deploy.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | # File managed by pluginsync 3 | 4 | # http://www.apache.org/licenses/LICENSE-2.0.txt 5 | # 6 | # 7 | # Copyright 2016 Intel Corporation 8 | # 9 | # Licensed under the Apache License, Version 2.0 (the "License"); 10 | # you may not use this file except in compliance with the License. 11 | # You may obtain a copy of the License at 12 | # 13 | # http://www.apache.org/licenses/LICENSE-2.0 14 | # 15 | # Unless required by applicable law or agreed to in writing, software 16 | # distributed under the License is distributed on an "AS IS" BASIS, 17 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 18 | # See the License for the specific language governing permissions and 19 | # limitations under the License.
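# pre_deploy.sh stages the built plugin binaries for publishing: untagged CI
# builds are copied into s3/<plugin_name>/<git sha>/ and s3/<plugin_name>/latest_build/,
# tagged builds into s3/<plugin_name>/<tag>/ and s3/<plugin_name>/latest/, and
# flattened copies named <plugin>_<os>_<arch> are collected under release/.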
20 | 21 | set -e 22 | set -u 23 | set -o pipefail 24 | 25 | __dir="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" 26 | __proj_dir="$(dirname "$__dir")" 27 | 28 | # shellcheck source=scripts/common.sh 29 | . "${__dir}/common.sh" 30 | 31 | build_path="${__proj_dir}/build" 32 | _info "build_path: ${build_path}" 33 | _debug "$(find "${build_path}")" 34 | 35 | plugin_name="${__proj_dir##*/}" 36 | git_sha=$(git log --pretty=format:"%H" -1) 37 | s3_path="${__proj_dir}/s3/${plugin_name}" 38 | 39 | set +u 40 | if [ -z "$TRAVIS_TAG" ]; then 41 | set -u 42 | git_path="${s3_path}/${git_sha}" 43 | latest_path="${s3_path}/latest_build" 44 | mkdir -p "${git_path}" 45 | mkdir -p "${latest_path}" 46 | 47 | _info "copying plugin binaries to ${git_path}" 48 | cp -rp "${build_path}/"* "${git_path}" 49 | _info "copying plugin binaries to ${latest_path}" 50 | cp -rp "${build_path}/"* "${latest_path}" 51 | else 52 | set -u 53 | tag_path="${s3_path}/${TRAVIS_TAG}" 54 | latest_path="${s3_path}/latest" 55 | mkdir -p "${tag_path}" 56 | mkdir -p "${latest_path}" 57 | 58 | _info "copying plugin binaries to ${tag_path}" 59 | cp -rp "${build_path}/"* "${tag_path}" 60 | _info "copying plugin binaries to ${latest_path}" 61 | cp -rp "${build_path}/"* "${latest_path}" 62 | fi 63 | 64 | release_path="${SNAP_PATH:-"${__proj_dir}/release"}" 65 | mkdir -p "${release_path}" 66 | 67 | _info "moving plugin binaries to ${release_path}" 68 | 69 | for file in "${build_path}"/**/*/snap-plugin-* ; do 70 | filename="${file##*/}" 71 | parent="${file%/*}" 72 | arch="${parent##*/}" 73 | parent="${parent%/*}" 74 | os="${parent##*/}" 75 | cp "${file}" "${release_path}/${filename}_${os}_${arch}" 76 | done 77 | 78 | _debug "$(find "${build_path}")" 79 | _debug "$(find "${s3_path}")" 80 | _debug "$(find "${release_path}")" 81 | -------------------------------------------------------------------------------- /scripts/test.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | # File managed by pluginsync 3 | 4 | # http://www.apache.org/licenses/LICENSE-2.0.txt 5 | # 6 | # 7 | # Copyright 2016 Intel Corporation 8 | # 9 | # Licensed under the Apache License, Version 2.0 (the "License"); 10 | # you may not use this file except in compliance with the License. 11 | # You may obtain a copy of the License at 12 | # 13 | # http://www.apache.org/licenses/LICENSE-2.0 14 | # 15 | # Unless required by applicable law or agreed to in writing, software 16 | # distributed under the License is distributed on an "AS IS" BASIS, 17 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 18 | # See the License for the specific language governing permissions and 19 | # limitations under the License. 20 | 21 | # Support travis.ci environment matrix: 22 | TEST_TYPE="${TEST_TYPE:-$1}" 23 | UNIT_TEST="${UNIT_TEST:-"gofmt goimports go_vet go_test go_cover"}" 24 | TEST_K8S="${TEST_K8S:-0}" 25 | 26 | set -e 27 | set -u 28 | set -o pipefail 29 | 30 | __dir="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" 31 | __proj_dir="$(dirname "$__dir")" 32 | 33 | # shellcheck source=scripts/common.sh 34 | . 
"${__dir}/common.sh" 35 | 36 | _debug "script directory ${__dir}" 37 | _debug "project directory ${__proj_dir}" 38 | _info "skipping go test in following directories: ${NO_GO_TEST}" 39 | 40 | [[ "$TEST_TYPE" =~ ^(small|medium|large|legacy|build)$ ]] || _error "invalid TEST_TYPE (value must be 'small', 'medium', 'large', 'legacy', or 'build' recieved:${TEST_TYPE}" 41 | 42 | 43 | test_unit() { 44 | # The script does automatic checking on a Go package and its sub-packages, including: 45 | # 1. gofmt (http://golang.org/cmd/gofmt/) 46 | # 2. goimports (https://github.com/bradfitz/goimports) 47 | # 3. golint (https://github.com/golang/lint) 48 | # 4. go vet (http://golang.org/cmd/vet) 49 | # 5. race detector (http://blog.golang.org/race-detector) 50 | # 6. go test 51 | # 7. test coverage (http://blog.golang.org/cover) 52 | local go_tests 53 | go_tests=(gofmt goimports golint go_vet go_race go_test go_cover) 54 | 55 | _debug "available unit tests: ${go_tests[*]}" 56 | _debug "user specified tests: ${UNIT_TEST}" 57 | 58 | ((n_elements=${#go_tests[@]}, max=n_elements - 1)) 59 | 60 | for ((i = 0; i <= max; i++)); do 61 | if [[ "${UNIT_TEST}" =~ (^| )"${go_tests[i]}"( |$) ]]; then 62 | _info "running ${go_tests[i]}" 63 | _"${go_tests[i]}" 64 | else 65 | _debug "skipping ${go_tests[i]}" 66 | fi 67 | done 68 | } 69 | 70 | if [[ $TEST_TYPE == "legacy" ]]; then 71 | UNIT_TEST="go_test go_cover" 72 | echo "mode: count" > profile.cov 73 | export TEST_TYPE="unit" 74 | test_unit 75 | elif [[ $TEST_TYPE == "small" ]]; then 76 | if [[ -f "${__dir}/small.sh" ]]; then 77 | . "${__dir}/small.sh" 78 | else 79 | echo "mode: count" > profile.cov 80 | test_unit 81 | fi 82 | elif [[ $TEST_TYPE == "medium" ]]; then 83 | if [[ -f "${__dir}/medium.sh" ]]; then 84 | . "${__dir}/medium.sh" 85 | else 86 | UNIT_TEST="go_test go_cover" 87 | echo "mode: count" > profile.cov 88 | test_unit 89 | fi 90 | elif [[ $TEST_TYPE == "large" ]]; then 91 | if [[ "${TEST_K8S}" != "0" && -f "$__dir/large_k8s.sh" ]]; then 92 | . "${__dir}/large_k8s.sh" 93 | elif [[ -f "${__dir}/large_compose.sh" ]]; then 94 | . "${__dir}/large_compose.sh" 95 | else 96 | . "${__dir}/large.sh" 97 | fi 98 | elif [[ $TEST_TYPE == "build" ]]; then 99 | "${__dir}/build.sh" 100 | fi 101 | -------------------------------------------------------------------------------- /scripts/test/large_spec.rb: -------------------------------------------------------------------------------- 1 | # File managed by pluginsync 2 | 3 | require_relative './spec_helper' 4 | require 'specinfra/backend/docker_compose' 5 | 6 | compose_yml = File.expand_path(File.join(__FILE__, "../docker-compose.yml")) 7 | raise(Exception, "Missing docker-compose file: #{compose_yml}") unless File.exists? compose_yml 8 | 9 | # NOTE: scan docker compose file and pull latest containers: 10 | images = File.readlines(compose_yml).select {|l| l =~ /^\s*image:/} 11 | images = images.collect{|l| l.split('image:').last.strip }.uniq 12 | images.each do |i| 13 | puts `docker pull #{i}` 14 | end 15 | 16 | set :docker_compose_container, :snap 17 | 18 | describe docker_compose(compose_yml) do 19 | 20 | # NOTE: If you need to wait for a service or create a database perform it in setup.rb 21 | setup = File.expand_path(File.join(__FILE__, '../setup.rb')) 22 | eval File.read setup if File.exists? 
setup 23 | 24 | its_container(:snap) do 25 | describe 'docker-compose.yml run' do 26 | TIMEOUT = 60 27 | 28 | describe "download Snap" do 29 | it { 30 | expect(cmd_with_retry("/opt/snap/bin/snaptel --version", :timeout => TIMEOUT).exit_status).to eq 0 31 | expect(cmd_with_retry("/opt/snap/sbin/snapteld --version", :timeout => TIMEOUT).exit_status).to eq 0 32 | } 33 | end 34 | 35 | if os[:family] == 'alpine' 36 | describe port(8181) do 37 | it { should be_listening } 38 | end 39 | end 40 | 41 | context "load Snap plugins" do 42 | describe command("snaptel plugin list") do 43 | it { load_all_plugins } 44 | its(:exit_status) { should eq 0 } 45 | its(:stdout) { 46 | plugins.each do |p| 47 | _ , name = p 48 | should contain(/#{name}/) 49 | end 50 | } 51 | end 52 | end 53 | 54 | describe file("/opt/snap/sbin/snapteld") do 55 | it { should be_file } 56 | it { should be_executable } 57 | end 58 | 59 | describe file("/opt/snap/bin/snaptel") do 60 | it { should be_file } 61 | it { should be_executable } 62 | end 63 | 64 | describe command("snapteld --version") do 65 | its(:exit_status) { should eq 0 } 66 | its(:stdout) { should contain(/#{ENV['SNAP_VERSION']}/) } 67 | end if ENV['SNAP_VERSION'] =~ /^\d+.\d+.\d+$/ 68 | 69 | SnapUtils.tasks.each do |t| 70 | context "Snap task #{t}" do 71 | task_id = nil 72 | 73 | describe command("snaptel task create -t /plugin/examples/tasks/#{t}") do 74 | its(:exit_status) { should eq 0 } 75 | its(:stdout) { should contain(/Task created/) } 76 | it { 77 | id = subject.stdout.split("\n").find{|l|l=~/^ID:/} 78 | task_id = $1 if id.match(/^ID: (.*)$/) 79 | expect(task_id).to_not be_nil 80 | # NOTE we need a short pause before checking task state in case it fails: 81 | sleep 3 82 | } 83 | end 84 | 85 | describe command("snaptel task list") do 86 | its(:exit_status) { should eq 0 } 87 | its(:stdout) { should contain(/Running/) } 88 | end 89 | 90 | describe "Metrics in running tasks" do 91 | it { 92 | binding.pry if ENV["DEMO"] == "true" 93 | 94 | data = curl_json_api("http://127.0.0.1:8181/v1/tasks") 95 | task = data["body"]["ScheduledTasks"].find{|i| i['id'] == task_id} 96 | expect(task['id']).to eq task_id 97 | data = curl_json_api(task['href']) 98 | collect_metrics = data["body"]["workflow"]["collect"]["metrics"].collect{|k,v| k} 99 | 100 | config = load_yaml(SnapUtils.examples/"tasks/#{t}") 101 | config_metrics = config['workflow']['collect']['metrics'].collect{|k,v| k} 102 | 103 | config_metrics.each do |m| 104 | expect(collect_metrics).to include(m) 105 | end 106 | } 107 | end 108 | 109 | # NOTE: can not use the normal describe command(...) since we need to access task_id 110 | describe "Stop task" do 111 | it { 112 | c = command("snaptel task stop #{task_id}") 113 | expect(c.exit_status).to eq 0 114 | expect(c.stdout).to match(/Task stopped/) 115 | } 116 | end 117 | 118 | describe "Remove task" do 119 | it { 120 | c = command("snaptel task remove #{task_id}") 121 | expect(c.exit_status).to eq 0 122 | expect(c.stdout).to match(/Task removed/) 123 | } 124 | end 125 | end 126 | end 127 | end 128 | end 129 | 130 | # NOTE: If you need to perform additional checks such as database verification it be done at the end: 131 | verify = File.expand_path(File.join(__FILE__, '../verify.rb')) 132 | eval File.read verify if File.exists? 
verify 133 | end 134 | -------------------------------------------------------------------------------- /scripts/test/spec_helper.rb: -------------------------------------------------------------------------------- 1 | # File managed by pluginsync 2 | 3 | require 'hashie' 4 | require 'json' 5 | require 'pathname' 6 | require 'yaml' 7 | require 'rspec/retry' 8 | require 'dockerspec/serverspec' 9 | 10 | begin 11 | require 'pry' 12 | rescue LoadError 13 | end 14 | 15 | module SnapUtils 16 | def sh(arg) 17 | c = command(arg) 18 | puts c.stderr 19 | puts c.stdout 20 | end 21 | 22 | def build_path 23 | File.expand_path(File.join(__FILE__, '../../../build/linux/x86_64')) 24 | end 25 | 26 | def local_plugins 27 | Dir.chdir(build_path) 28 | @local_plugins ||= Dir.glob("snap-plugin-*") 29 | end 30 | 31 | def load_plugin(type, name, version="latest") 32 | plugin_name = "snap-plugin-#{type}-#{name}" 33 | 34 | # NOTE: use mock2 plugin when mock is requested, in general we should avoid mock collector. 35 | case name 36 | when 'mock' # TODO: revisit how we handle mock plugins 37 | url = "https://s3-us-west-2.amazonaws.com/snap.ci.snap-telemetry.io/snap/#{version}/linux/x86_64/#{plugin_name}2" 38 | when 'mock1', 'mock2', 'mock2-grpc', 'passthru', 'passthru-grpc', 'mock-file', 'mock-file-grpc' 39 | url = "https://s3-us-west-2.amazonaws.com/snap.ci.snap-telemetry.io/snap/#{version}/linux/x86_64/#{plugin_name}" 40 | else 41 | url = "https://s3-us-west-2.amazonaws.com/snap.ci.snap-telemetry.io/plugins/#{plugin_name}/#{version}/linux/x86_64/#{plugin_name}" 42 | end 43 | 44 | if local_plugins.include? plugin_name 45 | command("snaptel plugin load #{build_path}/#{plugin_name}").exit_status 46 | else 47 | command("curl -sfL #{url} -o /opt/snap/plugins/#{plugin_name}").exit_status 48 | command("snaptel plugin load /opt/snap/plugins/#{plugin_name}").exit_status 49 | end 50 | end 51 | 52 | def cmd_with_retry(arg, opt={ :timeout => 30 }) 53 | cmd = command(arg) 54 | while cmd.exit_status != 0 or cmd.stdout == '' and opt[:timeout] > 0 55 | sleep 5 56 | opt[:timeout] -= 5 57 | cmd = command(arg) 58 | end 59 | return cmd 60 | end 61 | 62 | def curl_json_api(url) 63 | output = cmd_with_retry("curl #{url}").stdout 64 | if output.size > 0 65 | JSON.parse(output) 66 | else 67 | {} 68 | end 69 | end 70 | 71 | def load_json(file) 72 | file = File.expand_path file 73 | raise ArgumentError, "Invalid json file path: #{file}" unless File.exist? file 74 | JSON.parse(gsub_env(File.read file)) 75 | end 76 | 77 | def load_yaml(file) 78 | file = File.expand_path file 79 | raise ArgumentError, "Invalid json file path: #{file}" unless File.exist? 
file 80 | YAML.load(gsub_env(File.read file)) 81 | end 82 | 83 | def gsub_env(content) 84 | content.gsub(/\$([a-zA-Z_]+[a-zA-Z0-9_]*)|\$\{(.+)\}/) { ENV[$1 || $2] } 85 | end 86 | 87 | def self.examples 88 | Pathname.new(File.expand_path(File.join(__FILE__,'../../../examples'))) 89 | end 90 | 91 | def self.tasks 92 | if ENV["TASK"] != "" 93 | pattern="#{examples}/tasks/#{ENV["TASK"]}" 94 | else 95 | pattern="#{examples}/tasks/*.y{a,}ml" 96 | end 97 | Dir.glob(pattern).collect{|f| File.basename f} 98 | end 99 | 100 | def add_plugins(plugins, type) 101 | plugins.flatten.compact.uniq.each do |name| 102 | @plugins.add([type, name]) 103 | end 104 | end 105 | 106 | def parse_task(t) 107 | t.extend Hashie::Extensions::DeepFetch 108 | t.extend Hashie::Extensions::DeepFind 109 | 110 | m = t.deep_fetch("workflow", "collect", "metrics"){ |k| {} } 111 | collectors = m.collect do |k, v| 112 | k.match(/^\/intel\/(.*?)\/(.*?)/) 113 | # NOTE: procfs/* doesn't follow the convention, nor does disk/smart. 114 | if $1 == 'procfs' || $1 == 'disk' 115 | case $2 116 | when 'iface' 117 | 'interface' 118 | when 'filesystem' 119 | 'df' 120 | else 121 | $2 122 | end 123 | else 124 | $1 125 | end 126 | end 127 | add_plugins(collectors, 'collector') 128 | 129 | p = t.deep_find_all("process") || {} 130 | processors = p.collect do |i| 131 | if i.is_a? ::Array 132 | i.collect{|j| j["plugin_name"] if j.include? "plugin_name"} 133 | end 134 | end 135 | add_plugins(processors, 'processor') 136 | 137 | p = t.deep_find_all("publish") || {} 138 | publishers = p.collect do |i| 139 | if i.is_a? ::Array 140 | i.collect{|j| j["plugin_name"] if j.include? "plugin_name"} 141 | end 142 | end 143 | add_plugins(publishers, 'publisher') 144 | end 145 | 146 | def plugins 147 | @plugins ||= load_tasks 148 | end 149 | 150 | def load_tasks 151 | @plugins = Set.new 152 | SnapUtils.tasks.each do |t| 153 | y = load_yaml SnapUtils.examples/"tasks/#{t}" 154 | parse_task(y) 155 | end 156 | @plugins 157 | end 158 | 159 | def load_all_plugins 160 | plugins.each do |i| 161 | type, name = i 162 | load_plugin(type, name) 163 | end 164 | end 165 | end 166 | 167 | RSpec.configure do |c| 168 | c.formatter = 'documentation' 169 | c.mock_framework = :rspec 170 | c.verbose_retry = true 171 | c.order = 'default' 172 | c.include SnapUtils 173 | if ENV["DEMO"] == "true" then 174 | Pry.config.pager = false 175 | 176 | Pry.hooks.add_hook(:before_session, "notice") do |output, binding, pry| 177 | output.puts "Setup complete for DEMO mode. When you are finished checking out Snap please type 'exit-program' to shutdown containers." 178 | end 179 | end 180 | end 181 | --------------------------------------------------------------------------------
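A minimal sketch of how the scripts above fit together for local development (the command order is inferred from the scripts in this repository; a Linux host with Go, Docker, and docker-compose installed is assumed):

# install Go dependencies (scripts/deps.sh detects glide from glide.yaml)
./scripts/deps.sh
# build the plugin binary into build/linux/x86_64/
./scripts/build.sh
# run the unit tests with coverage
TEST_TYPE=small ./scripts/test.sh
# run the end-to-end tests (docker-compose / dockerception path)
TEST_TYPE=large ./scripts/test.sh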