├── .gitignore ├── Dockerfile ├── Godeps ├── Godeps.json └── Readme ├── ISSUE_TEMPLATE ├── LICENSE ├── Makefile ├── README.md ├── doc └── Maintainers-Guide.md ├── handlersettings.go ├── integration-test ├── extensionconfig │ ├── public-azurechina.json │ └── public.json ├── gen_docker_certs.sh └── test.sh ├── main.go ├── metadata ├── HandlerManifest.json └── manifest.xml ├── op-disable.go ├── op-enable.go ├── op-enable_test.go ├── op-install.go ├── op-uninstall.go ├── op-update.go ├── op.go ├── pkg ├── distro │ ├── distro.go │ └── distro_test.go ├── dockeropts │ ├── dockeropts.go │ ├── systemd.go │ ├── systemd_test.go │ ├── upstart.go │ └── upstart_test.go ├── driver │ ├── centos.go │ ├── coreos.go │ ├── driver.go │ ├── rhel.go │ ├── systemd_base.go │ ├── ubuntu_base.go │ ├── ubuntu_systemd.go │ ├── ubuntu_upstart.go │ ├── upstart_base.go │ └── util.go ├── executil │ ├── exec.go │ └── exec_test.go ├── seqnumfile │ └── seqnumfile.go ├── util │ ├── util.go │ └── util_test.go └── vmextension │ ├── handlerenv.go │ ├── handlerenv_test.go │ ├── handlersettings.go │ ├── handlersettings_test.go │ ├── seqnum.go │ ├── seqnum_test.go │ └── status │ ├── status.go │ └── status_test.go ├── scripts └── run-in-background.sh ├── testdata ├── B3364F39E3086E9AD0C67767348D7392D35BC176.crt ├── B3364F39E3086E9AD0C67767348D7392D35BC176.prv ├── Extension │ ├── HandlerEnvironment.json │ ├── HandlerManifest.json │ ├── config │ │ ├── 1.settings │ │ ├── 2.settings │ │ └── HandlerState │ └── status │ │ └── 0.status ├── HandlerEnvironment.json ├── lsb-release ├── ovf-env.xml └── sampleProtectedSettings.json └── vendor └── github.com └── cloudfoundry-incubator └── candiedyaml ├── .gitignore ├── .travis.yml ├── LICENSE ├── README.md ├── api.go ├── decode.go ├── emitter.go ├── encode.go ├── fixtures └── specification │ ├── example2_1.yaml │ ├── example2_10.yaml │ ├── example2_11.yaml │ ├── example2_12.yaml │ ├── example2_13.yaml │ ├── example2_14.yaml │ ├── example2_15.yaml │ ├── 
example2_15_dumped.yaml │ ├── example2_16.yaml │ ├── example2_17.yaml │ ├── example2_17_control.yaml │ ├── example2_17_hexesc.yaml │ ├── example2_17_quoted.yaml │ ├── example2_17_single.yaml │ ├── example2_17_tie_fighter.yaml │ ├── example2_17_unicode.yaml │ ├── example2_18.yaml │ ├── example2_19.yaml │ ├── example2_2.yaml │ ├── example2_20.yaml │ ├── example2_21.yaml │ ├── example2_22.yaml │ ├── example2_23.yaml │ ├── example2_23_application.yaml │ ├── example2_23_non_date.yaml │ ├── example2_23_picture.yaml │ ├── example2_24.yaml │ ├── example2_24_dumped.yaml │ ├── example2_25.yaml │ ├── example2_26.yaml │ ├── example2_27.yaml │ ├── example2_27_dumped.yaml │ ├── example2_28.yaml │ ├── example2_3.yaml │ ├── example2_4.yaml │ ├── example2_5.yaml │ ├── example2_6.yaml │ ├── example2_7.yaml │ ├── example2_8.yaml │ ├── example2_9.yaml │ ├── example_empty.yaml │ └── types │ ├── map.yaml │ ├── map_mixed_tags.yaml │ ├── merge.yaml │ ├── omap.yaml │ ├── pairs.yaml │ ├── seq.yaml │ ├── set.yaml │ ├── v.yaml │ └── value.yaml ├── libyaml-LICENSE ├── parser.go ├── reader.go ├── resolver.go ├── run_parser.go ├── scanner.go ├── tags.go ├── writer.go ├── yaml_definesh.go ├── yaml_privateh.go └── yamlh.go /.gitignore: -------------------------------------------------------------------------------- 1 | *.swp 2 | bin 3 | bundle 4 | 5 | # go build . 
output 6 | azure-docker-extension 7 | 8 | # integration test ssh keys 9 | integration-test/id_rsa** 10 | integration-test/dockercerts* 11 | integration-test/extensionconfig/protected.json 12 | -------------------------------------------------------------------------------- /Dockerfile: -------------------------------------------------------------------------------- 1 | FROM golang 2 | RUN useradd azureuser 3 | 4 | COPY testdata/lsb-release lsb-release 5 | RUN mv lsb-release /etc/lsb-release 6 | COPY testdata/ovf-env.xml /var/lib/waagent/ovf-env.xml 7 | 8 | COPY testdata/HandlerEnvironment.json HandlerEnvironment.json 9 | RUN mv HandlerEnvironment.json ../HandlerEnvironment.json 10 | COPY testdata/Extension /var/lib/waagent/Extension 11 | 12 | ADD src src 13 | RUN go build -o a.out docker-extension 14 | ENTRYPOINT ["./a.out"] 15 | -------------------------------------------------------------------------------- /Godeps/Godeps.json: -------------------------------------------------------------------------------- 1 | { 2 | "ImportPath": "github.com/Azure/azure-docker-extension", 3 | "GoVersion": "go1.5.1", 4 | "Deps": [ 5 | { 6 | "ImportPath": "github.com/cloudfoundry-incubator/candiedyaml", 7 | "Rev": "29b4d9cda9fd156adea631d790c95d37ee6ab8e6" 8 | } 9 | ] 10 | } 11 | -------------------------------------------------------------------------------- /Godeps/Readme: -------------------------------------------------------------------------------- 1 | This directory tree is generated automatically by godep. 2 | 3 | Please do not edit. 4 | 5 | See https://github.com/tools/godep for more information. 6 | -------------------------------------------------------------------------------- /ISSUE_TEMPLATE: -------------------------------------------------------------------------------- 1 | Please take a look at the following log files for issues you are 2 | encountering and provide them in the issue details. 
3 | 4 | - /var/log/azure-docker-extension-enable.log 5 | - /var/log/waagent.log 6 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | 2 | Apache License 3 | Version 2.0, January 2004 4 | http://www.apache.org/licenses/ 5 | 6 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 7 | 8 | 1. Definitions. 9 | 10 | "License" shall mean the terms and conditions for use, reproduction, 11 | and distribution as defined by Sections 1 through 9 of this document. 12 | 13 | "Licensor" shall mean the copyright owner or entity authorized by 14 | the copyright owner that is granting the License. 15 | 16 | "Legal Entity" shall mean the union of the acting entity and all 17 | other entities that control, are controlled by, or are under common 18 | control with that entity. For the purposes of this definition, 19 | "control" means (i) the power, direct or indirect, to cause the 20 | direction or management of such entity, whether by contract or 21 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 22 | outstanding shares, or (iii) beneficial ownership of such entity. 23 | 24 | "You" (or "Your") shall mean an individual or Legal Entity 25 | exercising permissions granted by this License. 26 | 27 | "Source" form shall mean the preferred form for making modifications, 28 | including but not limited to software source code, documentation 29 | source, and configuration files. 30 | 31 | "Object" form shall mean any form resulting from mechanical 32 | transformation or translation of a Source form, including but 33 | not limited to compiled object code, generated documentation, 34 | and conversions to other media types. 
35 | 36 | "Work" shall mean the work of authorship, whether in Source or 37 | Object form, made available under the License, as indicated by a 38 | copyright notice that is included in or attached to the work 39 | (an example is provided in the Appendix below). 40 | 41 | "Derivative Works" shall mean any work, whether in Source or Object 42 | form, that is based on (or derived from) the Work and for which the 43 | editorial revisions, annotations, elaborations, or other modifications 44 | represent, as a whole, an original work of authorship. For the purposes 45 | of this License, Derivative Works shall not include works that remain 46 | separable from, or merely link (or bind by name) to the interfaces of, 47 | the Work and Derivative Works thereof. 48 | 49 | "Contribution" shall mean any work of authorship, including 50 | the original version of the Work and any modifications or additions 51 | to that Work or Derivative Works thereof, that is intentionally 52 | submitted to Licensor for inclusion in the Work by the copyright owner 53 | or by an individual or Legal Entity authorized to submit on behalf of 54 | the copyright owner. For the purposes of this definition, "submitted" 55 | means any form of electronic, verbal, or written communication sent 56 | to the Licensor or its representatives, including but not limited to 57 | communication on electronic mailing lists, source code control systems, 58 | and issue tracking systems that are managed by, or on behalf of, the 59 | Licensor for the purpose of discussing and improving the Work, but 60 | excluding communication that is conspicuously marked or otherwise 61 | designated in writing by the copyright owner as "Not a Contribution." 62 | 63 | "Contributor" shall mean Licensor and any individual or Legal Entity 64 | on behalf of whom a Contribution has been received by Licensor and 65 | subsequently incorporated within the Work. 66 | 67 | 2. Grant of Copyright License. 
Subject to the terms and conditions of 68 | this License, each Contributor hereby grants to You a perpetual, 69 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 70 | copyright license to reproduce, prepare Derivative Works of, 71 | publicly display, publicly perform, sublicense, and distribute the 72 | Work and such Derivative Works in Source or Object form. 73 | 74 | 3. Grant of Patent License. Subject to the terms and conditions of 75 | this License, each Contributor hereby grants to You a perpetual, 76 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 77 | (except as stated in this section) patent license to make, have made, 78 | use, offer to sell, sell, import, and otherwise transfer the Work, 79 | where such license applies only to those patent claims licensable 80 | by such Contributor that are necessarily infringed by their 81 | Contribution(s) alone or by combination of their Contribution(s) 82 | with the Work to which such Contribution(s) was submitted. If You 83 | institute patent litigation against any entity (including a 84 | cross-claim or counterclaim in a lawsuit) alleging that the Work 85 | or a Contribution incorporated within the Work constitutes direct 86 | or contributory patent infringement, then any patent licenses 87 | granted to You under this License for that Work shall terminate 88 | as of the date such litigation is filed. 89 | 90 | 4. Redistribution. 
You may reproduce and distribute copies of the 91 | Work or Derivative Works thereof in any medium, with or without 92 | modifications, and in Source or Object form, provided that You 93 | meet the following conditions: 94 | 95 | (a) You must give any other recipients of the Work or 96 | Derivative Works a copy of this License; and 97 | 98 | (b) You must cause any modified files to carry prominent notices 99 | stating that You changed the files; and 100 | 101 | (c) You must retain, in the Source form of any Derivative Works 102 | that You distribute, all copyright, patent, trademark, and 103 | attribution notices from the Source form of the Work, 104 | excluding those notices that do not pertain to any part of 105 | the Derivative Works; and 106 | 107 | (d) If the Work includes a "NOTICE" text file as part of its 108 | distribution, then any Derivative Works that You distribute must 109 | include a readable copy of the attribution notices contained 110 | within such NOTICE file, excluding those notices that do not 111 | pertain to any part of the Derivative Works, in at least one 112 | of the following places: within a NOTICE text file distributed 113 | as part of the Derivative Works; within the Source form or 114 | documentation, if provided along with the Derivative Works; or, 115 | within a display generated by the Derivative Works, if and 116 | wherever such third-party notices normally appear. The contents 117 | of the NOTICE file are for informational purposes only and 118 | do not modify the License. You may add Your own attribution 119 | notices within Derivative Works that You distribute, alongside 120 | or as an addendum to the NOTICE text from the Work, provided 121 | that such additional attribution notices cannot be construed 122 | as modifying the License. 
123 | 124 | You may add Your own copyright statement to Your modifications and 125 | may provide additional or different license terms and conditions 126 | for use, reproduction, or distribution of Your modifications, or 127 | for any such Derivative Works as a whole, provided Your use, 128 | reproduction, and distribution of the Work otherwise complies with 129 | the conditions stated in this License. 130 | 131 | 5. Submission of Contributions. Unless You explicitly state otherwise, 132 | any Contribution intentionally submitted for inclusion in the Work 133 | by You to the Licensor shall be under the terms and conditions of 134 | this License, without any additional terms or conditions. 135 | Notwithstanding the above, nothing herein shall supersede or modify 136 | the terms of any separate license agreement you may have executed 137 | with Licensor regarding such Contributions. 138 | 139 | 6. Trademarks. This License does not grant permission to use the trade 140 | names, trademarks, service marks, or product names of the Licensor, 141 | except as required for reasonable and customary use in describing the 142 | origin of the Work and reproducing the content of the NOTICE file. 143 | 144 | 7. Disclaimer of Warranty. Unless required by applicable law or 145 | agreed to in writing, Licensor provides the Work (and each 146 | Contributor provides its Contributions) on an "AS IS" BASIS, 147 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 148 | implied, including, without limitation, any warranties or conditions 149 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 150 | PARTICULAR PURPOSE. You are solely responsible for determining the 151 | appropriateness of using or redistributing the Work and assume any 152 | risks associated with Your exercise of permissions under this License. 153 | 154 | 8. Limitation of Liability. 
In no event and under no legal theory, 155 | whether in tort (including negligence), contract, or otherwise, 156 | unless required by applicable law (such as deliberate and grossly 157 | negligent acts) or agreed to in writing, shall any Contributor be 158 | liable to You for damages, including any direct, indirect, special, 159 | incidental, or consequential damages of any character arising as a 160 | result of this License or out of the use or inability to use the 161 | Work (including but not limited to damages for loss of goodwill, 162 | work stoppage, computer failure or malfunction, or any and all 163 | other commercial damages or losses), even if such Contributor 164 | has been advised of the possibility of such damages. 165 | 166 | 9. Accepting Warranty or Additional Liability. While redistributing 167 | the Work or Derivative Works thereof, You may choose to offer, 168 | and charge a fee for, acceptance of support, warranty, indemnity, 169 | or other liability obligations and/or rights consistent with this 170 | License. However, in accepting such obligations, You may act only 171 | on Your own behalf and on Your sole responsibility, not on behalf 172 | of any other Contributor, and only if You agree to indemnify, 173 | defend, and hold each Contributor harmless for any liability 174 | incurred by, or claims asserted against, such Contributor by reason 175 | of your accepting any such warranty or additional liability. 176 | 177 | END OF TERMS AND CONDITIONS 178 | 179 | APPENDIX: How to apply the Apache License to your work. 180 | 181 | To apply the Apache License to your work, attach the following 182 | boilerplate notice, with the fields enclosed by brackets "[]" 183 | replaced with your own identifying information. (Don't include 184 | the brackets!) The text should be enclosed in the appropriate 185 | comment syntax for the file format. 
We also recommend that a 186 | file or class name and description of purpose be included on the 187 | same "printed page" as the copyright notice for easier 188 | identification within third-party archives. 189 | 190 | Copyright 2016 Microsoft Corporation 191 | 192 | Licensed under the Apache License, Version 2.0 (the "License"); 193 | you may not use this file except in compliance with the License. 194 | You may obtain a copy of the License at 195 | 196 | http://www.apache.org/licenses/LICENSE-2.0 197 | 198 | Unless required by applicable law or agreed to in writing, software 199 | distributed under the License is distributed on an "AS IS" BASIS, 200 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 201 | See the License for the specific language governing permissions and 202 | limitations under the License. 203 | -------------------------------------------------------------------------------- /Makefile: -------------------------------------------------------------------------------- 1 | BIN=docker-extension 2 | BINDIR=bin 3 | BUNDLE=docker-extension.zip 4 | BUNDLEDIR=bundle 5 | 6 | bundle: clean binary 7 | @mkdir -p $(BUNDLEDIR) 8 | zip ./$(BUNDLEDIR)/$(BUNDLE) ./$(BINDIR)/$(BIN) 9 | zip -j ./$(BUNDLEDIR)/$(BUNDLE) ./metadata/HandlerManifest.json 10 | zip -j ./$(BUNDLEDIR)/$(BUNDLE) ./metadata/manifest.xml 11 | zip ./$(BUNDLEDIR)/$(BUNDLE) ./scripts/run-in-background.sh 12 | @echo "OK: Use $(BUNDLEDIR)/$(BUNDLE) to publish the extension." 13 | binary: 14 | if [ -z "$$GOPATH" ]; then echo "GOPATH is not set"; exit 1; fi 15 | GOOS=linux GOARCH=amd64 go build -v -o $(BINDIR)/$(BIN) . 16 | test: 17 | if [ -z "$$GOPATH" ]; then echo "GOPATH is not set"; exit 1; fi 18 | go test ./... 
-test.v 19 | clean: 20 | rm -rf "$(BUNDLEDIR)" 21 | rm -rf "$(BINDIR)" 22 | 23 | .PHONY: clean bundle binary test 24 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # :warning: IMPORTANT :warning: 2 | **Docker extension for Linux is deprecated.** 3 | 4 | DockerExtension is deprecated and will be retired November 2018. 5 | 6 | We believe there are better alternatives to this extension. The extension merely installs Docker, so alternatives such as cloud-init or the CustomScript extension are a better way to install the Docker version of choice. 7 | 8 | # Azure Virtual Machine Extension for Docker 9 | 10 | This repository contains source code for the Microsoft Azure Docker Virtual 11 | Machine Extension. 12 | 13 | The source code is meant to be used by Microsoft Azure employees publishing the 14 | extension and the source code is open sourced under Apache 2.0 License for 15 | reference. You can read the User Guide below. 16 | 17 | * [Learn more: Azure Virtual Machine Extensions](https://azure.microsoft.com/en-us/documentation/articles/virtual-machines-extensions-features/) 18 | * [How to use: Docker VM Extension](http://azure.microsoft.com/en-us/documentation/articles/virtual-machines-docker-vm-extension/) 19 | 20 | Docker VM extension can: 21 | 22 | - Install latest stable version of Docker Engine on your Linux VM 23 | - If provided, configures Docker daemon to listen on specified port, with given 24 | certs 25 | - Launches a set of containers using `docker-compose` (intended for running a 26 | static set of containers for monitoring, security etc.) 27 | 28 | # User Guide 29 | 30 | ## 1. Configuration schema 31 | 32 | ### 1.1. 
Public configuration keys 33 | 34 | Schema for the public configuration file for the Docker Extension looks like 35 | this: 36 | 37 | * `docker`: (optional, JSON object) 38 | * `port`: (optional, string) the port Docker listens on 39 | * `options`: (optional, string array) command line options passed to the 40 | Docker engine 41 | * `compose`: (optional, JSON object) the `docker-compose.yml` file to be used, [converted 42 | to JSON][yaml-to-json]. If you are considering to embed secrets as environment 43 | variables in this section, please see the `"environment"` key described below. 44 | **This feature is not intended for managing a dynamic set of containers**, it 45 | is intended for starting a static set of bootstrap containers for monitoring, 46 | security or orchestrator agents. **Please do not manage your containers through 47 | this feature.** 48 | * `compose-environment` (optional, JSON object) [Environment variables for docker-compose][compose-env]. 49 | * `azure-environment` (optional, string) Azure environment. Valid values are "AzureCloud" 50 | and "AzureChinaCloud". The default is "AzureCloud". 51 | 52 | [compose-env]: https://docs.docker.com/compose/reference/envvars/ 53 | 54 | A minimal simple configuration would be an empty json object (`{}`) or a more 55 | advanced one like this: 56 | 57 | ```json 58 | { 59 | "docker":{ 60 | "port": "2376", 61 | "options": ["-D", "--dns=8.8.8.8"] 62 | }, 63 | "compose": { 64 | "cache" : { 65 | "image" : "memcached", 66 | "ports" : ["11211:11211"] 67 | }, 68 | "web": { 69 | "image": "ghost", 70 | "ports": ["80:2368"] 71 | } 72 | }, 73 | "compose-environment": { 74 | "COMPOSE_PROJECT_NAME": "blog", 75 | "COMPOSE_HTTP_TIMEOUT": "600" 76 | } 77 | } 78 | ``` 79 | 80 | > **NOTE:** It is not suggested to specify `"port"` unless you are going to 81 | specify `"certs"` configuration (described below) as well. This can open up 82 | the Docker engine to public internet without authentication. 83 | 84 | ### 1.2. 
Protected configuration keys 85 | 86 | Schema for the protected configuration file stores the secrets that are passed 87 | to the Docker engine looks like this: 88 | 89 | * `environment`: (optional, JSON object) Key value pairs to store environment variables 90 | to be passed to `docker-compose` securely. By using this, you can avoid embedding secrets 91 | in the unencrypted `"compose"` section. 92 | * `certs`: (optional, JSON object) 93 | * `ca`: (required, string): base64 encoded CA certificate, passed to the engine as `--tlscacert` 94 | * `cert`: (required, string): base64 encoded TLS certificate, passed to the engine as `--tlscert` 95 | * `key`: (required, string): base64 encoded TLS key, passed to the engine as `--tlskey` 96 | * `login`: (optional, JSON object) login credentials to log in to a Docker Registry 97 | * `server`: (string, optional) registry server, if not specified, logs in to Docker Hub 98 | * `username`: (string, required) 99 | * `password`: (string, required) 100 | * `email`: (string, required) 101 | 102 | In order to encode your existing Docker certificates to base64, you can run: 103 | 104 | $ cat ~/.docker/ca.pem | base64 105 | 106 | An advanced configuration that configures TLS for Docker engine and logs in to 107 | Docker Hub account would look like this: 108 | 109 | ```json 110 | { 111 | "environment" : { 112 | "SECRET_ENV": "<>", 113 | "MYSQL_ROOT_PASSWORD": "very-secret-password" 114 | }, 115 | "certs": { 116 | "ca": "<>", 117 | "cert": "<>", 118 | "key": "<>" 119 | }, 120 | "login": { 121 | "username": "myusername", 122 | "password": "mypassword", 123 | "email": "name@example.com" 124 | } 125 | } 126 | ``` 127 | 128 | ## 2. 
Deploying the Extension to a VM 129 | 130 | Using [**Azure CLI**][azure-cli]: Once you have a VM created on Azure and 131 | configured your `pub.json` and `prot.json` (in section 1.1 and 1.2 above), you 132 | can add the Docker Extension to the virtual machine by running: 133 | 134 | $ azure vm extension set 'yourVMname' DockerExtension Microsoft.Azure.Extensions '1.1' \ 135 | --public-config-path pub.json \ 136 | --private-config-path prot.json 137 | 138 | In the command above, you can change version with `'*'` to use latest 139 | version available, or `'1.*'` to get newest version that does not introduce non- 140 | breaking schema changes. To learn the latest version available, run: 141 | 142 | $ azure vm extension list 143 | 144 | You can also omit `--public-config-path` and/or `--private-config-path` if you 145 | do not want to configure those settings. 146 | 147 | ## 3. Using Docker Extension in ARM templates 148 | 149 | You can provision Docker Extension in [Azure Resource templates](https://azure.microsoft.com/en-us/documentation/articles/resource-group-authoring-templates/) 150 | by specifying it just like a resource in your template. The configuration keys 151 | go to `"settings"` section and (optionally) protected keys go to `"protectedSettings"` section. 
152 | 153 | Example resource definition: 154 | 155 | ```json 156 | { 157 | "type": "Microsoft.Compute/virtualMachines/extensions", 158 | "name": "[concat(variables('vmName'), '/DockerExtension'))]", 159 | "apiVersion": "2015-05-01-preview", 160 | "location": "[parameters('location')]", 161 | "dependsOn": [ 162 | "[concat('Microsoft.Compute/virtualMachines/', variables('vmName'))]" 163 | ], 164 | "properties": { 165 | "publisher": "Microsoft.Azure.Extensions", 166 | "type": "DockerExtension", 167 | "typeHandlerVersion": "1.1", 168 | "autoUpgradeMinorVersion": true, 169 | "settings": {}, 170 | "protectedSettings": {} 171 | } 172 | } 173 | ``` 174 | 175 | You can find various usages of this at the following gallery templates: 176 | 177 | * https://github.com/Azure/azure-quickstart-templates/blob/master/docker-simple-on-ubuntu/azuredeploy.json 178 | * https://github.com/Azure/azure-quickstart-templates/tree/master/docker-wordpress-mysql 179 | * https://github.com/Azure/azure-quickstart-templates/tree/master/docker-swarm-cluster 180 | 181 | ----- 182 | 183 | ### Supported Linux Distributions 184 | 185 | - CoreOS 899 and higher 186 | - Ubuntu 13 and higher 187 | - CentOS 7.1 and higher 188 | - Red Hat Enterprise Linux (RHEL) 7.1 and higher 189 | 190 | Other Linux distributions are currently not supported and extension 191 | is expected to fail on unsupported distributions. 192 | 193 | 194 | ### Debugging 195 | 196 | After adding the extension, it can usually take a few minutes for the extension 197 | to make it to the VM, install docker and do other things. 198 | 199 | You can find the extension and Azure Linux agent logs here: 200 | * `/var/log/azure-docker-extension-enable.log` 201 | * `/var/log/waagent.log` 202 | * `/var/log/azure/Microsoft.Azure.Extensions.DockerExtension/**/docker-extension.log` 203 | 204 | If you are going to open an issue, please provide these log files. 
205 | 206 | ### Changelog 207 | 208 | ``` 209 | # 1.2.2 (2017-01-21) 210 | - Add support for the CoreOS rename (gh#116). 211 | 212 | # 1.2.1 (2016-11-15) (released in Azure China only) 213 | - Add support for Azure China by modifying the download URLs to point to the 214 | mirrors hosted by mirror.azure.cn (gh#112) 215 | 216 | # 1.2.0 (2016-08-19) 217 | - Fix: On CentOS start dockerd as -H=unix:// instead of -H=fd:// as get.docker.com 218 | install script has removed socket activation. (gh#104) 219 | - Prefer 'dockerd' in systemd unit files over 'docker daemon'. docker-engine has 220 | migrated to this. This is why we are releasing a minor version for the extension 221 | this time and not a hotfix so that existing VMs don’t automatically get this and 222 | old versions of docker will not work with dockerd. 223 | 224 | # 1.1.1606092330 (2016-06-09) 225 | - Introduced “compose-environment” public configuration to pass additional unencrypted 226 | environment variables to docker-compose for fine tuning. (gh#87, gh#85) 227 | - Better error messages for docker-compose failures indicating the log path. (gh#86) 228 | 229 | # 1.1.1604142300 (2016-04-14) 230 | - Fix: docker v1.11 release has broken docker-compose 1.5 from pulling private images. 231 | Upgrading to docker-compose 1.6.2 and dropping support for docker-engine <1.9.1 (gh#80) 232 | 233 | # 1.1.1602270800 (2016-02-28) 234 | - Fix: extension crash while collecting “yum install” output. 235 | 236 | # 1.1.1601140348 (2016-01-13) 237 | - Fix: eliminate redundant restarts of docker-engine on CoreOS if configuration 238 | is not changed. 239 | 240 | # 1.1.1601070410 (2016-01-06) 241 | - Fix: eliminate redundant restarting of docker-engine. This avoids restart of 242 | docker-engine service (and thus containers) when (1) VM boots (2) waagent 243 | calls extension's enable command in case of GoalState changes such as Load 244 | Balancer updates. 
245 | - Fix: Write .status file before forking into background in 'enable' command. 246 | This is a workaround for waagent 2.1.x. 247 | 248 | # 1.1.1512180541 (2015-12-17) 249 | - Security fix: prevent clear-text registry credentials from being logged. 250 | 251 | # 1.1.1512090359 (2015-12-08) 252 | - Introduced secure delivery of secrets through "environment" section of 253 | protected configuration to be passed to docker-compose. Users do not have 254 | to embed secrets in the "compose" section anymore. 255 | 256 | # 1.0.1512030601 (2015-12-02) 257 | - Added support for CentOS and Red Hat Enterprise Linux (RHEL). 258 | 259 | # 1.0.1512020618 (2015-12-01) 260 | - Bumped docker-compose version from v1.4.1 to v1.5.1. 261 | - Added retry logic around installation as a mitigation for a VM scale set 262 | issue. 263 | 264 | # 1.0.1510142311 (2015-10-14) 265 | - Configured docker-compose timeout to 15 minutes to prevent big images 266 | from failing to be pulled down intermittently due to network conditions. 267 | 268 | # 1.0.1509171835 (2015-09-18) 269 | - Move 'install' stage to 'enable' step so that installation is not killed by 270 | 5-minute waagent timeout on slow regions and distros (such as Ubuntu LTS) 271 | with many missing dependency packages. 272 | - Bump docker-compose to v1.4.0 from v1.3.2. 273 | - Extension now uninstalls docker-compose on 'uninstall' stage. 274 | 275 | # 1.0.1509160543 (2015-09-16) 276 | - Workaround for undesirable behavior in WALA: Write .seqnum file to /tmp to 277 | prevent multiple simultaneous calls to the extension with the same sequence 278 | number. 279 | 280 | # 1.0.1508121604 (2015-08-12) 281 | - Replaced '--daemon' flag with daemon due to breaking behavior introduced in 282 | docker-1.8.0 release. 283 | 284 | # 1.0.1507232004 (2015-07-23) 285 | - Updating the apt package name for uninstall step. 286 | 287 | # 1.0.1507151643 (2015-07-15) 288 | - Bump docker-compose to v1.3.2 from v1.2.0. 
(gh#41) 289 | 290 | # 1.0.1507110733 (2015-07-11) 291 | - Workaround for a bug caused from docker-compose to crash with error 292 | 'write /dev/stderr: broken pipe' 293 | 294 | # 1.0.1507101636 (2015-07-10) 295 | - Bug fix (gh#38). Blocking on install step instead of forking and running in 296 | background. 297 | 298 | # 1.0.1507020203 (2015-07-01) 299 | - Better docker-compose integration and prevent duplicate container creations 300 | between reboots. 301 | - Fork and run in background install/enable steps to avoid waagent time limits. 302 | 303 | # 1.0.1506280321 (2015-06-27) 304 | - "certs" that are not base64-encoded are also accepted now. This provides more 305 | backwards compatibility with the existing format in the old extension. 306 | - Docker certs are now overwritten on every 'enable' run using the extension 307 | configuration. 308 | - Placed certs server-cert.pem/server-key.pem are renamed to cert.pem/key.pem to 309 | be consistent with Docker's nomenclature. The change should be automatically 310 | picked up upon reboot. 311 | 312 | # 1.0.1506141804 (2015-06-14) 313 | - Privacy Policy link update 314 | 315 | # 1.0.1506090235 (2015-06-09) 316 | - Bug fix 317 | 318 | # 1.0.1506041832 (2015-06-04) 319 | - Initial release 320 | ``` 321 | 322 | [yaml-to-json]: http://yamltojson.com/ 323 | [azure-cli]: https://azure.microsoft.com/en-us/documentation/articles/xplat-cli/ 324 | 325 | ----- 326 | This project has adopted the [Microsoft Open Source Code of Conduct](https://opensource.microsoft.com/codeofconduct/). For more information see the [Code of Conduct FAQ](https://opensource.microsoft.com/codeofconduct/faq/) or contact [opencode@microsoft.com](mailto:opencode@microsoft.com) with any additional questions or comments. 
327 | -------------------------------------------------------------------------------- /doc/Maintainers-Guide.md: -------------------------------------------------------------------------------- 1 | # Maintainer’s Guide 2 | 3 | ## Publishing a new version **manually** 4 | 5 | :warning: We use the internal EDP (Extension Deployment Pipeline) for releasing 6 | new versions. 7 | 8 | :warning: You are recommended to use EDP instead of the instructions here. 9 | 10 | ### Prerequisites 11 | 12 | Make sure: 13 | * you have access to the subscription that is publishing this extension. 14 | * you have a subscription management certificate in `.pem` format for that. 15 | * you installed `azure-extensions-cli` (and its `list-versions` command works) 16 | 17 | ### 0. Bump the version number 18 | 19 | You need to update `metadata/manifest.xml` with the new version number that 20 | you should document in `README.md` Changelog section and push that change upstream. 21 | 22 | ### 1. Build and pack 23 | 24 | Run `make bundle` to build an extension handler zip package in the `bundle/` directory. 25 | 26 | ### 2. Upload the package 27 | 28 | Extension follows the semantic versioning [MAJOR].[MINOR].[PATCH]. The MAJOR/MINOR 29 | are stored in the `Makefile` and PATCH is the UTC build date of the bundle in `yymmddHHMM` 30 | format. 31 | 32 | * **Bump `MAJOR` if**: you are introducing breaking changes in the config schema 33 | * **Bump `MINOR` if**: you are adding new features (no need for hotfixes or minor nits) 34 | 35 | Run `azure-extensions-cli new-extension-manifest` with the values in 36 | `metadata/manifest.xml` to upload the package and create a manifest XML. Save the output 37 | of this program to a file (e.g. `/tmp/manifest.xml`). 38 | 39 | ### 3. Publish new version 40 | 41 | Publishing takes 3 steps and you will use the `azure-extensions-cli` program. 42 | 43 | #### 3.1.
Publish to Slice 1 44 | 45 | “Slice 1” means you publish the extension internally only to your own publisher subscription: 46 | 47 | export SUBSCRIPTION_ID=[...] 48 | export SUBSCRIPTION_CERT=[...].pem 49 | azure-extensions-cli new-extension-version --manifest [path-to-manifest] 50 | 51 | Then check its replication status using: 52 | 53 | azure-extensions-cli replication-status --namespace Microsoft.Azure.Extensions \ 54 | --name DockerExtension --version 55 | 56 | or `azure-extensions-cli list-versions` command. 57 | 58 | Based on the load on PIR it may take from 10 minutes to 10 hours to replicate. 59 | 60 | #### 3.2. Integration Tests 61 | 62 | After the extension is listed as replicated, you can run integration tests to deploy the extension 63 | to some images and test it is actually working. 64 | 65 | For that, make sure you have the `azure` xplat CLI installed and `azure login` is completed. 66 | 67 | Then run: 68 | 69 | ./integration-test/test.sh 70 | 71 | The tests will: 72 | 73 | 1. Create test VMs with various distro images in test subscription. 74 | 2. Add extension to the VMs with a config that exercises the features. 75 | 3. Verify the correct version of the extension is landed to VMs. 76 | 4. Verify connectivity to docker-engine with TLS certs. 77 | 5. Verify other configuration is taking effect (containers are created etc.) 78 | 6. Tear down the test VMs. 79 | 80 | If the test gets stuck in a verification step and keeps printing `...`s for more than 5 minutes, 81 | it is very likely something is going badly. You can ssh into the VM (command is printed in the test 82 | output) and see what is going on. 83 | 84 | (If you want to delete the buggy version, use `azure-extensions-cli delete-version` at this step.) 85 | 86 | #### 3.3.
Publish to Slice 2 87 | 88 | “Slice 2” means you publish the extension publicly to one Azure PROD region: 89 | 90 | azure-extensions-cli promote-single-region --region-1 'Brazil South' --manifest 91 | 92 | and then you can use `azure-extensions-cli replication-status` to see it completed. 93 | 94 | #### 3.4. Publish to Slice 3 95 | 96 | Same as “Slice 2”, but publishes to **two** Azure PROD regions. 97 | 98 | azure-extensions-cli promote-two-regions --region-1 'Brazil South' --region-2 'Southeast Asia' \ 99 | --manifest 100 | 101 | #### 3.5. Publish to Slice 4 102 | 103 | This step publishes the VM extension to **all Azure PROD regions** (be careful). 104 | 105 | azure-extensions-cli promote-all-regions --manifest 106 | 107 | Wait for it to be completed using `azure-extensions-cli replication-status` command and 108 | once completed, run `azure vm extension list --json` command from a subscription that is not a publisher 109 | subscription to verify if the new version is available (not applicable for hotfixes). 110 | 111 | ### 4. Take a code snapshot 112 | 113 | Once the version is successful and works in Production: 114 | 115 | 1. Document the changes in README.md “Changelog” section 116 | 2. Commit the changes to your own fork 117 | 3. Send a pull request to Azure repo 118 | 4. Create a tag with the version number you published e.g.: 119 | 120 | git tag 1.0.1506041803 121 | git push --tags 122 | 123 | This will create a snapshot of the code in “releases” section of the GitHub repository. 124 | -------------------------------------------------------------------------------- /handlersettings.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "fmt" 5 | 6 | "github.com/Azure/azure-docker-extension/pkg/vmextension" 7 | ) 8 | 9 | // publicSettings is the type deserialized from public configuration section.
10 | type publicSettings struct { 11 | Docker dockerEngineSettings `json:"docker"` 12 | ComposeJson map[string]interface{} `json:"compose"` 13 | ComposeEnv map[string]string `json:"compose-environment"` 14 | AzureEnv string `json:"azure-environment"` 15 | } 16 | 17 | // protectedSettings is the type decoded and deserialized from protected 18 | // configuration section. 19 | type protectedSettings struct { 20 | Certs dockerCertSettings `json:"certs"` 21 | Login dockerLoginSettings `json:"login"` 22 | ComposeProtectedEnv map[string]string `json:"environment"` 23 | } 24 | 25 | type dockerEngineSettings struct { 26 | Port string `json:"port"` 27 | Options []string `json:"options"` 28 | } 29 | 30 | type dockerLoginSettings struct { 31 | Server string `json:"server"` 32 | Username string `json:"username"` 33 | Password string `json:"password"` 34 | Email string `json:"email"` 35 | } 36 | 37 | type dockerCertSettings struct { 38 | CABase64 string `json:"ca"` 39 | ServerKeyBase64 string `json:"key"` 40 | ServerCertBase64 string `json:"cert"` 41 | } 42 | 43 | func (e dockerCertSettings) HasDockerCerts() bool { 44 | return e.CABase64 != "" && e.ServerKeyBase64 != "" && e.ServerCertBase64 != "" 45 | } 46 | 47 | func (e dockerLoginSettings) HasLoginInfo() bool { 48 | return e.Username != "" && e.Password != "" 49 | } 50 | 51 | type DockerHandlerSettings struct { 52 | publicSettings 53 | protectedSettings 54 | } 55 | 56 | func parseSettings(configFolder string) (*DockerHandlerSettings, error) { 57 | pubSettingsJSON, protSettingsJSON, err := vmextension.ReadSettings(configFolder) 58 | if err != nil { 59 | return nil, fmt.Errorf("error reading handler settings: %v", err) 60 | } 61 | 62 | var pub publicSettings 63 | var prot protectedSettings 64 | if err := vmextension.UnmarshalHandlerSettings(pubSettingsJSON, protSettingsJSON, &pub, &prot); err != nil { 65 | return nil, fmt.Errorf("error parsing handler settings: %v", err) 66 | } 67 | return &DockerHandlerSettings{pub, prot}, nil 
68 | } 69 | -------------------------------------------------------------------------------- /integration-test/extensionconfig/public-azurechina.json: -------------------------------------------------------------------------------- 1 | { 2 | "docker" : { 3 | "port" : "2376", 4 | "options" : [ 5 | "--label", 6 | "foo=bar" 7 | ] 8 | }, 9 | "compose" : { 10 | "envdump" : { 11 | "image" : "mirror.azure.cn:5000/library/busybox", 12 | "volumes": ["/test:/test"], 13 | "environment": ["COMPOSE_PROJECT_NAME", "SECRET_KEY"], 14 | "command": "/bin/sh -c 'env > /test/env.txt'" 15 | }, 16 | "web" : { 17 | "image" : "mirror.azure.cn:5000/library/nginx", 18 | "restart" : "always", 19 | "ports" : ["80:80"] 20 | } 21 | }, 22 | "compose-environment": { 23 | "COMPOSE_PROJECT_NAME": "test" 24 | }, 25 | "azure-environment": "AzureChinaCloud" 26 | } 27 | -------------------------------------------------------------------------------- /integration-test/extensionconfig/public.json: -------------------------------------------------------------------------------- 1 | { 2 | "docker" : { 3 | "port" : "2376", 4 | "options" : [ 5 | "--label", 6 | "foo=bar" 7 | ] 8 | }, 9 | "compose" : { 10 | "envdump" : { 11 | "image" : "busybox", 12 | "volumes": ["/test:/test"], 13 | "environment": ["COMPOSE_PROJECT_NAME", "SECRET_KEY"], 14 | "command": "/bin/sh -c 'env > /test/env.txt'" 15 | }, 16 | "web" : { 17 | "image" : "nginx", 18 | "restart" : "always", 19 | "ports" : ["80:80"] 20 | } 21 | }, 22 | "compose-environment": { 23 | "COMPOSE_PROJECT_NAME": "test" 24 | } 25 | } 26 | -------------------------------------------------------------------------------- /integration-test/gen_docker_certs.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -eou pipefail 3 | 4 | # Script to generate Docker certificates and keys for testing. These are 5 | # temporary certificates, and are good for one day only. 
6 | # 7 | # See https://docs.docker.com/engine/security/https/ for deails. 8 | 9 | readonly DAYS=1 10 | readonly KEY_SIZE=2048 11 | readonly OUT=./dockercerts 12 | readonly EXT_OUT=./extensionconfig 13 | 14 | mkdir -p $OUT 15 | 16 | # 1. CA private and public keys 17 | openssl genrsa -out $OUT/ca-key.pem $KEY_SIZE 18 | openssl req -new -x509 -days $DAYS -key $OUT/ca-key.pem -sha256 -out $OUT/ca.pem -subj "/C=US/ST=Washington/L=Redmond/O=azure-docker-extension/CN=test-ca" 19 | 20 | # 2. Server 21 | # a. server key 22 | # b. certificate signing request 23 | # c. sign the request with our CA 24 | openssl genrsa -out $OUT/server-key.pem $KEY_SIZE 25 | openssl req -sha256 -new -key $OUT/server-key.pem -out $OUT/server.csr -subj "/CN=test-server" 26 | 27 | echo subjectAltName = IP:127.0.0.1 > $OUT/server-extfile.cnf 28 | openssl x509 -req -days $DAYS -sha256 -in $OUT/server.csr -CA $OUT/ca.pem -CAkey $OUT/ca-key.pem -CAcreateserial -out $OUT/server-cert.pem -extfile $OUT/server-extfile.cnf 29 | 30 | # 3. Client key, and certificate signing request 31 | # a. client key 32 | # b. certificate signing request 33 | # c. sign the request with our CA 34 | openssl genrsa -out $OUT/key.pem $KEY_SIZE 35 | openssl req -new -key $OUT/key.pem -out $OUT/client.csr -subj "/CN=test-client" 36 | 37 | echo extendedKeyUsage = clientAuth > $OUT/client-extfile.cnf 38 | openssl x509 -req -days $DAYS -sha256 -in $OUT/client.csr -CA $OUT/ca.pem -CAkey $OUT/ca-key.pem -CAcreateserial -out $OUT/cert.pem -extfile $OUT/client-extfile.cnf 39 | 40 | cat < $EXT_OUT/protected.json 41 | { 42 | "environment": { 43 | "SECRET_KEY": "SECRET_VALUE" 44 | }, 45 | "certs": { 46 | "ca": "$(base64 -w0 < $OUT/ca.pem)", 47 | "cert": "$(base64 -w0 < $OUT/server-cert.pem)", 48 | "key": "$(base64 -w0 < $OUT/server-key.pem)" 49 | } 50 | } 51 | EOF 52 | 53 | # 4. Cleanup 54 | # a. .cnf are no longer needed 55 | # b. server key, request, and configuration are no longer needed 56 | # c. 
client request, and configuration are no longer needed 57 | 58 | rm \ 59 | $OUT/ca-key.pem \ 60 | $OUT/server-cert.pem \ 61 | $OUT/server-key.pem \ 62 | $OUT/server.csr \ 63 | $OUT/server-extfile.cnf \ 64 | $OUT/client.csr \ 65 | $OUT/client-extfile.cnf \ 66 | 67 | -------------------------------------------------------------------------------- /integration-test/test.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -eou pipefail 3 | IFS=$'\n\t' 4 | 5 | # these flighting values should match Makefile 6 | readonly TEST_SUBS_AZURE="c3dfd792-49a4-4b06-80fc-6fc6d06c4742" 7 | readonly TEST_REGION_AZURE="South Central US" 8 | 9 | readonly TEST_SUBS_AZURE_CHINA="cc1624c7-3f1d-4ed3-a855-668a86e96ad8" 10 | readonly TEST_REGION_AZURE_CHINA="China East" 11 | 12 | # make docker-cli send a lower version number so that we can 13 | # test old images (if client>newer, docker engine rejects the request) 14 | readonly DOCKER_REMOTE_API_VERSION=1.20 15 | 16 | # supported images (add/update them as new major versions come out) 17 | readonly DISTROS_AZURE=( 18 | "2b171e93f07c4903bcad35bda10acf22__CoreOS-Stable-1235.6.0" \ 19 | "2b171e93f07c4903bcad35bda10acf22__CoreOS-Alpha-1298.1.0" \ 20 | "b39f27a8b8c64d52b05eac6a62ebad85__Ubuntu-14_04_5-LTS-amd64-server-20170110-en-us-30GB" \ 21 | "b39f27a8b8c64d52b05eac6a62ebad85__Ubuntu-16_04-LTS-amd64-server-20170113-en-us-30GB" \ 22 | "5112500ae3b842c8b9c604889f8753c3__OpenLogic-CentOS-73-20161221" 23 | ) 24 | 25 | readonly DISTROS_AZURE_CHINA=( 26 | "a54f4e2924a249fd94ad761b77c3e83d__CoreOS-Alpha-1192.0.0" \ 27 | "a54f4e2924a249fd94ad761b77c3e83d__CoreOS-Stable-1122.2.0" \ 28 | "b549f4301d0b4295b8e76ceb65df47d4__Ubuntu-14_04_3-LTS-amd64-server-20160627-en-us-30GB" \ 29 | "b549f4301d0b4295b8e76ceb65df47d4__Ubuntu-16_04-LTS-amd64-server-20160627-en-us-30GB" \ 30 | "f1179221e23b4dbb89e39d70e5bc9e72__OpenLogic-CentOS-71-20160329" \ 31 | 
"f1179221e23b4dbb89e39d70e5bc9e72__OpenLogic-CentOS-72-20160617" 32 | ) 33 | 34 | # Test constants 35 | readonly SCRIPT_DIR=$(dirname $0) 36 | readonly CONCURRENCY=10 37 | readonly DOCKER_CERTS_DIR=dockercerts 38 | readonly VM_PREFIX=dockerextensiontest- 39 | readonly VM_USER=azureuser 40 | readonly EXTENSION_NAME=DockerExtension 41 | readonly EXTENSION_PUBLISHER=Microsoft.Azure.Extensions 42 | readonly EXTENSION_CONFIG_AZURE=extensionconfig/public.json 43 | readonly EXTENSION_CONFIG_AZURE_CHINA=extensionconfig/public-azurechina.json 44 | readonly EXTENSION_CONFIG_PROT=extensionconfig/protected.json 45 | 46 | # Global variables 47 | expected_extension_version= 48 | distros= 49 | busybox_image_name= 50 | extension_public_config= 51 | domain_name= 52 | test_subs= 53 | test_region= 54 | 55 | ### Functions 56 | 57 | log() { 58 | echo "[$(date +%T)][DEBUG]" "$@" 59 | } 60 | 61 | err() { 62 | echo >&2 "[$(date +%T)][ERROR]" "$@" 63 | } 64 | 65 | command_exists() { 66 | command -v "$@" > /dev/null 2>&1 67 | } 68 | 69 | check_deps() { 70 | local deps=(azure jq docker curl parallel) 71 | 72 | local cmd= 73 | for cmd in "${deps[@]}"; do 74 | command_exists $cmd || { err "$cmd not installed."; exit 1; } 75 | done 76 | } 77 | 78 | check_asm() { 79 | # capture "Current Mode: arm" from azure cmd output 80 | if [[ "$(azure)" != *"Current Mode: asm"* ]]; then 81 | cat <<- EOF 82 | azure CLI not in ASM mode (required for testing). Run: 83 | 84 | azure config mode asm 85 | 86 | NOTE: internal versions in PIR don't propogate to ARM stack until they're 87 | published to PROD globally, hence 'asm') 88 | EOF 89 | exit 1 90 | fi 91 | } 92 | 93 | intro() { 94 | cat <<- EOF 95 | $EXTENSION_NAME integration tests: 96 | 97 | Execution Plan 98 | ============== 99 | 1. Create Azure VMs in the first slice region (make sure 'make publish + make replicationstatus'). 100 | 2. Add VM extension with a config exercising features to the VMs. 101 | 3. 
Make sure the correct version has landed to the VMs. 102 | 4. Wait until the VMs reach the goal state provided by $EXTENSION_NAME. 103 | 5. Clean up VMs. 104 | 105 | Using test subscription: $test_subs 106 | Staging region to deploy VMs: $test_region 107 | 108 | EOF 109 | } 110 | 111 | set_subs() { 112 | log "Setting subscription to $test_subs..." 113 | azure account set $test_subs 1>/dev/null 114 | } 115 | 116 | try_cli() { 117 | log "Validating Azure CLI credentials" 118 | ( 119 | set -x 120 | azure network application-gateway list 121 | ) 122 | log "Azure CLI is authenticated" 123 | } 124 | 125 | ssh_key() { 126 | echo "$SCRIPT_DIR/id_rsa" 127 | } 128 | 129 | ssh_pub_key() { 130 | echo "$(ssh_key).pub" 131 | } 132 | 133 | generate_docker_certs() { 134 | ./gen_docker_certs.sh 135 | } 136 | 137 | generate_ssh_keys() { 138 | local key=$(ssh_key) 139 | local pub=$(ssh_pub_key) 140 | 141 | if [[ -f "$key" ]] && [[ -f "$pub" ]];then 142 | # no need to regenerate keys 143 | return 144 | fi 145 | 146 | log "Generating SSH keys..." 147 | rm -f "$key" "$pub" 148 | ssh-keygen -q -f "$key" -N "" 149 | log "SSH keys generated." 150 | } 151 | 152 | trim_publisher() { 153 | # trims before __ in image name to get rid of publisher GUID 154 | echo $1 | sed 's/.*__//g' 155 | } 156 | 157 | print_distros() { 158 | log "Distro images to be tested:" 159 | 160 | local d 161 | for d in "${distros[@]}"; do 162 | log " - $(trim_publisher $d)" 163 | done 164 | log "Total: ${#distros[@]} VM images." 165 | } 166 | 167 | vm_fqdn() { 168 | local name=$1 169 | echo "$name.$domain_name" 170 | } 171 | 172 | create_vms() { 173 | generate_ssh_keys 174 | print_distros 175 | 176 | local key=$(ssh_pub_key) 177 | local vm_count=${#distros[@]} 178 | local vm_names=$(parallel -j$CONCURRENCY echo $VM_PREFIX{} ::: $(seq 1 $vm_count)) 179 | 180 | 181 | log "Creating test VMs in parallel..." 
182 | 183 | # Print commands to be executed, then execute them 184 | local cmd="azure vm create {1} {2} -e 22 -l '$test_region' --no-ssh-password --ssh-cert '$key' $VM_USER" 185 | parallel --dry-run -j$CONCURRENCY --xapply $cmd ::: ${vm_names[@]} ::: ${distros[@]} 186 | parallel -j$CONCURRENCY --xapply $cmd 1>/dev/null ::: ${vm_names[@]} ::: ${distros[@]} 187 | 188 | log "Opening up ports in parallel..." 189 | local ports=( 80 2376 ) 190 | for port in "${ports[@]}"; do # ports need to be added one by one for a single VM 191 | local cmd="azure vm endpoint create {1} $port $port" 192 | parallel --dry-run -j$CONCURRENCY $cmd ::: ${vm_names[@]} 193 | parallel -j$CONCURRENCY $cmd 1>/dev/null ::: ${vm_names[@]} 194 | done 195 | } 196 | 197 | get_vms() { 198 | local list_json=$(azure vm list --json) 199 | echo $list_json | jq -r '.[].VMName' | grep "^$VM_PREFIX" | sort -n 200 | } 201 | 202 | delete_vms() { 203 | log "Cleaning up test VMs in parallel..." 204 | 205 | local cmd="azure vm delete -b -q {}" 206 | local vms=$(get_vms) 207 | 208 | if [[ -z "$vms" ]]; then 209 | return 210 | fi 211 | 212 | # Print commands to be executed, then execute them 213 | parallel --dry-run -j$CONCURRENCY "$cmd" ::: "${vms[@]}" 214 | parallel -j$CONCURRENCY "$cmd" 1>/dev/null ::: "${vms[@]}" 215 | 216 | log "Cleaned up all test VMs." 217 | } 218 | 219 | parse_minor_version() { 220 | # matches to major.minor in major.minor[.patch[.hotfix]] 221 | local v=$1 222 | echo $v | grep -Po "\d+\.[\d]+" | head -1 223 | } 224 | 225 | add_extension_to_vms() { 226 | local pub_config="$SCRIPT_DIR/$extension_public_config" 227 | local prot_config="$SCRIPT_DIR/$EXTENSION_CONFIG_PROT" 228 | 229 | # To use internal version, MAJOR.MINOR must be specified; not '*' or 'MAJOR.*' 230 | local minor_version=$(parse_minor_version $expected_extension_ver) 231 | log "Adding extension v${minor_version} to VMs in parallel..." 
232 | 233 | local cmd="azure vm extension set {} $EXTENSION_NAME $EXTENSION_PUBLISHER '$minor_version' --public-config-path '$pub_config' --private-config-path '$prot_config'" 234 | local vms=$(get_vms) 235 | 236 | # Print commands to be executed, then execute them 237 | parallel --dry-run -j$CONCURRENCY "$cmd" ::: "${vms[@]}" 238 | parallel -j$CONCURRENCY "$cmd" 1>/dev/null ::: "${vms[@]}" 239 | 240 | log "Added $EXTENSION_NAME to all test VMs." 241 | } 242 | 243 | docker_addr() { 244 | local fqdn=$1 245 | echo "tcp://$fqdn:2376" 246 | } 247 | 248 | docker_cert_path() { 249 | echo "$SCRIPT_DIR/$DOCKER_CERTS_DIR" 250 | } 251 | 252 | docker_cli_env() { 253 | local fqdn=$1 254 | echo "DOCKER_CERT_PATH=\"$(docker_cert_path)\" DOCKER_HOST=\"$(docker_addr $fqdn)\" DOCKER_API_VERSION=$DOCKER_REMOTE_API_VERSION" 255 | } 256 | 257 | wait_for_docker() { 258 | local host=$1 259 | local addr=$(docker_addr $host) 260 | 261 | local docker_certs="$(docker_cert_path)" 262 | 263 | # Validate "docker info" works 264 | local docker_env=$(docker_cli_env $host) 265 | local docker_cmd="docker --tls info" 266 | log "Waiting for Docker engine on $addr..." 267 | echo "+ $docker_env $docker_cmd" 268 | 269 | while true; do 270 | set +e # ignore errors b/c the following command will retry 271 | eval $docker_env $docker_cmd 1>/dev/null 2>&1 272 | local exit_code=$? 273 | set -e 274 | 275 | if [ $exit_code -ne 0 ]; then 276 | printf '.' 277 | sleep 5 278 | else 279 | log "Authenticated to docker engine at $addr." 280 | # Check if docker.options in public.json took effect 281 | local docker_info_out="$(eval $docker_env $docker_cmd 2>&1)" 282 | log "$docker_info_out" 283 | if [[ "$docker_info_out" != *"foo=bar"* ]]; then 284 | err "Docker engine label (foo=bar) specified in extension configuration did not take effect." 285 | log "docker info output:" 286 | log "$docker_info_out" 287 | exit 1 288 | fi 289 | log "Docker configuration took effect." 
290 | return 291 | fi 292 | done 293 | } 294 | 295 | wait_for_container() { 296 | local host=$1 297 | local addr="http://$1:80/" 298 | 299 | log "Waiting for web container on $addr..." 300 | local curl_cmd="curl -sILfo/dev/null $addr" 301 | echo "+ $curl_cmd" 302 | 303 | while true; do 304 | set +e # ignore errors b/c the following command will retry 305 | eval $curl_cmd 2>&1 1>/dev/null 306 | local exit_code=$? 307 | set -e 308 | 309 | if [ $exit_code -eq 0 ]; then 310 | log "Web server container is up." 311 | return 312 | fi 313 | printf '.' 314 | sleep 5 315 | done 316 | } 317 | 318 | validate_extension_version() { 319 | local fqdn=$1 320 | log "Validating extension version on VM." 321 | 322 | # Search for file Microsoft.Azure.Extensions.DockerExtension-{version} 323 | local prefix="${EXTENSION_PUBLISHER}.${EXTENSION_NAME}-" 324 | 325 | # Find out what version of extension is installed by running 326 | # a Docker container with /var/lib/waagent mounted 327 | local docker_env=$(docker_cli_env $fqdn) 328 | local docker_cmd="docker --tls run --rm -i -v /var/lib/waagent:/agent ${busybox_image_name} ls -1 /agent | grep '^$prefix'" 329 | 330 | echo "+ $docker_env $docker_cmd" 331 | local version="$(eval $docker_env $docker_cmd 2>/dev/null | sed "s/^$prefix//g")" 332 | if [[ -z "$version" ]]; then 333 | err "Could not locate $EXTENSION_NAME version." 334 | exit 1 335 | fi 336 | 337 | if [[ "$version" != "$expected_extension_ver" ]]; then 338 | err "Wrong $EXTENSION_NAME encountered: '$version' (expected: '$expected_extension_ver')." 339 | exit 1 340 | fi 341 | log "VM has the correct version of $EXTENSION_NAME." 342 | } 343 | 344 | validate_env() { 345 | local fqdn=$1 346 | local env_key=$2 347 | local env_val=$3 348 | 349 | local docker_env=$(docker_cli_env $fqdn) 350 | local docker_cmd="docker --tls run --rm -i -v /test:/test ${busybox_image_name} cat /test/env.txt" 351 | 352 | log "Validating environment variable '$env_key'." 
353 | echo "+ $docker_env $docker_cmd" 354 | local i=0 355 | while true; do 356 | set +e 357 | local output="$(eval $docker_env $docker_cmd 2>&1)" 358 | set -e 359 | if [[ "$output" == *"$env_key=$env_val"* ]]; then 360 | log "Environment variable $env_val found in environment." 361 | return 362 | elif [[ $i -gt 5 ]]; then 363 | log "Environment file served does not contain env key: '$env_val':" 364 | echo "$output" 365 | exit 1 366 | fi 367 | i=$((i+1)) 368 | printf '.' 369 | sleep 5 370 | done 371 | } 372 | 373 | get_container_names() { 374 | local fqdn=$1 375 | 376 | local docker_env=$(docker_cli_env $fqdn) 377 | local docker_cmd="docker --tls ps -a --format '{{.Names}}'" 378 | 379 | echo "$(eval $docker_env $docker_cmd 2>&1)" 380 | } 381 | 382 | validate_container_prefixes() { 383 | local fqdn=$1 384 | local prefix=$2 385 | 386 | local out=$(get_container_names $fqdn | grep -v "^${prefix}_") 387 | if [[ -n "$out" ]]; then 388 | log "DOCKER_COMPOSE_PROJECT setting is not effective." 389 | log " Found containers without preconfigured prefix: $out" 390 | exit 1 391 | fi 392 | log "docker-compose container prefixes are correct." 393 | } 394 | 395 | vm_ssh_cmd() { 396 | local fqdn=$1 397 | echo "ssh -o \"StrictHostKeyChecking no\" -i '$(ssh_key)' ${VM_USER}@${fqdn}" 398 | } 399 | 400 | validate_vm() { 401 | local name=$1 402 | local fqdn=$(vm_fqdn $name) 403 | 404 | log "Validating $EXTENSION_NAME on VM '$name'" 405 | log " (To debug issues: $(echo $(vm_ssh_cmd $fqdn)))" 406 | wait_for_docker $fqdn 407 | validate_extension_version $fqdn 408 | wait_for_container $fqdn 409 | validate_env $fqdn "SECRET_KEY" "SECRET_VALUE" 410 | validate_env $fqdn "COMPOSE_PROJECT_NAME" "test" 411 | validate_container_prefixes $fqdn "test" 412 | 413 | log "VM is O.K.: $name." 414 | echo 415 | } 416 | 417 | validate_vms() { 418 | log "Validating VMs..." 
419 | local vms=$(get_vms) 420 | for vm in $vms; do 421 | validate_vm "$vm" 422 | done 423 | } 424 | 425 | read_version() { 426 | read -p "Expected $EXTENSION_NAME version in VMs (e.g. 1.2.2): " expected_extension_ver 427 | if [[ -z "$expected_extension_ver" ]]; then 428 | err "Empty string passed" 429 | exit 1 430 | fi 431 | } 432 | 433 | read_environment() { 434 | read -p "Enter the test environment name (e.g. AzureCloud, AzureChinaCloud. The default is AzureCloud): " test_environment 435 | case "$test_environment" in 436 | "" | "AzureCloud") 437 | distros=( "${DISTROS_AZURE[@]}" ) 438 | extension_public_config=$EXTENSION_CONFIG_AZURE 439 | busybox_image_name="busybox" 440 | domain_name="cloudapp.net" 441 | test_subs=$TEST_SUBS_AZURE 442 | test_region=$TEST_REGION_AZURE 443 | ;; 444 | "AzureChinaCloud") 445 | distros=( "${DISTROS_AZURE_CHINA[@]}" ) 446 | extension_public_config=$EXTENSION_CONFIG_AZURE_CHINA 447 | busybox_image_name="mirror.azure.cn:5000/library/busybox" 448 | domain_name="chinacloudapp.cn" 449 | test_subs=$TEST_SUBS_AZURE_CHINA 450 | test_region=$TEST_REGION_AZURE_CHINA 451 | ;; 452 | *) 453 | err "Invalid environment name" 454 | exit 1 455 | esac 456 | } 457 | 458 | check_deps 459 | intro 460 | read_version 461 | read_environment 462 | check_asm 463 | set_subs 464 | try_cli 465 | 466 | delete_vms 467 | generate_docker_certs 468 | create_vms 469 | add_extension_to_vms 470 | validate_vms 471 | 472 | log "Test run is successful!" 473 | log "Cleaning up test artifacts..." 474 | delete_vms 475 | log "Success." 
476 | -------------------------------------------------------------------------------- /main.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "fmt" 5 | "io" 6 | lg "log" 7 | "os" 8 | "os/user" 9 | "path/filepath" 10 | "strings" 11 | 12 | "github.com/Azure/azure-docker-extension/pkg/distro" 13 | "github.com/Azure/azure-docker-extension/pkg/driver" 14 | "github.com/Azure/azure-docker-extension/pkg/executil" 15 | "github.com/Azure/azure-docker-extension/pkg/seqnumfile" 16 | "github.com/Azure/azure-docker-extension/pkg/vmextension" 17 | "github.com/Azure/azure-docker-extension/pkg/vmextension/status" 18 | ) 19 | 20 | const ( 21 | LogFilename = "docker-extension.log" 22 | ) 23 | 24 | var ( 25 | log *lg.Logger 26 | handlerEnv vmextension.HandlerEnvironment 27 | seqNum = -1 28 | out io.Writer 29 | ) 30 | 31 | func init() { 32 | // Read extension handler environment 33 | var err error 34 | handlerEnv, err = vmextension.GetHandlerEnv() 35 | if err != nil { 36 | lg.Fatalf("ERROR: Cannot load handler environment: %v", err) 37 | } 38 | seqNum, err = vmextension.FindSeqNum(handlerEnv.HandlerEnvironment.ConfigFolder) 39 | if err != nil { 40 | lg.Fatalf("ERROR: cannot find seqnum: %v", err) 41 | } 42 | 43 | // Update logger to write to logfile 44 | ld := handlerEnv.HandlerEnvironment.LogFolder 45 | if err := os.MkdirAll(ld, 0644); err != nil { 46 | lg.Fatalf("ERROR: Cannot create log folder %s: %v", ld, err) 47 | } 48 | lf, err := os.OpenFile(filepath.Join(ld, LogFilename), os.O_CREATE|os.O_APPEND|os.O_WRONLY, 0644) 49 | if err != nil { 50 | lg.Fatalf("ERROR: Cannot open log file: %v", err) 51 | } 52 | out = io.MultiWriter(os.Stderr, lf) 53 | log = lg.New(out, "[DockerExtension] ", lg.LstdFlags) 54 | executil.SetOut(out) 55 | } 56 | 57 | func main() { 58 | log.Printf(strings.Repeat("-", 40)) 59 | log.Printf("Extension handler launch args: %#v", strings.Join(os.Args, " ")) 60 | if len(os.Args) <= 1 { 61 | 
ops := []string{} 62 | for k, _ := range operations { 63 | ops = append(ops, k) 64 | } 65 | log.Fatalf("ERROR: No arguments supplied, valid arguments: '%s'.", strings.Join(ops, "', '")) 66 | } 67 | opStr := os.Args[1] 68 | op, ok := operations[opStr] 69 | if !ok { 70 | log.Fatalf("ERROR: Invalid operation provided: '%s'", opStr) 71 | } 72 | log.Printf("seqnum: %d", seqNum) 73 | 74 | // seqnum check: waagent invokes enable twice with the same seqnum, so exit the process 75 | // started later. Refuse proceeding if seqNum is smaller or the same than the one running. 76 | if seqExists, seq, err := seqnumfile.Get(); err != nil { 77 | log.Fatalf("ERROR: seqnumfile could not be read: %v", err) 78 | } else if seqExists { 79 | if seq == seqNum { 80 | log.Printf("WARNING: Another instance of the extension handler with the same seqnum (=%d) is currently active according to .seqnum file.", seq) 81 | log.Println("Exiting gracefully with exitcode 0, not reporting to .status file.") 82 | os.Exit(0) 83 | } else if seq > seqNum { 84 | log.Printf("WARNING: Another instance of the extension handler with a higher seqnum (%d > %d) is currently active according to .seqnum file. 
The smaller seqnum will not proceed.", seq, seqNum) 85 | log.Println("Exiting gracefully with exitcode 0, not reporting to .status file.") 86 | os.Exit(0) 87 | } 88 | } 89 | 90 | // create .seqnum file 91 | if err := seqnumfile.Set(seqNum); err != nil { 92 | log.Fatalf("Error seting seqnum file: %v", err) 93 | } 94 | 95 | var fail = func(format string, args ...interface{}) { 96 | logFail(op, fmt.Sprintf(format, args...)) 97 | } 98 | 99 | // Report status as in progress 100 | if err := reportStatus(status.StatusTransitioning, op, ""); err != nil { 101 | log.Printf("Error reporting extension status: %v", err) 102 | } 103 | 104 | d, err := distro.GetDistro() 105 | if err != nil { 106 | fail("ERROR: Cannot get distro info: %v", err) 107 | } 108 | log.Printf("distro info: %s", d) 109 | dd, err := driver.GetDriver(d) 110 | if err != nil { 111 | fail("ERROR: %v", err) 112 | } 113 | log.Printf("using distro driver: %T", dd) 114 | 115 | if u, err := user.Current(); err != nil { 116 | log.Printf("Failed to get current user: %v", err) 117 | } else { 118 | log.Printf("user: %s uid:%v gid:%v", u.Username, u.Uid, u.Gid) 119 | } 120 | log.Printf("env['PATH'] = %s", os.Getenv("PATH")) 121 | 122 | log.Printf("+ starting: '%s'", opStr) 123 | if err = op.f(handlerEnv, dd); err != nil { 124 | fail("ERROR: %v", err) 125 | } 126 | log.Printf("- completed: '%s'", opStr) 127 | reportStatus(status.StatusSuccess, op, "") 128 | 129 | // clear .seqnum file 130 | if err := seqnumfile.Delete(); err != nil { 131 | log.Printf("WARNING: Error deleting seqnumfile: %v", err) 132 | } 133 | log.Printf("Cleaned up .seqnum file.") 134 | } 135 | 136 | // reportStatus saves operation status to the status file for the extension. 
137 | func reportStatus(t status.Type, op Op, msg string) error { 138 | if !op.reportsStatus { 139 | log.Printf("Status '%s' not reported for operation '%v' (by design)", t, op.name) 140 | return nil 141 | } 142 | dir := handlerEnv.HandlerEnvironment.StatusFolder 143 | m := msg 144 | if m == "" { 145 | m = op.name 146 | if t == status.StatusSuccess { 147 | m += " succeeded" 148 | } 149 | } 150 | if t == status.StatusError { 151 | m = fmt.Sprintf("%s failed: %s", op.name, m) 152 | } 153 | s := status.NewStatus(t, op.name, m) 154 | return s.Save(dir, seqNum) 155 | } 156 | 157 | // logFail prints the failure, reports failure status and exits 158 | func logFail(op Op, msg string) { 159 | log.Printf(msg) 160 | if err := reportStatus(status.StatusError, op, msg); err != nil { 161 | log.Printf("Error reporting extension status: %v", err) 162 | } 163 | if err := seqnumfile.Delete(); err != nil { 164 | log.Printf("WARNING: Error deleting seqnumfile: %v", err) 165 | } 166 | log.Println("Cleaned up .seqnum file.") 167 | log.Println("Exiting with code 1.") 168 | os.Exit(1) 169 | } 170 | -------------------------------------------------------------------------------- /metadata/HandlerManifest.json: -------------------------------------------------------------------------------- 1 | [{ 2 | "version": 1.0, 3 | "handlerManifest": { 4 | "installCommand": "bin/docker-extension install", 5 | "enableCommand": "scripts/run-in-background.sh enable", 6 | "uninstallCommand": "bin/docker-extension uninstall", 7 | "updateCommand": "bin/docker-extension update", 8 | "disableCommand": "bin/docker-extension disable", 9 | "rebootAfterInstall": false, 10 | "reportHeartbeat": false, 11 | "updateMode": "UpdateWithoutInstall" 12 | } 13 | }] 14 | -------------------------------------------------------------------------------- /metadata/manifest.xml: -------------------------------------------------------------------------------- 1 | 2 | 4 | 5 | 6 | Microsoft.Azure.Extensions 7 | DockerExtension 8 | 
1.2.2 9 | 10 | VmRole 11 | 12 | Microsoft Azure Docker Extension for Linux Virtual Machines 13 | true 14 | https://github.com/Azure/azure-docker-extension/blob/master/LICENSE 15 | http://www.microsoft.com/privacystatement/en-us/OnlineServices/Default.aspx 16 | https://github.com/Azure/azure-docker-extension 17 | true 18 | Linux 19 | Microsoft 20 | 21 | 22 | -------------------------------------------------------------------------------- /op-disable.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "github.com/Azure/azure-docker-extension/pkg/driver" 5 | "github.com/Azure/azure-docker-extension/pkg/vmextension" 6 | ) 7 | 8 | func disable(he vmextension.HandlerEnvironment, d driver.DistroDriver) error { 9 | log.Printf("++ stop docker daemon") 10 | if err := d.StopDocker(); err != nil { 11 | return err 12 | } 13 | log.Printf("-- stop docker daemon") 14 | return nil 15 | } 16 | -------------------------------------------------------------------------------- /op-enable.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "encoding/base64" 5 | "errors" 6 | "fmt" 7 | "io" 8 | "io/ioutil" 9 | "net/http" 10 | "os" 11 | "os/exec" 12 | "path/filepath" 13 | "strings" 14 | "time" 15 | 16 | "github.com/Azure/azure-docker-extension/pkg/driver" 17 | "github.com/Azure/azure-docker-extension/pkg/executil" 18 | "github.com/Azure/azure-docker-extension/pkg/util" 19 | "github.com/Azure/azure-docker-extension/pkg/vmextension" 20 | 21 | yaml "github.com/cloudfoundry-incubator/candiedyaml" 22 | ) 23 | 24 | const ( 25 | composeUrlGlobal = "https://github.com/docker/compose/releases/download/1.6.2/docker-compose-Linux-x86_64" 26 | composeUrlAzureChina = "https://mirror.azure.cn/docker-toolbox/linux/compose/1.6.2/docker-compose-Linux-x86_64" 27 | composeBin = "docker-compose" 28 | composeTimeoutSecs = 600 29 | 30 | composeYml = "docker-compose.yml" 31 
| composeYmlDir = "/etc/docker/compose" 32 | composeProject = "compose" // prefix for compose-created containers 33 | 34 | dockerCfgDir = "/etc/docker" 35 | dockerCaCert = "ca.pem" 36 | dockerSrvCert = "cert.pem" 37 | dockerSrvKey = "key.pem" 38 | ) 39 | 40 | func enable(he vmextension.HandlerEnvironment, d driver.DistroDriver) error { 41 | settings, err := parseSettings(he.HandlerEnvironment.ConfigFolder) 42 | if err != nil { 43 | return err 44 | } 45 | 46 | dockerInstallCmd := "" 47 | composeUrl := "" 48 | switch settings.AzureEnv { 49 | case "AzureChinaCloud": 50 | dockerInstallCmd = "curl -sSL https://mirror.azure.cn/repo/install-docker-engine.sh | sh -s -- --mirror AzureChinaCloud" 51 | composeUrl = composeUrlAzureChina 52 | case "AzureCloud", "": 53 | dockerInstallCmd = "curl -sSL https://get.docker.com/ | sh" 54 | composeUrl = composeUrlGlobal 55 | default: 56 | return fmt.Errorf("invalid environment name: %s", settings.AzureEnv) 57 | } 58 | 59 | // Install docker daemon 60 | log.Printf("++ install docker") 61 | if _, err := exec.LookPath("docker"); err == nil { 62 | log.Printf("docker already installed. not re-installing") 63 | } else { 64 | // TODO(ahmetb) Temporary retry logic around installation for serialization 65 | // problem in Azure VM Scale Sets. In case of scale-up event, the new VM with 66 | // multiple extensions (such as Linux Diagnostics and Docker Extension) will install 67 | // the extensions in parallel and that will result in non-deterministic 68 | // acquisition of dpkg lock (apt-get install) and thus causing one of the 69 | // extensions to fail. 70 | // 71 | // Adding this temporary retry logic just for Linux Diagnostics extension 72 | // assuming it will take at most 5 minutes to be done with apt-get lock. 73 | // 74 | // This retry logic should be removed once the issue is fixed on the resource 75 | // provider layer. 
76 | 77 | var ( 78 | nRetries = 6 79 | retryInterval = time.Minute * 1 80 | ) 81 | 82 | for nRetries > 0 { 83 | if err := d.InstallDocker(dockerInstallCmd); err != nil { 84 | nRetries-- 85 | if nRetries == 0 { 86 | return err 87 | } 88 | log.Printf("install failed. remaining attempts=%d. error=%v", nRetries, err) 89 | log.Printf("sleeping %s", retryInterval) 90 | time.Sleep(retryInterval) 91 | } else { 92 | break 93 | } 94 | } 95 | } 96 | log.Printf("-- install docker") 97 | 98 | // Install docker-compose 99 | log.Printf("++ install docker-compose") 100 | if err := installCompose(composeBinPath(d), composeUrl); err != nil { 101 | return fmt.Errorf("error installing docker-compose: %v", err) 102 | } 103 | log.Printf("-- install docker-compose") 104 | 105 | // Add user to 'docker' group to user docker as non-root 106 | u, err := util.GetAzureUser() 107 | if err != nil { 108 | return fmt.Errorf("failed to get provisioned user: %v", err) 109 | } 110 | log.Printf("++ add user to docker group") 111 | if out, err := executil.Exec("usermod", "-aG", "docker", u); err != nil { 112 | log.Printf("%s", string(out)) 113 | return err 114 | } 115 | log.Printf("-- add user to docker group") 116 | 117 | // Install docker remote access certs 118 | log.Printf("++ setup docker certs") 119 | if err := installDockerCerts(*settings, dockerCfgDir); err != nil { 120 | return fmt.Errorf("error installing docker certs: %v", err) 121 | } 122 | log.Printf("-- setup docker certs") 123 | 124 | // Update dockeropts 125 | log.Printf("++ update dockeropts") 126 | restartNeeded, err := updateDockerOpts(d, getArgs(*settings, d)) 127 | if err != nil { 128 | return fmt.Errorf("failed to update dockeropts: %v", err) 129 | } 130 | log.Printf("restart needed: %v", restartNeeded) 131 | log.Printf("-- update dockeropts") 132 | 133 | // Restart docker 134 | log.Printf("++ restart docker") 135 | if !restartNeeded { 136 | log.Printf("no restart needed. 
issuing only a start command.") 137 | _ = d.StartDocker() // ignore error as it already may be running due to multiple calls to enable 138 | } else { 139 | log.Printf("restarting docker-engine") 140 | if err := d.RestartDocker(); err != nil { 141 | return err 142 | } 143 | } 144 | time.Sleep(3 * time.Second) // wait for instance to come up 145 | log.Printf("-- restart docker") 146 | 147 | // Login Docker registry server 148 | log.Printf("++ login docker registry") 149 | if err := loginRegistry(settings.Login); err != nil { 150 | return err 151 | } 152 | log.Printf("-- login docker registry") 153 | 154 | // Compose Up 155 | log.Printf("++ compose up") 156 | if err := composeUp(d, settings.ComposeJson, settings.ComposeEnv, settings.ComposeProtectedEnv); err != nil { 157 | return fmt.Errorf("'docker-compose up' failed: %v. Check logs at %s.", err, filepath.Join(he.HandlerEnvironment.LogFolder, LogFilename)) 158 | } 159 | log.Printf("-- compose up") 160 | return nil 161 | } 162 | 163 | // installCompose download docker-compose from given url and saves to the specified path if it 164 | // is not already installed. 165 | func installCompose(path string, url string) error { 166 | // Check if already installed at path. 
167 | if ok, err := util.PathExists(path); err != nil { 168 | return err 169 | } else if ok { 170 | log.Printf("docker-compose is already installed at %s", path) 171 | return nil 172 | } 173 | 174 | // Create dir if not exists 175 | dir := filepath.Dir(path) 176 | ok, err := util.PathExists(dir) 177 | if err != nil { 178 | return err 179 | } else if !ok { 180 | if err := os.MkdirAll(dir, 755); err != nil { 181 | return err 182 | } 183 | } 184 | 185 | log.Printf("Downloading compose from %s", url) 186 | resp, err := http.Get(url) 187 | if err != nil { 188 | return fmt.Errorf("error downloading docker-compose: %v", err) 189 | } 190 | if resp.StatusCode/100 != 2 { 191 | return fmt.Errorf("response status code from %s: %s", url, resp.Status) 192 | } 193 | defer resp.Body.Close() 194 | 195 | f, err := os.OpenFile(path, os.O_RDWR|os.O_CREATE, 0777) 196 | if err != nil { 197 | return fmt.Errorf("error creating %s: %v", path, err) 198 | } 199 | 200 | defer f.Close() 201 | if _, err := io.Copy(f, resp.Body); err != nil { 202 | return fmt.Errorf("failed to save response body to %s: %v", path, err) 203 | } 204 | return nil 205 | } 206 | 207 | // loginRegistry calls the `docker login` command to authenticate the engine to the 208 | // specified registry with given credentials. 209 | func loginRegistry(s dockerLoginSettings) error { 210 | if !s.HasLoginInfo() { 211 | log.Println("registry login not specificied") 212 | return nil 213 | } 214 | opts := []string{ 215 | "login", 216 | "--email=" + s.Email, 217 | "--username=" + s.Username, 218 | "--password=" + s.Password, 219 | } 220 | if s.Server != "" { 221 | opts = append(opts, s.Server) 222 | } 223 | _, err := executil.Exec("docker", opts...) 224 | if err != nil { 225 | return errors.New("'docker login' failed") 226 | } 227 | return nil 228 | } 229 | 230 | // composeBinPath returns the path docker-compose binary should be installed at 231 | // on the host operating system. 
// composeUp converts given json to yaml, saves to a file on the host and
// uses `docker-compose up -d` to create the containers.
//
// Environment variables from publicEnv/protectedEnv are exported into this
// process for the duration of the compose run and unset (via defer) when
// this function returns. Protected values are intentionally not logged.
func composeUp(d driver.DistroDriver, json map[string]interface{}, publicEnv, protectedEnv map[string]string) error {
	if len(json) == 0 {
		log.Println("docker-compose config not specified, noop")
		return nil
	}

	// Convert json to yaml
	// NOTE: the local `yaml` result shadows the candiedyaml package alias
	// after this statement.
	yaml, err := yaml.Marshal(json)
	if err != nil {
		return fmt.Errorf("error converting to compose.yml: %v", err)
	}

	// World-writable dir/file: compose config is not secret and may be edited
	// by the admin user — presumably intentional; TODO(review) confirm 0777/0666.
	if err := os.MkdirAll(composeYmlDir, 0777); err != nil {
		return fmt.Errorf("failed creating %s: %v", composeYmlDir, err)
	}
	log.Printf("Using compose yaml:>>>>>\n%s\n<<<<<", string(yaml))
	ymlPath := filepath.Join(composeYmlDir, composeYml)
	if err := ioutil.WriteFile(ymlPath, yaml, 0666); err != nil {
		return fmt.Errorf("error writing %s: %v", ymlPath, err)
	}

	// Ensure the map is writable before injecting defaults below.
	if publicEnv == nil {
		publicEnv = make(map[string]string)
	}

	// set timeout for docker-compose -> docker-engine interactions.
	// When downloading large images, docker-compose intermittently times out
	// (gh#docker/compose/issues/2186) (gh#Azure/azure-docker-extension/issues/87).
	if _, ok := publicEnv["COMPOSE_HTTP_TIMEOUT"]; !ok {
		publicEnv["COMPOSE_HTTP_TIMEOUT"] = fmt.Sprintf("%d", composeTimeoutSecs)
	}

	// provide a consistent default project name for docker-compose. this is to prevent
	// inconsistencies that may occur when we change where docker-compose.yml lives.
	if _, ok := publicEnv["COMPOSE_PROJECT_NAME"]; !ok {
		publicEnv["COMPOSE_PROJECT_NAME"] = composeProject
	}

	// set public environment variables to be used in docker-compose
	for k, v := range publicEnv {
		log.Printf("Setting docker-compose environment variable %q=%q.", k, v)
		os.Setenv(k, v)
		defer os.Unsetenv(k)
	}

	// set protected environment variables to be used in docker-compose
	// (values deliberately omitted from the log line)
	for k, v := range protectedEnv {
		log.Printf("Setting protected docker-compose environment variable %q.", k)
		os.Setenv(k, v)
		defer os.Unsetenv(k)
	}

	// Run `docker-compose -f <yml> up -d`, discarding stdout.
	return executil.ExecPipeToFds(executil.Fds{Out: ioutil.Discard}, composeBinPath(d), "-f", ymlPath, "up", "-d")
}
strings.TrimSpace(v.src) 327 | f, err := base64.StdEncoding.DecodeString(in) 328 | if err != nil { 329 | // Fallback to original file input 330 | f = []byte(in) 331 | } 332 | 333 | if err := ioutil.WriteFile(v.dst, f, 0600); err != nil { 334 | return fmt.Errorf("error writing certificate: %v", err) 335 | } 336 | } 337 | return nil 338 | } 339 | 340 | func updateDockerOpts(dd driver.DistroDriver, args string) (bool, error) { 341 | log.Printf("Updating daemon args to: %s", args) 342 | restartNeeded, err := dd.UpdateDockerArgs(args) 343 | if err != nil { 344 | return false, fmt.Errorf("error updating DOCKER_OPTS: %v", err) 345 | } 346 | return restartNeeded, nil 347 | } 348 | 349 | // getArgs provides set of arguments that should be used in updating Docker 350 | // daemon options based on the distro. 351 | func getArgs(s DockerHandlerSettings, dd driver.DistroDriver) string { 352 | args := dd.BaseOpts() 353 | 354 | if s.Certs.HasDockerCerts() { 355 | tls := []string{"--tlsverify", 356 | fmt.Sprintf("--tlscacert=%s", filepath.Join(dockerCfgDir, dockerCaCert)), 357 | fmt.Sprintf("--tlscert=%s", filepath.Join(dockerCfgDir, dockerSrvCert)), 358 | fmt.Sprintf("--tlskey=%s", filepath.Join(dockerCfgDir, dockerSrvKey)), 359 | } 360 | args = append(args, tls...) 361 | } 362 | 363 | if s.Docker.Port != "" { 364 | args = append(args, fmt.Sprintf("-H=0.0.0.0:%s", s.Docker.Port)) 365 | } 366 | 367 | if len(s.Docker.Options) > 0 { 368 | args = append(args, s.Docker.Options...) 
369 | } 370 | 371 | return strings.Join(args, " ") 372 | } 373 | -------------------------------------------------------------------------------- /op-enable_test.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "testing" 5 | ) 6 | 7 | func Test_composeYaml(t *testing.T) { 8 | var m = map[string]interface{}{ 9 | "db": map[string]interface{}{ 10 | "image": "postgres"}, 11 | "web": map[string]interface{}{ 12 | "image": "myweb", 13 | "links": []interface{}{"db"}, 14 | "ports": []interface{}{"8000:8000"}}} 15 | 16 | expected := `db: 17 | image: postgres 18 | web: 19 | image: myweb 20 | links: 21 | - db 22 | ports: 23 | - 8000:8000 24 | ` 25 | 26 | yaml, err := composeYaml(m) 27 | if err != nil { 28 | t.Fatal(err) 29 | } 30 | if yaml != expected { 31 | t.Fatalf("got wrong yaml: '%s'\nexpected: '%s'", yaml, expected) 32 | } 33 | } 34 | -------------------------------------------------------------------------------- /op-install.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "github.com/Azure/azure-docker-extension/pkg/driver" 5 | "github.com/Azure/azure-docker-extension/pkg/vmextension" 6 | ) 7 | 8 | func install(he vmextension.HandlerEnvironment, d driver.DistroDriver) error { 9 | log.Printf("installing is deferred to the enable step to avoid timeouts.") 10 | return nil 11 | } 12 | -------------------------------------------------------------------------------- /op-uninstall.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "os" 5 | 6 | "github.com/Azure/azure-docker-extension/pkg/driver" 7 | "github.com/Azure/azure-docker-extension/pkg/vmextension" 8 | ) 9 | 10 | func uninstall(he vmextension.HandlerEnvironment, d driver.DistroDriver) error { 11 | log.Println("++ uninstall docker") 12 | if err := d.UninstallDocker(); err != nil { 13 | return err 14 | } 
15 | log.Println("-- uninstall docker") 16 | 17 | log.Println("++ uninstall docker-compose") 18 | if err := uninstallDockerCompose(d); err != nil { 19 | return err 20 | } 21 | log.Println("++ uninstall docker-compose") 22 | return nil 23 | } 24 | 25 | func uninstallDockerCompose(d driver.DistroDriver) error { 26 | return os.RemoveAll(composeBinPath(d)) 27 | } 28 | -------------------------------------------------------------------------------- /op-update.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "github.com/Azure/azure-docker-extension/pkg/driver" 5 | "github.com/Azure/azure-docker-extension/pkg/vmextension" 6 | ) 7 | 8 | func update(he vmextension.HandlerEnvironment, d driver.DistroDriver) error { 9 | log.Println("updating docker not implemented") 10 | return nil 11 | } 12 | -------------------------------------------------------------------------------- /op.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "github.com/Azure/azure-docker-extension/pkg/driver" 5 | "github.com/Azure/azure-docker-extension/pkg/vmextension" 6 | ) 7 | 8 | type OperationFunc func(vmextension.HandlerEnvironment, driver.DistroDriver) error 9 | 10 | type Op struct { 11 | f OperationFunc 12 | name string 13 | reportsStatus bool // determines if op should log to .status file 14 | } 15 | 16 | var operations = map[string]Op{ 17 | "install": Op{install, "Install Docker", false}, 18 | "uninstall": Op{uninstall, "Uninstall Docker", false}, 19 | "enable": Op{enable, "Enable Docker", true}, 20 | "update": Op{update, "Updating Docker", true}, 21 | "disable": Op{disable, "Disabling Docker", true}, 22 | } 23 | -------------------------------------------------------------------------------- /pkg/distro/distro.go: -------------------------------------------------------------------------------- 1 | package distro 2 | 3 | import ( 4 | "fmt" 5 | 
"io/ioutil" 6 | "os" 7 | "regexp" 8 | 9 | "github.com/Azure/azure-docker-extension/pkg/util" 10 | ) 11 | 12 | const ( 13 | lsbReleasePath = "/etc/lsb-release" 14 | redhatReleasePath = "/etc/redhat-release" 15 | centosReleasePath = "/etc/centos-release" 16 | 17 | RhelID = "Red Hat Enterprise Linux Server" 18 | CentosID = "CentOS" 19 | ) 20 | 21 | type Info struct{ Id, Release string } 22 | 23 | func (d Info) String() string { 24 | return fmt.Sprintf("%s %s", d.Id, d.Release) 25 | } 26 | 27 | type distroReleaseInfo interface { 28 | Get() (Info, error) 29 | } 30 | 31 | // lsbReleaseInfo parses /etc/lsb-release to return Distro 32 | // ID and Release Number 33 | type lsbReleaseInfo struct{} 34 | 35 | func (l lsbReleaseInfo) Get() (Info, error) { 36 | b, err := ioutil.ReadFile(lsbReleasePath) 37 | if err != nil && os.IsNotExist(err) { 38 | return Info{}, fmt.Errorf("Could not find distro info at %s", lsbReleasePath) 39 | } 40 | 41 | return l.parse(b) 42 | } 43 | 44 | func (l lsbReleaseInfo) parse(b []byte) (Info, error) { 45 | var d Info 46 | m, err := util.ParseINI(string(b)) 47 | if err != nil { 48 | return d, fmt.Errorf("Error parsing distro info: %v. info=%q", err, b) 49 | } 50 | 51 | fields := []struct { 52 | key string 53 | val *string 54 | }{ 55 | {"DISTRIB_ID", &d.Id}, 56 | {"DISTRIB_RELEASE", &d.Release}, 57 | } 58 | for _, f := range fields { 59 | v, ok := m[f.key] 60 | if !ok { 61 | return d, fmt.Errorf("Key %s not found in LSB info. info=%q", f.key, b) 62 | } 63 | *f.val = v 64 | } 65 | return d, nil 66 | } 67 | 68 | // centosReleaseInfo parses release information of distributions in CentOS family such as 69 | // RHEL and CentOS from /etc/redhat-release and /etc/centos-release. 70 | type centosReleaseInfo struct { 71 | path string 72 | distro string 73 | } 74 | 75 | // parseVersion extracts a version string from given release string or returns empty string 76 | // if it is not found. 
Version should be in form 'n.n.[n.[n.[...]]]' 77 | func (c centosReleaseInfo) parseVersion(release []byte) string { 78 | r := regexp.MustCompile(`[\d+\.]+[\d+]`) 79 | return string(r.Find([]byte(release))) 80 | } 81 | 82 | func (c centosReleaseInfo) Get() (Info, error) { 83 | b, err := ioutil.ReadFile(c.path) 84 | if err != nil && os.IsNotExist(err) { 85 | return Info{}, fmt.Errorf("Could not find distro info at %s", c.path) 86 | } 87 | 88 | version := c.parseVersion(b) 89 | if version == "" { 90 | return Info{}, fmt.Errorf("cannot extract version from release string: %q", b) 91 | } 92 | return Info{ 93 | Id: c.distro, 94 | Release: version, 95 | }, nil 96 | } 97 | 98 | func GetDistro() (Info, error) { 99 | src, err := releaseInfoSource() 100 | if err != nil { 101 | return Info{}, err 102 | } 103 | return src.Get() 104 | } 105 | 106 | func releaseInfoSource() (distroReleaseInfo, error) { 107 | // LSB 108 | if ok, err := util.PathExists(lsbReleasePath); err != nil { 109 | return nil, err 110 | } else if ok { 111 | return lsbReleaseInfo{}, nil 112 | } 113 | 114 | // RedHat/CentOS. Checking for CentOS first as CentOS contains 115 | // both centos-release and redhat-release and this path-existence 116 | // check makes it look like RHEL. 
117 | if ok, err := util.PathExists(centosReleasePath); err != nil { 118 | return nil, err 119 | } else if ok { 120 | return centosReleaseInfo{centosReleasePath, CentosID}, nil 121 | } 122 | if ok, err := util.PathExists(redhatReleasePath); err != nil { 123 | return nil, err 124 | } else if ok { 125 | return centosReleaseInfo{redhatReleasePath, RhelID}, nil 126 | } 127 | 128 | // Unknown 129 | return nil, fmt.Errorf("could not determine distro") 130 | } 131 | -------------------------------------------------------------------------------- /pkg/distro/distro_test.go: -------------------------------------------------------------------------------- 1 | package distro 2 | 3 | import ( 4 | "testing" 5 | ) 6 | 7 | func Test_GetDistro(t *testing.T) { 8 | d, err := GetDistro() 9 | if err != nil { 10 | t.Fatalf("failed to get distro: %v", err) 11 | } 12 | if d.Id == "" { 13 | t.Fatal("no distro id") 14 | } 15 | if d.Release == "" { 16 | t.Fatal("no distro release") 17 | } 18 | t.Logf("Distro: %#v", d) 19 | } 20 | 21 | func Test_lsbReleaseInfo_parse(t *testing.T) { 22 | s := []byte(`DISTRIB_ID=Ubuntu 23 | DISTRIB_RELEASE=14.04 24 | DISTRIB_CODENAME=trusty 25 | DISTRIB_DESCRIPTION="Ubuntu 14.04.3 LTS"`) 26 | i := lsbReleaseInfo{} 27 | d, err := i.parse(s) 28 | if err != nil { 29 | t.Fatal(err) 30 | } 31 | if d.Id != "Ubuntu" { 32 | t.Fatalf("wrong disro id: %s", d.Id) 33 | } 34 | if d.Release != "14.04" { 35 | t.Fatalf("wrong disro release: %s", d.Release) 36 | } 37 | } 38 | 39 | func Test_centosReleaseInfo_parseVersion(t *testing.T) { 40 | cases := []struct { 41 | in string 42 | out string 43 | }{ 44 | {"No version at all", ""}, 45 | {"No minor version 3", ""}, 46 | {"Should extract only numeric part 3.2a4.5", "3.2"}, 47 | {"Should extract only first one 3.2 4.5", "3.2"}, 48 | {"CentOS Linux release 7.1.1503 (Core)", "7.1.1503"}, 49 | {"Red Hat Enterprise Linux Server release 7.2 (Maipo)", "7.2"}, 50 | {"Foo 1.22.333.4444.55555", "1.22.333.4444.55555"}, 51 | } 52 | 53 | i := 
// Editor describes an implementation that can
// take an init config for the docker service, modify the start
// arguments and return the new init config contents.
type Editor interface {
	// ChangeOpts rewrites contents so that the docker daemon is started
	// with the given args, returning the updated init config text.
	ChangeOpts(contents, args string) (out string, err error)
}
// SystemdUnitEditor rewrites the 'ExecStart=' line of a systemd unit as
// 'ExecStart=/usr/bin/dockerd <args>' (the old doc said /usr/bin/docker,
// contradicting the code). If no ExecStart line exists, it returns an error.
type SystemdUnitEditor struct{}

// ChangeOpts replaces every ExecStart= line in contents with a dockerd
// invocation carrying the given args.
func (e SystemdUnitEditor) ChangeOpts(contents, args string) (string, error) {
	cmd := fmt.Sprintf("ExecStart=/usr/bin/dockerd %s", args)
	r := regexp.MustCompile("ExecStart=.*")

	if r.FindString(contents) == "" {
		return "", errors.New("systemd unit editor could not find ExecStart")
	}
	// ReplaceAllString already returns a string; the old string(...) wrapper
	// was a redundant conversion.
	return r.ReplaceAllString(contents, cmd), nil
}
-------------------------------------------------------------------------------- /pkg/dockeropts/upstart.go: -------------------------------------------------------------------------------- 1 | package dockeropts 2 | 3 | import ( 4 | "bufio" 5 | "fmt" 6 | "strings" 7 | ) 8 | 9 | // UpstartCfgEditor finds the line that contains 'DOCKER_OPTS=' and 10 | // replaces with the given args. If not found, appends a new line 11 | // with given configuration. 12 | type UpstartCfgEditor struct{} 13 | 14 | func (e UpstartCfgEditor) ChangeOpts(contents, args string) (string, error) { 15 | var ( 16 | out = []string{} 17 | sc = bufio.NewScanner(strings.NewReader(contents)) 18 | replaced = false 19 | cfg = fmt.Sprintf(`DOCKER_OPTS="%s"`, args) 20 | ) 21 | 22 | for sc.Scan() { 23 | line := sc.Text() 24 | if !replaced && strings.Contains(line, "DOCKER_OPTS=") { 25 | replaced = true 26 | line = cfg 27 | } 28 | out = append(out, line) 29 | } 30 | if err := sc.Err(); err != nil { 31 | return "", err 32 | } 33 | if !replaced { 34 | out = append(out, cfg) 35 | } 36 | // Reconstruct 37 | file := strings.Join(out, "\n") 38 | return file, nil 39 | } 40 | -------------------------------------------------------------------------------- /pkg/dockeropts/upstart_test.go: -------------------------------------------------------------------------------- 1 | package dockeropts 2 | 3 | import ( 4 | "testing" 5 | ) 6 | 7 | func TestUpstartEditor_NoContent(t *testing.T) { 8 | out, err := UpstartCfgEditor{}.ChangeOpts("", "-d --tlsverify") 9 | if err != nil { 10 | t.Fatal(err) 11 | } 12 | expected := `DOCKER_OPTS="-d --tlsverify"` 13 | if out != expected { 14 | t.Fatal("out:%s\nexpected:%s", out, expected) 15 | } 16 | } 17 | 18 | func TestUpstartEditor_UbuntuDefault(t *testing.T) { 19 | in := `# Docker Upstart and SysVinit configuration file 20 | 21 | # Customize location of Docker binary (especially for development testing). 
22 | #DOCKER="/usr/local/bin/dockerd" 23 | 24 | # Use DOCKER_OPTS to modify the daemon startup options. 25 | #DOCKER_OPTS="--dns 8.8.8.8 --dns 8.8.4.4" 26 | 27 | # If you need Docker to use an HTTP proxy, it can also be specified here. 28 | #export http_proxy="http://127.0.0.1:3128/" 29 | 30 | # This is also a handy place to tweak where Docker's temporary files go. 31 | #export TMPDIR="/mnt/bigdrive/docker-tmp"` 32 | 33 | expected := `# Docker Upstart and SysVinit configuration file 34 | 35 | # Customize location of Docker binary (especially for development testing). 36 | #DOCKER="/usr/local/bin/dockerd" 37 | 38 | # Use DOCKER_OPTS to modify the daemon startup options. 39 | DOCKER_OPTS="-d --tlsverify" 40 | 41 | # If you need Docker to use an HTTP proxy, it can also be specified here. 42 | #export http_proxy="http://127.0.0.1:3128/" 43 | 44 | # This is also a handy place to tweak where Docker's temporary files go. 45 | #export TMPDIR="/mnt/bigdrive/docker-tmp"` 46 | 47 | out, err := UpstartCfgEditor{}.ChangeOpts(in, "-d --tlsverify") 48 | if err != nil { 49 | t.Fatal(err) 50 | } 51 | if out != expected { 52 | t.Fatal("out:%s\nexpected:%s", out, expected) 53 | } 54 | } 55 | -------------------------------------------------------------------------------- /pkg/driver/centos.go: -------------------------------------------------------------------------------- 1 | package driver 2 | 3 | import ( 4 | "github.com/Azure/azure-docker-extension/pkg/executil" 5 | ) 6 | 7 | // CentOSDriver is for CentOS-based distros. 
// CentOSDriver is for CentOS-based distros. Service control comes from
// systemdBaseDriver; unit-file editing from systemdUnitOverwriteDriver.
type CentOSDriver struct {
	systemdBaseDriver
	systemdUnitOverwriteDriver
}

// InstallDocker runs the given installation command line through /bin/sh.
func (c CentOSDriver) InstallDocker(installCmd string) error {
	return executil.ExecPipe("/bin/sh", "-c", installCmd)
}

// UninstallDocker removes the docker-engine package via yum.
func (c CentOSDriver) UninstallDocker() error {
	return executil.ExecPipe("yum", "-y", "-q", "remove", "docker-engine.x86_64")
}

// DockerComposeDir returns where docker-compose is installed;
// /usr/local/bin is on $PATH on CentOS.
func (c CentOSDriver) DockerComposeDir() string { return "/usr/local/bin" }

// BaseOpts returns the default docker daemon arguments for CentOS.
func (c CentOSDriver) BaseOpts() []string {
	// centos socket activation is removed from get.docker.com installation script
	// therefore we don't use -H=fd:// on centos. See more context here:
	// - https://github.com/docker/docker/issues/23793
	// - https://github.com/docker/docker/pull/24804
	return []string{"-H=unix://"}
}
15 | type CoreOSDriver struct { 16 | systemdBaseDriver 17 | } 18 | 19 | func (c CoreOSDriver) InstallDocker(installCmd string) error { 20 | log.Println("CoreOS: docker already installed, noop") 21 | return nil 22 | } 23 | func (c CoreOSDriver) UninstallDocker() error { 24 | log.Println("CoreOS: docker cannot be uninstalled, noop") 25 | return nil 26 | } 27 | 28 | func (c CoreOSDriver) DockerComposeDir() string { return "/opt/bin" } 29 | 30 | func (c CoreOSDriver) BaseOpts() []string { return []string{} } 31 | 32 | func (c CoreOSDriver) UpdateDockerArgs(args string) (bool, error) { 33 | const dropInDir = "/run/systemd/system/docker.service.d" 34 | const dropInFile = "10-docker-extension.conf" 35 | filePath := filepath.Join(dropInDir, dropInFile) 36 | 37 | config := fmt.Sprintf(`[Service] 38 | Environment="DOCKER_OPTS=%s"`, args) 39 | 40 | // check if config file exists and needs an update 41 | if ok, _ := util.PathExists(filePath); ok { 42 | existing, err := ioutil.ReadFile(filePath) 43 | if err != nil { 44 | return false, fmt.Errorf("error reading %s: %v", filePath, err) 45 | } 46 | 47 | // no need to update config or restart service if goal config is already there 48 | if string(existing) == config { 49 | return false, nil 50 | } 51 | } 52 | 53 | if err := os.MkdirAll(dropInDir, 0755); err != nil { 54 | return false, fmt.Errorf("error creating %s dir: %v", dropInDir, err) 55 | } 56 | err := ioutil.WriteFile(filePath, []byte(config), 0644) 57 | log.Println("Written systemd service drop-in to disk.") 58 | return true, err 59 | } 60 | -------------------------------------------------------------------------------- /pkg/driver/driver.go: -------------------------------------------------------------------------------- 1 | package driver 2 | 3 | import ( 4 | "fmt" 5 | "strconv" 6 | "strings" 7 | 8 | "github.com/Azure/azure-docker-extension/pkg/distro" 9 | ) 10 | 11 | type DistroDriver interface { 12 | InstallDocker(installCmd string) error 13 | DockerComposeDir() string 
// GetDriver maps a detected distro to the DistroDriver implementation that
// knows how to install and configure docker on it. It returns an error for
// unrecognized or unsupported distros.
func GetDriver(d distro.Info) (DistroDriver, error) {
	// the quoted form appears because d.Id may carry surrounding quotes as
	// read from the os-release file — presumably; confirm against distro pkg.
	if d.Id == "CoreOS" || d.Id == "\"Container Linux by CoreOS\"" {
		return CoreOSDriver{}, nil
	} else if d.Id == "Ubuntu" {
		parts := strings.Split(d.Release, ".")
		if len(parts) == 0 {
			return nil, fmt.Errorf("invalid ubuntu version format: %s", d.Release)
		}
		major, err := strconv.Atoi(parts[0])
		if err != nil {
			return nil, fmt.Errorf("can't parse ubuntu version number: %s", parts[0])
		}

		// - <13: not supported
		// - 13.x, 14.x : uses upstart
		// - 15.x+: uses systemd
		if major < 13 {
			return nil, fmt.Errorf("Ubuntu 12 or older not supported. Got: %s", d)
		} else if major < 15 {
			return UbuntuUpstartDriver{}, nil
		} else {
			return UbuntuSystemdDriver{}, nil
		}
	} else if d.Id == distro.RhelID {
		return RHELDriver{}, nil
	} else if d.Id == distro.CentosID {
		return CentOSDriver{}, nil
	}

	return nil, fmt.Errorf("Distro not supported: %s", d)
}

// RHELDriver is for Red Hat Enterprise Linux. It reuses all CentOS
// behavior except where overridden.
type RHELDriver struct {
	CentOSDriver
}
// DockerComposeDir for RHEL is different than CentOSDriver as CentOS
// has /usr/local/bin in $PATH and RHEL does not. Therefore using /usr/bin.
func (r RHELDriver) DockerComposeDir() string { return "/usr/bin" }

// systemdBaseDriver implements the docker service lifecycle operations for
// systemd-based distros via systemctl.
type systemdBaseDriver struct{}

// RestartDocker reloads systemd unit definitions and then restarts docker.
// daemon-reload is needed so that unit-file edits take effect.
func (d systemdBaseDriver) RestartDocker() error {
	if err := executil.ExecPipe("systemctl", "daemon-reload"); err != nil {
		return err
	}
	return executil.ExecPipe("systemctl", "restart", "docker")
}

// StartDocker starts the docker service.
func (d systemdBaseDriver) StartDocker() error {
	return executil.ExecPipe("systemctl", "start", "docker")
}

// StopDocker stops the docker service.
func (d systemdBaseDriver) StopDocker() error {
	return executil.ExecPipe("systemctl", "stop", "docker")
}
// systemdUnitOverwriteDriver is for distros where we modify docker.service
// file in-place.
type systemdUnitOverwriteDriver struct{}

// UpdateDockerArgs rewrites the daemon arguments inside the docker systemd
// unit file; returns true when the file changed and a restart is needed.
func (u systemdUnitOverwriteDriver) UpdateDockerArgs(args string) (bool, error) {
	const cfg = "/lib/systemd/system/docker.service"
	e := dockeropts.SystemdUnitEditor{}
	return rewriteOpts(e, cfg, args)
}

// BaseOpts returns daemon options always passed on these distros;
// -H=fd:// uses the socket handed over by systemd socket activation.
func (u systemdUnitOverwriteDriver) BaseOpts() []string {
	return []string{"-H=fd://"}
}

// ubuntuBaseDriver implements install/uninstall logic shared by all
// Ubuntu variants (upstart- and systemd-based).
type ubuntuBaseDriver struct{}

// InstallDocker runs the given install command line through /bin/sh.
func (u ubuntuBaseDriver) InstallDocker(installCmd string) error {
	return executil.ExecPipe("/bin/sh", "-c", installCmd)
}

// UninstallDocker purges the docker-engine package and removes
// no-longer-needed dependencies.
func (u ubuntuBaseDriver) UninstallDocker() error {
	if err := executil.ExecPipe("apt-get", "-qqy", "purge", "docker-engine"); err != nil {
		return err
	}
	return executil.ExecPipe("apt-get", "-qqy", "autoremove")
}

// DockerComposeDir returns /usr/local/bin for Ubuntu.
func (u ubuntuBaseDriver) DockerComposeDir() string { return "/usr/local/bin" }

// UbuntuSystemdDriver composes Ubuntu install logic with systemd service
// management and in-place unit editing (used for Ubuntu 15.x and newer,
// per GetDriver).
type UbuntuSystemdDriver struct {
	ubuntuBaseDriver
	systemdBaseDriver
	systemdUnitOverwriteDriver
}
// BaseOpts returns daemon options always passed on upstart-based Ubuntu;
// -H=unix:// makes docker listen on the default unix socket.
func (u UbuntuUpstartDriver) BaseOpts() []string {
	return []string{"-H=unix://"}
}

// UpdateDockerArgs rewrites DOCKER_OPTS in /etc/default/docker (the
// upstart daemon configuration); returns true when the file changed and
// the service needs a restart.
func (u UbuntuUpstartDriver) UpdateDockerArgs(args string) (bool, error) {
	const cfgPath = "/etc/default/docker"
	e := dockeropts.UpstartCfgEditor{}
	return rewriteOpts(e, cfgPath, args)
}

// upstartBaseDriver implements docker service lifecycle operations for
// upstart/SysV-style distros via the service(8) command.
type upstartBaseDriver struct{}

// RestartDocker registers the docker init script (update-rc.d ... defaults)
// and restarts the service.
func (d upstartBaseDriver) RestartDocker() error {
	if err := executil.ExecPipe("update-rc.d", "docker", "defaults"); err != nil {
		return err
	}
	return executil.ExecPipe("service", "docker", "restart")
}

// StartDocker starts the docker service.
func (d upstartBaseDriver) StartDocker() error {
	return executil.ExecPipe("service", "docker", "start")
}

// StopDocker stops the docker service.
func (d upstartBaseDriver) StopDocker() error {
	return executil.ExecPipe("service", "docker", "stop")
}
// rewriteOpts uses the specified dockeropts editor to modify the existing
// cfgFile with the specified args. It returns true when the file was
// rewritten (docker must be restarted) and false when the goal
// configuration was already in place.
//
// NOTE(review): contrary to the original comment, this function does NOT
// create a missing cfgFile or its directory — the ReadFile below fails if
// the file does not exist. Consequently the PathExists check further down
// is always true at that point and the file is read twice; confirm before
// simplifying.
func rewriteOpts(e dockeropts.Editor, cfgFile string, args string) (restartNeeded bool, err error) {
	in, err := ioutil.ReadFile(cfgFile)
	if err != nil {
		return false, fmt.Errorf("error reading %s: %v", cfgFile, err)
	}

	out, err := e.ChangeOpts(string(in), args)
	if err != nil {
		return false, fmt.Errorf("error updating settings at %s: %v", cfgFile, err)
	}

	// check if existing config file needs an update
	if ok, _ := util.PathExists(cfgFile); ok {
		existing, err := ioutil.ReadFile(cfgFile)
		if err != nil {
			return false, fmt.Errorf("error reading %s: %v", cfgFile, err)
		}

		// no need to update config or restart service if goal config is already there
		if string(existing) == out {
			return false, nil
		}
	}

	if err := ioutil.WriteFile(cfgFile, []byte(out), 0644); err != nil {
		return false, fmt.Errorf("error writing to %s: %v", cfgFile, err)
	}
	return true, nil
}
34 | } 35 | 36 | // ExecPipeToFds runs the program with specified args and given 37 | // out/err descriptiors. Non-specified (nil) descriptors will be 38 | // replaced with default out stream. 39 | func ExecPipeToFds(fds Fds, program string, args ...string) error { 40 | log.Printf("+++ invoke: %s %v", program, args) 41 | defer log.Printf("--- invoke end") 42 | cmd := osexec.Command(program, args...) 43 | 44 | // replace nil streams with default 45 | if fds.Out == nil { 46 | fds.Out = out 47 | } 48 | if fds.Err == nil { 49 | fds.Err = out 50 | } 51 | 52 | cmd.Stdout, cmd.Stderr = fds.Out, fds.Err 53 | err := cmd.Run() 54 | if err != nil { 55 | err = fmt.Errorf("executing %s %v failed: %v", program, args, err) 56 | } 57 | return err 58 | } 59 | 60 | // Exec is a convenience method to run programs with 61 | // arguments and return their combined stdout/stderr 62 | // output as bytes. 63 | func Exec(program string, args ...string) ([]byte, error) { 64 | var b bytes.Buffer 65 | cmd := osexec.Command(program, args...) 66 | cmd.Stdout = &b 67 | cmd.Stderr = &b 68 | err := cmd.Run() 69 | if err != nil { 70 | err = fmt.Errorf("executing %s failed: %v", program, err) 71 | } 72 | return b.Bytes(), err 73 | } 74 | 75 | // ExecWithStdin pipes given ReadCloser's contents to the stdin of executed 76 | // command and returns stdout as bytes and redirects stderr of executed command 77 | // stderr of executing process. 78 | func ExecWithStdin(in io.ReadCloser, program string, args ...string) ([]byte, error) { 79 | var b bytes.Buffer 80 | cmd := osexec.Command(program, args...) 
// Test_ExecOkProcess verifies Exec captures output of a successfully
// exiting process.
func Test_ExecOkProcess(t *testing.T) {
	out, err := Exec("date", `+%s`)
	if err != nil {
		t.Fatal(err)
	}
	if len(out) == 0 {
		t.Fatal("empty output")
	}
}

// Test_ExecBadProcess verifies a non-zero exit status surfaces as an error.
func Test_ExecBadProcess(t *testing.T) {
	_, err := Exec("false")
	if err == nil {
		t.Fatal("expected error")
	}
	t.Logf("%v", err)
}

// Test_ExecWithStdin verifies stdin contents are piped to the command
// and its stdout is returned verbatim.
func Test_ExecWithStdin(t *testing.T) {
	s := "1\n2\n3"
	in := ioutil.NopCloser(strings.NewReader(s))
	b, err := ExecWithStdin(in, "cat")
	if err != nil {
		t.Fatal(err)
	}
	out := string(b)
	if out != s {
		t.Fatalf("got wrong string: %s, expected: %s", out, s)
	}
}
const (
	// filename of the seqnum marker kept under the OS temp dir.
	filename = "docker-extension.seqnum"
)

// Get reads the persisted extension handler sequence number.
// It returns exists=false with no error when the file was never written.
func Get() (exists bool, seqnum int, err error) {
	b, err := ioutil.ReadFile(filePath())
	if err != nil {
		if os.IsNotExist(err) {
			return false, 0, nil
		}
		return false, 0, err
	}
	n, err := strconv.Atoi(string(b))
	if err != nil {
		return true, 0, fmt.Errorf("seqnumfile: cannot atoi %q: %v", b, err)
	}
	return true, n, nil
}

// Set persists the given sequence number to the seqnum file.
func Set(seqnum int) error {
	return ioutil.WriteFile(filePath(), []byte(fmt.Sprintf("%d", seqnum)), 0644)
}

// Delete removes the seqnum file; it is not an error if it does not exist.
func Delete() error {
	return os.RemoveAll(filePath())
}

// filePath returns the full path of the seqnum file under the temp dir.
func filePath() string {
	return filepath.Join(os.TempDir(), filename)
}

const (
	// OvfEnvPath is the provisioning metadata file written by waagent.
	OvfEnvPath = "/var/lib/waagent/ovf-env.xml"
)

// ParseINI parses a basic INI config file format into a map.
// Example expected format:
//   KEY=VAL
//   KEY2=VAL2
// Values may themselves contain '=' characters (e.g. OPTS=-H=fd://);
// only the first '=' on a line separates key from value. Lines with no
// '=' at all are rejected.
func ParseINI(s string) (map[string]string, error) {
	m := make(map[string]string)
	sc := bufio.NewScanner(strings.NewReader(s))

	for sc.Scan() {
		l := sc.Text() // format: K=V
		// FIX: SplitN instead of Split so values containing '=' parse
		// instead of being rejected; behavior for valid K=V lines is
		// unchanged.
		p := strings.SplitN(l, "=", 2)
		if len(p) != 2 {
			return nil, fmt.Errorf("Unexpected config line: %q", l)
		}
		m[p[0]] = p[1]
	}
	if err := sc.Err(); err != nil {
		return nil, fmt.Errorf("Could not scan config file: %v", err)
	}
	return m, nil
}
// GetAzureUser returns the username provided at VM provisioning time to
// Azure, read from the waagent ovf-env.xml file at OvfEnvPath.
func GetAzureUser() (string, error) {
	b, err := ioutil.ReadFile(OvfEnvPath)
	if err != nil {
		return "", err
	}

	// anonymous struct mirrors just the XML path we need:
	// Environment > ProvisioningSection > LinuxProvisioningConfigurationSet > UserName
	var v struct {
		XMLName  xml.Name `xml:"Environment"`
		UserName string   `xml:"ProvisioningSection>LinuxProvisioningConfigurationSet>UserName"`
	}
	if err := xml.Unmarshal(b, &v); err != nil {
		return "", err
	}
	return v.UserName, nil
}

// PathExists checks if a path is a directory or file on the
// filesystem. It returns (false, nil) for a cleanly missing path and a
// non-nil error only for other stat failures (e.g. permission denied).
func PathExists(path string) (bool, error) {
	_, err := os.Stat(path)
	if err == nil {
		return true, nil
	}
	if os.IsNotExist(err) {
		return false, nil
	}
	return false, fmt.Errorf("util: error checking path %s: %v", path, err)
}
expected: %v, got: %v", c.out, m) 25 | } 26 | } 27 | } 28 | 29 | func Test_ScriptDir(t *testing.T) { 30 | s, err := ScriptDir() 31 | if err != nil { 32 | t.Fatal(err) 33 | } 34 | if s == "" { 35 | t.Fatal("returned script dir is empty") 36 | } 37 | st, err := os.Stat(s) 38 | if err != nil { 39 | t.Fatal(err) 40 | } 41 | if !st.Mode().IsDir() { 42 | t.Fatalf("%s is not dir") 43 | } 44 | t.Logf("Script dir: %s", s) 45 | } 46 | 47 | func Test_GetAzureUser(t *testing.T) { 48 | u, err := GetAzureUser() 49 | if err != nil { 50 | if os.IsNotExist(err) { 51 | t.Skipf("File not found, maybe not running on Azure? %s", OvfEnvPath) 52 | } 53 | t.Fatal(err) 54 | } 55 | t.Log(u) 56 | } 57 | 58 | func Test_PathExists(t *testing.T) { 59 | for _, v := range []struct { 60 | path string 61 | exists bool 62 | }{ 63 | {".", true}, 64 | {"/tmp", true}, 65 | {"/tmp/foobar", false}, 66 | } { 67 | ok, err := PathExists(v.path) 68 | if err != nil { 69 | t.Fatal(err) 70 | } 71 | if ok != v.exists { 72 | t.Fatal("got %v for %s, expected: %v", ok, v.path, v.exists) 73 | } 74 | } 75 | } 76 | -------------------------------------------------------------------------------- /pkg/vmextension/handlerenv.go: -------------------------------------------------------------------------------- 1 | package vmextension 2 | 3 | import ( 4 | "encoding/json" 5 | "fmt" 6 | "io/ioutil" 7 | "os" 8 | "path/filepath" 9 | "strings" 10 | ) 11 | 12 | // HandlerEnvFileName is the file name of the Handler Environment as placed by the 13 | // Azure Linux Guest Agent. 14 | const HandlerEnvFileName = "HandlerEnvironment.json" 15 | 16 | // HandlerEnvironment describes the handler environment configuration presented 17 | // to the extension handler by the Azure Linux Guest Agent. 
18 | type HandlerEnvironment struct { 19 | Version float64 `json:"version"` 20 | Name string `json:"name"` 21 | HandlerEnvironment struct { 22 | HeartbeatFile string `json:"heartbeatFile"` 23 | StatusFolder string `json:"statusFolder"` 24 | ConfigFolder string `json:"configFolder"` 25 | LogFolder string `json:"logFolder"` 26 | } 27 | } 28 | 29 | // GetHandlerEnv locates the HandlerEnvironment.json file by assuming it lives 30 | // next to or one level above the extension handler (read: this) executable, 31 | // reads, parses and returns it. 32 | func GetHandlerEnv() (he HandlerEnvironment, _ error) { 33 | dir, err := scriptDir() 34 | if err != nil { 35 | return he, fmt.Errorf("vmextension: cannot find base directory of the running process: %v", err) 36 | } 37 | paths := []string{ 38 | filepath.Join(dir, HandlerEnvFileName), // this level (i.e. executable is in [EXT_NAME]/.) 39 | filepath.Join(dir, "..", HandlerEnvFileName), // one up (i.e. executable is in [EXT_NAME]/bin/.) 40 | } 41 | var b []byte 42 | for _, p := range paths { 43 | o, err := ioutil.ReadFile(p) 44 | if err != nil && !os.IsNotExist(err) { 45 | return he, fmt.Errorf("vmextension: error examining HandlerEnvironment at '%s': %v", p, err) 46 | } else if err == nil { 47 | b = o 48 | break 49 | } 50 | } 51 | if b == nil { 52 | return he, fmt.Errorf("vmextension: Cannot find HandlerEnvironment at paths: %s", strings.Join(paths, ", ")) 53 | } 54 | return ParseHandlerEnv(b) 55 | } 56 | 57 | // scriptDir returns the absolute path of the running process. 58 | func scriptDir() (string, error) { 59 | p, err := filepath.Abs(os.Args[0]) 60 | if err != nil { 61 | return "", err 62 | } 63 | return filepath.Dir(p), nil 64 | } 65 | 66 | // ParseHandlerEnv parses the 67 | // /var/lib/waagent/[extension]/HandlerEnvironment.json format. 
68 | func ParseHandlerEnv(b []byte) (he HandlerEnvironment, _ error) { 69 | var hf []HandlerEnvironment 70 | 71 | if err := json.Unmarshal(b, &hf); err != nil { 72 | return he, fmt.Errorf("vmextension: failed to parse handler env: %v", err) 73 | } 74 | if len(hf) != 1 { 75 | return he, fmt.Errorf("vmextension: expected 1 config in parsed HandlerEnvironment, found: %v", len(hf)) 76 | } 77 | return hf[0], nil 78 | } 79 | -------------------------------------------------------------------------------- /pkg/vmextension/handlerenv_test.go: -------------------------------------------------------------------------------- 1 | package vmextension 2 | 3 | import ( 4 | "testing" 5 | ) 6 | 7 | func Test_ParseHandlerEnv(t *testing.T) { 8 | json := `[{ "name": "MSOpenTech.Extensions.DockerExtension", "version": 1.0, "handlerEnvironment": { "logFolder": "/var/log/azure/MSOpenTech.Extensions.DockerExtension/0.6.0.0", "configFolder": "/var/lib/waagent/MSOpenTech.Extensions.DockerExtension-0.6.0.0/config", "statusFolder": "/var/lib/waagent/MSOpenTech.Extensions.DockerExtension-0.6.0.0/status", "heartbeatFile": "/var/lib/waagent/MSOpenTech.Extensions.DockerExtension-0.6.0.0/heartbeat.log"}}]` 9 | c, err := ParseHandlerEnv([]byte(json)) 10 | if err != nil { 11 | t.Fatal(err) 12 | } 13 | t.Logf("Parsed: %#v", c) 14 | } 15 | -------------------------------------------------------------------------------- /pkg/vmextension/handlersettings.go: -------------------------------------------------------------------------------- 1 | package vmextension 2 | 3 | import ( 4 | "bytes" 5 | "encoding/base64" 6 | "encoding/json" 7 | "errors" 8 | "fmt" 9 | "io/ioutil" 10 | "path/filepath" 11 | 12 | "os/exec" 13 | ) 14 | 15 | const ( 16 | settingsFileSuffix = ".settings" 17 | ) 18 | 19 | type handlerSettingsFile struct { 20 | RuntimeSettings []struct { 21 | HandlerSettings handlerSettings `json:"handlerSettings"` 22 | } `json:"runtimeSettings"` 23 | } 24 | 25 | type handlerSettings struct { 26 | 
PublicSettings map[string]interface{} `json:"publicSettings"` 27 | ProtectedSettingsBase64 string `json:"protectedSettings"` 28 | SettingsCertThumbprint string `json:"protectedSettingsCertThumbprint"` 29 | } 30 | 31 | // settingsPath returns the full path to the .settings file with the 32 | // highest sequence number found in configFolder. 33 | func settingsPath(configFolder string) (string, error) { 34 | seq, err := FindSeqNum(configFolder) 35 | if err != nil { 36 | return "", fmt.Errorf("Cannot find seqnum: %v", err) 37 | } 38 | return filepath.Join(configFolder, fmt.Sprintf("%d%s", seq, settingsFileSuffix)), nil 39 | } 40 | 41 | // ReadSettings locates the .settings file and returns public settings 42 | // JSON, and protected settings JSON (by decrypting it with the keys in 43 | // configFolder). 44 | func ReadSettings(configFolder string) (public, protected map[string]interface{}, _ error) { 45 | cf, err := settingsPath(configFolder) 46 | if err != nil { 47 | return nil, nil, fmt.Errorf("canot locate settings file: %v", err) 48 | } 49 | hs, err := parseHandlerSettingsFile(cf) 50 | if err != nil { 51 | return nil, nil, fmt.Errorf("error parsing settings file: %v", err) 52 | } 53 | 54 | public = hs.PublicSettings 55 | if err := unmarshalProtectedSettings(configFolder, hs, &protected); err != nil { 56 | return nil, nil, fmt.Errorf("failed to parse protected settings: %v", err) 57 | } 58 | return public, protected, nil 59 | } 60 | 61 | // UnmarshalHandlerSettings unmarshals given publicSettings/protectedSettings types 62 | // assumed underlying values are JSON into references publicV/protectedV respectively 63 | // (of struct types that contain structured fields for settings). 
// UnmarshalHandlerSettings unmarshals the given publicSettings and
// protectedSettings maps (whose values are assumed JSON-compatible) into
// publicV/protectedV respectively — struct types that carry structured
// fields for the settings.
func UnmarshalHandlerSettings(publicSettings, protectedSettings map[string]interface{}, publicV, protectedV interface{}) error {
	if err := unmarshalSettings(publicSettings, &publicV); err != nil {
		return fmt.Errorf("failed to unmarshal public settings: %v", err)
	}
	if err := unmarshalSettings(protectedSettings, &protectedV); err != nil {
		return fmt.Errorf("failed to unmarshal protected settings: %v", err)
	}
	return nil
}

// unmarshalSettings makes a round-trip JSON marshaling and unmarshaling
// from in (a generic map) to v (the actual settings type): marshal the
// map back to JSON, then decode that JSON into the target struct.
func unmarshalSettings(in interface{}, v interface{}) error {
	s, err := json.Marshal(in)
	if err != nil {
		return fmt.Errorf("failed to marshal into json: %v", err)
	}
	if err := json.Unmarshal(s, &v); err != nil {
		return fmt.Errorf("failed to unmarshal json: %v", err)
	}
	return nil
}

// parseHandlerSettingsFile parses a handler settings file (e.g. 0.settings)
// and returns the handlerSettings of its single runtimeSettings entry.
// An empty file yields zero-value settings and no error — the agent
// writes an empty file when no configuration is specified.
func parseHandlerSettingsFile(path string) (h handlerSettings, _ error) {
	b, err := ioutil.ReadFile(path)
	if err != nil {
		return h, fmt.Errorf("Error reading %s: %v", path, err)
	}
	if len(b) == 0 { // if no config is specified, we get an empty file
		return h, nil
	}

	var f handlerSettingsFile
	if err := json.Unmarshal(b, &f); err != nil {
		return h, fmt.Errorf("error parsing json: %v", err)
	}
	if len(f.RuntimeSettings) != 1 {
		return h, fmt.Errorf("wrong runtimeSettings count. expected:1, got:%d", len(f.RuntimeSettings))
	}
	return f.RuntimeSettings[0].HandlerSettings, nil
}
// unmarshalProtectedSettings base64-decodes the protected settings from
// the handler runtime settings, decrypts them with the VM certificate
// pair referenced by the settings' thumbprint (via `openssl smime` on
// DER input) and unmarshals the resulting JSON into v. It is a no-op
// when there are no protected settings.
func unmarshalProtectedSettings(configFolder string, hs handlerSettings, v interface{}) error {
	if hs.ProtectedSettingsBase64 == "" {
		return nil
	}
	if hs.SettingsCertThumbprint == "" {
		return errors.New("HandlerSettings has protected settings but no cert thumbprint")
	}

	decoded, err := base64.StdEncoding.DecodeString(hs.ProtectedSettingsBase64)
	if err != nil {
		return fmt.Errorf("failed to decode base64: %v", err)
	}

	// go two levels up where certs are placed (/var/lib/waagent)
	crt := filepath.Join(configFolder, "..", "..", fmt.Sprintf("%s.crt", hs.SettingsCertThumbprint))
	prv := filepath.Join(configFolder, "..", "..", fmt.Sprintf("%s.prv", hs.SettingsCertThumbprint))

	// we use os/exec instead of azure-docker-extension/pkg/executil here as
	// other extension handlers depend on this package for parsing handler
	// settings.
	cmd := exec.Command("openssl", "smime", "-inform", "DER", "-decrypt", "-recip", crt, "-inkey", prv)
	var bOut, bErr bytes.Buffer
	cmd.Stdin = bytes.NewReader(decoded)
	cmd.Stdout = &bOut
	cmd.Stderr = &bErr

	if err := cmd.Run(); err != nil {
		return fmt.Errorf("decrypting protected settings failed: error=%v stderr=%s", err, string(bErr.Bytes()))
	}

	// decrypted: json object for protected settings
	if err := json.Unmarshal(bOut.Bytes(), &v); err != nil {
		return fmt.Errorf("failed to unmarshal decrypted settings json: %v", err)
	}
	return nil
}
// FindSeqNum finds the highest sequence number among files named like
// 0.settings, 1.settings and so on under configFolder. It returns an
// error when no .settings files exist or a file name cannot be parsed.
func FindSeqNum(configFolder string) (int, error) {
	g, err := filepath.Glob(configFolder + "/*.settings")
	if err != nil {
		return 0, err
	}
	// FIX: was make([]int, len(g)) followed by append, which padded the
	// slice with len(g) spurious zero entries.
	seqs := make([]int, 0, len(g))
	for _, v := range g {
		f := filepath.Base(v)
		i, err := strconv.Atoi(strings.Replace(f, ".settings", "", 1))
		if err != nil {
			return 0, fmt.Errorf("Can't parse int from filename: %s", f)
		}
		seqs = append(seqs, i)
	}
	if len(seqs) == 0 {
		return 0, fmt.Errorf("Can't find out seqnum from %s, not enough files.", configFolder)
	}
	// descending sort: the highest sequence number ends up first
	sort.Sort(sort.Reverse(sort.IntSlice(seqs)))
	return seqs[0], nil
}
// StatusReport is the JSON document serialized into .status files read by
// the Azure guest agent; it is a list of StatusItem entries.
type StatusReport []StatusItem

// StatusItem is a single status entry carrying a schema version and a
// UTC timestamp alongside the status payload.
type StatusItem struct {
	Version      float64 `json:"version"`
	TimestampUTC string  `json:"timestampUTC"`
	Status       Status  `json:"status"`
}

// Type enumerates the status values written to .status files.
type Type string

const (
	StatusTransitioning Type = "transitioning"
	StatusError         Type = "error"
	StatusSuccess       Type = "success"
)

// Status describes the outcome of a handler operation.
type Status struct {
	Operation        string           `json:"operation"`
	Status           Type             `json:"status"`
	FormattedMessage FormattedMessage `json:"formattedMessage"`
}

// FormattedMessage is a human-readable, language-tagged message.
type FormattedMessage struct {
	Lang    string `json:"lang"`
	Message string `json:"message"`
}
// NewStatus builds a single-item StatusReport (schema version 1.0) with
// the current UTC time in RFC3339 and an English-language message.
func NewStatus(t Type, operation, message string) StatusReport {
	return []StatusItem{
		{
			Version:      1.0,
			TimestampUTC: time.Now().UTC().Format(time.RFC3339),
			Status: Status{
				Operation: operation,
				Status:    t,
				FormattedMessage: FormattedMessage{
					Lang:    "en",
					Message: message},
			},
		},
	}
}

// marshal serializes the report as tab-indented JSON.
func (r StatusReport) marshal() ([]byte, error) {
	return json.MarshalIndent(r, "", "\t")
}

// Save persists the status message to the specified status folder using the
// sequence number. The operation consists of writing to a temporary file in
// the same folder and moving it to the final destination for atomicity.
//
// NOTE(review): TempFile creates the file with mode 0600 and the later
// WriteFile(..., 0644) does not chmod an existing file, so the final
// status file keeps mode 0600 — confirm the agent can read it.
func (r StatusReport) Save(statusFolder string, seqNum int) error {
	fn := fmt.Sprintf("%d.status", seqNum)
	path := filepath.Join(statusFolder, fn)
	tmpFile, err := ioutil.TempFile(statusFolder, fn)
	if err != nil {
		return fmt.Errorf("status: failed to create temporary file: %v", err)
	}
	tmpFile.Close()

	b, err := r.marshal()
	if err != nil {
		return fmt.Errorf("status: failed to marshal into json: %v", err)
	}
	if err := ioutil.WriteFile(tmpFile.Name(), b, 0644); err != nil {
		return fmt.Errorf("status: failed to path=%s error=%v", tmpFile.Name(), err)
	}

	// atomic publish: rename within the same folder
	if err := os.Rename(tmpFile.Name(), path); err != nil {
		return fmt.Errorf("status: failed to move to path=%s error=%v", path, err)
	}
	return nil
}
err := ioutil.ReadFile(filepath.Join(dir, "2.status")) 23 | if err != nil { 24 | t.Fatal(err) 25 | } 26 | 27 | if len(out) == 0 { 28 | t.Fatal("file empty") 29 | } 30 | } 31 | -------------------------------------------------------------------------------- /scripts/run-in-background.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -eu 3 | readonly SCRIPT_DIR=$(dirname "$0") 4 | 5 | # Script logs its output (stdout/stderr) simultaneously to a file 6 | # as waagent does not capture output of processes it starts. 7 | exec > >(tee -ia /var/log/azure-docker-extension-enable.log) 8 | exec 2>&1 9 | 10 | # This script kicks off the ./bin/docker-extension in the 11 | # background and disowns it with nohup. This is a workaround 12 | # for the 5-minute time limit for 'enable' step and 15-minute 13 | # time limit for 'install' step of the Windows Azure VM Extension 14 | # model which exists by design. By forking and running in the 15 | # background, the process does not get killed after the timeout 16 | # and yet still reports its progress through '.status' files to 17 | # the extension system. 18 | 19 | # First, report "transitioning" status through .status file before 20 | # returning from this script so that agent can see the file before 21 | # the main extension executable starts. Another workaround really. 22 | 23 | # status_file returns the .status file path we are supposed to write 24 | # by determining the highest sequence number from .settings files. 25 | status_file_path() { 26 | # normally we'd need to find this config_dir by parsing the 27 | # HandlerEnvironment.json, but hey we're in a bash script here, 28 | # so assume it's at ../config/.
29 | local config_dir=$(readlink -f "${SCRIPT_DIR}/../config") 30 | local status_dir=$(readlink -f "${SCRIPT_DIR}/../status") 31 | config_file=$(ls "$config_dir" | grep -E '^[0-9]+\.settings$' | sort -n | tail -n 1) 32 | status_file=$(echo "$config_file" | sed 's/settings/status/') 33 | readlink -f "$status_dir/$status_file" 34 | } 35 | 36 | write_status() { 37 | local timestamp="$(date --utc --iso-8601=seconds)" 38 | local status_file=$(status_file_path) 39 | echo "Writing status to $status_file." 40 | cat > "$status_file" <<- EOF 41 | [ 42 | { 43 | "version": 1, 44 | "timestampUTC": "$timestamp", 45 | "status": { 46 | "operation": "Enable Docker", 47 | "status": "transitioning", 48 | "formattedMessage": { 49 | "lang": "en", 50 | "message": "Enabling Docker" 51 | } 52 | } 53 | } 54 | ] 55 | EOF 56 | } 57 | 58 | 59 | write_status 60 | set -x 61 | nohup "$(readlink -f "$SCRIPT_DIR/../bin/docker-extension")" "$@" & 62 | -------------------------------------------------------------------------------- /testdata/B3364F39E3086E9AD0C67767348D7392D35BC176.crt: -------------------------------------------------------------------------------- 1 | Bag Attributes 2 | localKeyID: 01 00 00 00 3 | friendlyName: dockerextensioncoreos 4 | subject=/DC=Windows Azure Service Management for Extensions 5 | issuer=/DC=Windows Azure Service Management for Extensions 6 | -----BEGIN CERTIFICATE----- 7 | MIIDCjCCAfKgAwIBAgIQJAjSoLTBHbNMzpc1G+xmhDANBgkqhkiG9w0BAQUFADBB 8 | MT8wPQYKCZImiZPyLGQBGRYvV2luZG93cyBBenVyZSBTZXJ2aWNlIE1hbmFnZW1l 9 | bnQgZm9yIEV4dGVuc2lvbnMwHhcNMTUwNDIzMTc0MjUxWhcNMjAwNDIzMTc0MjUx 10 | WjBBMT8wPQYKCZImiZPyLGQBGRYvV2luZG93cyBBenVyZSBTZXJ2aWNlIE1hbmFn 11 | ZW1lbnQgZm9yIEV4dGVuc2lvbnMwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEK 12 | AoIBAQDIvy9+qVVLGHjWGi/UQQI9U716H8zjg8e0k2cvZEzqKqjLBTA213atcUlj 13 | 01ibP5T8kehX8yW9lj54Yb4u1sVOn5lG58QPH9/+NmlFkaRJuVgzOwOdi7hfw7y7 14 | WFQ2ntZ5l2Ysx9fvB0tVp/JLcC1/N25KS/qQ5hvK6DnS/2pLDM8s+klblbew18D7 15 | 
jx5nb4yvwVo+A98Yl0QToTyrf3/NxWhs3rl77RTPPxmUN8CXt0owq+AvbzHXRt+O 16 | w/3/l40nd4SgvV6xdL4xIoQQ3wUvilqsm0WxZmPvACfjTkaMG7BjmzlzY5W/OTx6 17 | UKF0aE9zHabjyvdYH3xWpIkYMCV7AgMBAAEwDQYJKoZIhvcNAQEFBQADggEBAKYD 18 | /Cfc4ZI4HKOh7CFX3h27fcoIAXU0D5u7aXHHL/FuosIEdxwbNMhlulYyMKuOgele 19 | rqkW1hQPkF+CSzVM2DDSAJOdMIGeFT5xBF05T+t+HDSTQnDGlu9Uo6stp8d+PQCd 20 | oizjxWaQGPOf+ptDAt18nxbthakOgQdYs6Ey4dfd/JIuy5nVIpjXzNuGkU6PUmC1 21 | td3dRNBit/aes6FEoW+fQKWLWgXv/b4ggloEuxLnmihq6zhG5z4aPCe6Y5Qxbwkj 22 | hpUN7MKIErYnt4xe9Mf/iT84dKq9+xriQzuCdiIhPjdlUrZ46X2GDfCQXZaMcHv7 23 | Qi3JgUhZ6TruxJCDV1A= 24 | -----END CERTIFICATE----- 25 | -------------------------------------------------------------------------------- /testdata/B3364F39E3086E9AD0C67767348D7392D35BC176.prv: -------------------------------------------------------------------------------- 1 | Bag Attributes 2 | localKeyID: 01 00 00 00 3 | Microsoft CSP Name: Microsoft Enhanced Cryptographic Provider v1.0 4 | Key Attributes 5 | X509v3 Key Usage: 10 6 | -----BEGIN PRIVATE KEY----- 7 | MIIEvgIBADANBgkqhkiG9w0BAQEFAASCBKgwggSkAgEAAoIBAQDIvy9+qVVLGHjW 8 | Gi/UQQI9U716H8zjg8e0k2cvZEzqKqjLBTA213atcUlj01ibP5T8kehX8yW9lj54 9 | Yb4u1sVOn5lG58QPH9/+NmlFkaRJuVgzOwOdi7hfw7y7WFQ2ntZ5l2Ysx9fvB0tV 10 | p/JLcC1/N25KS/qQ5hvK6DnS/2pLDM8s+klblbew18D7jx5nb4yvwVo+A98Yl0QT 11 | oTyrf3/NxWhs3rl77RTPPxmUN8CXt0owq+AvbzHXRt+Ow/3/l40nd4SgvV6xdL4x 12 | IoQQ3wUvilqsm0WxZmPvACfjTkaMG7BjmzlzY5W/OTx6UKF0aE9zHabjyvdYH3xW 13 | pIkYMCV7AgMBAAECggEAMn07Ucz/AcMm6Rpu+yBaktgT6LpEytgjCzyjkBzW53JR 14 | P7aPWMu6MocyoSPPvkQwkZGU8UHbNSKrlZVnwtoLh+nGIDo8al1m9cXzlia6Pjya 15 | fVATabVxasyNFEVz8MTkRnKYpRyiJ6EbBnEWFHkjSELFv3P6kiT9ynu7x2Cr+DHP 16 | gfrDbWoci7eOehnOrZRMgJhLYgHwKSUXn/aiZ2TBfAGxESnY2mP0p+SOuOvtT0KC 17 | ZSkDZS9A7hKOmhCYZuuIqllSmQoSOJ4Ot7S27RXkJrNcfVVl+c/IYWy6qvOKH0qb 18 | ani+5LnoR28gl1IKFMk7qFxBYb7fQSYGQrzak1gg4QKBgQDmCMh2QYAo/dIc3jrT 19 | k63kebHPmwkrANqJ5m6rk4Ny5qDMW0ilDuyTE6Zdbz9TXytj1SxjiFwoMK2HNiFy 20 | 
AWcQ9/9OQWwOhyRqORp0VtFhOVj27bSKKTpp9iCjCWtPv+yXgknoQeZiznvcJeui 21 | R4cjCbMq3CrbsWsznSHEDSO/SwKBgQDfaBe13pdt9lkWQiI5KYkPbtCQJT/oXoox 22 | RMEXKH3gkK2nyAwtrP5I+/xHsy3ZqBAfNOGWrgWx1SNoFc0kPKApGHfXKSZtdj3D 23 | WLDQKS54O3gtfA6Up3iGyjdAxHs+8ULTiEZom2Iwstd/0eRYk0mBP88xR0CrSHD7 24 | MmNUbHbkkQKBgQC68vxoD0enCNGQIJPSi7zRaI232ZoT1vfFBv9+db5ozUJ1kuJx 25 | MrXVuiyE1vqyOTA8TG1s3s924kjFEJsR2k33HnXhFY5KKE7R/WKglkZIK6E/3WPS 26 | vTr6DsvrdKZABAy1/W8vUQ950bIb8r2T/2m2chWoiZJTRdeH9PYSLVOWDQKBgQCg 27 | 6XTKhj1D8sBQC+piicwy1XpbmLjjQdT/j6oUaBHigdLSmoBLFeNB5l3btu87apXS 28 | JGCoC0gGwqHyAmHzy8cAUqNe0kDfbt8lqOgRxte1uf/how1NfSKO02/gsqSivVuI 29 | yQsNpX2f3fDiG+gA9HVpJvCxacTmmhLmm3B4CnfIQQKBgCfC+Po3M5uMi7zRiDrn 30 | 2cwSJiL6EdkK6fXnDlXGlI6bvVOcb79Vqkg6EC4/o0DG5iLXsTTw7m7sKY98+iJf 31 | GWjDPEjvvqFyScMBmbmNArGGptF3RAJAmwIpGjtcODHCMlC2y95jU4UoLetGR1F3 32 | +jFwJW9t131PgltaIPsHBMAv 33 | -----END PRIVATE KEY----- 34 | -------------------------------------------------------------------------------- /testdata/Extension/HandlerEnvironment.json: -------------------------------------------------------------------------------- 1 | [{ "name": "MSOpenTech.Extensions.DockerExtension", "seqNo": "0", "version": 1.0, "handlerEnvironment": { "logFolder": "/var/log/azure/MSOpenTech.Extensions.DockerExtension/0.6.0.0", "configFolder": "/var/lib/waagent/MSOpenTech.Extensions.DockerExtension-0.6.0.0/config", "statusFolder": "/var/lib/waagent/MSOpenTech.Extensions.DockerExtension-0.6.0.0/status", "heartbeatFile": "/var/lib/waagent/MSOpenTech.Extensions.DockerExtension-0.6.0.0/heartbeat.log"}}] -------------------------------------------------------------------------------- /testdata/Extension/HandlerManifest.json: -------------------------------------------------------------------------------- 1 | [{ 2 | "name": "DockerHandler", 3 | "version": 0.6, 4 | "handlerManifest": { 5 | "installCommand": "scripts/install.sh", 6 | "uninstallCommand": "scripts/uninstall.sh", 7 | "updateCommand": "scripts/update.sh", 8 | 
"enableCommand": "scripts/enable.sh", 9 | "disableCommand": "scripts/disable.sh", 10 | "rebootAfterInstall": false, 11 | "reportHeartbeat": false 12 | } 13 | }] 14 | -------------------------------------------------------------------------------- /testdata/Extension/config/1.settings: -------------------------------------------------------------------------------- 1 | { 2 | "runtimeSettings": [ 3 | { 4 | "handlerSettings": { 5 | "publicSettings": { 6 | "dockerport": "4243" 7 | }, 8 | "protectedSettings": "", 9 | "protectedSettingsCertThumbprint": "B3364F39E3086E9AD0C67767348D7392D35BC176" 10 | } 11 | } 12 | ] 13 | } 14 | -------------------------------------------------------------------------------- /testdata/Extension/config/2.settings: -------------------------------------------------------------------------------- 1 | { 2 | "runtimeSettings": [ 3 | { 4 | "handlerSettings": { 5 | "protectedSettingsCertThumbprint": "B3364F39E3086E9AD0C67767348D7392D35BC176", 6 | "protectedSettings": "MIIMJAYJKoZIhvcNAQcDoIIMFTCCDBECAQAxggFxMIIBbQIBADBVMEExPzA9BgoJkiaJk/IsZAEZFi9XaW5kb3dzIEF6dXJlIFNlcnZpY2UgTWFuYWdlbWVudCBmb3IgRXh0ZW5zaW9ucwIQJAjSoLTBHbNMzpc1G+xmhDANBgkqhkiG9w0BAQEFAASCAQBKTb+weVwFqhWqEIfaePk61aICM+hHtXyHX4gQptHzS65rs45j8K9f2IX/uvuZoIQXgylEeUSn9gnSUvZjJSbYJ4x4weqhe7TVsMA/+naHUP/zQCkbAbC6TjhuPlX6cHHWDgCilQSKI6Ry6pmovKmSBqL2AY/JUO/kInPECpz6fcU8Z9RqzVNK+vJwMdZ5NZS7ywgKCEy+VyDhY1VJ1Ckmt1qtOjkstv3N8a4fugunBJrG1t5vN2lbDlJWNYOgjzPtTXE4G7N/zw5t2n8epydXfun7crFYxV4faGK0+muFtC+MdWGt36NTbr5vhbVnHpByDxpzOSyr54IqFtpAaHulMIIKlQYJKoZIhvcNAQcBMBQGCCqGSIb3DQMHBAjN/F//fsylJ4CCCnBAJpTUxNVRDC2MTG3IePytxGHhTGB5cT5U3024FnUxWGSnRDjCSPAVBUFFcAOTQkDzJI8HwDlUno2KbkSR0cQ1cucK18n3uw22pXV30h5LpEZBsxlEIalbXcSnCgixD0bfv4vz1yoRtxSFeDNH5STOn9NMDQd4p1Z6sfMCmFF09d8j8vx1A2vu7rmYZR6nhrjdrCWUJzELC5XYulqFqh/558P0OnPgeuRcki7CZrniUifDllMoHJcZV1zVcyEGZiPFXwpG9sJe4c/49CbAmFjL8QMzoDDzFrB79HCKu1x/4ldZHMs01CA6Y+GPTZsAoAl0C4oSDkUFm/8jmh3NYJ1SgQxc2yo64Q5EMm9uY5agV788uCjTK7PMmUDstwyQjqm/s2kq+kiB8jv9+V7USoEeaUPgVdx
GI3bOGZRJVM+ROYtZTD6RVDH6BUImu1IhUcR8d7xU0s83xlpd7Z1TGAvlAa+8hdQDI4hywwWbpC8U2eSWr4dsExRj5QR943nXs4EHjybtydjGVWBYz/Ii2znLbpiX9O6Qx2Mueza6ka2oYT3Id09e8wfjkmwpVYHGV8cVw76Uwe+qOc8YNYC0FsLdKbOzM2YwEpXEt0v+Z4asG/6EpeFeUxzoSyj+HkjCcSlag3vaKpbv0cq1FuEj45r0upqkyC6St9qit6ikT7o2RiyJgO9h7w1/3ERGHkXoEgMzJjUdmhQF856qoL1IrcTkcaSFps+5Ce5bwOWr2G6yvrezneIwvapY9//Iem8QIkt8lQmFe0tns6KYhIvMUbSpH+DAUcX1QoSXOWLawkytbzOiQDQol7DW0yxlQfHvrpVYADebJFs3/EQw8HjR+0rI8rpypgcKZZ/8zjybn+1cxvvyYd0XOU9u1CT6k4K2CdoYckLB5/K2ri/i5IDpnsXFWLxB57XxVk1TkmPdQS87z9E6Nlzs+3Tf8ZEYwRKlVQ/CDj0DPbn9X7S2Y/UxxsmFqpXeTXTtPOXoLhyD72qS8EcdkV89K7q5zqduu1adZJoBW2bnVVeiEBrY80nTbV2nON3V8Onaa4HUsLIVSwNDCa72Vlen4nVwI8zJLIPawahJCS/UlXxolbTXHV3Sdo40YTUCB6wCaZ6MmevpifgYWVR8tzPAy+VvHTGXnQC87tXd+hmaKrZSzb7S67TSqTW+ZYJ+HPGTeiPJs+yF//uC3Y0Y4i//KTzR1arCsaUOtawtg2bsUPrb2Dys+99KNirEzRbIAHWf/pwsNrohkl7jsbC7/d8QhnHpp8Sq4/QCyI8fYKuFQ1MA/SKPy+CorAfIYaizcxvxa4mBmvZwi9DaAzrFryeJMrHtzf953z/lrMOp0r9pKG/ES0fY52/7LhdbIiLTC4saiE6m2O68QrzLHy0M1iw7YWXH4jowfOooNKFQTZm7uajo+E/yj4+7e8GsK5sxeXPydiv9s2o9KUmbapRmVw4QMpAVcz6Qf71DRAtFqaYjHJ9970wyuVLkDJVeBi15CYcqCOjF3QrFWQfa6LYTja8jQwa42sPpfxJZpDdiiDg4FOjUP+nZjaAQC4WRLZ0ju5Blh1xUpDXPg1jql5UqHzSVimqRuCi9Ksf5Sg8kceOErCai3NgdqRy1RsVnrm2lv8xAYNfEhkN9XaT1yylvIFHzFQqfUNTzzySU5188hh6uB9VbB6xuPuDbz+QwSFLV46vcoW/+eCneUJ8yPwGLFpbkDehkEgaBLSzw/Xy10ri0TXmMZs4CWDJmRoCwCEqdA/OkMWA0Tscapi8J3OxDgqofA75ilP2WcFU2wxiebtU2YyI10U86Uqwsi3lL06MGJtRfBQZOe8tMxByfB7PLiM4w1c0C7gKJbfn6dqATVfCbhniuaB7TxcBncya5+6Trs1HZQEP2jt1eHfsMyoxKIx3Azt+y6I6Nne6s/M1/B5Sw58EZ2XcTi3XLlyQ6EWLI3ujApE4znAFRTs+UDIQrCUSNB49QezMvxxZ9Xpw2Bqch0OTfmo45CUy0KidgLrwUoS9iG9JHHTtybytVzMndHt5TKpdHuYdrfCODJXGVxNylvTxzkG4EMwR67ya/oWSmx+oQ4nXYPXVwUGhK1w7zrfpd+guEPVTXzwERKOWO6WLnfoPn66/ZJIexEMoQcFctnmRZF8r2e66yQzrWuxxgCU648bEDIrXp72Wpbj5fQnNWMREu+YkKpszO4sEKz7/9XUycd0IZZ1iEMX2cf0R7SeMnScNNnAe5K2tD6DcmyMwTrtg/VRGhH3+c+g/49nTtJinK6dQLQ26A7DR1KUaqIa3Kz5o3MxtoLAYHVyR5w4xppq1sPXNZOISI269Ep2p23LZGxZJ+GJnJGNQicyTPb0Xu2SbWF5rPGWV16gC6lho88B6jvMsgefAzMDeZJJoMDH2qddJLlZgEjspOX6a
pL+/5Qm7GGXZ28NHqR1R5c2epHaE6NVBBpKqECnt0oJXVtxnITcN5MZHPX5MCSJ0ztatES+tNeeAWS5hKUAxrL7kStGY63cuCSSfpSs/ERafT2TMhsMDNsk55T9B9NPvGifUFpaxYMR9ox2BYC1isDEBoDUQfL8axm1kiv9gRJhO0e2ytpRxdAZHMKCS20M4ccpOn16PPKF/Z0AQwC7eR0WVboNtnbRdmocQE1ZXECtHQjXAV3JuLk0NtG438yrnT0P86xw9eF/UXYsAKmiFtB5pfWaEDuMF9VnrbQ86lsBe4y5Uilgpbd3VVgasOFM7ce7LPsjQ9vyisPBd3FVn1Voats6vo6Hla7X+5XdyLYPiCyA/FbhwX9RVkd4TVv9nyNFA2zKH4CDZJ+WMlJK8UJodgZv41O2MCCGjYCflqdVi3wWVk9PfZAJTMmgSIk2MXpjB/psH9vCXngNQL5CzTC+NYQK2uOEjejsFA6oVVw6XpaOs+9dEmagDmCJ0m0B5g6vx7KtFDPnxQ19DLWKJrIB47Eb5lwwWdvp6aN01AUIm5qunvNDq3ua/JlN0AJPncCQm0ilzwJxjMRNmO1yHXPGq047XGRuSObDPMSGzCiJalxPcgSJWLa+x8/GUfdZHXhd5bgcUS1do2iLpKJWGmvIzDoNhVlR+Y4JhoLEI8mrgFIZX1pUYCbsGY3wXrJLvm3UQKG7i4VBGLF2On/eopkFyEhX7xTp779md784jWEWOYHHhvIUXt51EHY/dB2DreOwiZBPKKH2IjBLW7fZjKYvEsVG0dsFbfAJPjMSzqPSJujTewhrSiyM4fcceIRL4SqW648/f6PyIss6ZSrfeEhHt5sgDYi3ZNKqA6z7IsXVDLWG2NraNtfdn9KdFoUqYJzm45vApEOOnNdx/EMm64Tdxt+vtboCD9BA+m9czo8HrSlHv655QSPu/gQ4SzHFVgseJA+kM5rTRYw9tbiLN7i5Sm9Xbd71lrhutZzpJUvskhU0JANL1rzREq71OhDYhTiDY+Zt+IEbTgAl/KQCZ6xu7zTypT6IkKos3BNVVlmFyhXfPxv8fnSgHB1ZpSUhwu3XWNEZ/WlyiV3kLoTZgC5+6C3VqxQnMuJRaKptiBbtCCj3xStH2Xl1sq4NEKwg==", 7 | "publicSettings": { 8 | "dockerport": "4243", 9 | "composeup": { 10 | "db": { 11 | "image": "postgres" 12 | }, 13 | "web": { 14 | "ports": [ 15 | "8000:8000" 16 | ], 17 | "links": [ 18 | "db" 19 | ], 20 | "image": "myweb" 21 | } 22 | } 23 | } 24 | } 25 | } 26 | ] 27 | } 28 | -------------------------------------------------------------------------------- /testdata/Extension/config/HandlerState: -------------------------------------------------------------------------------- 1 | Installed -------------------------------------------------------------------------------- /testdata/Extension/status/0.status: -------------------------------------------------------------------------------- 1 | [{ 2 | "version": 1.0, 3 | "timestampUTC": "2015-04-23T17:45:09Z", 4 | "status" : { 5 | "name": "Enable Docker", 6 | "operation": "Enabling 
Docker", 7 | "status": "transitioning", 8 | "formattedMessage": { 9 | "lang": "en", 10 | "message": "Enabling Docker." 11 | } 12 | } 13 | }] 14 | -------------------------------------------------------------------------------- /testdata/HandlerEnvironment.json: -------------------------------------------------------------------------------- 1 | [ 2 | { 3 | "handlerEnvironment": { 4 | "heartbeatFile": "/var/lib/waagent/Extension/heartbeat.log", 5 | "statusFolder": "/var/lib/waagent/Extension/status", 6 | "configFolder": "/var/lib/waagent/Extension/config", 7 | "logFolder": "/var/log/azure/Extension/0.6.0.0" 8 | }, 9 | "version": 1, 10 | "seqNo": "0", 11 | "name": "MSOpenTech.Extensions.DockerExtension" 12 | } 13 | ] 14 | -------------------------------------------------------------------------------- /testdata/lsb-release: -------------------------------------------------------------------------------- 1 | DISTRIB_ID=Ubuntu 2 | DISTRIB_RELEASE=14.04 3 | DISTRIB_CODENAME=trusty 4 | DISTRIB_DESCRIPTION="Ubuntu 14.04.2 LTS" 5 | -------------------------------------------------------------------------------- /testdata/ovf-env.xml: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 1.0LinuxProvisioningConfigurationdockerextensioncoreosazureuser*false 5 | 6 | 1.0kms.core.windows.nettrueWin7_Win8_IaaS_rd_art_stable_141201-1241_GuestAgentPackage.zipfalse 7 | -------------------------------------------------------------------------------- /testdata/sampleProtectedSettings.json: -------------------------------------------------------------------------------- 1 | { 2 | "server-key": 
"LS0tLS1CRUdJTiBSU0EgUFJJVkFURSBLRVktLS0tLQpNSUlCT3dJQkFBSkJBTmh2K1dyQ3c2aW1TNzl4WGxabW84RTRlc1RUeThWbURIWkt3TXJOeDFGeTcvTCtwUEdGCnlUcXhsTmE0ZkZ2TVZXU2g2S3BhSCt1Tk1rNHdVS3pxa1cwQ0F3RUFBUUpCQUxmbXBMRnUzWnE5K2R5bnNqV04KcnBRdHJqcnhqdEdpUFBLQjM4QXlDcXFMZzN2MEdiTzh2R2tLOHlkei9ROXI4V2lvbkRZbmVWamRJSEZScHZMMApjaTBDSVFEdnIvRXJZbU1qT245THhyZnFLV2czLzZvdmZaeVBWcG5zTFgwWnI4VDFWd0loQU9jcTgzSlM0RzJ0CkFjQThkUlVYOG94OVhZcENzYWo3a1p1VWdDOEUydERiQWlBclJQRGRxeUpJZnN2eUt3cVNqQzBUUnVVN000cnEKMlhCcUVZQXUvZXNwZXdJaEFNVlBNYy9JMFdxTG1zejhUV3NkcjVjZWdqSzZ6bTFQbkc3UkI4QWFRcksxQWlCSQpJSVowYURiaVFGL2poS3FyRzY5bG5FTVNvSGxKdEpTeVZnNVZ5Tllqbmc9PQotLS0tLUVORCBSU0EgUFJJVkFURSBLRVktLS0tLQo=", 3 | "server-cert": "LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUJqRENDQVRZQ0FRRXdEUVlKS29aSWh2Y05BUUVGQlFBd1VURUxNQWtHQTFVRUJoTUNRVlV4RXpBUkJnTlYKQkFnVENsTnZiV1V0VTNSaGRHVXhJVEFmQmdOVkJBb1RHRWx1ZEdWeWJtVjBJRmRwWkdkcGRITWdVSFI1SUV4MApaREVLTUFnR0ExVUVBeFFCS2pBZUZ3MHhOVEEwTVRNeU1UQXpNRGxhRncweE5qQTBNVEl5TVRBek1EbGFNRkV4CkN6QUpCZ05WQkFZVEFrRlZNUk13RVFZRFZRUUlFd3BUYjIxbExWTjBZWFJsTVNFd0h3WURWUVFLRXhoSmJuUmwKY201bGRDQlhhV1JuYVhSeklGQjBlU0JNZEdReENqQUlCZ05WQkFNVUFTb3dYREFOQmdrcWhraUc5dzBCQVFFRgpBQU5MQURCSUFrRUEyRy81YXNMRHFLWkx2M0ZlVm1handUaDZ4TlBMeFdZTWRrckF5czNIVVhMdjh2Nms4WVhKCk9yR1Uxcmg4Vzh4VlpLSG9xbG9mNjQweVRqQlFyT3FSYlFJREFRQUJNQTBHQ1NxR1NJYjNEUUVCQlFVQUEwRUEKSEVYRlNxMG1pLzN5c2pMM0NDT0hZUVNXNXNhZk1WZWhubVVGQksrc0diY2ZnWHVtZzc1NkQrUk1EY3FnQWx3QQpVM0dDK1c1YlpCa3hDOGx6VmFTWEd3PT0KLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQo=", 4 | "ca": 
"LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUNVRENDQWZxZ0F3SUJBZ0lKQU1CMmJXMXVTWGFkTUEwR0NTcUdTSWIzRFFFQkJRVUFNRkV4Q3pBSkJnTlYKQkFZVEFrRlZNUk13RVFZRFZRUUlFd3BUYjIxbExWTjBZWFJsTVNFd0h3WURWUVFLRXhoSmJuUmxjbTVsZENCWAphV1JuYVhSeklGQjBlU0JNZEdReENqQUlCZ05WQkFNVUFTb3dIaGNOTVRVd05ERXpNakV3TXpBNVdoY05NVFl3Ck5ERXlNakV3TXpBNVdqQlJNUXN3Q1FZRFZRUUdFd0pCVlRFVE1CRUdBMVVFQ0JNS1UyOXRaUzFUZEdGMFpURWgKTUI4R0ExVUVDaE1ZU1c1MFpYSnVaWFFnVjJsa1oybDBjeUJRZEhrZ1RIUmtNUW93Q0FZRFZRUURGQUVxTUZ3dwpEUVlKS29aSWh2Y05BUUVCQlFBRFN3QXdTQUpCQU5NbzVVTjY4amc0T2ZhSVR0YnJlZEFPTWE5SzJwSzQrZUF4CmcyMFlNZVI3UnFWeDhhdWpEQkFpRkV6TTJxeDBzZ2taU0l3dmhHNExaRFJlUWNTdkMxc0NBd0VBQWFPQnREQ0IKc1RBZEJnTlZIUTRFRmdRVWNlTjBCMHMvbUplaXZwUnUvSWp2b1RZWFZ5WXdnWUVHQTFVZEl3UjZNSGlBRkhIagpkQWRMUDVpWG9yNlVidnlJNzZFMkYxY21vVldrVXpCUk1Rc3dDUVlEVlFRR0V3SkJWVEVUTUJFR0ExVUVDQk1LClUyOXRaUzFUZEdGMFpURWhNQjhHQTFVRUNoTVlTVzUwWlhKdVpYUWdWMmxrWjJsMGN5QlFkSGtnVEhSa01Rb3cKQ0FZRFZRUURGQUVxZ2drQXdIWnRiVzVKZHAwd0RBWURWUjBUQkFVd0F3RUIvekFOQmdrcWhraUc5dzBCQVFVRgpBQU5CQU14Rk95ZnV0RDhjdWJDbUJJMFV3MWo1bXFBdUtqNUJUL2FLTFArWUNTU2g3K0FORTd4dFRSVFgyMklkCllOdHhzaEF1Z2RDU05yNnhjTFc0VnJscUZNbz0KLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQo=" 5 | } 6 | -------------------------------------------------------------------------------- /vendor/github.com/cloudfoundry-incubator/candiedyaml/.gitignore: -------------------------------------------------------------------------------- 1 | *.coverprofile 2 | -------------------------------------------------------------------------------- /vendor/github.com/cloudfoundry-incubator/candiedyaml/.travis.yml: -------------------------------------------------------------------------------- 1 | language: go 2 | 3 | go: 4 | - 1.4.1 5 | 6 | install: 7 | - go get -t -v ./... 
8 | - go install github.com/onsi/ginkgo/ginkgo 9 | 10 | script: 11 | - export PATH=$HOME/gopath/bin:$PATH 12 | - ginkgo -r -failOnPending -randomizeAllSpecs -race 13 | -------------------------------------------------------------------------------- /vendor/github.com/cloudfoundry-incubator/candiedyaml/LICENSE: -------------------------------------------------------------------------------- 1 | 2 | Apache License 3 | Version 2.0, January 2004 4 | http://www.apache.org/licenses/ 5 | 6 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 7 | 8 | 1. Definitions. 9 | 10 | "License" shall mean the terms and conditions for use, reproduction, 11 | and distribution as defined by Sections 1 through 9 of this document. 12 | 13 | "Licensor" shall mean the copyright owner or entity authorized by 14 | the copyright owner that is granting the License. 15 | 16 | "Legal Entity" shall mean the union of the acting entity and all 17 | other entities that control, are controlled by, or are under common 18 | control with that entity. For the purposes of this definition, 19 | "control" means (i) the power, direct or indirect, to cause the 20 | direction or management of such entity, whether by contract or 21 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 22 | outstanding shares, or (iii) beneficial ownership of such entity. 23 | 24 | "You" (or "Your") shall mean an individual or Legal Entity 25 | exercising permissions granted by this License. 26 | 27 | "Source" form shall mean the preferred form for making modifications, 28 | including but not limited to software source code, documentation 29 | source, and configuration files. 30 | 31 | "Object" form shall mean any form resulting from mechanical 32 | transformation or translation of a Source form, including but 33 | not limited to compiled object code, generated documentation, 34 | and conversions to other media types. 
35 | 36 | "Work" shall mean the work of authorship, whether in Source or 37 | Object form, made available under the License, as indicated by a 38 | copyright notice that is included in or attached to the work 39 | (an example is provided in the Appendix below). 40 | 41 | "Derivative Works" shall mean any work, whether in Source or Object 42 | form, that is based on (or derived from) the Work and for which the 43 | editorial revisions, annotations, elaborations, or other modifications 44 | represent, as a whole, an original work of authorship. For the purposes 45 | of this License, Derivative Works shall not include works that remain 46 | separable from, or merely link (or bind by name) to the interfaces of, 47 | the Work and Derivative Works thereof. 48 | 49 | "Contribution" shall mean any work of authorship, including 50 | the original version of the Work and any modifications or additions 51 | to that Work or Derivative Works thereof, that is intentionally 52 | submitted to Licensor for inclusion in the Work by the copyright owner 53 | or by an individual or Legal Entity authorized to submit on behalf of 54 | the copyright owner. For the purposes of this definition, "submitted" 55 | means any form of electronic, verbal, or written communication sent 56 | to the Licensor or its representatives, including but not limited to 57 | communication on electronic mailing lists, source code control systems, 58 | and issue tracking systems that are managed by, or on behalf of, the 59 | Licensor for the purpose of discussing and improving the Work, but 60 | excluding communication that is conspicuously marked or otherwise 61 | designated in writing by the copyright owner as "Not a Contribution." 62 | 63 | "Contributor" shall mean Licensor and any individual or Legal Entity 64 | on behalf of whom a Contribution has been received by Licensor and 65 | subsequently incorporated within the Work. 66 | 67 | 2. Grant of Copyright License. 
Subject to the terms and conditions of 68 | this License, each Contributor hereby grants to You a perpetual, 69 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 70 | copyright license to reproduce, prepare Derivative Works of, 71 | publicly display, publicly perform, sublicense, and distribute the 72 | Work and such Derivative Works in Source or Object form. 73 | 74 | 3. Grant of Patent License. Subject to the terms and conditions of 75 | this License, each Contributor hereby grants to You a perpetual, 76 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 77 | (except as stated in this section) patent license to make, have made, 78 | use, offer to sell, sell, import, and otherwise transfer the Work, 79 | where such license applies only to those patent claims licensable 80 | by such Contributor that are necessarily infringed by their 81 | Contribution(s) alone or by combination of their Contribution(s) 82 | with the Work to which such Contribution(s) was submitted. If You 83 | institute patent litigation against any entity (including a 84 | cross-claim or counterclaim in a lawsuit) alleging that the Work 85 | or a Contribution incorporated within the Work constitutes direct 86 | or contributory patent infringement, then any patent licenses 87 | granted to You under this License for that Work shall terminate 88 | as of the date such litigation is filed. 89 | 90 | 4. Redistribution. 
You may reproduce and distribute copies of the 91 | Work or Derivative Works thereof in any medium, with or without 92 | modifications, and in Source or Object form, provided that You 93 | meet the following conditions: 94 | 95 | (a) You must give any other recipients of the Work or 96 | Derivative Works a copy of this License; and 97 | 98 | (b) You must cause any modified files to carry prominent notices 99 | stating that You changed the files; and 100 | 101 | (c) You must retain, in the Source form of any Derivative Works 102 | that You distribute, all copyright, patent, trademark, and 103 | attribution notices from the Source form of the Work, 104 | excluding those notices that do not pertain to any part of 105 | the Derivative Works; and 106 | 107 | (d) If the Work includes a "NOTICE" text file as part of its 108 | distribution, then any Derivative Works that You distribute must 109 | include a readable copy of the attribution notices contained 110 | within such NOTICE file, excluding those notices that do not 111 | pertain to any part of the Derivative Works, in at least one 112 | of the following places: within a NOTICE text file distributed 113 | as part of the Derivative Works; within the Source form or 114 | documentation, if provided along with the Derivative Works; or, 115 | within a display generated by the Derivative Works, if and 116 | wherever such third-party notices normally appear. The contents 117 | of the NOTICE file are for informational purposes only and 118 | do not modify the License. You may add Your own attribution 119 | notices within Derivative Works that You distribute, alongside 120 | or as an addendum to the NOTICE text from the Work, provided 121 | that such additional attribution notices cannot be construed 122 | as modifying the License. 
123 | 124 | You may add Your own copyright statement to Your modifications and 125 | may provide additional or different license terms and conditions 126 | for use, reproduction, or distribution of Your modifications, or 127 | for any such Derivative Works as a whole, provided Your use, 128 | reproduction, and distribution of the Work otherwise complies with 129 | the conditions stated in this License. 130 | 131 | 5. Submission of Contributions. Unless You explicitly state otherwise, 132 | any Contribution intentionally submitted for inclusion in the Work 133 | by You to the Licensor shall be under the terms and conditions of 134 | this License, without any additional terms or conditions. 135 | Notwithstanding the above, nothing herein shall supersede or modify 136 | the terms of any separate license agreement you may have executed 137 | with Licensor regarding such Contributions. 138 | 139 | 6. Trademarks. This License does not grant permission to use the trade 140 | names, trademarks, service marks, or product names of the Licensor, 141 | except as required for reasonable and customary use in describing the 142 | origin of the Work and reproducing the content of the NOTICE file. 143 | 144 | 7. Disclaimer of Warranty. Unless required by applicable law or 145 | agreed to in writing, Licensor provides the Work (and each 146 | Contributor provides its Contributions) on an "AS IS" BASIS, 147 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 148 | implied, including, without limitation, any warranties or conditions 149 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 150 | PARTICULAR PURPOSE. You are solely responsible for determining the 151 | appropriateness of using or redistributing the Work and assume any 152 | risks associated with Your exercise of permissions under this License. 153 | 154 | 8. Limitation of Liability. 
In no event and under no legal theory, 155 | whether in tort (including negligence), contract, or otherwise, 156 | unless required by applicable law (such as deliberate and grossly 157 | negligent acts) or agreed to in writing, shall any Contributor be 158 | liable to You for damages, including any direct, indirect, special, 159 | incidental, or consequential damages of any character arising as a 160 | result of this License or out of the use or inability to use the 161 | Work (including but not limited to damages for loss of goodwill, 162 | work stoppage, computer failure or malfunction, or any and all 163 | other commercial damages or losses), even if such Contributor 164 | has been advised of the possibility of such damages. 165 | 166 | 9. Accepting Warranty or Additional Liability. While redistributing 167 | the Work or Derivative Works thereof, You may choose to offer, 168 | and charge a fee for, acceptance of support, warranty, indemnity, 169 | or other liability obligations and/or rights consistent with this 170 | License. However, in accepting such obligations, You may act only 171 | on Your own behalf and on Your sole responsibility, not on behalf 172 | of any other Contributor, and only if You agree to indemnify, 173 | defend, and hold each Contributor harmless for any liability 174 | incurred by, or claims asserted against, such Contributor by reason 175 | of your accepting any such warranty or additional liability. 176 | 177 | END OF TERMS AND CONDITIONS 178 | 179 | APPENDIX: How to apply the Apache License to your work. 180 | 181 | To apply the Apache License to your work, attach the following 182 | boilerplate notice, with the fields enclosed by brackets "[]" 183 | replaced with your own identifying information. (Don't include 184 | the brackets!) The text should be enclosed in the appropriate 185 | comment syntax for the file format. 
We also recommend that a 186 | file or class name and description of purpose be included on the 187 | same "printed page" as the copyright notice for easier 188 | identification within third-party archives. 189 | 190 | Copyright [yyyy] [name of copyright owner] 191 | 192 | Licensed under the Apache License, Version 2.0 (the "License"); 193 | you may not use this file except in compliance with the License. 194 | You may obtain a copy of the License at 195 | 196 | http://www.apache.org/licenses/LICENSE-2.0 197 | 198 | Unless required by applicable law or agreed to in writing, software 199 | distributed under the License is distributed on an "AS IS" BASIS, 200 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 201 | See the License for the specific language governing permissions and 202 | limitations under the License. 203 | -------------------------------------------------------------------------------- /vendor/github.com/cloudfoundry-incubator/candiedyaml/README.md: -------------------------------------------------------------------------------- 1 | [![Build Status](https://travis-ci.org/cloudfoundry-incubator/candiedyaml.svg)](https://travis-ci.org/cloudfoundry-incubator/candiedyaml) 2 | 3 | candiedyaml 4 | =========== 5 | 6 | YAML for Go 7 | 8 | A YAML 1.1 parser with support for YAML 1.2 features 9 | 10 | Usage 11 | ----- 12 | 13 | ```go 14 | package myApp 15 | 16 | import ( 17 | "github.com/cloudfoundry-incubator/candiedyaml" 18 | "fmt" 19 | "os" 20 | ) 21 | 22 | func main() { 23 | file, err := os.Open("path/to/some/file.yml") 24 | if err != nil { 25 | println("File does not exist:", err.Error()) 26 | os.Exit(1) 27 | } 28 | 29 | document := new(interface{}) 30 | decoder := candiedyaml.NewDecoder(file) 31 | err = decoder.Decode(document) 32 | 33 | if err != nil { 34 | println("Failed to decode document:", err.Error()) 35 | } 36 | 37 | println("parsed yml into interface:", fmt.Sprintf("%#v", document)) 38 | 39 | fileToWrite, err := 
os.Create("path/to/some/new/file.yml") 40 | if err != nil { 41 | println("Failed to open file for writing:", err.Error()) 42 | os.Exit(1) 43 | } 44 | 45 | encoder := candiedyaml.NewEncoder(fileToWrite) 46 | err = encoder.Encode(document) 47 | 48 | if err != nil { 49 | println("Failed to encode document:", err.Error()) 50 | os.Exit(1) 51 | } 52 | 53 | return 54 | } 55 | ``` 56 | -------------------------------------------------------------------------------- /vendor/github.com/cloudfoundry-incubator/candiedyaml/encode.go: -------------------------------------------------------------------------------- 1 | /* 2 | Licensed under the Apache License, Version 2.0 (the "License"); 3 | you may not use this file except in compliance with the License. 4 | You may obtain a copy of the License at 5 | 6 | http://www.apache.org/licenses/LICENSE-2.0 7 | 8 | Unless required by applicable law or agreed to in writing, software 9 | distributed under the License is distributed on an "AS IS" BASIS, 10 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 11 | See the License for the specific language governing permissions and 12 | limitations under the License. 
13 | */ 14 | 15 | package candiedyaml 16 | 17 | import ( 18 | "bytes" 19 | "encoding/base64" 20 | "io" 21 | "math" 22 | "reflect" 23 | "regexp" 24 | "sort" 25 | "strconv" 26 | "time" 27 | ) 28 | 29 | var ( 30 | timeTimeType = reflect.TypeOf(time.Time{}) 31 | marshalerType = reflect.TypeOf(new(Marshaler)).Elem() 32 | numberType = reflect.TypeOf(Number("")) 33 | nonPrintable = regexp.MustCompile("[^\t\n\r\u0020-\u007E\u0085\u00A0-\uD7FF\uE000-\uFFFD]") 34 | multiline = regexp.MustCompile("\n|\u0085|\u2028|\u2029") 35 | 36 | shortTags = map[string]string{ 37 | yaml_NULL_TAG: "!!null", 38 | yaml_BOOL_TAG: "!!bool", 39 | yaml_STR_TAG: "!!str", 40 | yaml_INT_TAG: "!!int", 41 | yaml_FLOAT_TAG: "!!float", 42 | yaml_TIMESTAMP_TAG: "!!timestamp", 43 | yaml_SEQ_TAG: "!!seq", 44 | yaml_MAP_TAG: "!!map", 45 | yaml_BINARY_TAG: "!!binary", 46 | } 47 | ) 48 | 49 | type Marshaler interface { 50 | MarshalYAML() (tag string, value interface{}, err error) 51 | } 52 | 53 | // An Encoder writes YAML documents to an output stream. 54 | type Encoder struct { 55 | w io.Writer 56 | emitter yaml_emitter_t 57 | event yaml_event_t 58 | flow bool 59 | err error 60 | } 61 | 62 | func Marshal(v interface{}) ([]byte, error) { 63 | b := bytes.Buffer{} 64 | e := NewEncoder(&b) 65 | err := e.Encode(v) 66 | return b.Bytes(), err 67 | } 68 | 69 | // NewEncoder returns a new encoder that writes to w.
70 | func NewEncoder(w io.Writer) *Encoder { 71 | e := &Encoder{w: w} 72 | yaml_emitter_initialize(&e.emitter) 73 | yaml_emitter_set_output_writer(&e.emitter, e.w) 74 | yaml_stream_start_event_initialize(&e.event, yaml_UTF8_ENCODING) 75 | e.emit() 76 | yaml_document_start_event_initialize(&e.event, nil, nil, true) 77 | e.emit() 78 | 79 | return e 80 | } 81 | 82 | func (e *Encoder) Encode(v interface{}) (err error) { 83 | defer recovery(&err) 84 | 85 | if e.err != nil { 86 | return e.err 87 | } 88 | 89 | e.marshal("", reflect.ValueOf(v), true) 90 | 91 | yaml_document_end_event_initialize(&e.event, true) 92 | e.emit() 93 | e.emitter.open_ended = false 94 | yaml_stream_end_event_initialize(&e.event) 95 | e.emit() 96 | 97 | return nil 98 | } 99 | 100 | func (e *Encoder) emit() { 101 | if !yaml_emitter_emit(&e.emitter, &e.event) { 102 | panic("bad emit") 103 | } 104 | } 105 | 106 | func (e *Encoder) marshal(tag string, v reflect.Value, allowAddr bool) { 107 | vt := v.Type() 108 | 109 | if vt.Implements(marshalerType) { 110 | e.emitMarshaler(tag, v) 111 | return 112 | } 113 | 114 | if vt.Kind() != reflect.Ptr && allowAddr { 115 | if reflect.PtrTo(vt).Implements(marshalerType) { 116 | e.emitAddrMarshaler(tag, v) 117 | return 118 | } 119 | } 120 | 121 | switch v.Kind() { 122 | case reflect.Interface: 123 | if v.IsNil() { 124 | e.emitNil() 125 | } else { 126 | e.marshal(tag, v.Elem(), allowAddr) 127 | } 128 | case reflect.Map: 129 | e.emitMap(tag, v) 130 | case reflect.Ptr: 131 | if v.IsNil() { 132 | e.emitNil() 133 | } else { 134 | e.marshal(tag, v.Elem(), true) 135 | } 136 | case reflect.Struct: 137 | e.emitStruct(tag, v) 138 | case reflect.Slice: 139 | e.emitSlice(tag, v) 140 | case reflect.String: 141 | e.emitString(tag, v) 142 | case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: 143 | e.emitInt(tag, v) 144 | case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: 145 | e.emitUint(tag, v) 146 | case 
reflect.Float32, reflect.Float64: 147 | e.emitFloat(tag, v) 148 | case reflect.Bool: 149 | e.emitBool(tag, v) 150 | default: 151 | panic("Can't marshal type yet: " + v.Type().String()) 152 | } 153 | } 154 | 155 | func (e *Encoder) emitMap(tag string, v reflect.Value) { 156 | e.mapping(tag, func() { 157 | var keys stringValues = v.MapKeys() 158 | sort.Sort(keys) 159 | for _, k := range keys { 160 | e.marshal("", k, true) 161 | e.marshal("", v.MapIndex(k), true) 162 | } 163 | }) 164 | } 165 | 166 | func (e *Encoder) emitStruct(tag string, v reflect.Value) { 167 | if v.Type() == timeTimeType { 168 | e.emitTime(tag, v) 169 | return 170 | } 171 | 172 | fields := cachedTypeFields(v.Type()) 173 | 174 | e.mapping(tag, func() { 175 | for _, f := range fields { 176 | fv := fieldByIndex(v, f.index) 177 | if !fv.IsValid() || f.omitEmpty && isEmptyValue(fv) { 178 | continue 179 | } 180 | 181 | e.marshal("", reflect.ValueOf(f.name), true) 182 | e.flow = f.flow 183 | e.marshal("", fv, true) 184 | } 185 | }) 186 | } 187 | 188 | func (e *Encoder) emitTime(tag string, v reflect.Value) { 189 | t := v.Interface().(time.Time) 190 | bytes, _ := t.MarshalText() 191 | e.emitScalar(string(bytes), "", tag, yaml_PLAIN_SCALAR_STYLE) 192 | } 193 | 194 | func isEmptyValue(v reflect.Value) bool { 195 | switch v.Kind() { 196 | case reflect.Array, reflect.Map, reflect.Slice, reflect.String: 197 | return v.Len() == 0 198 | case reflect.Bool: 199 | return !v.Bool() 200 | case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: 201 | return v.Int() == 0 202 | case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: 203 | return v.Uint() == 0 204 | case reflect.Float32, reflect.Float64: 205 | return v.Float() == 0 206 | case reflect.Interface, reflect.Ptr: 207 | return v.IsNil() 208 | } 209 | return false 210 | } 211 | 212 | func (e *Encoder) mapping(tag string, f func()) { 213 | implicit := tag == "" 214 | style := yaml_BLOCK_MAPPING_STYLE 
215 | if e.flow { 216 | e.flow = false 217 | style = yaml_FLOW_MAPPING_STYLE 218 | } 219 | yaml_mapping_start_event_initialize(&e.event, nil, []byte(tag), implicit, style) 220 | e.emit() 221 | 222 | f() 223 | 224 | yaml_mapping_end_event_initialize(&e.event) 225 | e.emit() 226 | } 227 | 228 | func (e *Encoder) emitSlice(tag string, v reflect.Value) { 229 | if v.Type() == byteSliceType { 230 | e.emitBase64(tag, v) 231 | return 232 | } 233 | 234 | implicit := tag == "" 235 | style := yaml_BLOCK_SEQUENCE_STYLE 236 | if e.flow { 237 | e.flow = false 238 | style = yaml_FLOW_SEQUENCE_STYLE 239 | } 240 | yaml_sequence_start_event_initialize(&e.event, nil, []byte(tag), implicit, style) 241 | e.emit() 242 | 243 | n := v.Len() 244 | for i := 0; i < n; i++ { 245 | e.marshal("", v.Index(i), true) 246 | } 247 | 248 | yaml_sequence_end_event_initialize(&e.event) 249 | e.emit() 250 | } 251 | 252 | func (e *Encoder) emitBase64(tag string, v reflect.Value) { 253 | if v.IsNil() { 254 | e.emitNil() 255 | return 256 | } 257 | 258 | s := v.Bytes() 259 | 260 | dst := make([]byte, base64.StdEncoding.EncodedLen(len(s))) 261 | 262 | base64.StdEncoding.Encode(dst, s) 263 | e.emitScalar(string(dst), "", yaml_BINARY_TAG, yaml_DOUBLE_QUOTED_SCALAR_STYLE) 264 | } 265 | 266 | func (e *Encoder) emitString(tag string, v reflect.Value) { 267 | var style yaml_scalar_style_t 268 | s := v.String() 269 | 270 | if nonPrintable.MatchString(s) { 271 | e.emitBase64(tag, v) 272 | return 273 | } 274 | 275 | if v.Type() == numberType { 276 | style = yaml_PLAIN_SCALAR_STYLE 277 | } else { 278 | event := yaml_event_t{ 279 | implicit: true, 280 | value: []byte(s), 281 | } 282 | 283 | rtag, _ := resolveInterface(event, false) 284 | if tag == "" && rtag != yaml_STR_TAG { 285 | style = yaml_DOUBLE_QUOTED_SCALAR_STYLE 286 | } else if multiline.MatchString(s) { 287 | style = yaml_LITERAL_SCALAR_STYLE 288 | } else { 289 | style = yaml_PLAIN_SCALAR_STYLE 290 | } 291 | } 292 | 293 | e.emitScalar(s, "", tag, style) 294 | 
} 295 | 296 | func (e *Encoder) emitBool(tag string, v reflect.Value) { 297 | s := strconv.FormatBool(v.Bool()) 298 | e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE) 299 | } 300 | 301 | func (e *Encoder) emitInt(tag string, v reflect.Value) { 302 | s := strconv.FormatInt(v.Int(), 10) 303 | e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE) 304 | } 305 | 306 | func (e *Encoder) emitUint(tag string, v reflect.Value) { 307 | s := strconv.FormatUint(v.Uint(), 10) 308 | e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE) 309 | } 310 | 311 | func (e *Encoder) emitFloat(tag string, v reflect.Value) { 312 | f := v.Float() 313 | 314 | var s string 315 | switch { 316 | case math.IsNaN(f): 317 | s = ".nan" 318 | case math.IsInf(f, 1): 319 | s = "+.inf" 320 | case math.IsInf(f, -1): 321 | s = "-.inf" 322 | default: 323 | s = strconv.FormatFloat(f, 'g', -1, v.Type().Bits()) 324 | } 325 | 326 | e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE) 327 | } 328 | 329 | func (e *Encoder) emitNil() { 330 | e.emitScalar("null", "", "", yaml_PLAIN_SCALAR_STYLE) 331 | } 332 | 333 | func (e *Encoder) emitScalar(value, anchor, tag string, style yaml_scalar_style_t) { 334 | implicit := tag == "" 335 | if !implicit { 336 | style = yaml_PLAIN_SCALAR_STYLE 337 | } 338 | 339 | stag := shortTags[tag] 340 | if stag == "" { 341 | stag = tag 342 | } 343 | 344 | yaml_scalar_event_initialize(&e.event, []byte(anchor), []byte(stag), []byte(value), implicit, implicit, style) 345 | e.emit() 346 | } 347 | 348 | func (e *Encoder) emitMarshaler(tag string, v reflect.Value) { 349 | if v.Kind() == reflect.Ptr && v.IsNil() { 350 | e.emitNil() 351 | return 352 | } 353 | 354 | m := v.Interface().(Marshaler) 355 | if m == nil { 356 | e.emitNil() 357 | return 358 | } 359 | t, val, err := m.MarshalYAML() 360 | if err != nil { 361 | panic(err) 362 | } 363 | if val == nil { 364 | e.emitNil() 365 | return 366 | } 367 | 368 | e.marshal(t, reflect.ValueOf(val), false) 369 | } 370 | 371 | func (e *Encoder) 
emitAddrMarshaler(tag string, v reflect.Value) { 372 | if !v.CanAddr() { 373 | e.marshal(tag, v, false) 374 | return 375 | } 376 | 377 | va := v.Addr() 378 | if va.IsNil() { 379 | e.emitNil() 380 | return 381 | } 382 | 383 | m := v.Interface().(Marshaler) 384 | t, val, err := m.MarshalYAML() 385 | if err != nil { 386 | panic(err) 387 | } 388 | 389 | if val == nil { 390 | e.emitNil() 391 | return 392 | } 393 | 394 | e.marshal(t, reflect.ValueOf(val), false) 395 | } 396 | -------------------------------------------------------------------------------- /vendor/github.com/cloudfoundry-incubator/candiedyaml/fixtures/specification/example2_1.yaml: -------------------------------------------------------------------------------- 1 | - Mark McGwire 2 | - Sammy Sosa 3 | - Ken Griffey 4 | -------------------------------------------------------------------------------- /vendor/github.com/cloudfoundry-incubator/candiedyaml/fixtures/specification/example2_10.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | hr: 3 | - Mark McGwire 4 | # Following node labeled SS 5 | - &SS Sammy Sosa 6 | rbi: 7 | - *SS # Subsequent occurrence 8 | - Ken Griffey 9 | -------------------------------------------------------------------------------- /vendor/github.com/cloudfoundry-incubator/candiedyaml/fixtures/specification/example2_11.yaml: -------------------------------------------------------------------------------- 1 | ? - Detroit Tigers 2 | - Chicago cubs 3 | : 4 | - 2001-07-23 5 | 6 | ? 
[ New York Yankees, 7 | Atlanta Braves ] 8 | : [ 2001-07-02, 2001-08-12, 9 | 2001-08-14 ] 10 | -------------------------------------------------------------------------------- /vendor/github.com/cloudfoundry-incubator/candiedyaml/fixtures/specification/example2_12.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | # products purchased 3 | - item : Super Hoop 4 | quantity: 1 5 | - item : Basketball 6 | quantity: 4 7 | - item : Big Shoes 8 | quantity: 1 9 | -------------------------------------------------------------------------------- /vendor/github.com/cloudfoundry-incubator/candiedyaml/fixtures/specification/example2_13.yaml: -------------------------------------------------------------------------------- 1 | # ASCII Art 2 | --- | 3 | \//||\/|| 4 | // || ||__ 5 | -------------------------------------------------------------------------------- /vendor/github.com/cloudfoundry-incubator/candiedyaml/fixtures/specification/example2_14.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | Mark McGwire's 3 | year was crippled 4 | by a knee injury. 5 | -------------------------------------------------------------------------------- /vendor/github.com/cloudfoundry-incubator/candiedyaml/fixtures/specification/example2_15.yaml: -------------------------------------------------------------------------------- 1 | > 2 | Sammy Sosa completed another 3 | fine season with great stats. 4 | 5 | 63 Home Runs 6 | 0.288 Batting Average 7 | 8 | What a year! 9 | -------------------------------------------------------------------------------- /vendor/github.com/cloudfoundry-incubator/candiedyaml/fixtures/specification/example2_15_dumped.yaml: -------------------------------------------------------------------------------- 1 | > 2 | Sammy Sosa completed another fine season with great stats. 3 | 4 | 63 Home Runs 5 | 0.288 Batting Average 6 | 7 | What a year! 
-------------------------------------------------------------------------------- /vendor/github.com/cloudfoundry-incubator/candiedyaml/fixtures/specification/example2_16.yaml: -------------------------------------------------------------------------------- 1 | name: Mark McGwire 2 | accomplishment: > 3 | Mark set a major league 4 | home run record in 1998. 5 | stats: | 6 | 65 Home Runs 7 | 0.278 Batting Average 8 | -------------------------------------------------------------------------------- /vendor/github.com/cloudfoundry-incubator/candiedyaml/fixtures/specification/example2_17.yaml: -------------------------------------------------------------------------------- 1 | unicode: "Sosa did fine.\u263A" 2 | control: "\b1998\t1999\t2000\n" 3 | hexesc: "\x0D\x0A is \r\n" 4 | 5 | single: '"Howdy!" he cried.' 6 | quoted: ' # not a ''comment''.' 7 | tie-fighter: '|\-*-/|' 8 | -------------------------------------------------------------------------------- /vendor/github.com/cloudfoundry-incubator/candiedyaml/fixtures/specification/example2_17_control.yaml: -------------------------------------------------------------------------------- 1 | control: "\b1998\t1999\t2000\n" 2 | 3 | -------------------------------------------------------------------------------- /vendor/github.com/cloudfoundry-incubator/candiedyaml/fixtures/specification/example2_17_hexesc.yaml: -------------------------------------------------------------------------------- 1 | hexesc: "\x0D\x0A is \r\n" 2 | 3 | -------------------------------------------------------------------------------- /vendor/github.com/cloudfoundry-incubator/candiedyaml/fixtures/specification/example2_17_quoted.yaml: -------------------------------------------------------------------------------- 1 | quoted: ' # not a ''comment''.' 
2 | 3 | -------------------------------------------------------------------------------- /vendor/github.com/cloudfoundry-incubator/candiedyaml/fixtures/specification/example2_17_single.yaml: -------------------------------------------------------------------------------- 1 | single: '"Howdy!" he cried.' 2 | -------------------------------------------------------------------------------- /vendor/github.com/cloudfoundry-incubator/candiedyaml/fixtures/specification/example2_17_tie_fighter.yaml: -------------------------------------------------------------------------------- 1 | tie-fighter: '|\-*-/|' 2 | -------------------------------------------------------------------------------- /vendor/github.com/cloudfoundry-incubator/candiedyaml/fixtures/specification/example2_17_unicode.yaml: -------------------------------------------------------------------------------- 1 | unicode: "Sosa did fine.\u263A" 2 | 3 | -------------------------------------------------------------------------------- /vendor/github.com/cloudfoundry-incubator/candiedyaml/fixtures/specification/example2_18.yaml: -------------------------------------------------------------------------------- 1 | plain: 2 | This unquoted scalar 3 | spans many lines. 
4 | 5 | quoted: "So does this 6 | quoted scalar.\n" 7 | -------------------------------------------------------------------------------- /vendor/github.com/cloudfoundry-incubator/candiedyaml/fixtures/specification/example2_19.yaml: -------------------------------------------------------------------------------- 1 | canonical: 12345 2 | decimal: +12_345 3 | octal: 014 4 | hexadecimal: 0xC 5 | -------------------------------------------------------------------------------- /vendor/github.com/cloudfoundry-incubator/candiedyaml/fixtures/specification/example2_2.yaml: -------------------------------------------------------------------------------- 1 | hr: 65 # Home runs 2 | avg: 0.278 # Batting average 3 | rbi: 147 # Runs Batted In 4 | -------------------------------------------------------------------------------- /vendor/github.com/cloudfoundry-incubator/candiedyaml/fixtures/specification/example2_20.yaml: -------------------------------------------------------------------------------- 1 | canonical: 1.23015e+3 2 | exponential: 12.3015e+02 3 | fixed: 1_230.15 4 | negative infinity: -.inf 5 | not a number: .NaN 6 | -------------------------------------------------------------------------------- /vendor/github.com/cloudfoundry-incubator/candiedyaml/fixtures/specification/example2_21.yaml: -------------------------------------------------------------------------------- 1 | null: ~ 2 | true: yes 3 | false: no 4 | string: '12345' 5 | -------------------------------------------------------------------------------- /vendor/github.com/cloudfoundry-incubator/candiedyaml/fixtures/specification/example2_22.yaml: -------------------------------------------------------------------------------- 1 | canonical: 2001-12-15T02:59:43.1Z 2 | iso8601: 2001-12-14t21:59:43.10-05:00 3 | spaced: 2001-12-14 21:59:43.10 -5 4 | date: 2002-12-14 5 | -------------------------------------------------------------------------------- 
/vendor/github.com/cloudfoundry-incubator/candiedyaml/fixtures/specification/example2_23.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | not-date: !!str 2002-04-28 3 | 4 | picture: !!binary "\ 5 | R0lGODlhDAAMAIQAAP//9/X\ 6 | 17unp5WZmZgAAAOfn515eXv\ 7 | Pz7Y6OjuDg4J+fn5OTk6enp\ 8 | 56enmleECcgggoBADs=" 9 | 10 | application specific tag: !something | 11 | The semantics of the tag 12 | above may be different for 13 | different documents. 14 | 15 | -------------------------------------------------------------------------------- /vendor/github.com/cloudfoundry-incubator/candiedyaml/fixtures/specification/example2_23_application.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | application specific tag: !something | 3 | The semantics of the tag 4 | above may be different for 5 | different documents. 6 | -------------------------------------------------------------------------------- /vendor/github.com/cloudfoundry-incubator/candiedyaml/fixtures/specification/example2_23_non_date.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | not-date: !!str 2002-04-28 3 | 4 | -------------------------------------------------------------------------------- /vendor/github.com/cloudfoundry-incubator/candiedyaml/fixtures/specification/example2_23_picture.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | picture: !!binary "\ 3 | R0lGODlhDAAMAIQAAP//9/X\ 4 | 17unp5WZmZgAAAOfn515eXv\ 5 | Pz7Y6OjuDg4J+fn5OTk6enp\ 6 | 56enmleECcgggoBADs=" 7 | 8 | 9 | -------------------------------------------------------------------------------- /vendor/github.com/cloudfoundry-incubator/candiedyaml/fixtures/specification/example2_24.yaml: -------------------------------------------------------------------------------- 1 | %TAG ! tag:clarkevans.com,2002: 2 | --- !shape 3 | # Use the ! 
handle for presenting 4 | # tag:clarkevans.com,2002:circle 5 | - !circle 6 | center: &ORIGIN {x: 73, y: 129} 7 | radius: 7 8 | - !line 9 | start: *ORIGIN 10 | finish: { x: 89, y: 102 } 11 | - !label 12 | start: *ORIGIN 13 | color: 0xFFEEBB 14 | text: Pretty vector drawing. 15 | -------------------------------------------------------------------------------- /vendor/github.com/cloudfoundry-incubator/candiedyaml/fixtures/specification/example2_24_dumped.yaml: -------------------------------------------------------------------------------- 1 | !shape 2 | - !circle 3 | center: &id001 {x: 73, y: 129} 4 | radius: 7 5 | - !line 6 | finish: {x: 89, y: 102} 7 | start: *id001 8 | - !label 9 | color: 0xFFEEBB 10 | start: *id001 11 | text: Pretty vector drawing. -------------------------------------------------------------------------------- /vendor/github.com/cloudfoundry-incubator/candiedyaml/fixtures/specification/example2_25.yaml: -------------------------------------------------------------------------------- 1 | # sets are represented as a 2 | # mapping where each key is 3 | # associated with the empty string 4 | --- !!set 5 | ? Mark McGwire 6 | ? Sammy Sosa 7 | ? Ken Griff 8 | -------------------------------------------------------------------------------- /vendor/github.com/cloudfoundry-incubator/candiedyaml/fixtures/specification/example2_26.yaml: -------------------------------------------------------------------------------- 1 | # ordered maps are represented as 2 | # a sequence of mappings, with 3 | # each mapping having one key 4 | --- !!omap 5 | - Mark McGwire: 65 6 | - Sammy Sosa: 63 7 | - Ken Griffy: 58 8 | -------------------------------------------------------------------------------- /vendor/github.com/cloudfoundry-incubator/candiedyaml/fixtures/specification/example2_27.yaml: -------------------------------------------------------------------------------- 1 | --- ! 
2 | invoice: 34843 3 | date : 2001-01-23 4 | billTo: &id001 5 | given : Chris 6 | family : Dumars 7 | address: 8 | lines: | 9 | 458 Walkman Dr. 10 | Suite #292 11 | city : Royal Oak 12 | state : MI 13 | postal : 48046 14 | shipTo: *id001 15 | product: 16 | - sku : BL394D 17 | quantity : 4 18 | description : Basketball 19 | price : 450.00 20 | - sku : BL4438H 21 | quantity : 1 22 | description : Super Hoop 23 | price : 2392.00 24 | tax : 251.42 25 | total: 4443.52 26 | comments: 27 | Late afternoon is best. 28 | Backup contact is Nancy 29 | Billsmer @ 338-4338. 30 | -------------------------------------------------------------------------------- /vendor/github.com/cloudfoundry-incubator/candiedyaml/fixtures/specification/example2_27_dumped.yaml: -------------------------------------------------------------------------------- 1 | !!org.yaml.snakeyaml.Invoice 2 | billTo: &id001 3 | address: 4 | city: Royal Oak 5 | lines: | 6 | 458 Walkman Dr. 7 | Suite #292 8 | postal: '48046' 9 | state: MI 10 | family: Dumars 11 | given: Chris 12 | comments: Late afternoon is best. Backup contact is Nancy Billsmer @ 338-4338. 13 | date: '2001-01-23' 14 | invoice: 34843 15 | product: 16 | - {description: Basketball, price: 450.0, quantity: 4, sku: BL394D} 17 | - {description: Super Hoop, price: 2392.0, quantity: 1, sku: BL4438H} 18 | shipTo: *id001 19 | tax: 251.42 20 | total: 4443.52 -------------------------------------------------------------------------------- /vendor/github.com/cloudfoundry-incubator/candiedyaml/fixtures/specification/example2_28.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | Time: 2001-11-23 15:01:42 -5 3 | User: ed 4 | Warning: 5 | This is an error message 6 | for the log file 7 | --- 8 | Time: 2001-11-23 15:02:31 -5 9 | User: ed 10 | Warning: 11 | A slightly different error 12 | message. 
13 | --- 14 | Date: 2001-11-23 15:03:17 -5 15 | User: ed 16 | Fatal: 17 | Unknown variable "bar" 18 | Stack: 19 | - file: TopClass.py 20 | line: 23 21 | code: | 22 | x = MoreObject("345\n") 23 | - file: MoreClass.py 24 | line: 58 25 | code: |- 26 | foo = bar 27 | 28 | 29 | 30 | -------------------------------------------------------------------------------- /vendor/github.com/cloudfoundry-incubator/candiedyaml/fixtures/specification/example2_3.yaml: -------------------------------------------------------------------------------- 1 | american: 2 | - Boston Red Sox 3 | - Detroit Tigers 4 | - New York Yankees 5 | national: 6 | - New York Mets 7 | - Chicago Cubs 8 | - Atlanta Braves -------------------------------------------------------------------------------- /vendor/github.com/cloudfoundry-incubator/candiedyaml/fixtures/specification/example2_4.yaml: -------------------------------------------------------------------------------- 1 | - 2 | name: Mark McGwire 3 | hr: 65 4 | avg: 0.278 5 | - 6 | name: Sammy Sosa 7 | hr: 63 8 | avg: 0.288 9 | -------------------------------------------------------------------------------- /vendor/github.com/cloudfoundry-incubator/candiedyaml/fixtures/specification/example2_5.yaml: -------------------------------------------------------------------------------- 1 | - [name , hr, avg ] 2 | - [Mark McGwire, 65, 0.278] 3 | - [Sammy Sosa , 63, 0.288] 4 | -------------------------------------------------------------------------------- /vendor/github.com/cloudfoundry-incubator/candiedyaml/fixtures/specification/example2_6.yaml: -------------------------------------------------------------------------------- 1 | Mark McGwire: {hr: 65, avg: 0.278} 2 | Sammy Sosa: { 3 | hr: 63, 4 | avg: 0.288 5 | } 6 | -------------------------------------------------------------------------------- /vendor/github.com/cloudfoundry-incubator/candiedyaml/fixtures/specification/example2_7.yaml: 
-------------------------------------------------------------------------------- 1 | # Ranking of 1998 home runs 2 | --- 3 | - Mark McGwire 4 | - Sammy Sosa 5 | - Ken Griffey 6 | 7 | # Team ranking 8 | --- 9 | - Chicago Cubs 10 | - St Louis Cardinals 11 | -------------------------------------------------------------------------------- /vendor/github.com/cloudfoundry-incubator/candiedyaml/fixtures/specification/example2_8.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | time: 20:03:20 3 | player: Sammy Sosa 4 | action: strike (miss) 5 | ... 6 | --- 7 | time: 20:03:47 8 | player: Sammy Sosa 9 | action: grand slam 10 | ... 11 | -------------------------------------------------------------------------------- /vendor/github.com/cloudfoundry-incubator/candiedyaml/fixtures/specification/example2_9.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | hr: # 1998 hr ranking 3 | - Mark McGwire 4 | - Sammy Sosa 5 | rbi: 6 | # 1998 rbi ranking 7 | - Sammy Sosa 8 | - Ken Griffey 9 | -------------------------------------------------------------------------------- /vendor/github.com/cloudfoundry-incubator/candiedyaml/fixtures/specification/example_empty.yaml: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Azure/azure-docker-extension/d1d2547150489b2495a8d09a642e7f515bbf484f/vendor/github.com/cloudfoundry-incubator/candiedyaml/fixtures/specification/example_empty.yaml -------------------------------------------------------------------------------- /vendor/github.com/cloudfoundry-incubator/candiedyaml/fixtures/specification/types/map.yaml: -------------------------------------------------------------------------------- 1 | # Unordered set of key: value pairs. 
2 | Block style: !!map 3 | Clark : Evans 4 | Brian : Ingerson 5 | Oren : Ben-Kiki 6 | Flow style: !!map { Clark: Evans, Brian: Ingerson, Oren: Ben-Kiki } 7 | -------------------------------------------------------------------------------- /vendor/github.com/cloudfoundry-incubator/candiedyaml/fixtures/specification/types/map_mixed_tags.yaml: -------------------------------------------------------------------------------- 1 | # Unordered set of key: value pairs. 2 | Block style: ! 3 | Clark : Evans 4 | Brian : Ingerson 5 | Oren : Ben-Kiki 6 | Flow style: { Clark: Evans, Brian: Ingerson, Oren: Ben-Kiki } 7 | -------------------------------------------------------------------------------- /vendor/github.com/cloudfoundry-incubator/candiedyaml/fixtures/specification/types/merge.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | - &CENTER { x: 1, y: 2 } 3 | - &LEFT { x: 0, y: 2 } 4 | - &BIG { r: 10 } 5 | - &SMALL { r: 1 } 6 | 7 | # All the following maps are equal: 8 | 9 | - # Explicit keys 10 | x: 1 11 | y: 2 12 | r: 10 13 | label: center/big 14 | 15 | - # Merge one map 16 | << : *CENTER 17 | r: 10 18 | label: center/big 19 | 20 | - # Merge multiple maps 21 | << : [ *CENTER, *BIG ] 22 | label: center/big 23 | 24 | - # Override 25 | << : [ *BIG, *LEFT, *SMALL ] 26 | x: 1 27 | label: center/big 28 | -------------------------------------------------------------------------------- /vendor/github.com/cloudfoundry-incubator/candiedyaml/fixtures/specification/types/omap.yaml: -------------------------------------------------------------------------------- 1 | # Explicitly typed ordered map (dictionary). 2 | Bestiary: !!omap 3 | - aardvark: African pig-like ant eater. Ugly. 4 | - anteater: South-American ant eater. Two species. 5 | - anaconda: South-American constrictor snake. Scaly. 6 | # Etc. 
7 | # Flow style 8 | Numbers: !!omap [ one: 1, two: 2, three : 3 ] 9 | -------------------------------------------------------------------------------- /vendor/github.com/cloudfoundry-incubator/candiedyaml/fixtures/specification/types/pairs.yaml: -------------------------------------------------------------------------------- 1 | # Explicitly typed pairs. 2 | Block tasks: !!pairs 3 | - meeting: with team. 4 | - meeting: with boss. 5 | - break: lunch. 6 | - meeting: with client. 7 | Flow tasks: !!pairs [ meeting: with team, meeting: with boss ] 8 | -------------------------------------------------------------------------------- /vendor/github.com/cloudfoundry-incubator/candiedyaml/fixtures/specification/types/seq.yaml: -------------------------------------------------------------------------------- 1 | # Ordered sequence of nodes 2 | Block style: !!seq 3 | - Mercury # Rotates - no light/dark sides. 4 | - Venus # Deadliest. Aptly named. 5 | - Earth # Mostly dirt. 6 | - Mars # Seems empty. 7 | - Jupiter # The king. 8 | - Saturn # Pretty. 9 | - Uranus # Where the sun hardly shines. 10 | - Neptune # Boring. No rings. 11 | - Pluto # You call this a planet? 12 | Flow style: !!seq [ Mercury, Venus, Earth, Mars, # Rocks 13 | Jupiter, Saturn, Uranus, Neptune, # Gas 14 | Pluto ] # Overrated 15 | -------------------------------------------------------------------------------- /vendor/github.com/cloudfoundry-incubator/candiedyaml/fixtures/specification/types/set.yaml: -------------------------------------------------------------------------------- 1 | # Explicitly typed set. 2 | baseball players: !!set 3 | ? Mark McGwire 4 | ? Sammy Sosa 5 | ? 
Ken Griffey 6 | # Flow style 7 | baseball teams: !!set { Boston Red Sox, Detroit Tigers, New York Yankees } 8 | -------------------------------------------------------------------------------- /vendor/github.com/cloudfoundry-incubator/candiedyaml/fixtures/specification/types/v.yaml: -------------------------------------------------------------------------------- 1 | --- # New schema 2 | link with: 3 | - = : library1.dll 4 | version: 1.2 5 | -------------------------------------------------------------------------------- /vendor/github.com/cloudfoundry-incubator/candiedyaml/fixtures/specification/types/value.yaml: -------------------------------------------------------------------------------- 1 | --- # Old schema 2 | link with: 3 | - library1.dll 4 | - library2.dll 5 | --- # New schema 6 | link with: 7 | - = : library1.dll 8 | version: 1.2 9 | - = : library2.dll 10 | version: 2.3 11 | -------------------------------------------------------------------------------- /vendor/github.com/cloudfoundry-incubator/candiedyaml/libyaml-LICENSE: -------------------------------------------------------------------------------- 1 | Copyright (c) 2006 Kirill Simonov 2 | 3 | Permission is hereby granted, free of charge, to any person obtaining a copy of 4 | this software and associated documentation files (the "Software"), to deal in 5 | the Software without restriction, including without limitation the rights to 6 | use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies 7 | of the Software, and to permit persons to whom the Software is furnished to do 8 | so, subject to the following conditions: 9 | 10 | The above copyright notice and this permission notice shall be included in all 11 | copies or substantial portions of the Software. 12 | 13 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 14 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 15 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE 16 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 17 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 18 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 19 | SOFTWARE. 20 | -------------------------------------------------------------------------------- /vendor/github.com/cloudfoundry-incubator/candiedyaml/resolver.go: -------------------------------------------------------------------------------- 1 | /* 2 | Licensed under the Apache License, Version 2.0 (the "License"); 3 | you may not use this file except in compliance with the License. 4 | You may obtain a copy of the License at 5 | 6 | http://www.apache.org/licenses/LICENSE-2.0 7 | 8 | Unless required by applicable law or agreed to in writing, software 9 | distributed under the License is distributed on an "AS IS" BASIS, 10 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 11 | See the License for the specific language governing permissions and 12 | limitations under the License. 
13 | */ 14 | 15 | package candiedyaml 16 | 17 | import ( 18 | "bytes" 19 | "encoding/base64" 20 | "fmt" 21 | "math" 22 | "reflect" 23 | "regexp" 24 | "strconv" 25 | "strings" 26 | "time" 27 | ) 28 | 29 | var byteSliceType = reflect.TypeOf([]byte(nil)) 30 | 31 | var binary_tags = [][]byte{[]byte("!binary"), []byte(yaml_BINARY_TAG)} 32 | var bool_values map[string]bool 33 | var null_values map[string]bool 34 | 35 | var signs = []byte{'-', '+'} 36 | var nulls = []byte{'~', 'n', 'N'} 37 | var bools = []byte{'t', 'T', 'f', 'F', 'y', 'Y', 'n', 'N', 'o', 'O'} 38 | 39 | var timestamp_regexp *regexp.Regexp 40 | var ymd_regexp *regexp.Regexp 41 | 42 | func init() { 43 | bool_values = make(map[string]bool) 44 | bool_values["y"] = true 45 | bool_values["yes"] = true 46 | bool_values["n"] = false 47 | bool_values["no"] = false 48 | bool_values["true"] = true 49 | bool_values["false"] = false 50 | bool_values["on"] = true 51 | bool_values["off"] = false 52 | 53 | null_values = make(map[string]bool) 54 | null_values["~"] = true 55 | null_values["null"] = true 56 | null_values["Null"] = true 57 | null_values["NULL"] = true 58 | 59 | timestamp_regexp = regexp.MustCompile("^([0-9][0-9][0-9][0-9])-([0-9][0-9]?)-([0-9][0-9]?)(?:(?:[Tt]|[ \t]+)([0-9][0-9]?):([0-9][0-9]):([0-9][0-9])(?:\\.([0-9]*))?(?:[ \t]*(?:Z|([-+][0-9][0-9]?)(?::([0-9][0-9])?)?))?)?$") 60 | ymd_regexp = regexp.MustCompile("^([0-9][0-9][0-9][0-9])-([0-9][0-9]?)-([0-9][0-9]?)$") 61 | } 62 | 63 | func resolve(event yaml_event_t, v reflect.Value, useNumber bool) (string, error) { 64 | val := string(event.value) 65 | 66 | if null_values[val] { 67 | v.Set(reflect.Zero(v.Type())) 68 | return yaml_NULL_TAG, nil 69 | } 70 | 71 | switch v.Kind() { 72 | case reflect.String: 73 | if useNumber && v.Type() == numberType { 74 | tag, i := resolveInterface(event, useNumber) 75 | if n, ok := i.(Number); ok { 76 | v.Set(reflect.ValueOf(n)) 77 | return tag, nil 78 | } 79 | return "", fmt.Errorf("Not a number: '%s' at %s", event.value, 
event.start_mark) 80 | } 81 | 82 | return resolve_string(val, v, event) 83 | case reflect.Bool: 84 | return resolve_bool(val, v, event) 85 | case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: 86 | return resolve_int(val, v, useNumber, event) 87 | case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: 88 | return resolve_uint(val, v, useNumber, event) 89 | case reflect.Float32, reflect.Float64: 90 | return resolve_float(val, v, useNumber, event) 91 | case reflect.Interface: 92 | _, i := resolveInterface(event, useNumber) 93 | if i != nil { 94 | v.Set(reflect.ValueOf(i)) 95 | } else { 96 | v.Set(reflect.Zero(v.Type())) 97 | } 98 | 99 | case reflect.Struct: 100 | return resolve_time(val, v, event) 101 | case reflect.Slice: 102 | if v.Type() != byteSliceType { 103 | return "", fmt.Errorf("Cannot resolve %s into %s at %s", val, v.String(), event.start_mark) 104 | } 105 | b, err := decode_binary(event.value, event) 106 | if err != nil { 107 | return "", err 108 | } 109 | 110 | v.Set(reflect.ValueOf(b)) 111 | default: 112 | return "", fmt.Errorf("Unknown resolution for '%s' using %s at %s", val, v.String(), event.start_mark) 113 | } 114 | 115 | return yaml_STR_TAG, nil 116 | } 117 | 118 | func hasBinaryTag(event yaml_event_t) bool { 119 | for _, tag := range binary_tags { 120 | if bytes.Equal(event.tag, tag) { 121 | return true 122 | } 123 | } 124 | return false 125 | } 126 | 127 | func decode_binary(value []byte, event yaml_event_t) ([]byte, error) { 128 | b := make([]byte, base64.StdEncoding.DecodedLen(len(value))) 129 | n, err := base64.StdEncoding.Decode(b, value) 130 | if err != nil { 131 | return nil, fmt.Errorf("Invalid base64 text: '%s' at %s", string(b), event.start_mark) 132 | } 133 | return b[:n], nil 134 | } 135 | 136 | func resolve_string(val string, v reflect.Value, event yaml_event_t) (string, error) { 137 | if len(event.tag) > 0 { 138 | if hasBinaryTag(event) { 139 | b, err := decode_binary(event.value, 
event) 140 | if err != nil { 141 | return "", err 142 | } 143 | val = string(b) 144 | } 145 | } 146 | v.SetString(val) 147 | return yaml_STR_TAG, nil 148 | } 149 | 150 | func resolve_bool(val string, v reflect.Value, event yaml_event_t) (string, error) { 151 | b, found := bool_values[strings.ToLower(val)] 152 | if !found { 153 | return "", fmt.Errorf("Invalid boolean: '%s' at %s", val, event.start_mark) 154 | } 155 | 156 | v.SetBool(b) 157 | return yaml_BOOL_TAG, nil 158 | } 159 | 160 | func resolve_int(val string, v reflect.Value, useNumber bool, event yaml_event_t) (string, error) { 161 | original := val 162 | val = strings.Replace(val, "_", "", -1) 163 | var value uint64 164 | 165 | isNumberValue := v.Type() == numberType 166 | 167 | sign := int64(1) 168 | if val[0] == '-' { 169 | sign = -1 170 | val = val[1:] 171 | } else if val[0] == '+' { 172 | val = val[1:] 173 | } 174 | 175 | base := 0 176 | if val == "0" { 177 | if isNumberValue { 178 | v.SetString("0") 179 | } else { 180 | v.Set(reflect.Zero(v.Type())) 181 | } 182 | 183 | return yaml_INT_TAG, nil 184 | } 185 | 186 | if strings.HasPrefix(val, "0o") { 187 | base = 8 188 | val = val[2:] 189 | } 190 | 191 | value, err := strconv.ParseUint(val, base, 64) 192 | if err != nil { 193 | return "", fmt.Errorf("Invalid integer: '%s' at %s", original, event.start_mark) 194 | } 195 | 196 | var val64 int64 197 | if value <= math.MaxInt64 { 198 | val64 = int64(value) 199 | if sign == -1 { 200 | val64 = -val64 201 | } 202 | } else if sign == -1 && value == uint64(math.MaxInt64)+1 { 203 | val64 = math.MinInt64 204 | } else { 205 | return "", fmt.Errorf("Invalid integer: '%s' at %s", original, event.start_mark) 206 | } 207 | 208 | if isNumberValue { 209 | v.SetString(strconv.FormatInt(val64, 10)) 210 | } else { 211 | if v.OverflowInt(val64) { 212 | return "", fmt.Errorf("Invalid integer: '%s' at %s", original, event.start_mark) 213 | } 214 | v.SetInt(val64) 215 | } 216 | 217 | return yaml_INT_TAG, nil 218 | } 219 | 220 | 
func resolve_uint(val string, v reflect.Value, useNumber bool, event yaml_event_t) (string, error) { 221 | original := val 222 | val = strings.Replace(val, "_", "", -1) 223 | var value uint64 224 | 225 | isNumberValue := v.Type() == numberType 226 | 227 | if val[0] == '-' { 228 | return "", fmt.Errorf("Unsigned int with negative value: '%s' at %s", original, event.start_mark) 229 | } 230 | 231 | if val[0] == '+' { 232 | val = val[1:] 233 | } 234 | 235 | base := 0 236 | if val == "0" { 237 | if isNumberValue { 238 | v.SetString("0") 239 | } else { 240 | v.Set(reflect.Zero(v.Type())) 241 | } 242 | 243 | return yaml_INT_TAG, nil 244 | } 245 | 246 | if strings.HasPrefix(val, "0o") { 247 | base = 8 248 | val = val[2:] 249 | } 250 | 251 | value, err := strconv.ParseUint(val, base, 64) 252 | if err != nil { 253 | return "", fmt.Errorf("Invalid unsigned integer: '%s' at %s", val, event.start_mark) 254 | } 255 | 256 | if isNumberValue { 257 | v.SetString(strconv.FormatUint(value, 10)) 258 | } else { 259 | if v.OverflowUint(value) { 260 | return "", fmt.Errorf("Invalid unsigned integer: '%s' at %s", val, event.start_mark) 261 | } 262 | 263 | v.SetUint(value) 264 | } 265 | 266 | return yaml_INT_TAG, nil 267 | } 268 | 269 | func resolve_float(val string, v reflect.Value, useNumber bool, event yaml_event_t) (string, error) { 270 | val = strings.Replace(val, "_", "", -1) 271 | var value float64 272 | 273 | isNumberValue := v.Type() == numberType 274 | typeBits := 64 275 | if !isNumberValue { 276 | typeBits = v.Type().Bits() 277 | } 278 | 279 | sign := 1 280 | if val[0] == '-' { 281 | sign = -1 282 | val = val[1:] 283 | } else if val[0] == '+' { 284 | val = val[1:] 285 | } 286 | 287 | valLower := strings.ToLower(val) 288 | if valLower == ".inf" { 289 | value = math.Inf(sign) 290 | } else if valLower == ".nan" { 291 | value = math.NaN() 292 | } else { 293 | var err error 294 | value, err = strconv.ParseFloat(val, typeBits) 295 | value *= float64(sign) 296 | 297 | if err != nil { 
298 | return "", fmt.Errorf("Invalid float: '%s' at %s", val, event.start_mark) 299 | } 300 | } 301 | 302 | if isNumberValue { 303 | v.SetString(strconv.FormatFloat(value, 'g', -1, typeBits)) 304 | } else { 305 | if v.OverflowFloat(value) { 306 | return "", fmt.Errorf("Invalid float: '%s' at %s", val, event.start_mark) 307 | } 308 | 309 | v.SetFloat(value) 310 | } 311 | 312 | return yaml_FLOAT_TAG, nil 313 | } 314 | 315 | func resolve_time(val string, v reflect.Value, event yaml_event_t) (string, error) { 316 | var parsedTime time.Time 317 | matches := ymd_regexp.FindStringSubmatch(val) 318 | if len(matches) > 0 { 319 | year, _ := strconv.Atoi(matches[1]) 320 | month, _ := strconv.Atoi(matches[2]) 321 | day, _ := strconv.Atoi(matches[3]) 322 | parsedTime = time.Date(year, time.Month(month), day, 0, 0, 0, 0, time.UTC) 323 | } else { 324 | matches = timestamp_regexp.FindStringSubmatch(val) 325 | if len(matches) == 0 { 326 | return "", fmt.Errorf("Invalid timestap: '%s' at %s", val, event.start_mark) 327 | } 328 | 329 | year, _ := strconv.Atoi(matches[1]) 330 | month, _ := strconv.Atoi(matches[2]) 331 | day, _ := strconv.Atoi(matches[3]) 332 | hour, _ := strconv.Atoi(matches[4]) 333 | min, _ := strconv.Atoi(matches[5]) 334 | sec, _ := strconv.Atoi(matches[6]) 335 | 336 | nsec := 0 337 | if matches[7] != "" { 338 | millis, _ := strconv.Atoi(matches[7]) 339 | nsec = int(time.Duration(millis) * time.Millisecond) 340 | } 341 | 342 | loc := time.UTC 343 | if matches[8] != "" { 344 | sign := matches[8][0] 345 | hr, _ := strconv.Atoi(matches[8][1:]) 346 | min := 0 347 | if matches[9] != "" { 348 | min, _ = strconv.Atoi(matches[9]) 349 | } 350 | 351 | zoneOffset := (hr*60 + min) * 60 352 | if sign == '-' { 353 | zoneOffset = -zoneOffset 354 | } 355 | 356 | loc = time.FixedZone("", zoneOffset) 357 | } 358 | parsedTime = time.Date(year, time.Month(month), day, hour, min, sec, nsec, loc) 359 | } 360 | 361 | v.Set(reflect.ValueOf(parsedTime)) 362 | return "", nil 363 | } 364 | 
365 | func resolveInterface(event yaml_event_t, useNumber bool) (string, interface{}) { 366 | val := string(event.value) 367 | if len(event.tag) == 0 && !event.implicit { 368 | return "", val 369 | } 370 | 371 | if len(val) == 0 { 372 | return yaml_NULL_TAG, nil 373 | } 374 | 375 | var result interface{} 376 | 377 | sign := false 378 | c := val[0] 379 | switch { 380 | case bytes.IndexByte(signs, c) != -1: 381 | sign = true 382 | fallthrough 383 | case c >= '0' && c <= '9': 384 | i := int64(0) 385 | result = &i 386 | if useNumber { 387 | var n Number 388 | result = &n 389 | } 390 | 391 | v := reflect.ValueOf(result).Elem() 392 | if _, err := resolve_int(val, v, useNumber, event); err == nil { 393 | return yaml_INT_TAG, v.Interface() 394 | } 395 | 396 | f := float64(0) 397 | result = &f 398 | if useNumber { 399 | var n Number 400 | result = &n 401 | } 402 | 403 | v = reflect.ValueOf(result).Elem() 404 | if _, err := resolve_float(val, v, useNumber, event); err == nil { 405 | return yaml_FLOAT_TAG, v.Interface() 406 | } 407 | 408 | if !sign { 409 | t := time.Time{} 410 | if _, err := resolve_time(val, reflect.ValueOf(&t).Elem(), event); err == nil { 411 | return "", t 412 | } 413 | } 414 | case bytes.IndexByte(nulls, c) != -1: 415 | if null_values[val] { 416 | return yaml_NULL_TAG, nil 417 | } 418 | b := false 419 | if _, err := resolve_bool(val, reflect.ValueOf(&b).Elem(), event); err == nil { 420 | return yaml_BOOL_TAG, b 421 | } 422 | case c == '.': 423 | f := float64(0) 424 | result = &f 425 | if useNumber { 426 | var n Number 427 | result = &n 428 | } 429 | 430 | v := reflect.ValueOf(result).Elem() 431 | if _, err := resolve_float(val, v, useNumber, event); err == nil { 432 | return yaml_FLOAT_TAG, v.Interface() 433 | } 434 | case bytes.IndexByte(bools, c) != -1: 435 | b := false 436 | if _, err := resolve_bool(val, reflect.ValueOf(&b).Elem(), event); err == nil { 437 | return yaml_BOOL_TAG, b 438 | } 439 | } 440 | 441 | if hasBinaryTag(event) { 442 | bytes, err 
:= decode_binary(event.value, event) 443 | if err == nil { 444 | return yaml_BINARY_TAG, bytes 445 | } 446 | } 447 | 448 | return yaml_STR_TAG, val 449 | } 450 | -------------------------------------------------------------------------------- /vendor/github.com/cloudfoundry-incubator/candiedyaml/run_parser.go: -------------------------------------------------------------------------------- 1 | /* 2 | Licensed under the Apache License, Version 2.0 (the "License"); 3 | you may not use this file except in compliance with the License. 4 | You may obtain a copy of the License at 5 | 6 | http://www.apache.org/licenses/LICENSE-2.0 7 | 8 | Unless required by applicable law or agreed to in writing, software 9 | distributed under the License is distributed on an "AS IS" BASIS, 10 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 11 | See the License for the specific language governing permissions and 12 | limitations under the License. 13 | */ 14 | 15 | package candiedyaml 16 | 17 | import ( 18 | "fmt" 19 | "os" 20 | ) 21 | 22 | func Run_parser(cmd string, args []string) { 23 | for i := 0; i < len(args); i++ { 24 | fmt.Printf("[%d] Scanning '%s'", i, args[i]) 25 | file, err := os.Open(args[i]) 26 | if err != nil { 27 | panic(fmt.Sprintf("Invalid file '%s': %s", args[i], err.Error())) 28 | } 29 | 30 | parser := yaml_parser_t{} 31 | yaml_parser_initialize(&parser) 32 | yaml_parser_set_input_reader(&parser, file) 33 | 34 | failed := false 35 | token := yaml_token_t{} 36 | count := 0 37 | for { 38 | if !yaml_parser_scan(&parser, &token) { 39 | failed = true 40 | break 41 | } 42 | 43 | if token.token_type == yaml_STREAM_END_TOKEN { 44 | break 45 | } 46 | count++ 47 | } 48 | 49 | file.Close() 50 | 51 | msg := "SUCCESS" 52 | if failed { 53 | msg = "FAILED" 54 | if parser.error != yaml_NO_ERROR { 55 | m := parser.problem_mark 56 | fmt.Printf("ERROR: (%s) %s @ line: %d col: %d\n", 57 | parser.context, parser.problem, m.line, m.column) 58 | } 59 | } 60 | 
fmt.Printf("%s (%d tokens)\n", msg, count) 61 | } 62 | } 63 | -------------------------------------------------------------------------------- /vendor/github.com/cloudfoundry-incubator/candiedyaml/tags.go: -------------------------------------------------------------------------------- 1 | /* 2 | Licensed under the Apache License, Version 2.0 (the "License"); 3 | you may not use this file except in compliance with the License. 4 | You may obtain a copy of the License at 5 | 6 | http://www.apache.org/licenses/LICENSE-2.0 7 | 8 | Unless required by applicable law or agreed to in writing, software 9 | distributed under the License is distributed on an "AS IS" BASIS, 10 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 11 | See the License for the specific language governing permissions and 12 | limitations under the License. 13 | */ 14 | 15 | package candiedyaml 16 | 17 | import ( 18 | "reflect" 19 | "sort" 20 | "strings" 21 | "sync" 22 | "unicode" 23 | ) 24 | 25 | // A field represents a single field found in a struct. 26 | type field struct { 27 | name string 28 | tag bool 29 | index []int 30 | typ reflect.Type 31 | omitEmpty bool 32 | flow bool 33 | } 34 | 35 | // byName sorts field by name, breaking ties with depth, 36 | // then breaking ties with "name came from json tag", then 37 | // breaking ties with index sequence. 38 | type byName []field 39 | 40 | func (x byName) Len() int { return len(x) } 41 | 42 | func (x byName) Swap(i, j int) { x[i], x[j] = x[j], x[i] } 43 | 44 | func (x byName) Less(i, j int) bool { 45 | if x[i].name != x[j].name { 46 | return x[i].name < x[j].name 47 | } 48 | if len(x[i].index) != len(x[j].index) { 49 | return len(x[i].index) < len(x[j].index) 50 | } 51 | if x[i].tag != x[j].tag { 52 | return x[i].tag 53 | } 54 | return byIndex(x).Less(i, j) 55 | } 56 | 57 | // byIndex sorts field by index sequence. 
58 | type byIndex []field 59 | 60 | func (x byIndex) Len() int { return len(x) } 61 | 62 | func (x byIndex) Swap(i, j int) { x[i], x[j] = x[j], x[i] } 63 | 64 | func (x byIndex) Less(i, j int) bool { 65 | for k, xik := range x[i].index { 66 | if k >= len(x[j].index) { 67 | return false 68 | } 69 | if xik != x[j].index[k] { 70 | return xik < x[j].index[k] 71 | } 72 | } 73 | return len(x[i].index) < len(x[j].index) 74 | } 75 | 76 | // typeFields returns a list of fields that JSON should recognize for the given type. 77 | // The algorithm is breadth-first search over the set of structs to include - the top struct 78 | // and then any reachable anonymous structs. 79 | func typeFields(t reflect.Type) []field { 80 | // Anonymous fields to explore at the current level and the next. 81 | current := []field{} 82 | next := []field{{typ: t}} 83 | 84 | // Count of queued names for current level and the next. 85 | count := map[reflect.Type]int{} 86 | nextCount := map[reflect.Type]int{} 87 | 88 | // Types already visited at an earlier level. 89 | visited := map[reflect.Type]bool{} 90 | 91 | // Fields found. 92 | var fields []field 93 | 94 | for len(next) > 0 { 95 | current, next = next, current[:0] 96 | count, nextCount = nextCount, map[reflect.Type]int{} 97 | 98 | for _, f := range current { 99 | if visited[f.typ] { 100 | continue 101 | } 102 | visited[f.typ] = true 103 | 104 | // Scan f.typ for fields to include. 105 | for i := 0; i < f.typ.NumField(); i++ { 106 | sf := f.typ.Field(i) 107 | if sf.PkgPath != "" { // unexported 108 | continue 109 | } 110 | tag := sf.Tag.Get("yaml") 111 | if tag == "-" { 112 | continue 113 | } 114 | name, opts := parseTag(tag) 115 | if !isValidTag(name) { 116 | name = "" 117 | } 118 | index := make([]int, len(f.index)+1) 119 | copy(index, f.index) 120 | index[len(f.index)] = i 121 | 122 | ft := sf.Type 123 | if ft.Name() == "" && ft.Kind() == reflect.Ptr { 124 | // Follow pointer. 
125 | ft = ft.Elem() 126 | } 127 | 128 | // Record found field and index sequence. 129 | if name != "" || !sf.Anonymous || ft.Kind() != reflect.Struct { 130 | tagged := name != "" 131 | if name == "" { 132 | name = sf.Name 133 | } 134 | fields = append(fields, field{name, tagged, index, ft, 135 | opts.Contains("omitempty"), opts.Contains("flow")}) 136 | if count[f.typ] > 1 { 137 | // If there were multiple instances, add a second, 138 | // so that the annihilation code will see a duplicate. 139 | // It only cares about the distinction between 1 or 2, 140 | // so don't bother generating any more copies. 141 | fields = append(fields, fields[len(fields)-1]) 142 | } 143 | continue 144 | } 145 | 146 | // Record new anonymous struct to explore in next round. 147 | nextCount[ft]++ 148 | if nextCount[ft] == 1 { 149 | next = append(next, field{name: ft.Name(), index: index, typ: ft}) 150 | } 151 | } 152 | } 153 | } 154 | 155 | sort.Sort(byName(fields)) 156 | 157 | // Delete all fields that are hidden by the Go rules for embedded fields, 158 | // except that fields with JSON tags are promoted. 159 | 160 | // The fields are sorted in primary order of name, secondary order 161 | // of field index length. Loop over names; for each name, delete 162 | // hidden fields by choosing the one dominant field that survives. 163 | out := fields[:0] 164 | for advance, i := 0, 0; i < len(fields); i += advance { 165 | // One iteration per name. 166 | // Find the sequence of fields with the name of this first field. 
167 | fi := fields[i] 168 | name := fi.name 169 | for advance = 1; i+advance < len(fields); advance++ { 170 | fj := fields[i+advance] 171 | if fj.name != name { 172 | break 173 | } 174 | } 175 | if advance == 1 { // Only one field with this name 176 | out = append(out, fi) 177 | continue 178 | } 179 | dominant, ok := dominantField(fields[i : i+advance]) 180 | if ok { 181 | out = append(out, dominant) 182 | } 183 | } 184 | 185 | fields = out 186 | sort.Sort(byIndex(fields)) 187 | 188 | return fields 189 | } 190 | 191 | // dominantField looks through the fields, all of which are known to 192 | // have the same name, to find the single field that dominates the 193 | // others using Go's embedding rules, modified by the presence of 194 | // JSON tags. If there are multiple top-level fields, the boolean 195 | // will be false: This condition is an error in Go and we skip all 196 | // the fields. 197 | func dominantField(fields []field) (field, bool) { 198 | // The fields are sorted in increasing index-length order. The winner 199 | // must therefore be one with the shortest index length. Drop all 200 | // longer entries, which is easy: just truncate the slice. 201 | length := len(fields[0].index) 202 | tagged := -1 // Index of first tagged field. 203 | for i, f := range fields { 204 | if len(f.index) > length { 205 | fields = fields[:i] 206 | break 207 | } 208 | if f.tag { 209 | if tagged >= 0 { 210 | // Multiple tagged fields at the same level: conflict. 211 | // Return no field. 212 | return field{}, false 213 | } 214 | tagged = i 215 | } 216 | } 217 | if tagged >= 0 { 218 | return fields[tagged], true 219 | } 220 | // All remaining fields have the same length. If there's more than one, 221 | // we have a conflict (two fields named "X" at the same level) and we 222 | // return no field. 
223 | if len(fields) > 1 { 224 | return field{}, false 225 | } 226 | return fields[0], true 227 | } 228 | 229 | var fieldCache struct { 230 | sync.RWMutex 231 | m map[reflect.Type][]field 232 | } 233 | 234 | // cachedTypeFields is like typeFields but uses a cache to avoid repeated work. 235 | func cachedTypeFields(t reflect.Type) []field { 236 | fieldCache.RLock() 237 | f := fieldCache.m[t] 238 | fieldCache.RUnlock() 239 | if f != nil { 240 | return f 241 | } 242 | 243 | // Compute fields without lock. 244 | // Might duplicate effort but won't hold other computations back. 245 | f = typeFields(t) 246 | if f == nil { 247 | f = []field{} 248 | } 249 | 250 | fieldCache.Lock() 251 | if fieldCache.m == nil { 252 | fieldCache.m = map[reflect.Type][]field{} 253 | } 254 | fieldCache.m[t] = f 255 | fieldCache.Unlock() 256 | return f 257 | } 258 | 259 | // tagOptions is the string following a comma in a struct field's "json" 260 | // tag, or the empty string. It does not include the leading comma. 261 | type tagOptions string 262 | 263 | func isValidTag(s string) bool { 264 | if s == "" { 265 | return false 266 | } 267 | for _, c := range s { 268 | switch { 269 | case strings.ContainsRune("!#$%&()*+-./:<=>?@[]^_{|}~ ", c): 270 | // Backslash and quote chars are reserved, but 271 | // otherwise any punctuation chars are allowed 272 | // in a tag name. 
273 | default: 274 | if !unicode.IsLetter(c) && !unicode.IsDigit(c) { 275 | return false 276 | } 277 | } 278 | } 279 | return true 280 | } 281 | 282 | func fieldByIndex(v reflect.Value, index []int) reflect.Value { 283 | for _, i := range index { 284 | if v.Kind() == reflect.Ptr { 285 | if v.IsNil() { 286 | return reflect.Value{} 287 | } 288 | v = v.Elem() 289 | } 290 | v = v.Field(i) 291 | } 292 | return v 293 | } 294 | 295 | func typeByIndex(t reflect.Type, index []int) reflect.Type { 296 | for _, i := range index { 297 | if t.Kind() == reflect.Ptr { 298 | t = t.Elem() 299 | } 300 | t = t.Field(i).Type 301 | } 302 | return t 303 | } 304 | 305 | // stringValues is a slice of reflect.Value holding *reflect.StringValue. 306 | // It implements the methods to sort by string. 307 | type stringValues []reflect.Value 308 | 309 | func (sv stringValues) Len() int { return len(sv) } 310 | func (sv stringValues) Swap(i, j int) { sv[i], sv[j] = sv[j], sv[i] } 311 | func (sv stringValues) Less(i, j int) bool { return sv.get(i) < sv.get(j) } 312 | func (sv stringValues) get(i int) string { return sv[i].String() } 313 | 314 | // parseTag splits a struct field's json tag into its name and 315 | // comma-separated options. 316 | func parseTag(tag string) (string, tagOptions) { 317 | if idx := strings.Index(tag, ","); idx != -1 { 318 | return tag[:idx], tagOptions(tag[idx+1:]) 319 | } 320 | return tag, tagOptions("") 321 | } 322 | 323 | // Contains reports whether a comma-separated list of options 324 | // contains a particular substr flag. substr must be surrounded by a 325 | // string boundary or commas. 
326 | func (o tagOptions) Contains(optionName string) bool { 327 | if len(o) == 0 { 328 | return false 329 | } 330 | s := string(o) 331 | for s != "" { 332 | var next string 333 | i := strings.Index(s, ",") 334 | if i >= 0 { 335 | s, next = s[:i], s[i+1:] 336 | } 337 | if s == optionName { 338 | return true 339 | } 340 | s = next 341 | } 342 | return false 343 | } 344 | -------------------------------------------------------------------------------- /vendor/github.com/cloudfoundry-incubator/candiedyaml/writer.go: -------------------------------------------------------------------------------- 1 | /* 2 | Licensed under the Apache License, Version 2.0 (the "License"); 3 | you may not use this file except in compliance with the License. 4 | You may obtain a copy of the License at 5 | 6 | http://www.apache.org/licenses/LICENSE-2.0 7 | 8 | Unless required by applicable law or agreed to in writing, software 9 | distributed under the License is distributed on an "AS IS" BASIS, 10 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 11 | See the License for the specific language governing permissions and 12 | limitations under the License. 13 | */ 14 | 15 | package candiedyaml 16 | 17 | /* 18 | * Set the writer error and return 0. 19 | */ 20 | 21 | func yaml_emitter_set_writer_error(emitter *yaml_emitter_t, problem string) bool { 22 | emitter.error = yaml_WRITER_ERROR 23 | emitter.problem = problem 24 | 25 | return false 26 | } 27 | 28 | /* 29 | * Flush the output buffer. 30 | */ 31 | 32 | func yaml_emitter_flush(emitter *yaml_emitter_t) bool { 33 | if emitter.write_handler == nil { 34 | panic("Write handler must be set") /* Write handler must be set. */ 35 | } 36 | if emitter.encoding == yaml_ANY_ENCODING { 37 | panic("Encoding must be set") /* Output encoding must be set. */ 38 | } 39 | 40 | /* Check if the buffer is empty. 
*/ 41 | 42 | if emitter.buffer_pos == 0 { 43 | return true 44 | } 45 | 46 | /* If the output encoding is UTF-8, we don't need to recode the buffer. */ 47 | 48 | if emitter.encoding == yaml_UTF8_ENCODING { 49 | if err := emitter.write_handler(emitter, 50 | emitter.buffer[:emitter.buffer_pos]); err != nil { 51 | return yaml_emitter_set_writer_error(emitter, "write error: "+err.Error()) 52 | } 53 | emitter.buffer_pos = 0 54 | return true 55 | } 56 | 57 | /* Recode the buffer into the raw buffer. */ 58 | 59 | var low, high int 60 | if emitter.encoding == yaml_UTF16LE_ENCODING { 61 | low, high = 0, 1 62 | } else { 63 | high, low = 1, 0 64 | } 65 | 66 | pos := 0 67 | for pos < emitter.buffer_pos { 68 | 69 | /* 70 | * See the "reader.c" code for more details on UTF-8 encoding. Note 71 | * that we assume that the buffer contains a valid UTF-8 sequence. 72 | */ 73 | 74 | /* Read the next UTF-8 character. */ 75 | 76 | octet := emitter.buffer[pos] 77 | 78 | var w int 79 | var value rune 80 | switch { 81 | case octet&0x80 == 0x00: 82 | w, value = 1, rune(octet&0x7F) 83 | case octet&0xE0 == 0xC0: 84 | w, value = 2, rune(octet&0x1F) 85 | case octet&0xF0 == 0xE0: 86 | w, value = 3, rune(octet&0x0F) 87 | case octet&0xF8 == 0xF0: 88 | w, value = 4, rune(octet&0x07) 89 | } 90 | 91 | for k := 1; k < w; k++ { 92 | octet = emitter.buffer[pos+k] 93 | value = (value << 6) + (rune(octet) & 0x3F) 94 | } 95 | 96 | pos += w 97 | 98 | /* Write the character. */ 99 | 100 | if value < 0x10000 { 101 | var b [2]byte 102 | b[high] = byte(value >> 8) 103 | b[low] = byte(value & 0xFF) 104 | emitter.raw_buffer = append(emitter.raw_buffer, b[0], b[1]) 105 | } else { 106 | /* Write the character using a surrogate pair (check "reader.c"). 
*/ 107 | 108 | var b [4]byte 109 | value -= 0x10000 110 | b[high] = byte(0xD8 + (value >> 18)) 111 | b[low] = byte((value >> 10) & 0xFF) 112 | b[high+2] = byte(0xDC + ((value >> 8) & 0xFF)) 113 | b[low+2] = byte(value & 0xFF) 114 | emitter.raw_buffer = append(emitter.raw_buffer, b[0], b[1], b[2], b[3]) 115 | } 116 | } 117 | 118 | /* Write the raw buffer. */ 119 | 120 | // Write the raw buffer. 121 | if err := emitter.write_handler(emitter, emitter.raw_buffer); err != nil { 122 | return yaml_emitter_set_writer_error(emitter, "write error: "+err.Error()) 123 | } 124 | 125 | emitter.buffer_pos = 0 126 | emitter.raw_buffer = emitter.raw_buffer[:0] 127 | return true 128 | } 129 | -------------------------------------------------------------------------------- /vendor/github.com/cloudfoundry-incubator/candiedyaml/yaml_definesh.go: -------------------------------------------------------------------------------- 1 | /* 2 | Licensed under the Apache License, Version 2.0 (the "License"); 3 | you may not use this file except in compliance with the License. 4 | You may obtain a copy of the License at 5 | 6 | http://www.apache.org/licenses/LICENSE-2.0 7 | 8 | Unless required by applicable law or agreed to in writing, software 9 | distributed under the License is distributed on an "AS IS" BASIS, 10 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 11 | See the License for the specific language governing permissions and 12 | limitations under the License. 13 | */ 14 | 15 | package candiedyaml 16 | 17 | const ( 18 | yaml_VERSION_MAJOR = 0 19 | yaml_VERSION_MINOR = 1 20 | yaml_VERSION_PATCH = 6 21 | yaml_VERSION_STRING = "0.1.6" 22 | ) 23 | --------------------------------------------------------------------------------