├── .babelrc ├── .editorconfig ├── .eslintrc ├── .gitignore ├── .nvmrc ├── .ruby-version ├── .travis.yml ├── AUTHORS ├── Buildfile ├── Gemfile ├── Gemfile.lock ├── LICENSE ├── README.md ├── Rakefile ├── cookbook ├── .kitchen.yml ├── .rubocop.yml ├── README.md ├── attributes │ └── default.rb ├── files │ └── default │ │ └── propsd_ohai_plugin.rb ├── libraries │ └── helpers.rb ├── metadata.rb ├── recipes │ ├── default.rb │ ├── nodejs.rb │ ├── ohai_plugin.rb │ └── test.rb └── templates │ └── default │ ├── json.erb │ ├── systemd.service.erb │ └── upstart.conf.erb ├── cortex.yaml ├── docs ├── class-storage.md ├── class-stringtemplate.md ├── getting-started │ ├── README.md │ ├── configuration.md │ ├── installation.md │ └── usage.md ├── http-api.md ├── schemas.md └── sources │ ├── s3 │ ├── class-source-s3-agent.md │ ├── class-source-s3-store.md │ └── class-source-s3.md │ ├── source-interface.md │ ├── source-metadata.md │ └── source-s3.md ├── jsconfig.json ├── package.json ├── src ├── bin │ └── server.js ├── config │ ├── defaults.json │ └── dev.json └── lib │ ├── control │ └── v1 │ │ ├── conqueso.js │ │ ├── core.js │ │ └── properties.js │ ├── logger.js │ ├── properties.js │ ├── properties │ ├── layer.js │ └── view.js │ ├── source │ ├── common.js │ ├── consul.js │ ├── consul │ │ └── parser.js │ ├── metadata.js │ ├── metadata │ │ ├── parser.js │ │ └── util.js │ ├── s3.js │ ├── tags.js │ └── tags │ │ └── parser.js │ ├── sources.js │ ├── sources │ ├── comparator.js │ └── iindex.js │ ├── string-template.js │ ├── transformers │ ├── tokend-client.js │ └── tokend.js │ └── util │ ├── index.js │ ├── metadata-client.js │ └── status-codes.js ├── test ├── .eslintrc ├── bin │ ├── .eslintrc │ ├── conqueso-diff │ ├── get-metadata │ ├── metadata-server │ ├── munge-consul │ ├── parse-matadata │ ├── s3-server.js │ └── version.js ├── conqueso-api-v1.js ├── consul.js ├── core-api-v1.js ├── data │ ├── config.json │ ├── consul-catalog-services.json │ ├── consul-checks.json │ ├── 
consul-health-service.json │ ├── consul-nodes.json │ ├── metadata-paths.json │ ├── metadata-values.json │ └── s3 │ │ ├── account │ │ └── 12345.json │ │ ├── ami-bcbffad6.json │ │ ├── global.json │ │ ├── index.json │ │ └── role │ │ └── fake-fake.json ├── lib │ ├── helpers.js │ └── stub │ │ ├── consul.js │ │ └── source.js ├── logger.js ├── metadata.js ├── properties.js ├── s3.js ├── source-common.js ├── sources.js ├── string-template.js ├── tags.js ├── tokend-client.js ├── tokend-transformer.js ├── util.js └── utils │ └── s3-stub.js └── yarn.lock /.babelrc: -------------------------------------------------------------------------------- 1 | { 2 | "presets": [ 3 | ["env", { 4 | "targets": { 5 | "node": "4" 6 | } 7 | }], 8 | "stage-2" 9 | ], 10 | "plugins": [ 11 | ["transform-runtime", { 12 | "polyfill": false 13 | }], 14 | "transform-decorators-legacy", 15 | "add-module-exports" 16 | ] 17 | } 18 | -------------------------------------------------------------------------------- /.editorconfig: -------------------------------------------------------------------------------- 1 | # EditorConfig is awesome: http://EditorConfig.org 2 | 3 | # top-most EditorConfig file 4 | root = true 5 | 6 | # Unix-style newlines with a newline ending every file 7 | [*] 8 | end_of_line = lf 9 | insert_final_newline = true 10 | indent_style = space 11 | indent_size = 2 12 | tab_width = 2 13 | continuation_indent_size = 4 14 | charset = utf-8 15 | trim_trailing_whitespace = true 16 | insert_final_newline = true 17 | max_line_length = 120 18 | 19 | 20 | # Matches multiple files with brace expansion notation 21 | # Set default charset 22 | [*.js] 23 | quote_type = single 24 | curly_bracket_next_line = false 25 | spaces_around_operators = true 26 | spaces_around_brackets = none 27 | indent_brace_style = BSD KNF 28 | 29 | 30 | [*.html] 31 | quote_type = double 32 | -------------------------------------------------------------------------------- /.eslintrc: 
-------------------------------------------------------------------------------- 1 | { 2 | "extends": "rapid7/node", 3 | "globals": { 4 | "Config": true, 5 | "Log": true 6 | }, 7 | "rules": { 8 | "strict": [0], 9 | "arrow-body-style": [2, "as-needed"] 10 | } 11 | } 12 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Logs 2 | logs 3 | *.log 4 | npm-debug.log* 5 | 6 | # Runtime data 7 | pids 8 | *.pid 9 | *.seed 10 | 11 | # Directory for instrumented libs generated by jscoverage/JSCover 12 | lib-cov 13 | 14 | # Directory for generated packages 15 | pkg 16 | vendor 17 | 18 | # Coverage directory used by tools like istanbul 19 | coverage 20 | 21 | # Grunt intermediate storage (http://gruntjs.com/creating-plugins#storing-task-files) 22 | .grunt 23 | 24 | # node-waf configuration 25 | .lock-wscript 26 | 27 | # Compiled binary addons (http://nodejs.org/api/addons.html) 28 | build/Release 29 | 30 | # Dependency directory 31 | # https://docs.npmjs.com/misc/faq#should-i-check-my-node-modules-folder-into-git 32 | node_modules 33 | 34 | # Optional npm cache directory 35 | .npm 36 | 37 | # Optional REPL history 38 | .node_repl_history 39 | .bundle/ 40 | .builderator/ 41 | .idea/ 42 | 43 | cookbook/metadata.json 44 | 45 | # Build artifacts 46 | Berksfile 47 | Berksfile.lock 48 | *.tar.gz 49 | *.tgz 50 | .ruby-gemset 51 | npm-shrinkwrap.json 52 | cookbook/.kitchen/ 53 | cookbook/.kitchen.local.yml 54 | dist/ 55 | src/version.json 56 | -------------------------------------------------------------------------------- /.nvmrc: -------------------------------------------------------------------------------- 1 | lts/carbon 2 | -------------------------------------------------------------------------------- /.ruby-version: -------------------------------------------------------------------------------- 1 | 2.6.3 2 | 
-------------------------------------------------------------------------------- /.travis.yml: -------------------------------------------------------------------------------- 1 | language: node_js 2 | node_js: 3 | - "8" 4 | sudo: false 5 | script: 6 | - npm run version 7 | - npm run transpile 8 | - npm run lint 9 | - npm run test 10 | after_success: 11 | npm run report-coverage 12 | -------------------------------------------------------------------------------- /AUTHORS: -------------------------------------------------------------------------------- 1 | David Greene 2 | Frank Mitchell 3 | John Manero 4 | Richard Van Buren 5 | -------------------------------------------------------------------------------- /Buildfile: -------------------------------------------------------------------------------- 1 | build_name 'propsd' 2 | 3 | autoversion.create_tags false 4 | autoversion.search_tags false 5 | 6 | cookbook.depends 'propsd' do |propsd| 7 | propsd.path './cookbook' 8 | end 9 | 10 | profile :default do |default| 11 | default.chef.run_list ['propsd::nodejs', 'propsd::default'] 12 | end 13 | 14 | profile :test do |test| 15 | test.chef.run_list 'propsd::test' 16 | end 17 | -------------------------------------------------------------------------------- /Gemfile: -------------------------------------------------------------------------------- 1 | source 'https://rubygems.org' 2 | 3 | gem 'rake', '~> 10.5' 4 | gem 'aws-sdk', '~> 2.2' 5 | gem 'fpm', '~> 1.6' 6 | gem 'octokit', '~> 4.0' 7 | gem 'mime-types', '~> 3.1' 8 | 9 | group :cookbook do 10 | gem 'builderator', '~> 1.0' 11 | gem 'test-kitchen' 12 | gem 'kitchen-vagrant' 13 | gem 'berkshelf', '~> 4.3' 14 | end 15 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Copyright (c) 2015 David Greene, Frank Mitchell, John Manero, Richard Van Buren, Rapid7 LLC. 
2 | 3 | MIT License 4 | =========== 5 | 6 | Permission is hereby granted, free of charge, to any person obtaining 7 | a copy of this software and associated documentation files (the 8 | "Software"), to deal in the Software without restriction, including 9 | without limitation the rights to use, copy, modify, merge, publish, 10 | distribute, sublicense, and/or sell copies of the Software, and to 11 | permit persons to whom the Software is furnished to do so, subject to 12 | the following conditions: 13 | 14 | The above copyright notice and this permission notice shall be 15 | included in all copies or substantial portions of the Software. 16 | 17 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, 18 | EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF 19 | MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND 20 | NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE 21 | LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION 22 | OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION 23 | WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Deprecated 2 | This repository is now deprecated. This project will not receive new changes from Rapid7. 3 | 4 | # Propsd 5 | [![Build Status][travis-image]][travis-url] [![Coverage Status][coveralls-image]][coveralls-url] 6 | 7 | Propsd does dynamic property management at scale, across thousands of servers 8 | and changes from hundreds of developers. 9 | 10 | We built Propsd with lessons learned from years of running [Conqueso][] on 11 | large scale systems. High availability is achieved by leveraging [Amazon S3][] 12 | to deliver properties and [Consul][] to handle service discovery. 
Composable 13 | layering lets you set properties for an organization, a single server, and 14 | everything in between. Plus, flat file storage makes backups and audits 15 | a breeze. 16 | 17 | So if your [Conqueso][] server's starting to heat up or you just want an audit 18 | trail when things change, give Propsd a try. 19 | 20 | ## Features 21 | Propsd allows the user to supply an [index](https://github.com/rapid7/propsd/blob/master/docs/getting-started/usage.md#index-files) file which defines the layering of configuration sources. Propsd expects these configuration sources to be in json format. 22 | 23 | Propsd will serve up this layering of configuration (last in, first out) in combination with other services (including a given [Consul][] catalog) via HTTP to any requesting services. 24 | 25 | For example, if you have a configuration layering schema like this: 26 | 27 | ~~~text 28 | global 29 | |_global.json (foo = global) 30 | |_regional 31 | |_region-1 32 | |_region.json (foo = region-1) 33 | |_service 34 | |_service_name.json (foo = service_specific) 35 | ~~~ 36 | Propsd would return flattened configuration where the `service_name.json` value would win (netting a `foo` value of `service_specific`). 37 | 38 | Propsd can also consume from various source locations. This ranges from a Consul catalog, to local files, to remote S3 buckets. This feature helps the user package once and let the configuration source location contain the differences between environments. Said another way - with Propsd, you won't find yourself repackaging your software in order to move it through your environments. 39 | 40 | Propsd will regularly inspect and reload these configuration settings, including new layers, etc. - no restarts of Propsd required. 41 | 42 | ## Usage 43 | 44 | See the [getting started guide][gsg] for help installing, configuring, and 45 | using Propsd.
46 | 47 | ### Development 48 | 49 | To run Propsd locally without dependencies on the AWS Instance Metadata Service or AWS S3, you can run local test versions of both. Currently these services are only tested on OS X. 50 | 51 | The services listed below are wrapped up in an npm task which allows you to get up and running quickly. Simply clone the repository, run `npm install` and run `npm run dev-server`. That's it! 52 | 53 | #### Metadata Service 54 | 55 | To run the test metadata server: 56 | ~~~bash 57 | npm run metadata-server 58 | ~~~ 59 | 60 | The server will listen on `http://127.0.0.1:8080`. It can be used in the same way that you would use the AWS Instance Metadata Service (e.g. `curl http://127.0.0.1:8080/latest/meta-data/ami-id`). To specify the metadata service host and port that Propsd will use modify the config file: 61 | ~~~json 62 | { 63 | ... 64 | "metadata": { 65 | "host": "127.0.0.1:8080" 66 | } 67 | ... 68 | } 69 | ~~~ 70 | 71 | #### S3 Server 72 | To run the test S3 server: 73 | ~~~bash 74 | npm run s3-server -- -d 75 | ~~~ 76 | The `-d` or `--data` argument is required. If you need to specify different server options you can view the S3 server help by running `npm run s3-server -- --help`. 77 | 78 | Sample data ships with the Propsd project for the test suite to use but the same data can be served from the test server as well. Run the following command to start with test data: 79 | ~~~bash 80 | npm run s3-server -- -d test/data/s3 81 | ~~~ 82 | This will start the test server and serve S3 objects from the `test/data/s3` directory. Because of some peculiarities in the library being used for the server, you cannot live-edit the test data. Instead, you must shut down the server, edit the data, then restart it. 83 | 84 | The bucket the objects are served from is `propsd-` and the endpoint is the server IP address. So, for example, if you use the provided test data, the relevant section of the config file would be: 85 | ~~~json 86 | { 87 | ... 
88 | "index": { 89 | "endpoint": "http://127.0.0.1:4569", 90 | "path": "index.json", 91 | "interval": 30000 92 | } 93 | ... 94 | } 95 | ~~~ 96 | 97 | If an `endpoint` key is provided, the S3 source and its underlying client will assume that you are serving data from another location and will ignore the `region` key, if provided. 98 | 99 | ## Releasing 100 | To cut a release do the following: 101 | * [Bump the version][npm-version] 102 | * Build and upload a package 103 | * Create a new release on github.com 104 | 105 | This can be accomplished by running the following commands: 106 | ~~~bash 107 | $ npm version minor 108 | $ bundle exec rake default 109 | ~~~ 110 | To be able to create a new release on [github.com], you must have the following environment variables set: 111 | * `GITHUB_CLIENT_ID` 112 | * `GITHUB_CLIENT_TOKEN` 113 | 114 | and the user and token must have the appropriate permissions in this repository. 115 | 116 | [Node.js]: https://nodejs.org/en/ 117 | [http-api]: docs/http-api.md 118 | [travis-image]: https://travis-ci.org/rapid7/propsd.svg?branch=master 119 | [travis-url]: https://travis-ci.org/rapid7/propsd 120 | [coveralls-image]: https://coveralls.io/repos/rapid7/propsd/badge.svg?branch=master&service=github 121 | [coveralls-url]: https://coveralls.io/github/rapid7/propsd?branch=master 122 | [npm-version]: https://docs.npmjs.com/cli/version 123 | [Conqueso]: https://github.com/rapid7/conqueso 124 | [Consul]: https://www.consul.io/ 125 | [Amazon S3]: https://aws.amazon.com/s3/ 126 | [gsg]: ./docs/getting-started/ 127 | [github.com]: https://github.com 128 | -------------------------------------------------------------------------------- /Rakefile: -------------------------------------------------------------------------------- 1 | require 'json' 2 | require 'fileutils' 3 | require 'mkmf' 4 | require 'aws-sdk' 5 | require 'logger' 6 | require 'rake/clean' 7 | require 'octokit' 8 | 9 | include FileUtils 10 | 11 | CLIENT_ID =
ENV['GITHUB_CLIENT_ID'] 12 | CLIENT_TOKEN = ENV['GITHUB_CLIENT_TOKEN'] 13 | ARTIFACT_BUCKET = ENV['ARTIFACT_BUCKET'] 14 | 15 | def yarn_exists? 16 | @yarn_exists ||= system('which yarn > /dev/null') 17 | end 18 | 19 | def package_json 20 | @package_json ||= JSON.parse(File.read('package.json')) 21 | end 22 | 23 | def version 24 | package_json['version'] 25 | end 26 | 27 | def name 28 | package_json['name'] 29 | end 30 | 31 | def description 32 | package_json['description'] 33 | end 34 | 35 | def license 36 | package_json['license'] 37 | end 38 | 39 | def homepage 40 | package_json['homepage'] 41 | end 42 | 43 | def repo 44 | package_json['repository']['url'].sub('.git', '') 45 | end 46 | 47 | def target_version 48 | `node --version`.strip.delete('v') 49 | end 50 | 51 | def max_version 52 | target_version.split('.').first.to_f + 1 53 | end 54 | 55 | def install_dir 56 | ::File.join('pkg', 'opt', "#{name}-#{version}") 57 | end 58 | 59 | def config_dir 60 | ::File.join(install_dir, 'config') 61 | end 62 | 63 | def pkg_dir 64 | ::File.join(base_dir, 'pkg') 65 | end 66 | 67 | def base_dir 68 | @base_dir ||= File.dirname(File.expand_path(__FILE__)) 69 | end 70 | 71 | def github_client 72 | @client unless @client.nil? 73 | @client = Octokit::Client.new(:client_id => CLIENT_ID, :access_token => CLIENT_TOKEN) 74 | end 75 | 76 | def github_repo 77 | @repo unless @repo.nil? 78 | @repo = Octokit::Repository.from_url(repo) 79 | end 80 | 81 | def install_packages(production) 82 | prod = production ? '--production' : '' 83 | bin = yarn_exists? ? 'yarn' : 'npm' 84 | 85 | sh "#{bin} install #{prod}".strip 86 | sh "#{bin} check #{prod}".strip if yarn_exists? && production 87 | 88 | # This is required because the conditional package bundles a devDependency 89 | # that bundles conditional and causes shrinkwrap to complain 90 | sh "#{bin} prune #{prod}".strip unless yarn_exists? 
91 | end 92 | 93 | task :install do 94 | install_packages(true) 95 | end 96 | 97 | task :install_dev do 98 | install_packages(false) 99 | end 100 | 101 | task :transpile do 102 | if yarn_exists? 103 | sh 'yarn run transpile' 104 | else 105 | sh 'npm run transpile' 106 | end 107 | end 108 | 109 | task :shrinkwrap => [:install_dev, :transpile, :install] do 110 | sh 'npm shrinkwrap' unless yarn_exists? 111 | end 112 | 113 | task :pack => [:shrinkwrap] do 114 | sh 'npm pack' 115 | end 116 | 117 | task :package_dirs do 118 | mkdir_p ::File.join(base_dir, install_dir) 119 | mkdir_p ::File.join(base_dir, config_dir) 120 | end 121 | 122 | task :source => [:install_dev, :transpile, :install] do 123 | ['dist/bin/', 'dist/lib/', 'node_modules/', 'LICENSE', 'dist/version.json'].each do |src| 124 | cp_r ::File.join(base_dir, src), ::File.join(base_dir, install_dir) 125 | end 126 | cp ::File.join(base_dir, 'dist', 'config', 'defaults.json'), ::File.join(base_dir, config_dir) 127 | end 128 | 129 | task :chdir_pkg => [:package_dirs] do 130 | cd pkg_dir 131 | end 132 | 133 | task :deb => [:chdir_pkg, :source] do 134 | command = [ 135 | 'bundle', 136 | 'exec', 137 | 'fpm', 138 | '--deb-no-default-config-files', 139 | "--deb-recommends \"nodejs >= #{target_version}\"", 140 | "--deb-recommends \"nodejs << #{max_version}\"", 141 | "--license \"#{license}\"", 142 | "--url \"#{homepage}\"", 143 | '--vendor Rapid7', 144 | '--maintainer Rapid7', 145 | "--description \"#{description}\"", 146 | '-s dir', 147 | '-t deb', 148 | "-n \"#{name}\"", 149 | "-v #{version}", 150 | 'opt/' 151 | ].join(' ') 152 | sh command 153 | end 154 | 155 | task :upload_packages do 156 | cd pkg_dir 157 | mkdir 'copy_to_s3' 158 | deb = Dir["#{name}_#{version}_*.deb"].first 159 | cp deb, 'copy_to_s3/' 160 | s3 = Aws::S3::Resource.new(region: 'us-east-1', logger: Logger.new(STDOUT)) 161 | Dir["copy_to_s3/**/#{name}*"].each do |package| 162 | upload_package = ::File.basename(package) 163 | s3.bucket(ARTIFACT_BUCKET) 
164 | .object("#{name}/#{upload_package}") 165 | .upload_file(package, :server_side_encryption => :AES256) 166 | end 167 | end 168 | 169 | desc "Release #{name} and prepare to create a release on github.com" 170 | task :release do 171 | puts 172 | puts "Create a new #{version} release on github.com and upload the #{name} tarball" 173 | puts 'You can find directions here: https://github.com/blog/1547-release-your-software' 174 | puts 'Make sure you add release notes!' 175 | 176 | cp ::File.join(base_dir, "#{name}-#{version}.tgz"), pkg_dir 177 | 178 | begin 179 | latest_release = github_client.latest_release(github_repo) 180 | rescue Octokit::NotFound 181 | latest_release = OpenStruct.new(name: 'master') 182 | end 183 | 184 | release = github_client.create_release( 185 | github_repo, 186 | "v#{version}", 187 | :name => "v#{version}", :draft => true 188 | ) 189 | 190 | [ 191 | ::File.join(pkg_dir, "#{name}-#{version}.tgz"), 192 | ::File.join(pkg_dir, "#{name}_#{version}_amd64.deb") 193 | ].each do |f| 194 | github_client.upload_asset(release.url, f) 195 | end 196 | puts "Draft release created at #{release.html_url}. Make sure you add release notes!" 
197 | compare_url = "#{github_repo.url}/compare/#{latest_release.name}...#{release.name}" 198 | puts "You can find a diff between this release and the previous one here: #{compare_url}" 199 | end 200 | 201 | desc "Package #{name}" 202 | task :package => [:install_dev, :transpile, :install, :shrinkwrap, :pack, :deb] 203 | 204 | CLEAN.include 'npm-shrinkwrap.json' 205 | CLEAN.include "#{name}-*.tgz" 206 | CLEAN.include 'pkg/' 207 | CLEAN.include '**/.DS_Store' 208 | CLEAN.include 'node_modules/' 209 | CLEAN.include 'dist/' 210 | 211 | task :default => [:clean, :package, :release] 212 | task :upload => [:clean, :package, :upload_packages] 213 | -------------------------------------------------------------------------------- /cookbook/.kitchen.yml: -------------------------------------------------------------------------------- 1 | --- 2 | driver: 3 | name: vagrant 4 | 5 | provisioner: 6 | name: chef_solo 7 | 8 | platforms: 9 | - name: ubuntu-14.04 10 | - name: ubuntu-16.04 11 | 12 | suites: 13 | - name: default 14 | run_list: 15 | - recipe[propsd::default] 16 | attributes: 17 | propsd: 18 | config: 19 | index: 20 | bucket: 'some.bucket.name.that.does.not.exist.com' 21 | -------------------------------------------------------------------------------- /cookbook/.rubocop.yml: -------------------------------------------------------------------------------- 1 | Metrics/AbcSize: 2 | Max: 48 3 | Metrics/CyclomaticComplexity: 4 | Max: 24 5 | Metrics/MethodLength: 6 | Max: 48 7 | Metrics/PerceivedComplexity: 8 | Max: 12 9 | 10 | Encoding: 11 | Enabled: false 12 | LineLength: 13 | Enabled: false 14 | HashSyntax: 15 | Enabled: false 16 | FileName: 17 | Enabled: false 18 | RescueModifier: 19 | Enabled: false 20 | SpaceInsideStringInterpolation: 21 | Enabled: false 22 | -------------------------------------------------------------------------------- /cookbook/README.md: -------------------------------------------------------------------------------- 1 | # Propsd Cookbook 2 | 3 | 
## Recipes 4 | 5 | ### default.rb 6 | 7 | Install and configure Propsd. You will need to override the `propsd-configuration` and the `props-service` to do anything useful; see the `test.rb` recipe for an example. 8 | 9 | ### ohai_plugin.rb 10 | 11 | Install Propsd Ohai plugin 12 | 13 | ## Attributes 14 | 15 | * `node['propsd']['user']` - User that owns the Propsd installation (default: `propsd`) 16 | * `node['propsd']['group']` - Group that owns the Propsd installation (default: `propsd`) 17 | * `node['propsd']['paths']['directory']` - The location of the Propsd installation (default: `/opt/propsd`) 18 | * `node['propsd']['paths']['configuration']` - The location of the Propsd configuration file (default: `/etc/propsd/config.json`) 19 | * `node['propsd']['ohai_plugin_path']` - Ohai Plugin Path; directory the Propsd plugin will get installed 20 | 21 | ## Usage 22 | 23 | Simply add `recipe[propsd::default]` to a run list. 24 | 25 | Additionally, to use the ohai plugin add `recipe[propsd::ohai_plugin]` to a run list.
26 | -------------------------------------------------------------------------------- /cookbook/attributes/default.rb: -------------------------------------------------------------------------------- 1 | 2 | default['propsd']['user'] = 'propsd' 3 | default['propsd']['group'] = 'propsd' 4 | 5 | default['propsd']['paths']['directory'] = '/opt/propsd' 6 | default['propsd']['paths']['executable'] = ::File.join(node['propsd']['paths']['directory'], 'bin/server.js') 7 | default['propsd']['paths']['configuration'] = '/etc/propsd/config.json' 8 | 9 | default['propsd']['config'] = Mash.new 10 | default['propsd']['version'] = nil 11 | default['propsd']['enable'] = true 12 | 13 | default['propsd']['ohai_plugin_path'] = nil 14 | -------------------------------------------------------------------------------- /cookbook/files/default/propsd_ohai_plugin.rb: -------------------------------------------------------------------------------- 1 | require 'json' 2 | 3 | Ohai.plugin(:Propsd) do 4 | provides 'propsd_plugin' 5 | 6 | PROPSD_HOST = 'localhost' unless defined?(PROPSD_HOST) 7 | PROPSD_PORT = 9100 unless defined?(PROPSD_PORT) 8 | 9 | def can_propsd_connect?(addr, port, timeout = 2) 10 | t = Socket.new(Socket::Constants::AF_INET, Socket::Constants::SOCK_STREAM, 0) 11 | saddr = Socket.pack_sockaddr_in(port, addr) 12 | connected = false 13 | 14 | begin 15 | t.connect_nonblock(saddr) 16 | rescue Errno::EINPROGRESS 17 | r, w, e = IO.select(nil, [t], nil, timeout) 18 | if !w.nil? 19 | connected = true 20 | else 21 | begin 22 | t.connect_nonblock(saddr) 23 | rescue Errno::EISCONN 24 | t.close 25 | connected = true 26 | rescue SystemCallError 27 | end 28 | end 29 | rescue SystemCallError 30 | end 31 | Ohai::Log.debug("can_propsd_connect? 
== #{connected}") 32 | connected 33 | end 34 | 35 | def get_properties 36 | response = http_client.get('/v1/properties') 37 | if response.code != "200" 38 | raise 'Unable to get properties from propsd' 39 | else 40 | props = response.body 41 | props = ::JSON.parse(props) 42 | end 43 | rescue JSON::JSONError 44 | raise 'Error parsing JSON properties' 45 | rescue StandardError 46 | raise 'Error connecting to propsd' 47 | end 48 | 49 | def http_client 50 | Net::HTTP.start(PROPSD_HOST, PROPSD_PORT).tap { |h| h.read_timeout = 30 } 51 | end 52 | 53 | collect_data(:default) do 54 | propsd_plugin Mash.new 55 | if can_propsd_connect?(PROPSD_HOST, PROPSD_PORT) 56 | props = get_properties 57 | props.each_pair do |k,v| 58 | propsd_plugin[k] = v 59 | end 60 | end 61 | end 62 | end 63 | -------------------------------------------------------------------------------- /cookbook/libraries/helpers.rb: -------------------------------------------------------------------------------- 1 | class Chef::Recipe 2 | ## 3 | # Allow a recipe to find its cookbook's version 4 | ## 5 | def cookbook_version 6 | run_context.cookbook_collection[cookbook_name].version 7 | end 8 | end 9 | 10 | ## 11 | # Version and Download URI helpers 12 | ## 13 | module Propsd 14 | module Helpers 15 | class << self 16 | def github_download(owner, repo, version) 17 | "https://github.com/#{owner}/#{repo}/releases/download/v#{version}/propsd_#{version}_amd64.deb" 18 | end 19 | end 20 | end 21 | end 22 | -------------------------------------------------------------------------------- /cookbook/metadata.rb: -------------------------------------------------------------------------------- 1 | ## Use package.json as the source of truth 2 | project_path = ::File.expand_path('../../', __FILE__) 3 | package_dot_json = JSON.parse(IO.read(::File.join(project_path, 'package.json'))) 4 | 5 | name package_dot_json['name'] 6 | description 'Install and configure https://github.com/rapid7/propsd' 7 | 8 | maintainer 'Rapid7 Inc.' 
9 | maintainer_email 'coreservices@rapid7.com' 10 | 11 | issues_url package_dot_json['bugs']['url'] 12 | source_url package_dot_json['homepage'] 13 | 14 | license package_dot_json.fetch('license', 'MIT License, 2017') 15 | long_description IO.read(::File.join(project_path, 'README.md')) rescue '' 16 | version package_dot_json.fetch('version', '0.0.1') 17 | 18 | depends 'nodejs' 19 | depends 'ohai', '~> 4.2' 20 | depends 'ark', '~> 3.0.0' 21 | -------------------------------------------------------------------------------- /cookbook/recipes/default.rb: -------------------------------------------------------------------------------- 1 | # 2 | # Cookbook Name:: propsd 3 | # Recipe:: default 4 | # 5 | # Copyright (C) 2017 Rapid7 LLC. 6 | # 7 | # Distributed under terms of the MIT License. All rights not explicitly granted 8 | # in the MIT license are reserved. See the included LICENSE file for more details. 9 | # 10 | 11 | node.default['propsd']['version'] = cookbook_version 12 | 13 | group node['propsd']['group'] do 14 | system true 15 | end 16 | 17 | user node['propsd']['user'] do 18 | comment 'propsd operator' 19 | system true 20 | 21 | gid node['propsd']['group'] 22 | home node['propsd']['paths']['directory'] 23 | end 24 | 25 | ## Fetch and install propsd 26 | remote_file 'propsd' do 27 | source Propsd::Helpers.github_download('rapid7', 'propsd', node['propsd']['version']) 28 | path ::File.join(Chef::Config['file_cache_path'], "propsd-#{node['propsd']['version']}.deb") 29 | 30 | action :create_if_missing 31 | backup false 32 | end 33 | 34 | version_dir = "#{ node['propsd']['paths']['directory'] }-#{ node['propsd']['version'] }" 35 | 36 | package 'propsd' do 37 | source resources('remote_file[propsd]').path 38 | provider Chef::Provider::Package::Dpkg 39 | version node['propsd']['version'] 40 | 41 | notifies :create, "link[#{node['propsd']['paths']['directory']}]", :immediately 42 | end 43 | 44 | ## Symlink the version dir to the specified propsd directory 45 | link 
node['propsd']['paths']['directory'] do 46 | to version_dir 47 | 48 | action :nothing 49 | notifies :restart, 'service[propsd]' if node['propsd']['enable'] 50 | end 51 | 52 | if Chef::VersionConstraint.new("> 14.04").include?(node['platform_version']) 53 | service_script_path = '/etc/systemd/system/propsd.service' 54 | service_script = 'systemd.service.erb' 55 | service_provider = Chef::Provider::Service::Systemd 56 | else 57 | service_script_path = '/etc/init/propsd.conf' 58 | service_script = 'upstart.conf.erb' 59 | service_provider = Chef::Provider::Service::Upstart 60 | end 61 | 62 | # Set service script 63 | template service_script_path do 64 | source service_script 65 | variables( 66 | :description => 'propsd configuration service', 67 | :user => node['propsd']['user'], 68 | :executable => node['propsd']['paths']['executable'], 69 | :flags => [ 70 | "-c #{node['propsd']['paths']['configuration']}" 71 | ] 72 | ) 73 | end 74 | 75 | directory 'propsd-configuration-directory' do 76 | path ::File.dirname(node['propsd']['paths']['configuration']) 77 | mode '0755' 78 | 79 | recursive true 80 | end 81 | 82 | template 'propsd-configuration' do 83 | path node['propsd']['paths']['configuration'] 84 | source 'json.erb' 85 | 86 | variables(:properties => node['propsd']['config']) 87 | notifies :restart, 'service[propsd]' if node['propsd']['enable'] 88 | end 89 | 90 | service 'propsd' do 91 | action node['propsd']['enable'] ? [:start, :enable] : [:stop, :disable] 92 | provider service_provider 93 | end 94 | -------------------------------------------------------------------------------- /cookbook/recipes/nodejs.rb: -------------------------------------------------------------------------------- 1 | # 2 | # Cookbook Name:: propsd 3 | # Recipe:: nodejs 4 | # 5 | # Copyright (C) 2017 Rapid7 LLC. 6 | # 7 | # Distributed under terms of the MIT License. All rights not explicitly granted 8 | # in the MIT license are reserved. See the included LICENSE file for more details. 
9 | # 10 | 11 | node.default['nodejs']['version'] = '4.8.2' 12 | node.default['nodejs']['binary']['checksum'] = '4d4a37f980bb2770c44d7123864650d0823bae696d7db09d9ed83028cab32fd3' 13 | 14 | include_recipe 'nodejs::nodejs_from_binary' 15 | -------------------------------------------------------------------------------- /cookbook/recipes/ohai_plugin.rb: -------------------------------------------------------------------------------- 1 | # 2 | # Cookbook Name:: propsd 3 | # Recipe:: ohai_plugin 4 | # 5 | # Copyright (C) 2017 Rapid7 LLC. 6 | # 7 | # Distributed under terms of the MIT License. All rights not explicitly granted 8 | # in the MIT license are reserved. See the included LICENSE file for more details. 9 | # 10 | 11 | ohai_plugin 'propsd_ohai_plugin' do 12 | path node['propsd']['ohai_plugin_path'] 13 | end 14 | -------------------------------------------------------------------------------- /cookbook/recipes/test.rb: -------------------------------------------------------------------------------- 1 | # 2 | # Cookbook Name:: propsd 3 | # Recipe:: test 4 | # 5 | # Copyright (C) 2017 Rapid7 LLC. 6 | # 7 | # Distributed under terms of the MIT License. All rights not explicitly granted 8 | # in the MIT license are reserved. See the included LICENSE file for more details. 
9 | # 10 | 11 | node.default['propsd']['config']['index']['bucket'] = 'my.test.bucket' 12 | 13 | include_recipe 'propsd::default' 14 | 15 | resources('service[propsd]').action([:start, :enable]) 16 | 17 | include_recipe "#{ cookbook_name}::ohai_plugin" 18 | 19 | ::Chef::Log.info("PROPSD PLUGIN -- #{node['propsd_plugin']}") 20 | -------------------------------------------------------------------------------- /cookbook/templates/default/json.erb: -------------------------------------------------------------------------------- 1 | <%= JSON.pretty_generate(@properties) %> 2 | -------------------------------------------------------------------------------- /cookbook/templates/default/systemd.service.erb: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=<%= @description %> 3 | 4 | [Service] 5 | ExecStart=<%= ([@executable] + @flags).join(' ') %> 6 | <% unless @user.nil? %>User=<%= @user %><% end %> 7 | <% unless @group.nil? %>Group=<%= @group %><% end %> 8 | Restart=on-abort 9 | 10 | [Install] 11 | WantedBy=multi-user.target 12 | -------------------------------------------------------------------------------- /cookbook/templates/default/upstart.conf.erb: -------------------------------------------------------------------------------- 1 | description "<%= @description %>" 2 | 3 | start on (local-filesystems and net-device-up IFACE!=lo) 4 | stop on runlevel [!12345] 5 | 6 | respawn 7 | respawn limit 10 5 8 | 9 | <% unless @user.nil? %>setuid <%= @user %><% end %> 10 | <% unless @group.nil? 
%>setgid <%= @group %><% end %> 11 | console log 12 | 13 | exec <%= ([@executable] + @flags).join(' ') %> 14 | -------------------------------------------------------------------------------- /cortex.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | info: 3 | title: Propsd 4 | description: Dynamic property management at scale 5 | x-cortex-git: 6 | github: 7 | alias: r7org 8 | repository: rapid7/propsd 9 | x-cortex-tag: propsd 10 | x-cortex-type: service 11 | x-cortex-groups: 12 | - infra 13 | - configuration 14 | x-cortex-domain-parents: 15 | - tag: pd-services 16 | openapi: 3.0.1 17 | servers: 18 | - url: "/" 19 | -------------------------------------------------------------------------------- /docs/class-storage.md: -------------------------------------------------------------------------------- 1 | Class: Storage 2 | ============== 3 | 4 | The `Storage` module is responsible for managing data sources and merging their properties into a single document in the correct order. In "Version 1", it will use a dedicated S3Watcher to fetch an index object from S3, and add/remove sources according to an array defined therein. 5 | 6 | _In a future version, the Storage module should be able to discover new sources from fetched objects, removing the need for an out-of-band definition of sources in an index. Instead, the Storage module would initialize its first source(s) from local configuration, and find additional sources recursively from their own contents._ 7 | 8 | ## Interface 9 | 10 | ### Class Attribute: `properties` 11 | 12 | ``` 13 | Storage.properties 14 | ``` 15 | 16 | The hash of merged properties from all sources. The value should be re-composed from sources' own properties whenever a source emits an `update` event.
17 | 18 | ### Class Attribute: `sources` 19 | 20 | ``` 21 | Storage.sources 22 | ``` 23 | 24 | An array of active Source instances 25 | 26 | ### Instance Method: `update()` 27 | 28 | The PluginManager calls the `update` method to notify the Storage class when Source instances have updated properties. 29 | 30 | ### Instance Method: `register(source)` 31 | 32 | The PluginManager registers Sources with the Storage class by calling the `register` method. The `register` method takes the source as an argument. Only registered sources' properties will be merged when `update` is called. 33 | 34 | ## Class: Storage.Index 35 | 36 | A scheme for defining an ordered set of sources to be managed by the Storage module. This scheme will parse a JSON array of Source definitions: 37 | 38 | ```json 39 | { 40 | "sources": [{ 41 | "name": "s3-data-source", 42 | "type": "s3", 43 | "bucket": "bucket name (optional, default: from local configuration)", 44 | "path": "path/to/source/object.json", 45 | "interval": "Number in ms (optional, default in local config)" 46 | }, { 47 | "name": "s3-account", 48 | "type": "s3", 49 | "path": "/account/{{ instance.account-id }}.json", 50 | "interval": 30000 51 | }, { 52 | "name": "s3-region", 53 | "type": "s3", 54 | "bucket": "non-default-bucket", 55 | "path": "/region/{{ instance.region }}.json" 56 | }, { 57 | "name": "s3-vpc", 58 | "type": "s3", 59 | "path": "/region/{{ instance.region }}/{{ instance.vpc-id }}.json" 60 | }, 61 | ... 62 | { 63 | "name": "s3-instance", 64 | "type": "s3", 65 | "path": "/region/{{ instance.region }}/{{ instance.vpc-id }}/this_is_an_example/{{ instance.instance-id }}.json" 66 | }] 67 | } 68 | ``` 69 | 70 | It should expose an array of Source objects, which becomes the value of `Storage.sources`.
71 | 72 | ### TODO: Complete when the Source interface has been defined 73 | -------------------------------------------------------------------------------- /docs/class-stringtemplate.md: -------------------------------------------------------------------------------- 1 | Class: StringTemplate 2 | ===================== 3 | 4 | Parse strings with embedded template values. 5 | 6 | ## Interface 7 | 8 | ### Constructor(template, scope) 9 | -------------------------------------------------------------------------------- /docs/getting-started/README.md: -------------------------------------------------------------------------------- 1 | # Getting started with Propsd # 2 | 3 | * [How to install Propsd](./installation.md) 4 | * [Installing from the Debian package](./installation.md#installing-from-the-debian-package) 5 | * [Installing from source](./installation.md#installing-from-source) 6 | * [How to configure Propsd](./configuration.md) 7 | * [Command-line Options](./configuration.md#command-line-options) 8 | * [Configuration Files](./configuration.md#configuration-files) 9 | * [Minimal Configuration File](./configuration.md#minimal-configuration-file) 10 | * [Default Configuration File](./configuration.md#default-configuration-file) 11 | * [Configuration Key Reference](./configuration.md#configuration-key-reference) 12 | * [Interpolated Properties](./configuration.md#interpolated-properties) 13 | * [How to use Propsd](./usage.md) 14 | * [Running Propsd](./usage.md#running-propsd) 15 | * [Stopping Propsd](./usage.md#stopping-propsd) 16 | * [Monitoring Propsd](./usage.md#monitoring-propsd) 17 | * [Index Files](./usage.md#index-files) 18 | * [Minimal Index File](./usage.md#minimal-index-file) 19 | * [Amazon S3 Source Key Reference](./usage.md#amazon-s3-source-key-reference) 20 | * [Amazon S3 Bucket Permissions](./usage.md#amazon-s3-bucket-permissions) 21 | * [Property Files](./usage.md#property-files) 22 | * [Minimal Properties File](./usage.md#minimal-properties-file) 23
| * [Property Files Key Reference](./usage.md#property-files-key-reference) 24 | -------------------------------------------------------------------------------- /docs/getting-started/configuration.md: -------------------------------------------------------------------------------- 1 | # How to configure Propsd # 2 | 3 | Configuration options for Propsd can be specified by providing a configuration 4 | file on the command-line. 5 | 6 | ## Command-line Options ## 7 | 8 | The options below are specified on the command-line. 9 | 10 | * `--config` - A configuration file to load. For more information on the format 11 | of this file, see the **Configuration Files** section. Only one configuration 12 | file can be specified. Multiple uses of this argument will result in only the 13 | last configuration file being read. 14 | 15 | ## Configuration Files ## 16 | 17 | Configuration files are JSON formatted. They are a single JSON object 18 | containing configuration values. 19 | 20 | ### Minimal Configuration File ### 21 | 22 | The configuration file below is the minimal settings that must be specified in 23 | order for Propsd to run. 24 | 25 | ~~~json 26 | { 27 | "index": { 28 | "bucket": "propsd.s3.amazonaws.com" 29 | } 30 | } 31 | ~~~ 32 | 33 | ### Default Configuration File ### 34 | 35 | The configuration file below is the default settings for Propsd. 36 | 37 | ~~~json 38 | { 39 | "service": { 40 | "hostname": "127.0.0.1", 41 | "port": 9100 42 | }, 43 | "log": { 44 | "level": "info" 45 | }, 46 | "index": { 47 | "path": "index.json", 48 | "interval": 30000, 49 | "region": "us-east-1" 50 | } 51 | } 52 | ~~~ 53 | 54 | ### Configuration Key Reference ### 55 | 56 | * `service` - These settings control the HTTP API. 57 | 58 | The following keys are available: 59 | 60 | * `hostname` - The address the HTTP API binds to. Defaults to "127.0.0.1". 61 | 62 | * `port` - The port the HTTP API listens on. Defaults to 9100. 63 | 64 | * `log` - These settings control logging. 
65 | 66 | Propsd treats logging as an event stream and logs to `stdout`. Logged events 67 | are formatted as JSON objects separated by newlines. If you need routing or 68 | storage of logs, you'll want to handle that outside Propsd. 69 | 70 | The following keys are available: 71 | 72 | * `level` - The level to log at. Valid values are "debug", "verbose", "info", 73 | "warn", and "error". Each log level encompasses all the ones below it. So 74 | "debug" is the most verbose and "error" is the least verbose. Defaults to 75 | "info". 76 | 77 | * `index` - These settings control the first file properties are read from. 78 | 79 | Propsd reads properties from files stored in Amazon S3. Property files are 80 | JSON documents. A single property file must be configured as an index that 81 | lists other property files to read. Property files are polled periodically for 82 | changes. This allows new property files to be read at run time without 83 | requiring a restart of Propsd. 84 | 85 | * `bucket` - The S3 bucket to read the index property file from. This has no 86 | default value and must be explicitly configured. 87 | 88 | * `region` - The AWS region where the S3 bucket is located. Defaults to 89 | "us-east-1". 90 | 91 | * `path` - The path in the S3 bucket to read as the index property file. 92 | Defaults to "index.json". 93 | 94 | * `interval` - The time in milliseconds to poll the index property file for 95 | changes. Defaults to 30000 (30 seconds). 96 | 97 | * `consul` - These settings control service discovery via [Consul][]. 98 | 99 | Propsd can use Consul for service discovery. Services registered with Consul 100 | show up in Propsd as properties that look like "conqueso.service.ips=127.0.0.1". 101 | IP addresses are comma separated and only services whose health checks are all 102 | passing will be reported. Consul is polled periodically for changes. This 103 | allows service discovery to happen without requiring a restart of Propsd. 
104 | 105 | * `host` - The host to connect to Consul on. Defaults to 127.0.0.1. 106 | 107 | * `port` - The HTTP port to connect to Consul on. Defaults to 8500. 108 | 109 | * `secure` - Whether to use HTTPS when connecting to Consul. Defaults to false 110 | and uses HTTP. 111 | 112 | * `interval` - The time in milliseconds to poll Consul for changes. Defaults 113 | to 60000 (60 seconds). 114 | 115 | * `properties` - An arbitrary JSON object for injecting values into the index. 116 | 117 | Propsd supports treating the index document as a template and injecting 118 | static properties into it. This can be useful for loading additional 119 | properties files on a per server basis. For more information on the format of 120 | properties, see the **Interpolated Properties** section. 121 | 122 | ## Interpolated Properties ## 123 | 124 | Propsd supports injecting static values defined in configuration files into the 125 | property documents read from S3. This provides a way to read instance specific 126 | properties. 127 | 128 | Suppose you have two configurations for metrics polling, fast and slow. Fast 129 | polls every thirty seconds and the configuration for it lives in 130 | a `metrics/fast.json` document in S3. Slow polls every five minutes, and the 131 | configuration for it lives in a `metrics/slow.json` document in S3. 132 | 133 | Interpolated properties let you configure Propsd to read either the fast or 134 | slow document. You start by adding a `{{speed}}` template parameter to your 135 | `index.json` document in S3. 136 | 137 | ~~~json 138 | { 139 | "version": 1.0, 140 | "sources": [{ 141 | "name": "metrics", 142 | "type": "s3", 143 | "parameters": { 144 | "path": "metrics/{{speed}}.json" 145 | } 146 | }] 147 | } 148 | ~~~ 149 | 150 | When Propsd reads the index template, it tries to replace `{{speed}}` with 151 | a value from in the `properties` key in the configuration file. So the 152 | configuration to read the "fast" document looks like this. 
153 | 154 | ~~~json 155 | { 156 | "properties": { 157 | "speed": "fast" 158 | } 159 | } 160 | ~~~ 161 | 162 | If the `properties:speed` key was configured as "slow", the `metrics/slow.json` 163 | document would be read instead. 164 | 165 | Interpolated properties in templated documents are enclosed in double curly 166 | braces: `{{` and `}}`. The value between the double curly braces is a key from 167 | the `properties` object. Nested keys within the `properties` object are 168 | accessed by separating the keys with colons. 169 | 170 | 171 | [Consul]: https://www.consul.io/ 172 | -------------------------------------------------------------------------------- /docs/getting-started/installation.md: -------------------------------------------------------------------------------- 1 | # How to install Propsd # 2 | 3 | [Releases of Propsd][releases] include both source tarballs and Debian 4 | packages. Debian and Ubuntu based Linux distributions can use the pre-built 5 | packages. Other operating systems should install Propsd from source. 6 | 7 | ## Installing from the Debian package ## 8 | 9 | Propsd runs on the 4.4.x LTS version of Node.js, so follow the [instructions 10 | for installing Node.js on Debian based systems][node-debian]. 11 | 12 | Download a pre-built Debian package of Propsd from [the releases 13 | page][releases] and save it. These instructions assume you've saved the package 14 | to `/tmp/propsd.deb`. 15 | 16 | Use `dpkg` to install Propsd. 17 | 18 | ~~~bash 19 | dpkg -i /tmp/propsd.deb 20 | ~~~ 21 | 22 | Propsd is installed into `/opt/propsd`. 23 | 24 | ## Installing from source ## 25 | 26 | Propsd runs on the 4.4.x LTS version of Node.js, so follow the [instructions 27 | for installing Node.js][node-source]. 28 | 29 | Download a tarball of the Propsd sources from [the releases page][releases] and 30 | save it. These instructions assume you've saved the tarball to 31 | `/tmp/propsd.tar.gz`. 32 | 33 | Create a new folder for Propsd. 
These instructions assume you're using 34 | `/opt` as that folder. 35 | 36 | ~~~bash 37 | mkdir /opt 38 | ~~~ 39 | 40 | Use `npm` to install Propsd. 41 | 42 | ~~~bash 43 | cd /opt 44 | npm install /tmp/propsd.tar.gz 45 | ~~~ 46 | 47 | Propsd is installed into `/opt/node_modules/propsd`. 48 | 49 | [releases]: https://github.com/rapid7/propsd/releases/latest 50 | [node-debian]: https://nodejs.org/en/download/package-manager/#debian-and-ubuntu-based-linux-distributions 51 | [node-source]: https://nodejs.org/en/download/ 52 | -------------------------------------------------------------------------------- /docs/getting-started/usage.md: -------------------------------------------------------------------------------- 1 | # How to use Propsd # 2 | 3 | The Propsd service is the core process in Propsd. It's responsible for 4 | providing the HTTP API and fetching properties from sources like Amazon S3 and 5 | Consul. Propsd is machine aware, and is designed to run on every server that 6 | needs to retrieve properties. 7 | 8 | ## Running Propsd ## 9 | 10 | The Propsd service is started by running the `bin/server.js` binary. The binary 11 | can be found in the folder where [Propsd is installed][installation]. The 12 | service blocks, running forever or until it's told to quit. The binary supports 13 | several [configuration options][configuration]. 
14 | 15 | When running Propsd you should see output similar to this: 16 | 17 | ~~~text 18 | {"level":"info","message":"Initializing index and metadata","timestamp":"2016-04-29T15:28:39.574Z"} 19 | {"source":"s3-propsd-s3-index.json","type":"s3","level":"info","message":"Initializing s3 source s3-propsd-s3-index.json","timestamp":"2016-04-29T15:28:39.577Z"} 20 | {"source":"ec2-metadata","type":"ec2-metadata","level":"info","message":"Initializing ec2-metadata source ec2-metadata","timestamp":"2016-04-29T15:28:39.590Z"} 21 | {"level":"info","message":"Listening on 127.0.0.1:9100","timestamp":"2016-04-29T15:28:39.602Z"} 22 | {"source":"s3-propsd-s3-index.json","type":"s3","level":"info","message":"Updated source s3-propsd-s3-index.json","timestamp":"2016-04-29T15:28:39.620Z"} 23 | {"source":"ec2-metadata","type":"ec2-metadata","level":"info","message":"Updated source ec2-metadata","timestamp":"2016-04-29T15:28:39.658Z"} 24 | {"source":"s3-propsd-s3-global.json","type":"s3","level":"info","message":"Initializing s3 source s3-propsd-s3-global.json","timestamp":"2016-04-29T15:28:39.659Z"} 25 | ~~~ 26 | 27 | ## Stopping Propsd ## 28 | 29 | Propsd can be stopped by sending it an interrupt signal. This is usually done 30 | by sending `Ctrl-C` from a terminal or by running `kill -INT $propsd_pid`. 31 | 32 | ## Monitoring Propsd ## 33 | 34 | Propsd provides two HTTP endpoints for monitoring its status. The first is 35 | a health endpoint that provides basic information about Propsd. Issue a GET 36 | request to `/v1/health` and you'll see output similar to this: 37 | 38 | ~~~json 39 | { 40 | "status": 200, 41 | "uptime": 3193957, 42 | "plugins": { 43 | "s3": 1, 44 | "consul": 1, 45 | }, 46 | "version": "1.2.5" 47 | } 48 | ~~~ 49 | 50 | The "status" attribute is the response code. Response codes from the health 51 | endpoint are compatible with [Consul's HTTP health checks][consul]. The 52 | "uptime" attribute is the number of milliseconds the service has been running. 
53 | The "plugins" attribute is a map from plugin type to the number of instances of 54 | the plugin that are running. The "version" attribute is the version of Propsd. 55 | 56 | The second endpoint is a status endpoint that provides detailed information 57 | about Propsd. Issue a GET request to `/v1/status` and you'll see output 58 | similar to this: 59 | 60 | ~~~json 61 | { 62 | "status": 200, 63 | "uptime": 18160502, 64 | "index": { 65 | "running": true, 66 | "name": "index", 67 | "type": "s3", 68 | "ok": true, 69 | "state": "RUNNING", 70 | "updated": "2016-06-10T14:53:08.453Z", 71 | "interval": 30000, 72 | "resource": "s3://bucket/index.json", 73 | "etag": "e81944e6e597d8e9e5db01b1cf9dfd7d" 74 | }, 75 | "sources": [ 76 | { 77 | "status": "okay", 78 | "interval": 60000, 79 | "updated": "2016-06-10T18:45:07.182Z", 80 | "state": "RUNNING", 81 | "ok": true, 82 | "type": "consul", 83 | "name": "consul" 84 | }, 85 | { 86 | "status": "okay", 87 | "name": "global", 88 | "type": "s3", 89 | "ok": true, 90 | "state": "RUNNING", 91 | "updated": "2016-06-10T14:53:09.613Z", 92 | "interval": 60000, 93 | "resource": "s3://bucket/global.json", 94 | "etag": "4856c7b6c749068ea986f23668a41c46" 95 | } 96 | ], 97 | "version": "1.2.6" 98 | } 99 | ~~~ 100 | 101 | The "status", "uptime", and "version" attributes match the ones from the health 102 | endpoint. The "index" attribute provides metadata about the index property file, 103 | such as the last time it was updated. The "sources" array provides metadata 104 | about each of the sources Propsd is reading properties from. 105 | 106 | ## Index Files ## 107 | 108 | The first file Propsd reads is called the index file. Index files are JSON 109 | formatted. They are a single JSON object containing a version identifier and 110 | a list of sources to read properties from. You must set [configuration 111 | options][configuration] to tell Propsd where to find the index file.
112 | 113 | ### Minimal Index File ### 114 | 115 | ~~~json 116 | { 117 | "version": "1.0", 118 | "sources": [{ 119 | "name": "global", 120 | "type": "s3", 121 | "parameters": { 122 | "path": "global.json" 123 | } 124 | }] 125 | } 126 | ~~~ 127 | 128 | The "version" attribute is required and must be "1.0". The "sources" attribute 129 | is an array of source objects to read properties from. Propsd supports reading 130 | properties from [Amazon S3][]. 131 | 132 | ### Amazon S3 Source Key Reference ### 133 | 134 | * `name` - An arbitrary string. Must be unique within all other sources of the 135 | same type. You'll see the name of the source in logs, so pick something 136 | meaningful. 137 | 138 | * `type` - Must be "s3" to configure a S3 source. 139 | 140 | * `parameters` - These settings control the S3 source. 141 | 142 | The following keys are available: 143 | 144 | * `path` - The path to the properties file in S3. 145 | 146 | * `bucket` - The bucket in S3 where the properties file is found. Defaults to 147 | the bucket where the index file was found. 148 | 149 | ### Amazon S3 Bucket Permissions ### 150 | 151 | Propsd reads from S3, so you need to configure your bucket to allow read 152 | access. The example bucket policy below shows how to grant read-only access to 153 | a separate AWS account. 154 | 155 | ~~~json 156 | { 157 | "Version": "2012-10-17", 158 | "Id": "propsd", 159 | "Statement": [{ 160 | "Sid": "read-only-for-XXXXXXXXXXXX", 161 | "Effect": "Allow", 162 | "Principal": { 163 | "AWS": "arn:aws:iam::XXXXXXXXXXXX:root" 164 | }, 165 | "Action": [ 166 | "s3:ListBucket", 167 | "s3:GetObject", 168 | "s3:GetBucketLocation" 169 | ], 170 | "Resource": [ 171 | "arn:aws:s3:::propsd.s3.amazonaws.com/*", 172 | "arn:aws:s3:::propsd.s3.amazonaws.com" 173 | ] 174 | }] 175 | } 176 | ~~~ 177 | 178 | See [Amazon's documentation around bucket policies and user 179 | policies][bucket-policies] for more details around controlling access to S3 180 | buckets. 
181 | 182 | ## Property Files ## 183 | 184 | Property files are JSON formatted. They are a single JSON object containing 185 | a version identifier and property values. Properties are read in the 186 | order they're defined in the index file. Property files with matching values 187 | overwrite those read before them. 188 | 189 | ### Minimal Properties File ### 190 | 191 | ~~~json 192 | { 193 | "version": "1.0", 194 | "properties": { 195 | "nodejs.version": "4.4.3" 196 | } 197 | } 198 | ~~~ 199 | 200 | ### Property Files Key Reference ### 201 | 202 | * `version` - A string that tells Propsd what version of the properties file 203 | it's reading. Must be "1.0". 204 | 205 | * `properties` - A JSON object of properties. 206 | 207 | Keys with string, number and boolean values are converted directly to Java 208 | properties. Nested JSON objects are flattened, with their keys separated by 209 | periods. Arrays are converted into numbered properties e.g. the first item is 210 | "key.0", the second item is "key.1", the third item is "key.2", etc. 211 | 212 | The keys "instance", "consul", and "conqueso" are reserved for use by Propsd 213 | internally. Defining your own properties with those keys may result in 214 | unexpected behavior. 215 | 216 | 217 | [installation]: "./installation.md" 218 | [configuration]: "./configuration.md" 219 | [consul]: https://www.consul.io/docs/agent/checks.html 220 | [Amazon S3]: https://aws.amazon.com/s3/ 221 | [bucket-policies]: http://docs.aws.amazon.com/AmazonS3/latest/dev/using-iam-policies.html 222 | -------------------------------------------------------------------------------- /docs/http-api.md: -------------------------------------------------------------------------------- 1 | # HTTP API # 2 | 3 | The main interface to Propsd is a HTTP API. The API can be used to retrieve 4 | properties and perform status checks. The endpoints are versioned to enable 5 | changes without breaking backwards compatibility. 
6 | 7 | Each endpoint manages a different aspect of Propsd: 8 | 9 | * health - Basic health check 10 | * status - Detailed configuration information 11 | * conqueso - Conqueso compatible API 12 | 13 | ## Health ## 14 | 15 | The health endpoint is used to validate Propsd is running. The endpoint responds 16 | to GET requests with a JSON body and a response code. Any other type of 17 | request returns a 405 (Method Not Allowed) response code and includes an 18 | `Allow: GET` header. 19 | 20 | ### /v1/health ### 21 | 22 | An example response from the health API is: 23 | 24 | ~~~json 25 | { 26 | "status": "okay", 27 | "uptime": 1222101, 28 | "plugins": ["s3", "consul"] 29 | } 30 | ~~~ 31 | 32 | The status field is a string with one of the following values: "okay", 33 | "warn", "fail". The okay status means all of the plugins are working. The 34 | warn status means some of the plugins are working. The fail status means none 35 | of the plugins are working. 36 | 37 | The uptime field is an integer representing the number of milliseconds Propsd has been running. 38 | 39 | The plugins field is an array of strings listing all the installed plugins. 40 | 41 | Response codes are compatible with Consul HTTP health checks. A 200 (OK) is 42 | returned with an okay status. A 429 (Too Many Requests) is returned with a 43 | warning status. A 500 (Internal Server Error) is returned with a fail status. 44 | 45 | ## Status ## 46 | 47 | The status endpoint is used to retrieve detailed configuration information about 48 | Propsd. The endpoint responds to GET requests with a JSON body and a 200 (OK) 49 | response code. Any other type of request returns a 405 (Method Not Allowed) 50 | response code and includes an `Allow: GET` header. 
51 | 52 | ### /v1/status ### 53 | 54 | An example response from the status API is: 55 | 56 | ~~~json 57 | { 58 | "status": "okay", 59 | "uptime": 1222101, 60 | "index": "okay", 61 | "plugins": [{ 62 | "type": "s3", 63 | "name": "global-properties", 64 | "bucket": "bucket", 65 | "path": "global.json", 66 | "status": "okay", 67 | "mtime": "2016-01-06T16:47:45-05:00" 68 | },{ 69 | "type": "consul", 70 | "name": "consul:service:rabbitmq", 71 | "status": "okay", 72 | "mtime": "2016-01-06T16:47:45-05:00" 73 | }] 74 | } 75 | ~~~ 76 | 77 | The status and uptime fields matches what's returned from the health API. The index field provides the status of the index source. 78 | 79 | The plugins field is an array of plugin objects. A plugin object will always 80 | have the following fields: "name", "type", "status", "mtime". Some plugin 81 | objects may include additional fields. 82 | 83 | A plugin's type field is a string that matches the plugin's type as it appears 84 | in the "plugins" field from the health API. 85 | 86 | A plugin's name field is a string describing the plugin. The name field is 87 | unique for all instances of the same type of plugin. 88 | 89 | A plugin's status field is a string with one of the following values: "okay", 90 | "fail". The okay status means the plugin is working. The fail status means the 91 | plugin is not working. 92 | 93 | A plugin's mtime field is a time stamp of the last time the plugin checked 94 | for updates. The time stamp is formatted as an ISO-8601 string with one second 95 | resolution. 96 | 97 | ## Conqueso ## 98 | 99 | The Conqueso endpoint provides a partial implementation of the RESTful API 100 | defined by [Conqueso][]. The endpoint responds to GET requests with text output 101 | and a 200 (OK) response code. Output is formatted as Java compatible properties. 
102 | 103 | ### /v1/conqueso ### 104 | 105 | An example response from the Conqueso API is: 106 | 107 | ~~~text 108 | aws.metrics.enabled=false 109 | fitness.value=88.33 110 | web.url.private=http://localhost:2600/ 111 | conqueso.frontend.ips=10.0.0.1,10.0.0.2 112 | ~~~ 113 | 114 | PUT and POST requests return an empty body and a 200 (OK) response code. They 115 | don't create or update any properties internally. 116 | 117 | OPTIONS requests return an empty body and a 200 (OK) response code. They include 118 | an `Allow: GET, POST, PUT, OPTIONS` header. 119 | 120 | Any other type of request returns a 405 (Method Not Allowed) response code and 121 | includes an `Allow: GET, POST, PUT, OPTIONS` header. 122 | 123 | ## Formatted JSON output ## 124 | 125 | All JSON output is minimized by default. Formatted JSON is returned if a 126 | `pretty` parameter is provided as part of a query string. 127 | 128 | 129 | [Conqueso]: https://github.com/rapid7/conqueso "Conqueso (Rapid7): Centrally manage dynamic properties across services" 130 | -------------------------------------------------------------------------------- /docs/schemas.md: -------------------------------------------------------------------------------- 1 | # Schemas 2 | 3 | ## Manifest file (index.yaml / index.json) 4 | Propsd will be configured to consume a manifest file (index.yaml) from the bucket. 5 | 6 | The format of this yaml file will dictate the ordering of the configuration layering. 
They are as follows: 7 | 8 | ##### index.yaml 9 | 10 | ```yaml 11 | - global 12 | - account 13 | - region 14 | - vpc 15 | - product 16 | |- stack 17 | - service 18 | |- version 19 | - asg 20 | - instance 21 | ``` 22 | 23 | The files will be stored in the bucket in the following structure: 24 | 25 | ``` 26 | - global 27 | - account 28 | - region 29 | |- vpc 30 | |- asg 31 | |- instance 32 | - product 33 | |- stack 34 | - service 35 | | - version 36 | ``` 37 | 38 | ## Github stored yaml configuration files 39 | The content of the template configuration file stored in Github should have the following structure: 40 | 41 | ##### 1.0 schema 42 | 43 | ```yaml 44 | --- 45 | version: 1.0 46 | properties: 47 | foo: bar 48 | ``` 49 | 50 | ##### Future proposed schema 51 | 52 | ```yaml 53 | --- 54 | version: 1.1 55 | sources: 56 | # This source will be coming from a consul lookup 57 | # which is looking for rabbitmq nodes with the listed 58 | # tags. 59 | - name: rabbitmq 60 | type: consul 61 | tags: [tag,tag,...] 62 | properties: 63 | foo: bar 64 | ``` 65 | 66 | ## S3 stored json configuration files 67 | The content of the initial configuration file stored in S3 should have the following structure: 68 | 69 | ##### 1.0 Schema 70 | 71 | ```json 72 | { 73 | "version": 1.0, 74 | "properties": { 75 | "key": "value", 76 | "key": "value" 77 | } 78 | } 79 | ``` 80 | 1.0 will get back the entire consul catalog of instances. 
81 | 82 | ##### Proposed 1.1 Schema 83 | 84 | ```json 85 | { 86 | "version": 1.1, 87 | "constant": 4, 88 | "sources": { 89 | "rabbitmq": { 90 | "type": "consul", 91 | "tags": ["tag", "tag", "tag", "..."] 92 | }, 93 | "cassandra": { 94 | "type": "consul", 95 | "tags": ["tag", "tag", "tag", "..."] 96 | } 97 | }, 98 | "properties": { 99 | "disks": "{{constant}}", 100 | "rabbit.nodes": "{{sources.rabbitmq.ipaddress}}" 101 | } 102 | } 103 | ``` 104 | 105 | With the multitude of layering options, the sources are read from global down to instance name (last in, first out). Then the sources should be compiled down and de-duplicated before property expansion occurs. Then property expansion occurs and the resulting property set is returned. 106 | 107 | ##### Proposed recursive/remote loading 108 | 109 | ```json 110 | { 111 | "version": 1.1, 112 | "constant": 4, 113 | "sources": [ 114 | { 115 | "name": "rabbitmq", 116 | "type": "consul", 117 | "tags": ["tag", "tag", "tag", "..."] 118 | }, 119 | { 120 | "name": "config", 121 | "type": "S3", 122 | "path": "", 123 | "bucket": "(optional)" 124 | } 125 | ], 126 | "properties": { 127 | "disks": "{{constant}}", 128 | "rabbit.nodes": "{{sources.rabbitmq.ipaddress}}" 129 | } 130 | } 131 | ``` 132 | Where we could load an arbitrary S3 path to provide additional configuration to be compiled down for later token expansion. 133 | -------------------------------------------------------------------------------- /docs/sources/s3/class-source-s3-agent.md: -------------------------------------------------------------------------------- 1 | ## Sources.S3.Agent 2 | Instantiated with a S3 bucket and path. On first run the `agent` will use the AWS SDK to query for a new version of the specified S3 object. 3 | 4 | ### Methods 5 | 6 | * `constructor(bucket, path)` 7 | 8 | * `_createS3Params(eTag = null)` 9 | * Generates the params object that the `aws-sdk` requires to retrieve an object. 
10 | 11 | * `fetch(eTag)` 12 | * Fetches an object from S3 and returns a `Promise`. 13 | 14 | 15 | ### Properties 16 | * `_bucket`: (`string`) the S3 bucket 17 | * `_path`: (`string`) the path to the S3 object 18 | * `_s3`: (`AWS.S3`) an instance of an authenticated AWS S3 client -------------------------------------------------------------------------------- /docs/sources/s3/class-source-s3-store.md: -------------------------------------------------------------------------------- 1 | ## Sources.S3.Store 2 | A thin storage layer to manage object ETags. 3 | 4 | ### Methods 5 | 6 | * `constructor()` 7 | * `set(key, value)` 8 | * `get(key)` -------------------------------------------------------------------------------- /docs/sources/s3/class-source-s3.md: -------------------------------------------------------------------------------- 1 | ## Sources.S3 2 | The `Sources.S3` class initializes the component in the following order: 3 | 4 | 1. Initializes an instance of `Sources.S3.Store`. 5 | 1. Initializes an instance of `Sources.S3.Agent`. 6 | 1. Creates an `intervalObject` that executes the callback every `{{interval}}` milliseconds 7 | 8 | ### Methods 9 | * `constructor(bucket, path, interval)` 10 | * `bucket`: (`string`) The S3 bucket 11 | * `path`: (`string`) The path to the S3 object 12 | * `interval`: (`int`) Interval between invocations of {{callback}} (in milliseconds) 13 | 14 | * `getName()` 15 | * Returns a unique name for the plugin instance 16 | 17 | * `getType()` 18 | * Returns the plugin type (S3) 19 | 20 | * `fetch(callback = null, args = {})` 21 | * `callback`: (`Function`) Function invoked on expiration of {{interval}} (default is null) 22 | * `args`: (`Object`) Options to bind into the callback (default is {}) 23 | 24 | Creates a timer that executes every `{{interval}}` milliseconds.
25 | 26 | * `defaultFetch(options)` 27 | * `options`: 28 | 29 | ```javascript 30 | { 31 | agent: (Sources.S3.Agent), 32 | store: (Sources.S3.Store), 33 | args: (Object) 34 | 35 | } 36 | ``` 37 | 38 | `defaultFetch()` is the default callback for `fetch()` if one is not provided. 39 | 40 | ### Properties 41 | * `_bucket`: (`string`) The S3 bucket 42 | * `_path`: (`string`) The path to the S3 object 43 | * `_interval`: (`int`) Interval between invocations of the callback provided to fetch()` 44 | * `_store`: (`Sources.S3.Store`) 45 | * `_agent`: (`Sources.S3.Agent`) 46 | * `_timer`: (`intervalObject `) -------------------------------------------------------------------------------- /docs/sources/source-interface.md: -------------------------------------------------------------------------------- 1 | # Source Interface 2 | 3 | All source plugins must expose the following: 4 | 5 | ## Methods 6 | 1. A constructor that accepts an object of options. These options are source-specific and should be created based on the source documentation. 7 | 8 | 1. `Source#configure(params)` 9 | 1. `Source#initialize()` 10 | 1. `Source#status()`: (`Object`) Returns the plugin's status. 11 | 1. `Source#shutdown()`: Cleans up any open handles (fs, timer, etc.) the plugin has open. 12 | 1. `Source#clear()`: Clears the underlying `Source#properties` data. 13 | 14 | ## Properties 15 | 1. `Source#interval`: (`Integer`) The interval between execution attempts. 16 | 1. `Source#type`: (`String`) The source type. This is a static value for each source type. 17 | 1. `Source#name`: (`String`) A unique name comprised of `Source#type` and other information, such as the S3 bucket and key. 18 | 1. `Source#properties`: (`Object`) The properties retrieved and parsed from the source's underlying data. 19 | 1. `Source#service`: (`AWS.S3|AWS.MetadataService|Object`) The underlying service that retrieves data. 20 | 21 | Specific source types have other exposed properties that are only specific to that plugin. 
For example, the `Metadata` source exposes `Metadata#signature` which is the sha1 signature of the `Metadata#properties` object used to prevent re-parsing if there's no change to the underlying data. 22 | 23 | ## Events 24 | 1. `startup` 25 | 1. `shutdown` 26 | 1. `update`: Emitted with an instance of the source plugin. 27 | 1. `no-update` 28 | 1. `error`: Emitted with an instance of `Error` describing what went wrong. 29 | -------------------------------------------------------------------------------- /docs/sources/source-metadata.md: -------------------------------------------------------------------------------- 1 | Class: Source.Metadata 2 | ====================== 3 | 4 | Periodically fetch the local EC2 Metadata tree. 5 | 6 | ## Interface 7 | ### Instance Property: `properties` 8 | The hash of properties retrieved from the Instance Metadata Service. 9 | 10 | ### Instance Property: `interval` 11 | The timer interval. 12 | ```javascript 13 | const m = new Metadata({ 14 | interval: 300 15 | }); 16 | m.interval // 300 17 | ``` 18 | 19 | ### Instance Property: `name` 20 | The unique name for the source instance. There should only be one Metadata source active at any given time. 21 | 22 | ### Instance Property: `service` 23 | An instance of `AWS.MetadataService` used to query the Metadata tree. 24 | 25 | ### Instance Property: `signature` 26 | A SHA-1 hash of the property data retrieved from the Instance Metadata Service. This is used to only emit the `update` event when new data is actually retrieved. 27 | 28 | ### Class Attribute: `type` 29 | ``` 30 | Metadata.type 31 | ``` 32 | The plugin type. Will always be set to `'ec2-metadata'`. 33 | 34 | ### Instance Method: `configure(params)` 35 | Allows the plugin to be reconfigured on the fly. The `Source.Metadata` implementation only exposes the `interval` property. 36 | 37 | ### Instance Method: `initialize()` 38 | Initializes a timer object and starts the first request to the Metadata endpoint. 
39 | 40 | ### Instance Method: `shutdown()` 41 | Clears the timer and emits the `shutdown` event. 42 | 43 | ### Instance Method: `status()` 44 | Returns the status of the plugin instance. 45 | ``` 46 | { 47 | ok: true|false, // True if the instance is working correctly 48 | updated: Date, // Last updated 49 | interval: Timer, // The timer instance 50 | running: true|false // True if the plugin has been initialized 51 | }; 52 | ``` 53 | 54 | ### Static Method: `setIfChanged(scope, key, value)` 55 | Helper method to detect parameter changes. This works together with the `configure()` method to allow an instance `interval` to be changed after creation. 56 | 57 | ### Event `shutdown` 58 | Issued after the timer is cleared when the `shutdown()` method is called. 59 | 60 | ### Event `update` 61 | Issued whenever `properties` has been updated. The plugin manager should subscribe to this event in order to marshall up-to-date data to the `Storage` object. 62 | 63 | ### Event `error` 64 | Issued whenever an error occurs. Only emitted if there are error handlers set to handle it in order to avoid an uncaught exception. 65 | 66 | ### Event `no-update` 67 | Issued on each successful run in which there is no updated data. 68 | -------------------------------------------------------------------------------- /docs/sources/source-s3.md: -------------------------------------------------------------------------------- 1 | # Propsd S3 Plugin 2 | 3 | ## Purpose 4 | The S3 Plugin manages retrieving data from S3. On a set interval the plugin will send a request using the aws-sdk to a config object in S3, determine if the object has been updated more recently than the previously retrieved object, and, if so, will retrieve the new object, parse its `Body`, and emit an event for the storage layer to consume. 
5 | 6 | ## High level composition 7 | The watcher component will be split into the following modules: 8 | 9 | * `S3` 10 | * `S3.Agent` 11 | * `S3.Store` 12 | * `S3.Parser` 13 | 14 | ## Classes 15 | 16 | * [Sources.S3](s3/class-source-s3.md) 17 | * [Sources.S3.Agent](s3/class-source-s3-agent.md) 18 | * [Sources.S3.Store](s3/class-source-s3-store.md) 19 | * [Sources.S3.Parser](s3/class-source-s3-parser.md) -------------------------------------------------------------------------------- /jsconfig.json: -------------------------------------------------------------------------------- 1 | { 2 | "compilerOptions": { 3 | "target": "ES6", 4 | "module": "commonjs" 5 | } 6 | } 7 | -------------------------------------------------------------------------------- /package.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "propsd", 3 | "version": "3.1.3", 4 | "description": "A local daemon for loading static and dynamic properties", 5 | "scripts": { 6 | "start": "nodemon ./src/bin/server.js --exec babel-node --", 7 | "clean": "rimraf dist", 8 | "version": "./test/bin/version.js", 9 | "transpile": "npm run version && babel src --out-dir dist --copy-files", 10 | "test": "npm run transpile && mocha", 11 | "lint": "eslint src/**/*.js test/**/*.js", 12 | "coverage": "npm run transpile && istanbul cover _mocha -- -R spec", 13 | "report-coverage": "npm run coverage && cat ./coverage/lcov.info | coveralls && rm -rf ./coverage", 14 | "metadata-server": "nodemon ./test/bin/metadata-server --", 15 | "s3-server": "nodemon ./test/bin/s3-server.js --", 16 | "dev-server": "npm run metadata-server & npm run s3-server -- -d test/data/s3 & npm start -- -c src/config/dev.json" 17 | }, 18 | "repository": { 19 | "type": "git", 20 | "url": "git+https://github.com/rapid7/propsd.git" 21 | }, 22 | "license": "MIT", 23 | "bugs": { 24 | "url": "https://github.com/rapid7/propsd/issues" 25 | }, 26 | "homepage": "https://github.com/rapid7/propsd#readme", 
#!/usr/bin/env node

/* global Config, Log */
'use strict';

// Parse command-line arguments. `-c/--config` points at the local propsd
// config file; `--colorize` is read later through nconf (Config.get('colorize'))
// by the logger's console transport.
const args = require('yargs')
  .usage('Usage: $0 [args]')
  .option('c', {
    alias: 'config',
    default: '/etc/propsd/config.json',
    describe: 'Path to local propsd configuration',
    type: 'string'
  })
  .option('colorize', {
    describe: 'Colorize log output',
    type: 'boolean',
    default: false
  })
  .help('help')
  .argv;

const deprecate = require('depd')('propsd');
const express = require('express');
const HTTP = require('http');
const Path = require('path');
const Logger = require('../lib/logger');

const Properties = require('../lib/properties');
const Sources = require('../lib/sources');
const Metadata = require('../lib/source/metadata');
const Tags = require('../lib/source/tags');
const S3 = require('../lib/source/s3');

const app = express();
const server = HTTP.createServer(app);

// Load nconf into the global namespace. Lookup precedence: environment
// variables, then CLI arguments, then the config file, then defaults.json.
global.Config = require('nconf').env()
  .argv();

// `c` always has a yargs default, so the config file is always loaded; the
// path is resolved relative to the current working directory.
if (args.c) {
  Config.file(Path.resolve(process.cwd(), args.c));
}
Config.defaults(require('../config/defaults.json'));

// Global logger instance. Must be created after Config, which Logger reads.
global.Log = Logger.attach(Config.get('log:level'), Config.get('log:filename'));

// Add request logging middleware
if (Config.get('log:access')) {
  deprecate('Separate logging control for access logs has been deprecated and will be removed in a later version.');
}

app.use(Logger.requests(Log, Config.get('log:access:level') || Config.get('log:level')));

const properties = new Properties();
const sources = new Sources(properties);

// Add metadata and some statics. Metadata/Tags are dynamic layers namespaced
// under `instance` / `instance:tags`; `properties` from config is static.
properties.dynamic(new Metadata(Config.get('metadata')), 'instance');
properties.dynamic(new Tags(Config.get('tags')), 'instance:tags');
properties.static(Config.get('properties'));

// Create the Index source
sources.index(new S3('index', Config.get('index')));

// Go! (initialize() returns a Promise; startup proceeds while sources load)
sources.initialize();

// Register endpoints
require('../lib/control/v1/core').attach(app, sources);
require('../lib/control/v1/properties').attach(app, properties);
require('../lib/control/v1/conqueso').attach(app, properties);

// Instantiate server and start it
const host = Config.get('service:hostname');
const port = Config.get('service:port');

server.listen(port, host, () => {
  Log.log('INFO', `Listening on ${host}:${port}`);
});
'use strict';

const flatten = require('flat');
const clone = require('clone');

const STATUS_CODES = require('../../util/status-codes');

/**
 * Format the given data as Java properties: one `key=value` pair per line.
 *
 * @param {Object} data Flattened property key/value pairs
 * @return {String} Newline-delimited Java properties
 */
function makeJavaProperties(data) {
  return Object.keys(data)
    .map((key) => `${key}=${data[key]}`)
    .join('\n');
}

/**
 * Converts Consul addresses to Conqueso addresses
 * Warning! Original Consul properties are removed from the input object
 *
 * Addresses returned by the Consul plugin are formatted as
 * {
 *   "consul": {
 *     "service": {"cluster": "name", "addresses": ["x.x.x.x", "y.y.y.y"]}
 *   }
 * }
 *
 * Addresses returned by Conqueso are formatted as
 * {
 *   "conqueso.name.ips": "x.x.x.x,y.y.y.y"
 * }
 *
 * @param {Object} properties Properties as returned from Storage#properties
 * @return {Object} The same object, with Consul addresses converted to Conqueso ones
 */
function translateConquesoAddresses(properties) {
  if (properties.consul) {
    /* eslint-disable no-param-reassign */
    Object.keys(properties.consul).forEach((service) => {
      const cluster = properties.consul[service].cluster;

      properties[`conqueso.${cluster}.ips`] = properties.consul[service].addresses.join(',');
    });
    delete properties.consul;
    /* eslint-enable no-param-reassign */
  }

  return properties;
}

/**
 * Format the given properties as Conqueso properties
 *
 * Operates on a deep copy, so the caller's object is never mutated.
 *
 * @param {Object} properties Properties as returned from Storage#properties
 * @return {Object} Flattened Conqueso-style properties
 */
function makeConquesoProperties(properties) {
  let results = clone(properties);

  // Remove properties that came from the EC2 metadata API.
  delete results.instance;
  delete results.tags;

  results = translateConquesoAddresses(results);
  results = flatten(results);

  return results;
}

/**
 * Conqueso compatible API
 *
 * @param {Express.App} app
 * @param {Properties} storage
 */
function Conqueso(app, storage) {
  // Conqueso compatible APIs are defined before the generic catch-all route.
  app.get('/v1/conqueso/api/roles/:role/properties/:property', (req, res) => {
    const property = req.params.property;

    storage.properties.then((props) => {
      const properties = makeConquesoProperties(props);

      res.set('Content-Type', 'text/plain');

      // Respond with the single property's value, or an empty body when the
      // requested property does not exist.
      if (property && (properties.hasOwnProperty(property))) {
        res.end(String(properties[property]));
      } else {
        res.end();
      }
    });
  });

  // Handle any other requests by returning all properties.
  const route = app.route('/v1/conqueso*');
  const allowedMethods = 'GET,POST,PUT,OPTIONS';

  /**
   * Sends 405 response with 'Allow' header to any disallowed HTTP methods
   * @param {app.request} req
   * @param {app.response} res
   */
  function methodNotAllowed(req, res) {
    res.set('Allow', allowedMethods);
    res.status(STATUS_CODES.METHOD_NOT_ALLOWED);
    res.end();
  }

  route.get((req, res) => {
    storage.properties.then((props) => {
      res.set('Content-Type', 'text/plain');
      res.end(makeJavaProperties(makeConquesoProperties(props)));
    });
  });

  // Express defaults to using the GET route for HEAD requests,
  // so HEAD must be rejected explicitly.
  route.head(methodNotAllowed);

  // POST and PUT are accepted but their payloads are ignored.
  route.post((req, res) => {
    res.end();
  });

  route.put((req, res) => {
    res.end();
  });

  route.options((req, res) => {
    res.set('Allow', allowedMethods);
    res.end();
  });

  // Reject anything else e.g. DELETE, TRACE, etc.
  route.all(methodNotAllowed);
}

exports.attach = Conqueso;
'use strict';

let VERSION;

// Resolve the build version written during transpilation; fall back to a
// placeholder when running straight from source.
try {
  VERSION = require('../../../version').version;
} catch (ex) {
  VERSION = '0.0.0';
}

const STATUS_CODES = require('../../util/status-codes');
const Source = require('../../source/common');

// Process start time, used to compute uptime in responses.
const started = Date.now();

/**
 * Core API
 *
 * Attaches `/v1/health` and `/v1/status` endpoints to the app. Both report
 * the aggregate health code from the Sources manager; status additionally
 * exposes per-source and per-index detail.
 *
 * @param {Express.app} app
 * @param {Sources} sources
 */
function Core(app, sources) {
  const routes = {
    health: app.route('/v1/health'),
    status: app.route('/v1/status')
  };
  const allowedMethods = 'GET';

  /**
   * Sets headers and status for routes that should return a 405
   * @param {Express.req} req
   * @param {Express.res} res
   */
  const methodNotAllowed = (req, res) => {
    res.set('Allow', allowedMethods);
    res.status(STATUS_CODES.METHOD_NOT_ALLOWED);
    res.end();
  };

  routes.health.get((req, res) => {
    const health = sources.health();
    const plugins = {};

    // Tally active sources by plugin type.
    health.sources.forEach((source) => {
      if (plugins.hasOwnProperty(source.type)) {
        plugins[source.type] += 1;
      } else {
        plugins[source.type] = 1;
      }
    });

    res.status(health.code);
    res.json({
      status: health.code,
      uptime: Date.now() - started,
      plugins,
      version: VERSION
    });
  });

  routes.status.get((req, res) => {
    const health = sources.health();

    // v1 response shape: sources carry an okay/fail label, indices carry a
    // `running` flag derived from their state.
    const v1sources = health.sources.map((source) => Object.assign(source, {status: source.ok ? 'okay' : 'fail'}));
    const v1indices = health.indices.map((index) => Object.assign(index, {running: index.state !== Source.CREATED}));

    res.status(health.code);
    res.json({
      status: health.code,
      uptime: Date.now() - started,
      version: VERSION,
      index: v1indices[0],
      indices: v1indices,
      sources: v1sources
    });
  });

  // All other METHODs should return a 405 with an 'Allow' header
  Object.keys(routes).forEach((name) => {
    routes[name].all(methodNotAllowed);
  });
}

exports.attach = Core;
'use strict';

const deprecate = require('depd')('propsd');
const Winston = require('winston');
const expressWinston = require('express-winston');

/**
 * Create a logger instance
 *
 * Reads `log:json` and `colorize` from the global `Config` (nconf) object,
 * so it must be called after Config has been initialized.
 *
 * @param {string} level Log level name (case-insensitive); defaults to INFO
 *                       when omitted or empty
 * @param {string} filename Optional file to also log to (deprecated)
 * @returns {Winston.Logger}
 * @constructor
 */
function Logger(level, filename) {
  // Apply the default before upper-casing: `level.toUpperCase() || 'INFO'`
  // threw a TypeError when `level` was null/undefined, and the fallback only
  // ever applied to the empty string.
  const logLevel = (level || 'INFO').toUpperCase();

  /* eslint-disable rapid7/static-magic-numbers */
  // Java-style level names so output lines up with other Rapid7 services.
  const javaLogLevels = {
    levels: {
      ERROR: 0,
      WARN: 1,
      INFO: 2,
      VERBOSE: 3,
      DEBUG: 4,
      SILLY: 5
    },
    colors: {
      ERROR: 'red',
      WARN: 'yellow',
      INFO: 'green',
      DEBUG: 'blue'
    }
  };

  /* eslint-enable rapid7/static-magic-numbers */

  const logger = new Winston.Logger({
    level: logLevel,
    levels: javaLogLevels.levels,
    colors: javaLogLevels.colors,
    transports: [
      new Winston.transports.Console({
        timestamp: true,
        json: Config.get('log:json'),
        stringify: Config.get('log:json'),
        colorize: Config.get('colorize')
      })
    ]
  });

  if (filename) {
    logger.add(Winston.transports.File, {filename, level});
    deprecate('The file transport has been deprecated and will be removed in a later version');
  }

  return logger;
}

/**
 * Generates middleware for Express to log incoming requests
 *
 * @param {Winston.Logger} logger Logger instance to write request lines to
 * @param {string} level Log level name (case-insensitive); defaults to INFO
 *                       when omitted or empty
 * @returns {expressWinston.logger}
 * @constructor
 */
function RequestLogger(logger, level) {
  // Same null-safe defaulting as Logger() above.
  const logLevel = (level || 'INFO').toUpperCase();

  return expressWinston.logger({
    winstonInstance: logger,
    expressFormat: false,
    msg: '{{req.method}} {{req.url}} {{res.statusCode}} {{res.responseTime}}ms',
    level: logLevel,
    baseMeta: {sourceName: 'request'}
  });
}

exports.attach = Logger;
exports.requests = RequestLogger;
'use strict';
const EventEmitter = require('events').EventEmitter;
const Layer = require('./properties/layer');
const View = require('./properties/view');
const TokendTransformer = require('./transformers/tokend');
const Immutable = require('immutable');
const isPlainObject = require('lodash.isplainobject');

/* eslint-disable eqeqeq */
/**
 * Deep-merge one Object into another. Do _not_ deep merge anything that isn't explicitly
 * a first-order instance of Object: plain Objects are merged recursively, while
 * Arrays, class instances, and primitives overwrite the destination by reference.
 * Null/undefined source values are skipped entirely (they never clear a key).
 *
 * @param {Object} destination The destination of the merge operation. This object is mutated
 * @param {Object} source The source that properties are merged from
 * @return {Object} The destination object
 */
const merge = (destination, source) => {
  // Ensure that the destination value is an Object.
  const dest = isPlainObject(destination) ? destination : {};

  // Only merge source if it's an Object.
  if (!isPlainObject(source)) {
    return dest;
  }

  Object.keys(source).forEach((key) => {
    // Ignore null and undefined source values. `== null` covers both
    if (source[key] == null) {
      return;
    }

    // Is this an Object (but not something that inherits Object)?
    if (Object.getPrototypeOf(source[key]) === Object.prototype) {
      // Recursively merge source Object into destination
      dest[key] = merge(dest[key], source[key]);

      return;
    }

    dest[key] = source[key];
  });

  return dest;
};
/* eslint-enable eqeqeq */

/**
 * Recursively traverses a layer namespace and sets the value at the corresponding place in the object
 *
 * NOTE(review): the identifier is misspelled ("recusive"); it is module-internal,
 * so a rename would be safe but is deferred here to avoid churn.
 * NOTE: `namespaceArray` is consumed — shift() mutates the caller's array.
 *
 * @param {Object} destination The destination of the merge operation.
 * @param {Array} namespaceArray An array of keys (namespaces) to traverse
 * @param {Object} source The source that properties are merged from
 * @return {Object}
 */
const recusiveNamespaceMerge = (destination, namespaceArray, source) => {
  const nextNamespace = namespaceArray.shift();
  const dest = isPlainObject(destination) ? destination : {};

  // More namespace segments remain: descend one level and recurse.
  if (namespaceArray.length) {
    dest[nextNamespace] = recusiveNamespaceMerge(dest[nextNamespace], namespaceArray, source);

    return dest;
  }

  // Last segment: merge the source properties at this position.
  dest[nextNamespace] = merge(dest[nextNamespace], source);

  return dest;
};

/**
 * A Properties instance manages multiple statically configured layers,
 * and an active View instance.
 */
class Properties extends EventEmitter {
  /**
   * Constructor
   */
  constructor() {
    super();

    // Guards initialize() so it only runs once.
    this.initialized = false;

    // Persistent (Static/Dynamic) layers, in registration order.
    this.layers = [];

    // Most recently built property set (see build()).
    this._properties = Immutable.Map();

    // The active View over index-managed sources.
    this.active = new View(this);

    // Transformer used to resolve secrets in built properties.
    this.tokendTransformer = new TokendTransformer();
  }

  /**
   * Concatenate layers' sources with the active view's sources
   *
   * @return {Array}
   */
  get sources() {
    return []

    // Get Dynamic layers' source instances
    .concat(this.layers.map((layer) => layer.source).filter((source) => !!source))

    // Add the active View's sources.
    // NOTE: This is a shallow copy and only copies object references to a new array.
    // DO NOT USE THIS GETTER TO PERFORM ANY MUTATING ACTIVITIES.
    .concat(this.active.sources.slice().reverse());
  }

  /**
   * Return a transformed set of properties
   *
   * Runs the built properties through the tokend transformer and deep-merges
   * the transformed values over the originals.
   *
   * @return {Promise} resolves with a plain-Object property set
   */
  get properties() {
    return this.tokendTransformer
      .transform(this._properties).then((transformedProperties) => { // eslint-disable-line arrow-body-style
        return Immutable.Map(this._properties).mergeDeep(transformedProperties).toJS();
      });
  }

  /**
   * Register a dynamic Source layer
   *
   * @param {Source} source
   * @param {String} namespace
   */
  dynamic(source, namespace) {
    this.layers.push(new Layer.Dynamic(source, namespace));
  }

  /**
   * Register a static layer
   *
   * @param {Object} properties
   * @param {String} namespace
   */
  static(properties, namespace) {
    this.layers.push(new Layer.Static(properties, namespace));
  }

  /**
   * Instantiate a new View
   *
   * @param {Array} sources An optional set of Sources to be passed to the new View
   * @return {View}
   */
  view(sources) {
    return new View(this, sources);
  }

  /**
   * Initialize persistent (Dynamic and Static) layers.
   *
   * Subsequent calls are no-ops that resolve immediately with this instance.
   *
   * @return {Promise} resolves after a build has completed
   */
  initialize() {
    if (this.initialized) {
      return Promise.resolve(this);
    }
    this.initialized = true;

    return Promise.all(
      this.layers.concat(this.tokendTransformer).map((source) => source.initialize())
    ).then((sources) => {
      // Once initialized, watch for sources' update events
      sources.forEach((source) => {
        source.on('update', () => this.build());
      });

      return this.build();
    });
  }

  /**
   * Flatten layers and view's sources into one properties object
   *
   * Builds are debounced: concurrent calls during the hold-down window share
   * one pending build, and all returned promises resolve when it fires.
   *
   * @return {Promise} resolves with this instance after the 'build' event
   */
  build() {
    // return after starting building timeout
    const built = new Promise((resolve) => this.once('build', () => resolve(this)));

    // A build is already pending; piggyback on it.
    if (this._building) {
      return built;
    }

    // Block building for a hold-down period to let multiple updates propagate
    this._building = setTimeout(() => {
      // Merge layers into their own object. This can be consumed as an input by
      // template renderers. Colon-delimited namespaces (e.g. 'instance:tags')
      // nest the layer's properties under the corresponding object path.
      const persistent = this.layers.reduce((properties, layer) => {
        if (!layer.namespace) {
          return merge(properties, layer.properties);
        }

        let namespace = layer.namespace.split(':');
        const namespaceRoot = namespace.shift();

        if (namespace.length > 0) {
          properties[namespaceRoot] = recusiveNamespaceMerge(properties[namespaceRoot], namespace, layer.properties);
        } else {
          properties[namespaceRoot] = merge(properties[namespaceRoot], layer.properties);
        }

        return properties;
      }, {});

      // Persistent layers are merged last so they take precedence over the
      // active View's sources.
      this.persistent = persistent;
      this._properties = merge(
        this.active.sources.reduce((properties, source) => merge(properties, source.properties), {}),
        persistent
      );

      this.emit('build', this.properties);
      delete this._building;
    }, Properties.BUILD_HOLD_DOWN);

    return built;
  }
}

// Build hold-down timeout
Properties.BUILD_HOLD_DOWN = 1000; // eslint-disable-line rapid7/static-magic-numbers

Properties.Layer = Layer;
Properties.View = View;
Properties.merge = merge;

module.exports = Properties;
/**
 * A Dynamic layer wraps a Source with an optional namespace and forwards its
 * update events to a parent Properties object.
 *
 * @class Layer.Dynamic
 * @extends Layer
 */
class Dynamic extends Layer {
  /**
   * Constructor
   * @param {Source} source The underlying dynamic Source
   * @param {String} namespace Optional colon-delimited namespace
   */
  constructor(source, namespace) {
    super(namespace);
    this.source = source;

    // Re-emit the wrapped source's update events as this layer's own
    source.on('update', () => {
      this.emit('update', this);
    });
  }

  /**
   * Expose the wrapped Source's current property set
   * @return {Object}
   */
  get properties() {
    return this.source.properties;
  }

  /**
   * Initialize the underlying source, resolving with the layer itself
   *
   * @return {Promise}
   */
  initialize() {
    return this.source.initialize().then(() => this);
  }
}
  /**
   * Initialize registered sources. Waits for all sources' `initialize` promises
   * to resolve, then deregisters the current active View's listeners from its
   * sources, then registers its own listeners for sources' update events, then
   * builds the Properties instance.
   *
   * NOTE(review): `this.parent.active` is swapped to this View *before* the
   * sources finish initializing, while the previous view's listeners stay
   * attached until `current.destroy()` runs — presumably so no update events
   * are lost during the hand-off. Confirm callers never rely on `active`
   * meaning "fully initialized".
   *
   * @return {Promise} resolves with this View once the parent has rebuilt
   */
  activate() {
    const current = this.parent.active;

    // Already active: nothing to do
    if (this.parent.active === this) {
      return Promise.resolve(this);
    }
    this.parent.active = this;

    // Wait for all sources to initialize
    return Promise.all(this.sources.map((source) => source.initialize()))
      .then(() => {
        // Deregister current active view's update listeners
        current.destroy();

        // Register for sources' update events
        this.sources.forEach((source) => {
          source.addListener('update', this.onUpdate);
        });

        // Rebuild properties with the new source set
        return this.parent.build().then(() => this);
      });
  }
/**
 * Consul Source
 *
 * Exposes the Consul service catalog to the property-set. Each poll lists all
 * services in the catalog, then fetches the passing health entries for each.
 *
 * @class Source.Consul
 * @extends Source.Polling
 *
 * @param {Parser} parser
 */
class Consul extends Source.Polling(Parser) { // eslint-disable-line new-cap
  /**
   * Constructor
   * @param {String} name Source instance name
   * @param {Object} opts Options that can be set for the plugin
   */
  constructor(name, opts) {
    // Connection parameters fall back to the global Consul configuration
    const options = Object.assign({
      host: Config.get('consul:host'),
      port: Config.get('consul:port'),
      secure: Config.get('consul:secure')
    }, opts);

    super(name, options);

    this.client = Client({ // eslint-disable-line new-cap
      host: options.host,
      port: options.port,
      secure: !!options.secure
    });
  }

  /**
   * List every service in the Consul catalog, then collect passing
   * health-check data for each of them.
   *
   * @param {Function} callback Called with (error, properties)
   * @private
   */
  _fetch(callback) {
    this.client.catalog.service.list({
      consistent: false,
      stale: true
    }, (listError, services) => {
      if (listError) {
        return callback(listError);
      }

      const properties = {};

      // Fetch passing health entries for a single service name
      const fetchHealth = (name, next) => {
        this.client.health.service({
          service: name,
          passing: true,
          consistent: false,
          stale: true
        }, (healthError, data) => {
          if (healthError) {
            return next(healthError);
          }

          properties[name] = data;
          next();
        });
      };

      // Process all services, then deliver the aggregate (or the first error)
      each(Object.keys(services), fetchHealth, (error) => callback(error, properties));
    });
  }
}
31 | if (info.Service && info.Service.Address) { 32 | addresses.push(info.Service.Address); 33 | } else if (info.Node && info.Node.Address) { 34 | addresses.push(info.Node.Address); 35 | } 36 | }); 37 | 38 | properties[name] = { 39 | cluster: name, 40 | addresses 41 | }; 42 | }); 43 | 44 | // This is required in order to continue name-spacing consul properties 45 | this.properties.consul = properties; 46 | } 47 | } 48 | 49 | module.exports = Parser; 50 | -------------------------------------------------------------------------------- /src/lib/source/metadata.js: -------------------------------------------------------------------------------- 1 | /* global Log, Config */ 2 | 'use strict'; 3 | 4 | const Crypto = require('crypto'); 5 | const Aws = require('aws-sdk'); 6 | 7 | const Source = require('./common'); 8 | const Parser = require('./metadata/parser'); 9 | const Util = require('./metadata/util'); 10 | const MetadataClient = require('../util/metadata-client'); 11 | 12 | /** 13 | * Metadata Source 14 | * 15 | * Expose EC2 metadata to the property-set 16 | * 17 | * @class Source.Metadata 18 | * @extends Source.Polling 19 | * 20 | * @param {Parser} parser 21 | */ 22 | class Metadata extends Source.Polling(Parser) { // eslint-disable-line new-cap 23 | /** 24 | * Constructor 25 | * @param {Object} opts 26 | * - {Number} interval Polling interval. Default 30s. 27 | */ 28 | constructor(opts) { 29 | // Inject defaults into options 30 | const options = Object.assign({ 31 | timeout: Metadata.DEFAULT_TIMEOUT, 32 | host: Metadata.DEFAULT_HOST 33 | }, opts); 34 | 35 | super('ec2-metadata', options); 36 | 37 | /** 38 | * Initialize the metadata-service client 39 | */ 40 | this.service = new MetadataClient({ 41 | timeout: options.timeout, 42 | host: options.host 43 | }); 44 | } 45 | 46 | /** 47 | * Metadata version 48 | * 49 | * Used to prepend paths for calls to the EC2 Metadata Service, 50 | * e.g. 
  /**
   * Fetch implementation for EC2 Metadata.
   *
   * Traverses the metadata tree, optionally augments the result with the
   * instance's auto-scaling group (fetched once, then cached), and hashes the
   * flattened path/value set so unchanged polls report NO_UPDATE.
   *
   * @param {Function} callback Called with (error, data | Source.NO_UPDATE)
   * @private
   */
  _fetch(callback) {
    Util.traverse(this.version, Parser.paths,

      // Call `Metadata.request` for each path
      (path, cb) => this.service.request(path, cb),

      // Handle results of metadata tree traversal
      (err, data) => {
        if (err) {
          return callback(err);
        }

        let p = Promise.resolve(data);

        // Grab the ASG from the instance-id
        const instanceId = data['meta-data/instance-id'];
        const az = data['meta-data/placement/availability-zone'];

        if (instanceId && az) {
          // Derive the region by dropping the AZ's trailing letter (e.g. us-east-1a -> us-east-1)
          const region = az.slice(0, -1);

          if (!this.parser.properties['auto-scaling-group']) {
            Log.log('DEBUG', 'Retrieving auto-scaling-group data');
            p = new Promise((resolve, reject) => {
              // NOTE: the inner `err` shadows the traversal's `err` above
              (new Aws.AutoScaling({region})).describeAutoScalingInstances({InstanceIds: [instanceId]}, (err, d) => {
                if (err) {
                  Log.log('ERROR', err);

                  return reject(err);
                }
                resolve(d);
              });
            }).then((d) => {
              const asg = d.AutoScalingInstances.map((instance) => instance.AutoScalingGroupName);

              // No reason it should be longer than 1 but worth a check
              if (asg.length > 1) {
                Log.log('WARN', `Instance id ${instanceId} is in multiple auto-scaling groups`, asg);
              }

              // Check to see if an instance is actually part of an ASG
              if (asg.length !== 0) {
                data['auto-scaling-group'] = asg[0];
              }

              return data;
            }).catch(() => data); // On AWS errors, proceed without ASG data
          } else {
            // ASG membership is immutable for a running instance; reuse the cached value.
            // NOTE(review): reads `this.properties` here but checked
            // `this.parser.properties` above — confirm these stay in sync.
            Log.log('DEBUG', 'Using cached auto-scaling-group data.');
            data['auto-scaling-group'] = this.properties['auto-scaling-group'];
            p = Promise.resolve(data);
          }
        }

        p.then((data) => {
          // Detect change by hashing the fetched data
          const hash = Crypto.createHash('sha1');
          const paths = Object.keys(data);

          Log.log('DEBUG', `Source/Metadata: Fetched ${paths.length} paths from the ec2-metadata service`, this.status());

          // Hash in sorted order so key enumeration order can't change the signature
          paths.sort().forEach((key) => {
            hash.update(`${key}:${data[key]}`);
          });

          const signature = hash.digest('base64');

          // Same signature as last poll: report no update
          if (this._state === signature) {
            return callback(null, Source.NO_UPDATE);
          }

          this._state = signature;
          callback(null, data);
        });
      }
    );
  }
/**
 * Helper that copies a metadata value into `properties`, keyed by the basename
 * of its Metadata API path (e.g. 'meta-data/ami-id' is stored as 'ami-id').
 *
 * @param {String} path The property's Metadata API path
 * @param {Object} metadata Path/value mappings from the Metadata API
 * @param {Object} properties The properties object to populate
 */
function atBasename(path, metadata, properties) {
  const key = Path.basename(path);

  properties[key] = metadata[path];
}
112 | const roles = Object.keys(metadata).filter((p) => match.test(p)); 113 | 114 | // Instance does not have a Profile/Role 115 | if (roles.length === 0) { 116 | return; 117 | } 118 | 119 | properties['iam-role'] = Path.basename(roles[0]); 120 | const credentials = JSON.parse(metadata[roles[0]]); 121 | 122 | if (!properties.credentials) { 123 | properties.credentials = {}; 124 | } 125 | 126 | properties.credentials.lastUpdated = credentials.LastUpdated; 127 | properties.credentials.type = credentials.Type; 128 | properties.credentials.accessKeyId = credentials.AccessKeyId; 129 | properties.credentials.secretAccessKey = credentials.SecretAccessKey; 130 | properties.credentials.expires = credentials.Expiration; 131 | }, 132 | 133 | 'meta-data/mac': () => { }, 134 | 'meta-data/network/interfaces/macs/': (path, metadata, properties) => { 135 | const mac = metadata['meta-data/mac']; 136 | 137 | // Return early if there's no data here 138 | if (!mac) { 139 | return; 140 | } 141 | 142 | if (!properties.interface) { 143 | properties.interface = {}; 144 | } 145 | 146 | [ 147 | 'vpc-ipv4-cidr-block', 148 | 'subnet-ipv4-cidr-block', 149 | 'public-ipv4s', 150 | 'mac', 151 | 'local-ipv4s', 152 | 'interface-id' 153 | ].forEach((key) => { 154 | properties.interface[key] = metadata[Path.join(path, mac, key)]; 155 | }); 156 | 157 | properties['vpc-id'] = metadata[Path.join(path, mac, 'vpc-id')]; 158 | }, 159 | 'auto-scaling-group': atBasename 160 | }; 161 | -------------------------------------------------------------------------------- /src/lib/source/metadata/util.js: -------------------------------------------------------------------------------- 1 | 'use strict'; 2 | const Path = require('path'); 3 | 4 | const DEFAULT_PARALLEL = 1; 5 | 6 | /** 7 | * Iterate over a list of tasks in parallel, passing one to each call of the 8 | * `work` function. When all tasks are complete, or an error is encountered, call the 9 | * `done` function, with an error if one occurred. 
/**
 * Iterate over a list of tasks in parallel, passing one to each call of the
 * `work` function. When all tasks are complete, or an error is encountered,
 * call the `done` function, with an error if one occurred.
 *
 * This implementation iterates over a work-list non-destructively, without
 * cloning it, meaning that more tasks can be added safely while the work-loop
 * is running.
 *
 * @param {Array} list Set of tasks to work on
 * @param {Function} work The operation to perform on each element of `list`
 * @param {Function} done Callback on error or completion
 * @param {Object} s Shared iteration state: { parallel: Number limit on
 *   concurrent tasks (default 1), running, cursor, error }. External callers
 *   omit this; it is passed internally so recursive branches share one state.
 */
function each(list, work, done, s) {
  const state = s || {
    parallel: DEFAULT_PARALLEL,
    error: false
  };

  // Default values for the shared counters on the first call
  if (!Number(state.running)) {
    state.running = 0;
  }
  if (!Number(state.cursor)) {
    state.cursor = 0;
  }

  // No more items to process
  if (state.cursor >= list.length) {
    // Call done only if this was the last in-flight branch and nothing failed
    if (state.running === 0 && !state.error) {
      done();
    }

    return;
  }

  // Already enough tasks in flight
  if (state.running >= state.parallel) {
    return;
  }

  // Get an item off of the list, move up the cursor, and get a semaphore.
  const item = list[state.cursor];

  // Obtain a semaphore
  state.running += 1;
  state.cursor += 1;

  // Branch parallel requests up to the concurrency limit
  while (state.running < state.parallel && state.cursor < list.length) {
    each(list, work, done, state);
  }

  // Process this branch's task
  work(item, (err) => {
    // Release the semaphore
    state.running -= 1;

    // An error has already terminated the loop elsewhere. Just bail.
    if (state.error) {
      return;
    }

    if (err) {
      // First error wins: flag it so other branches go quiet, report once
      state.error = true;

      return done(err); // eslint-disable-line consistent-return
    }

    // Iterate: pick up the next unclaimed item
    each(list, work, done, state);
  });
}
exports.each = each;
/**
 * Parser for property documents fetched from S3.
 *
 * Expects a JSON document of the shape {properties: Object, sources: Array}.
 *
 * @class S3Parser
 */
class S3Parser {
  /**
   * Constructor
   */
  constructor() {
    this.properties = {};
  }

  /**
   * Parse a fetched S3 object body and replace the parser's properties/sources
   * @param {Object} data Buffer (or string) containing a JSON document
   */
  update(data) {
    const document = JSON.parse(data.toString());

    this.properties = document.properties || {};
    this.sources = document.sources || [];
  }
}
(!options.hasOwnProperty('path') || !options.path) { 60 | throw new ReferenceError('Source/S3: Missing required parameter `path`!'); 61 | } 62 | 63 | super(name, options); 64 | 65 | this.bucket = options.bucket; 66 | this.path = options.path; 67 | 68 | /** 69 | * Initialize the s3 client 70 | */ 71 | const config = {}; 72 | 73 | if (options.endpoint) { 74 | config.endpoint = new Aws.Endpoint(options.endpoint); 75 | config.s3ForcePathStyle = true; 76 | } else { 77 | config.region = Config.get('index:region'); 78 | } 79 | 80 | this.service = new Aws.S3(config); 81 | } 82 | 83 | /** 84 | * Get the S3 source status 85 | * @return {{name, type, ok, state, updated, resource, etag}|*} 86 | */ 87 | status() { 88 | const object = super.status(); 89 | 90 | object.resource = `s3://${this.bucket}/${this.path}`; 91 | object.etag = this._state; 92 | 93 | return object; 94 | } 95 | 96 | /** 97 | * 98 | * @param {Function} callback 99 | * @private 100 | */ 101 | _fetch(callback) { 102 | this.service.getObject({ 103 | Bucket: this.bucket, 104 | Key: this.path, 105 | IfNoneMatch: this._state 106 | }, (err, data) => { 107 | if (err) { 108 | if (err.code === 'NotModified') { 109 | return callback(null, Source.NO_UPDATE); 110 | } 111 | 112 | if (err.code === 'NoSuchKey') { 113 | return callback(null, Source.NO_EXIST); 114 | } 115 | 116 | return callback(err); 117 | } 118 | 119 | this._state = data.ETag; 120 | callback(null, data.Body); 121 | }); 122 | } 123 | } 124 | 125 | module.exports = S3; 126 | -------------------------------------------------------------------------------- /src/lib/source/tags.js: -------------------------------------------------------------------------------- 1 | /* global Log, Config */ 2 | 'use strict'; 3 | 4 | const Crypto = require('crypto'); 5 | const Aws = require('aws-sdk'); 6 | 7 | const Source = require('./common'); 8 | const Metadata = require('./metadata'); 9 | const MetadataClient = require('../util/metadata-client'); 10 | const Parser = 
require('./tags/parser'); 11 | 12 | /** 13 | * EC2 Tags Source 14 | * 15 | * Expose EC2 instance tags to the property-set 16 | * 17 | * @class Source.Tags 18 | * @extends Source.Polling 19 | * 20 | * @param {Parser} parser 21 | */ 22 | class Tags extends Source.Polling(Parser) { // eslint-disable-line new-cap 23 | /** 24 | * Constructor 25 | * @param {Object} opts 26 | */ 27 | constructor(opts) { 28 | super('ec2-tags', opts); 29 | 30 | const metadataOptions = Object.assign({ 31 | timeout: Metadata.DEFAULT_TIMEOUT, 32 | host: Metadata.DEFAULT_HOST 33 | }, Config.get('metadata'), opts); 34 | 35 | this._metadata = new MetadataClient(metadataOptions); 36 | } 37 | 38 | /** 39 | * Fetch implementation for EC2 tags API 40 | * @param {Function} callback 41 | * @private 42 | */ 43 | _fetch(callback) { 44 | const path = `/${Metadata.version}/dynamic/instance-identity/document`; 45 | 46 | new Promise((resolve) => { 47 | this._metadata.request(path, (err, data) => { 48 | if (data) { 49 | return resolve(data); 50 | } 51 | 52 | return callback(null, {}); 53 | }); 54 | }).then((data) => { 55 | let document; 56 | 57 | try { 58 | document = JSON.parse(data); 59 | } catch (e) { 60 | document = { 61 | instanceId: '', 62 | region: '' 63 | }; 64 | } 65 | 66 | return { 67 | instance: document.instanceId, 68 | region: document.region 69 | }; 70 | }).then((data) => { 71 | const client = new Aws.EC2({region: data.region}); 72 | const params = { 73 | Filters: [{Name: 'resource-id', Values: [data.instance]}] 74 | }; 75 | 76 | return new Promise((resolve) => { 77 | client.describeTags(params, (err, data) => { 78 | if (data) { 79 | return resolve(data); 80 | } 81 | 82 | return callback(null, {}); 83 | }); 84 | }); 85 | }).then((data) => { 86 | const hash = Crypto.createHash('sha1'); 87 | const tags = data.Tags; 88 | 89 | Log.log('DEBUG', `Source/Tags: Fetched ${tags.length} tags from the ec2 tags api`, this.status()); 90 | 91 | tags.sort().forEach((tag) => { 92 | 
/**
 * EC2 Tags Parser
 *
 * Flattens an EC2 `describeTags` response into a simple Key -> Value map.
 *
 * @class TagsParser
 */
class Parser {
  /**
   * Constructor
   */
  constructor() {
    this.properties = {};
  }

  /**
   * Parse a `describeTags` response and replace the parser's properties
   * @param {Object} data Response object whose `Tags` is an array of {Key, Value}
   */
  update(data) {
    const tags = data.hasOwnProperty('Tags') ? data.Tags : [];

    this.properties = tags.reduce((properties, tag) => {
      properties[tag.Key] = tag.Value;

      return properties;
    }, {});
  }
}
12 | * 13 | * @class Sources 14 | * @extends EventEmitter 15 | */ 16 | class Sources extends EventEmitter { 17 | /** 18 | * Constructor 19 | * @param {Properties} properties 20 | */ 21 | constructor(properties) { 22 | super(); 23 | 24 | this.setMaxListeners(Sources.MAX_LISTENERS); 25 | 26 | this.properties = properties; 27 | this.indices = []; 28 | 29 | this.initialized = false; 30 | this.current = new Index([], null); 31 | } 32 | 33 | /** 34 | * Add an index document source 35 | * 36 | * @param {Source} source 37 | */ 38 | index(source) { 39 | this.indices.push(source); 40 | } 41 | 42 | /** 43 | * Initialize the Properties instance, then all of the index sources, then 44 | * trigger the first update, then subscribe to future update events. 45 | * 46 | * @return {Promise} 47 | */ 48 | initialize() { 49 | // Instance is already initialized 50 | if (this.initialized) { 51 | return Promise.resolve(this); 52 | } 53 | 54 | // Resource is currently initializing. Resolve on initialized 55 | if (this.initializing) { 56 | return new Promise((resolve) => { 57 | this.once('initialized', () => resolve(this)); 58 | }); 59 | } 60 | 61 | // Resource is not yet initialized. 
  /**
   * Create a new Index from the index sources' configurations, instantiate any
   * new sources, create and activate a new view, then shut down retired sources.
   *
   * Updates are debounced: the first call arms a hold-down timer and returns a
   * promise; calls arriving while the timer is armed share that promise.
   *
   * NOTE(review): if `activate()` rejects, `_updating` is never cleared and the
   * returned promise never settles — confirm upstream error handling.
   *
   * @return {Promise} resolves with this Sources instance once the update settles
   */
  update() {
    // Promise that settles when this (possibly coalesced) update completes
    const updated = new Promise((resolve) => {
      this.once('_resolve_update', () => resolve(this));
    });

    // An update is already pending; piggy-back on it
    if (this._updating) {
      return updated;
    }

    Log.log('INFO', 'Sources: Updating sources');

    // Block for a hold-down period to let multiple updates propagate
    this._updating = setTimeout(() => {
      let configs = [];

      // Aggregate Source configurations from index sources
      this.indices.forEach((source) => {
        if (!(source.sources instanceof Array)) {
          Log.log('WARN', `Sources: Index source ${source.name} does not have any sources. Ignoring.`);

          return;
        }

        configs = configs.concat(source.sources);
      });

      // Diff the new index against the current one
      const next = new Index(configs, this.properties.persistent);
      const difference = Comparator.compare(this.current, next);

      this.current = difference.build(Sources.providers);

      // Nothing to see here. Don't trigger a view-update.
      if (!difference.changes) {
        delete this._updating;

        Log.log('INFO', 'Sources: Update successful, no changes.');
        this.emit('_resolve_update', this);
        this.emit('noupdate', this);

        return;
      }

      // Build and activate a new view from the new Index, then clean up old sources.
      this.properties.view(next.ordered()).activate()
        .then(() => {
          difference.cleanup();
          delete this._updating;

          Log.log('INFO', `Update successful, created ${difference.create.length} sources, ` +
            `shutting down ${difference.destroy.length} sources`);

          this.emit('_resolve_update', this);
          this.emit('update', this);
        });
    }, Sources.UPDATE_HOLD_DOWN);

    return updated;
  }
165 | if (!source.ok) { 166 | object.code = STATUS_CODES.INTERNAL_SERVER_ERROR; 167 | object.status = source.state; 168 | } 169 | 170 | return source.status(); 171 | }); 172 | 173 | object.sources = this.properties.sources.map((source) => { 174 | if (!source.ok) { 175 | object.code = STATUS_CODES.INTERNAL_SERVER_ERROR; 176 | object.status = source.state; 177 | } 178 | 179 | return source.status(); 180 | }); 181 | 182 | return object; 183 | } 184 | } 185 | 186 | // Registered Source providers 187 | Sources.providers = { 188 | s3: require('./source/s3'), 189 | consul: require('./source/consul') 190 | }; 191 | 192 | // Update hold-down timeout 193 | Sources.UPDATE_HOLD_DOWN = 1000; // eslint-disable-line rapid7/static-magic-numbers 194 | Sources.MAX_LISTENERS = 100; // eslint-disable-line rapid7/static-magic-numbers 195 | 196 | Sources.Comparator = Comparator; 197 | Sources.Index = Index; 198 | module.exports = Sources; 199 | -------------------------------------------------------------------------------- /src/lib/sources/comparator.js: -------------------------------------------------------------------------------- 1 | /* global Log */ 2 | 'use strict'; 3 | const DeepEqual = require('deep-equal'); 4 | 5 | /** 6 | * Class to compare two indices and build the next Index. 
/**
 * Class to compare two indices and build the next Index.
 *
 * @class Comparator
 */
class Comparator {
  /**
   * Compare the configurations of two indices
   *
   * @param {Index} current
   * @param {Index} next
   * @return {Comparator}
   */
  static compare(current, next) {
    const difference = new this(current, next);

    // Sources present in NEXT but absent from CURRENT must be created
    difference.create = next.order.filter((name) =>
      !current.configurations.hasOwnProperty(name));

    // Sources present in CURRENT but absent from NEXT must be destroyed
    difference.destroy = current.order.filter((name) =>
      !next.configurations.hasOwnProperty(name));

    // Sources present in both are either unchanged (copied) or recreated
    for (const name of next.order) {
      if (!current.configurations.hasOwnProperty(name)) {
        // New source. Ignore.
        continue;
      }

      if (this.equals(current.configurations[name], next.configurations[name])) {
        // Same same.
        difference.copy.push(name);
        continue;
      }

      // Configuration changed: tear down the old instance, create a new one.
      difference.destroy.push(name);
      difference.create.push(name);
    }

    return difference;
  }

  /**
   * Class getter for the default comparison algorithm
   *
   * @return {Function}
   */
  static get equals() {
    return DeepEqual;
  }

  /**
   * Constructor
   * @param {Index} current
   * @param {Index} next
   */
  constructor(current, next) {
    this.current = current;
    this.next = next;

    this.create = [];
    this.copy = [];
    this.destroy = [];
  }

  /**
   * Return whether there are changes between the current Index and the next Index
   * @return {boolean}
   */
  get changes() {
    return Boolean(this.create.length || this.destroy.length);
  }

  /**
   * Create or copy source instances for the NEXT index
   *
   * @param {Object} providers A hash of type-to-class for Sources
   * @return {Index} The NEXT index
   */
  build(providers) {
    for (const name of this.create) {
      const config = this.next.configurations[name];

      if (!providers.hasOwnProperty(config.type)) {
        Log.log('WARN', `Source type ${config.type} does not have a registered provider! Ignoring.`);
        continue;
      }

      const Provider = providers[config.type];

      this.next.sources[name] = new Provider(config.name, config.parameters);
    }

    for (const name of this.copy) {
      this.next.sources[name] = this.current.sources[name];
    }

    return this.next;
  }

  /**
   * Shutdown removed sources in the CURRENT index
   *
   * @return {Index} The CURRENT index
   */
  cleanup() {
    for (const name of this.destroy) {
      this.current.sources[name].shutdown();
    }

    return this.current;
  }
}
/**
 * Manage a version of the dynamic source index
 */
class Index {
  /**
   * Constructor
   * @param {Array} configs Array of configuration parameter objects for sources
   * @param {Object} properties Lookup for interpolated values in configuration objects
   */
  constructor(configs, properties) {
    this.configurations = {};
    this.sources = {};
    this.order = [];

    const interpolate = properties instanceof Object;

    // Store the order that source configurations were defined in
    for (const config of configs) {
      // A source without a type can never be instantiated. Drop it.
      if (!config.hasOwnProperty('type') || !config.type) {
        Log.log('WARN', 'Source configuration does not have a `type` parameter! Ignoring.');

        continue;
      }

      // If the config object doesn't have a name, generate one... Begrudgingly.
      if (!config.hasOwnProperty('name') || !config.name) {
        config.name = config.type + ':' + // eslint-disable-line no-param-reassign
          Crypto.randomBytes(8).toString('hex'); // eslint-disable-line rapid7/static-magic-numbers

        Log.log('WARN', `Source configuration does not have a \`name\` parameter! Generated ${config.name}`);
      }

      // Hash configuration objects by name, and store original order
      if (interpolate) {
        // If a properties object was provided, pass config through StringTemplate
        try {
          this.configurations[config.name] = StringTemplate.render(config, properties);
        } catch (err) {
          // Ignore configurations that we can't perform string interpolation upon
          Log.log('WARN', `Unable to interpolate variables in configuration for ${config.name}: ${err.message}. Ignoring!`);

          continue;
        }
      } else {
        this.configurations[config.name] = config;
      }

      this.order.push(config.name);
    }
  }

  /**
   * Return an ordered set of the Source instances for this index
   *
   * @return {Array}
   */
  ordered() {
    return this.order
      .filter((name) => this.sources.hasOwnProperty(name) && Boolean(this.sources[name]))
      .map((name) => this.sources[name]);
  }
}
/**
 * Use mustache template-style strings to interpolate data from an object.
 *
 * Example: `new StringTemplate('{{ a:b }}', {a: {b: 'v'}}).toString() === 'v'`
 *
 * @class StringTemplate
 */
class StringTemplate {
  /**
   * Constructor
   * @param {String} template The template input
   * @param {Object} scope The scope available for substitution values
   */
  constructor(template, scope) {
    this.template = template;
    this.scope = scope;
  }

  /**
   * Test if the provided string is a valid template.
   *
   * @param {String} string
   * @return {boolean}
   */
  static isTemplate(string) {
    // CAPTURE is a global (`/g`) regex, and `RegExp.prototype.test` advances
    // `lastIndex` on stateful patterns. Without this reset, repeated calls
    // against matching strings would incorrectly return false every other time.
    this.CAPTURE.lastIndex = 0;

    return this.CAPTURE.test(string);
  }

  /**
   * Check if string is a template, and if so, render it.
   * @param {String} string Template string
   * @param {Object} scope Parameters available to the template
   * @return {String} Rendered string
   */
  static coerce(string, scope) {
    if (!this.isTemplate(string)) {
      return string;
    }

    return new this(string, scope).toString();
  }

  /**
   * Try to interpolate strings in a deep object
   *
   * @param {Object} object An Object that may have strings to be interpolated
   * @param {Object} scope Parameters available to the template
   * @return {Object} A new object
   */
  static render(object, scope) {
    return iter(object, (value) => this.coerce(value, scope)); // eslint-disable-line no-use-before-define
  }

  /**
   * Convert a template to an interpolated string
   *
   * @return {string}
   * @throws {ReferenceError} If a referenced parameter is not present in scope
   */
  toString() {
    // `String.prototype.replace` does not use `lastIndex` for `/g` patterns,
    // so no reset is required here.
    return this.template.replace(this.constructor.CAPTURE, (match, capture) => {
      const path = capture.split(this.constructor.DELIMITER);
      let node = this.scope;

      // Traverse the scope object one path segment at a time
      for (let i = 0; i < path.length; i++) {
        if (!node.hasOwnProperty(path[i])) {
          throw new ReferenceError('Undefined parameter ' + capture);
        }
        node = node[path[i]];
      }

      return node;
    });
  }
}

StringTemplate.CAPTURE = /\{\{ ?(.+?) ?\}\}/g;
StringTemplate.DELIMITER = ':';
StringTemplate.prototype.toJSON = StringTemplate.prototype.toString;
/**
 * Helper: Recursively iterate through an object applying the callback to each value element
 *
 * Arrays map item-by-item; plain values pass through `handle`; nested objects
 * recurse. An object with no enumerable keys is returned untouched (it may be
 * something other than a simple Object, or simply empty).
 *
 * @param {Object} object
 * @param {Function} handle
 * @returns {Object}
 */
function iter(object, handle) {
  // Array Values
  if (object instanceof Array) {
    return object.map((item) => (item instanceof Object ? iter(item, handle) : handle(item)));
  }

  const keys = Object.keys(object);

  // This may be something other than a simple Object, or it's empty
  if (keys.length === 0) {
    return object;
  }

  const collected = {};

  for (const key of keys) {
    const value = object[key];

    collected[key] = value instanceof Object ? iter(value, handle) : handle(value);
  }

  return collected;
}
/**
 * Walk a properties object looking for transformable values
 *
 * @param {Object} properties an object to search for transformable values
 * @param {Array} keyPath accumulated path of keys where transformable value was found
 * @return {Array} transformable data with injected key path
 */
function collectTransformables(properties, keyPath) {
  let results = [];

  // Ensure we're walking an Object.
  if (!isPlainObject(properties)) {
    return results;
  }

  const keys = Object.keys(properties);

  // An object of the exact shape {$tokend: {...}} is itself a transformable.
  if (keys.length === 1 && keys[0] === '$tokend') {
    return results.concat(Immutable.OrderedMap(properties.$tokend).set('keyPath', keyPath));
  }

  // Recurse into nested plain objects, accumulating the key path as we go.
  keys.forEach((key) => {
    const value = properties[key];

    // Don't walk anything that's not an Object.
    if (isPlainObject(value)) {
      results = results.concat(collectTransformables(value, keyPath.concat(key)));
    }
  });

  return results;
}

/**
 * Transform properties by fetching secrets from Tokend
 */
class TokendTransformer {
  /**
   * Constructor
   * @param {Object} options See TokendClient for options
   */
  constructor(options) {
    const opts = options || {};

    this._client = new TokendClient(opts);

    // Cache of resolved secrets keyed by dotted property path. Each entry
    // stores the plaintext and a signature of the $tokend descriptor so that
    // a changed descriptor invalidates the cached value.
    this._cache = {};
  }

  /**
   * Start polling Tokend for secrets
   *
   * @return {Promise}
   */
  initialize() {
    return this._client.initialize();
  }

  /**
   * Transform properties by fetching secrets from Tokend
   *
   * @param {Object} properties properties that may contain $tokend values
   * @return {Promise} properties that had $tokend values transformed
   */
  transform(properties) {
    const seenProperties = [];
    const promises = collectTransformables(properties, []).map((info) => {
      const keyPath = info.get('keyPath');
      const propertyName = keyPath.join('.');

      seenProperties.push(propertyName);

      // Fingerprint the $tokend descriptor so a cache hit requires an identical request.
      const signature = crypto
        .createHash('sha1')
        .update(JSON.stringify(info.toJS()))
        .digest('base64');

      if (this._cache.hasOwnProperty(propertyName) && this._cache[propertyName].signature === signature) {
        return Promise.resolve(Immutable.Map().setIn(keyPath, this._cache[propertyName].plaintext));
      }

      let resolver = null,
        payload = {},
        method = '',
        source = 'Vault';

      switch (info.get('type')) {
        case 'generic':
          method = 'GET';
          resolver = this._client.get(info.get('resource'));
          break;

        case 'transit':
          payload = {
            key: info.get('key'),
            ciphertext: info.get('ciphertext')
          };
          method = 'POST';

          resolver = this._client.post(info.get('resource'), payload);
          break;

        case 'kms':
          source = 'KMS';
          payload = {
            key: source,
            ciphertext: info.get('ciphertext')
          };

          // region and datakey are optional; only forward non-empty values.
          if (info.get('region') && info.get('region') !== '') {
            payload.region = info.get('region');
          }

          if (info.get('datakey') && info.get('datakey') !== '') {
            payload.datakey = info.get('datakey');
          }
          method = 'POST';
          resolver = this._client.post(info.get('resource'), payload);
          break;

        default:
          Log.log('WARN', `Invalid $tokend.type ${info.get('type')} for ${keyPath.join('.')}`);

          return Promise.resolve(Immutable.Map().setIn(keyPath, null));
      }

      let requestId = `${info.get('resource')}.${payload.key}.${payload.ciphertext}`;

      // We have to strip out any undefined values to make sure that we correctly map the requestId to
      // the GET request cache key.
      if (method === 'GET') {
        requestId = requestId.split('.').filter((f) => f !== 'undefined').join('.');
      }

      return resolver.then((data) => {
        this._client.clearCacheAtKey(method, requestId);

        if (!data.hasOwnProperty('plaintext')) {
          Log.log('WARN', `No "plaintext" key found in ${source} for ${keyPath.join('.')}`);

          return Promise.resolve(Immutable.Map().setIn(keyPath, null));
        }

        this._cache[propertyName] = {
          signature,
          plaintext: data.plaintext
        };

        return Promise.resolve(Immutable.Map().setIn(keyPath, data.plaintext));
      }).catch((err) => {
        Log.log('WARN', err);
        this._client.clearCacheAtKey(method, requestId);

        // Fall back to the last known plaintext if we have one.
        if (this._cache.hasOwnProperty(propertyName)) {
          return Promise.resolve(Immutable.Map().setIn(keyPath, this._cache[propertyName].plaintext));
        }

        Log.log('WARN', `'${propertyName}' not found in cache, '${propertyName}' will be set to null`);

        return Promise.resolve(Immutable.Map().setIn(keyPath, null));
      });
    });

    return Promise.all(promises).then((values) => {
      let transformedProperties = Immutable.Map();

      values.forEach((value) => {
        transformedProperties = transformedProperties.mergeDeep(value);
      });

      /*
       * Remove entries from the cache if it has not been iterated on above
       * from collectTransformables
       */
      Object.keys(this._cache).forEach((propertyName) => {
        if (seenProperties.indexOf(propertyName) === -1) {
          delete this._cache[propertyName];
        }
      });

      return transformedProperties.toJS();
    });
  }

  /**
   * Bind event listeners to the TokendClient
   *
   * @param {String} eventName name of the TokendClient event to listen for
   * @param {Function} listener callback invoked with the event payload
   */
  on(eventName, listener) {
    this._client.on(eventName, listener);
  }
}
/**
 * Index into an object by following a sequence of keys.
 *
 * Unlike the previous implementation, this does NOT consume (mutate) the
 * caller's `queue` array.
 *
 * @param {Object} object Object to traverse
 * @param {Array} queue Sequence of keys to follow into `object`
 * @return {*} The value found at the end of the key path
 * @throws {TypeError} If any key along the path resolves to `undefined`
 */
const getNestedProperty = (object, queue) => {
  let node = object;

  for (const key of queue) {
    const prop = node[key];

    if (typeof prop === 'undefined') {
      throw new TypeError(`Key '${key}' does not exist in object ${JSON.stringify(node)}.`);
    }

    node = prop;
  }

  return node;
};
| * request returns a non-2xx status code. We don't want to abort the rest of the traversal 42 | * for this. Instead, log the error and swallow it. 43 | */ 44 | Log.log('ERROR', 'Aws-sdk returned the following error during the metadata service request ' + 45 | `to ${path}: %j`, err); 46 | 47 | return callback(null, undefined); 48 | } 49 | callback(err, data); 50 | }); 51 | } 52 | } 53 | 54 | MetadataClient.version = 'latest'; 55 | MetadataClient.DEFAULT_TIMEOUT = 500; // eslint-disable-line rapid7/static-magic-numbers 56 | MetadataClient.DEFAULT_HOST = '169.254.169.254:80'; 57 | 58 | /* Export */ 59 | module.exports = MetadataClient; 60 | -------------------------------------------------------------------------------- /src/lib/util/status-codes.js: -------------------------------------------------------------------------------- 1 | 'use strict'; 2 | const HTTP = require('http'); 3 | 4 | const STATUS_CODES = 5 | module.exports = Object.assign({}, HTTP.STATUS_CODES); 6 | 7 | const M_REJECT_CHARS = /\W/g; 8 | const M_DELIMITERS = /\s/g; 9 | 10 | // Reverse-map status names to codes 11 | Object.keys(STATUS_CODES).forEach(function _(code) { 12 | const message = STATUS_CODES[code]; 13 | let name = message.replace(M_DELIMITERS, '_'); 14 | 15 | name = name.replace(M_REJECT_CHARS, ''); 16 | name = name.toUpperCase(); 17 | 18 | STATUS_CODES[name] = Number(code); 19 | }); 20 | -------------------------------------------------------------------------------- /test/.eslintrc: -------------------------------------------------------------------------------- 1 | { 2 | "env": { 3 | "mocha": true 4 | }, 5 | "rules": { 6 | "max-nested-callbacks": 0, 7 | "rapid7/static-magic-numbers": 0, 8 | "no-unused-expressions": 0, 9 | "new-cap": 0, 10 | "no-loop-func": 0, 11 | "no-param-reassign": 0, 12 | "func-names": 0, 13 | "no-new": 0, 14 | "require-jsdoc": 0 15 | } 16 | } 17 | -------------------------------------------------------------------------------- /test/bin/.eslintrc: 
/**
 * Apply an operation to each value of a conqueso property if it passes a comparator
 *
 * Each element is a java-style `key=value` line. When `comparator(value)` is
 * truthy, `operation(value)` replaces the value (an array result is re-joined
 * with commas by the template literal). NOTE: a value containing '=' is
 * truncated at the second '=' — this matches the original behavior.
 *
 * @param {Array} arr
 * @param {Function} comparator
 * @param {Function} operation
 * @returns {Array}
 */
function apply(arr, comparator, operation) {
  return arr.map((line) => {
    const [prop, value] = line.split('=');

    return `${prop}=${comparator(value) ? operation(value) : value}`;
  });
}
/**
 * Truncates numeric properties for conqueso property string float -> js float comparison
 * @returns {Array}
 */
Array.prototype.truncateNumericProperties = function truncateNumericProperties() {
  return apply(this, (val) => !isNaN(val) && isFinite(val), (val) => parseFloat(val));
};

/* eslint-enable no-extend-native */

/**
 * Compute the difference between two lists of java-style property strings.
 *
 * Lines are normalized (sorted, comma-delimited values sorted, numeric values
 * truncated) before comparison. Unlike the previous implementation this does
 * not sort the caller's arrays in place, and membership checks use Sets
 * (O(1) per lookup) instead of repeated `indexOf` scans (O(n^2) overall).
 *
 * @param {Array} a properties from the reference service (Conqueso)
 * @param {Array} b properties from propsd
 * @returns {Object} `{added, removed}` lines present in only one list
 */
function compareProperties(a, b) {
  const normalize = (props) => props.slice().sort().sortDelimitedValues().truncateNumericProperties();

  const reference = normalize(a);
  const candidate = normalize(b);

  const referenceSet = new Set(reference);
  const candidateSet = new Set(candidate);

  return {
    added: candidate.filter((line) => !referenceSet.has(line)),
    removed: reference.filter((line) => !candidateSet.has(line))
  };
}

/**
 * Whether a diff contains any changes
 * @param {Object} difference
 * @returns {boolean}
 */
function different(difference) {
  return difference.added.length > 0 || difference.removed.length > 0;
}

const CONQUESO = process.argv[2];
const PROPSD = 'http://localhost:9100/v1/conqueso';

// Poll both endpoints once a minute and report any property drift.
(function poll() {
  Promise.all([
    requestProperties(CONQUESO),
    requestProperties(PROPSD)
  ])
    .then((data) => {
      const conqueso = data[0];
      const propsd = data[1];

      const diff = compareProperties(conqueso, propsd);

      if (!different(diff)) {
        return console.log('Properties are identical');
      }

      console.log('Properties are not identical!');
      diff.added.forEach((p) => {
        console.log(` + ${p}`);
      });

      diff.removed.forEach((p) => {
        console.log(` - ${p}`);
      });
    }, (err) => { console.error(err); });

  setTimeout(poll, ONE_MINUTE);
}());

/* eslint-enable no-console */
-------------------------------------------------------------------------------- /test/bin/get-metadata: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env node 2 | 3 | /** 4 | * Fetch an object mapping request paths to un-parsed response data from the EC2 metadata API 5 | * 6 | * WARNING: This will not obfuscate any data. You must manually remove any references 7 | * that you deem critical from the generated JSON. 8 | * 9 | * N.B, You should _definitely_ munge 10 | * - /latest/meta-data/iam/info 11 | * - /latest/meta-data/iam/security-credentials/ 12 | * - /latest/meta-data/iam/security-credentials/PROFILE-ID 13 | * - Public IPv4 addresses of persistent resources and EIPs 14 | * - Public domain names of persistent resources and EIPs 15 | * - Your account id in /latest/dynamic/instance-identity/document 16 | * - /latest/meta-data/network/interfaces/macs/MAC_ADDR/vpc-id 17 | */ 18 | 19 | const AWS = require('aws-sdk'); 20 | const FS = require('fs'); 21 | const Path = require('path'); 22 | const Util = require('../../lib/source/metadata/util'); 23 | 24 | const metadata = new AWS.MetadataService({ 25 | host: 'localhost:8080' 26 | }); 27 | 28 | const paths = {}; 29 | 30 | Util.traverse( 31 | 'latest', 32 | ['/meta-data/', '/dynamic/'], 33 | 34 | // Call `Metadata.request` for each path 35 | (path, cb) => metadata.request(path, (err, data) => { 36 | if (err) { return cb(err); } 37 | 38 | // Store unparsed data from each path 39 | paths[path] = data; 40 | 41 | cb(null, data); 42 | }), 43 | 44 | // Handle results of metadata tree traversal 45 | (err) => { 46 | if (err) { throw err; } 47 | FS.writeFileSync(Path.resolve(__dirname, '../data/metadata-paths.json'), JSON.stringify(paths, null, 2)); 48 | } 49 | ); 50 | -------------------------------------------------------------------------------- /test/bin/metadata-server: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env node 2 
| 'use strict'; 3 | 4 | /* eslint-disable rapid7/static-magic-numbers, no-param-reassign */ 5 | 6 | /** 7 | * Serve mocked metadata path data from a local HTTP interface 8 | */ 9 | 10 | const HTTP = require('http'); 11 | const paths = require('../data/metadata-paths.json'); 12 | 13 | const NOT_FOUND_BODY = 14 | '\n' + 16 | '\n' + 17 | '\n' + 18 | ' 404 - Not Found\n' + 19 | ' \n' + 20 | ' \n' + 21 | '

404 - Not Found

\n' + 22 | ' \n' + 23 | '\n'; 24 | 25 | HTTP.createServer(function(req, res) { 26 | if (!paths.hasOwnProperty(req.url)) { 27 | res.statusCode = 404; 28 | res.setHeader('Content-Type', 'text/html'); 29 | res.setHeader('Content-Length', NOT_FOUND_BODY.length); 30 | res.write(NOT_FOUND_BODY); 31 | res.end(); 32 | 33 | return; 34 | } 35 | 36 | res.setHeader('Content-Type', 'text/plain'); 37 | res.setHeader('Content-Length', paths[req.url].length); 38 | res.write(paths[req.url]); 39 | res.end(); 40 | }).listen(8080); 41 | -------------------------------------------------------------------------------- /test/bin/munge-consul: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env node 2 | 'use strict'; 3 | 4 | /* eslint-disable no-param-reassign, rapid7/static-magic-numbers */ 5 | 6 | /** 7 | * Modify Consul Node, Service, and Check identifiers consistently in 8 | * data/consul-checks.json and data/consul-nodes.json 9 | */ 10 | 11 | const Crypto = require('crypto'); 12 | const FS = require('fs'); 13 | const Path = require('path'); 14 | 15 | const checks = require('../data/consul-checks.json'); 16 | const nodes = require('../data/consul-nodes.json'); 17 | 18 | const replacements = {}; 19 | 20 | function replace(input) { 21 | if (replacements.hasOwnProperty(input)) { return replacements[input]; } 22 | 23 | replacements[input] = Crypto.randomBytes(16).toString('hex'); 24 | return replacements[input]; 25 | } 26 | 27 | nodes.forEach(function _(node) { 28 | node.Node = replace(node.Node); 29 | }); 30 | 31 | checks.forEach(function _(check) { 32 | check.Node = replace(check.Node); 33 | check.CheckID = replace(check.CheckID); 34 | check.Name = replace(check.Name); 35 | check.Output = replace(check.Output); 36 | check.ServiceID = replace(check.ServiceID); 37 | check.ServiceName = replace(check.ServiceName); 38 | }); 39 | 40 | FS.writeFileSync(Path.resolve(__dirname, '../data/consul-checks.json'), JSON.stringify(checks, null, 2)); 41 | 
FS.writeFileSync(Path.resolve(__dirname, '../data/consul-nodes.json'), JSON.stringify(nodes, null, 2)); 42 | -------------------------------------------------------------------------------- /test/bin/parse-matadata: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env node 2 | 3 | /** 4 | * Parse the JSON document generated by get-metadata into a JSON document that the 5 | * Metadata Parser can grok. 6 | */ 7 | 8 | const FS = require('fs'); 9 | const Path = require('path'); 10 | const Util = require('../../lib/source/metadata/util'); 11 | 12 | const metadata = require('../data/metadata-paths.json'); 13 | 14 | Util.traverse( 15 | 'latest', 16 | ['/meta-data/', '/dynamic/'], 17 | 18 | // Call `Metadata.request` for each path 19 | (path, cb) => cb(null, metadata[path]), 20 | 21 | // Handle results of metadata tree traversal 22 | (err, data) => { 23 | if (err) { throw err; } 24 | FS.writeFileSync(Path.resolve(__dirname, '../data/metadata-values.json'), JSON.stringify(data, null, 2)); 25 | } 26 | ); 27 | -------------------------------------------------------------------------------- /test/bin/s3-server.js: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env node 2 | 3 | /* global Config, Log */ 4 | 'use strict'; 5 | const fs = require('fs'); 6 | const S3rver = require('s3rver'); 7 | const os = require('os'); 8 | const Path = require('path'); 9 | const AWS = require('aws-sdk'); 10 | const rmdir = require('rimraf'); 11 | const walk = require('walk'); 12 | const chokidar = require('chokidar'); 13 | 14 | global.Config = require('nconf') 15 | .argv() 16 | .env() 17 | .file(Path.resolve(__dirname, '../data/config.json')) 18 | .defaults(require('../../src/config/defaults.json')); 19 | global.Log = require('../../src/lib/logger').attach(Config.get('log:level')); 20 | 21 | const DEFAULT_HTTP_PORT = 4569; 22 | 23 | const args = require('yargs') 24 | .usage('Usage: $0 [args]') 25 | 
/**
 * Derive the fake S3 bucket name from the watched data directory.
 *
 * @param {String} p path to the data directory
 * @returns {String} bucket name, e.g. `/a/b/data` -> `propsd-data`
 */
function getBucketName(p) {
  return `propsd-${Path.basename(p)}`;
}

/**
 * Removes the "bucket" created in the temp folder
 *
 * The previous try/catch here only rethrew the error, which is a no-op;
 * errors now propagate directly to the caller.
 */
function cleanupTempDir() {
  rmdir.sync(Path.resolve(os.tmpdir(), 'propsd-temp-s3-server'));
}

/**
 * Creates a directory for the "bucket" in the temp folder
 *
 * Any existing temp directory is removed first. Errors propagate to the
 * caller (the removed try/catch only rethrew them).
 *
 * @returns {String} path to the created temp directory
 */
function createTempDirForBucket() {
  const tmpDirBucketPath = Path.resolve(os.tmpdir(), 'propsd-temp-s3-server');

  cleanupTempDir();
  fs.mkdirSync(tmpDirBucketPath);

  return tmpDirBucketPath;
}
/**
 * Copies files from the watched folder to the "bucket"
 * @param {String} bucket
 */
function copyFiles(bucket) {
  walk.walk(path).on('file', (root, fileStats, next) => {
    const pathToFile = Path.join(Path.relative(path, root), fileStats.name);

    awsClient.putObject({
      Bucket: bucket,
      Key: pathToFile,
      Body: fs.createReadStream(Path.join(path, pathToFile))
    }, (err) => {
      if (err) {
        Log.log('ERROR', err, err.stack);
      }
      next();
    });
  });
}

/**
 * Deletes the contents of the "bucket"
 * @param {String} bucket
 */
function emptyBucket(bucket) {
  awsClient.listObjects({Bucket: bucket}, (listErr, listData) => {
    if (listErr) {
      throw listErr;
    }

    for (const object of listData.Contents) {
      awsClient.deleteObject({Bucket: bucket, Key: object.Key}, (deleteErr) => {
        if (deleteErr) {
          throw deleteErr;
        }
      });
    }
  });
}

/**
 * Handles cleanup on app exit
 * @param {Error} err
 */
function exitHandler(err) {
  const code = err ? 1 : 0;

  if (err) {
    Log.log('ERROR', err, err.stack);
  }

  const tmpDirBucketPath = Path.resolve(os.tmpdir(), `propsd-temp-s3-server`);

  Log.log('INFO', `Cleaning up temp directory: ${tmpDirBucketPath}`);
  cleanupTempDir();
  process.exit(code);
}

/**
 * Re-sync the bucket after a watched file changes or is removed
 * @param {String} bucket
 */
function onFileChange(bucket) {
  emptyBucket(bucket);
  copyFiles(bucket);
}
164 | chokidar.watch(path, {persistent: true}).on('change', () => onFileChange(bucket)); 165 | chokidar.watch(path, {persistent: true}).on('unlink', () => onFileChange(bucket)); 166 | }); 167 | } 168 | 169 | function init() { 170 | fs.stat(path, (err, stats) => { 171 | if (err) { 172 | Log.log('ERROR', err, err.stack); 173 | process.exit(1); 174 | } 175 | if (!stats.isDirectory()) { 176 | Log.log('ERROR', `${path} is not a directory`); 177 | process.exit(1); 178 | } 179 | const bucket = getBucketName(path); 180 | const tmpDir = createTempDirForBucket(); 181 | 182 | const client = new S3rver({ 183 | port, 184 | hostname, 185 | silent: false, 186 | directory: tmpDir 187 | }); 188 | 189 | client.run((serverErr, host, p) => { 190 | if (serverErr) { 191 | Log.log('ERROR', serverErr, serverErr.stack); 192 | process.exit(1); 193 | } 194 | 195 | createBucket(bucket); 196 | 197 | Log.log('INFO', `listening for S3 requests at http://${host}:${p}`); 198 | }); 199 | }); 200 | } 201 | 202 | init(); 203 | 204 | // do something when app is closing 205 | process.on('exit', exitHandler); 206 | 207 | // catches ctrl+c event 208 | process.on('SIGINT', exitHandler); 209 | 210 | // catches uncaught exceptions 211 | process.on('uncaughtException', (err) => { 212 | exitHandler(err); 213 | }); 214 | -------------------------------------------------------------------------------- /test/bin/version.js: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env node 2 | 'use strict'; 3 | 4 | let version; 5 | 6 | try { 7 | version = require('../../package').version; 8 | } catch (ex) { 9 | version = '0.0.0'; 10 | } 11 | 12 | const path = require('path'); 13 | const fs = require('fs'); 14 | 15 | fs.writeFileSync(path.resolve('src/version.json'), JSON.stringify({version})); 16 | -------------------------------------------------------------------------------- /test/conqueso-api-v1.js: 
-------------------------------------------------------------------------------- 1 | 'use strict'; 2 | 3 | require('./lib/helpers'); 4 | 5 | const ConsulStub = require('./lib/stub/consul'); 6 | const Consul = require('../dist/lib/source/consul'); 7 | 8 | const expect = require('chai').expect; 9 | const request = require('supertest'); 10 | 11 | const testServerPort = 3000; 12 | const fixedDate = new Date(); 13 | const fixedRegex = /ab+c/i; 14 | 15 | const HTTP_OK = 200; 16 | const HTTP_METHOD_NOT_ALLOWED = 405; 17 | 18 | const conquesoProperties = { 19 | instanceMetaData: { 20 | 'meta.property.1': 'songs you have never heard of', 21 | 'meta.property.2': 'artisanal cream cheese' 22 | }, 23 | properties: Promise.resolve({ 24 | date: fixedDate, 25 | regex: fixedRegex, 26 | name: 'hipster-mode-enabled', 27 | value: true, 28 | type: 'BOOLEAN' 29 | }), 30 | on() {} 31 | }; 32 | 33 | const nestedProperties = { 34 | instanceMetaData: { 35 | 'meta.property.1': 'songs you have never heard of', 36 | 'meta.property.2': 'artisanal cream cheese' 37 | }, 38 | properties: Promise.resolve({ 39 | name: 'hipster-mode-enabled', 40 | value: true, 41 | type: 'BOOLEAN', 42 | food: { 43 | name: 'tacos', 44 | value: true, 45 | type: 'BOOLEAN' 46 | } 47 | }), 48 | on() {} 49 | }; 50 | 51 | const javaProperties = [ 52 | `date=${fixedDate}`, 53 | 'regex=/ab+c/i', 54 | 'name=hipster-mode-enabled', 55 | 'value=true', 56 | 'type=BOOLEAN' 57 | ].join('\n'); 58 | const nestedJavaProperties = [ 59 | 'name=hipster-mode-enabled', 60 | 'value=true', 61 | 'type=BOOLEAN', 62 | 'food.name=tacos', 63 | 'food.value=true', 64 | 'food.type=BOOLEAN' 65 | ].join('\n'); 66 | 67 | /** 68 | * Create a new Express server for testing 69 | * 70 | * @param {Object} propsUnderTest 71 | * @return {http.Server} 72 | */ 73 | function makeServer(propsUnderTest) { 74 | const app = require('express')(); 75 | 76 | require('../dist/lib/control/v1/conqueso').attach(app, propsUnderTest); 77 | 78 | return 
app.listen(testServerPort); 79 | } 80 | 81 | describe('Conqueso API v1', function() { 82 | let server = null; 83 | 84 | beforeEach(function() { 85 | server = makeServer(conquesoProperties); 86 | }); 87 | 88 | afterEach(function(done) { 89 | server.close(done); 90 | }); 91 | 92 | it('acknowledges GET requests', function(done) { 93 | request(server) 94 | .get('/v1/conqueso/api/roles') 95 | .set('Accept', 'text/plain') 96 | .expect('Content-Type', 'text/plain; charset=utf-8') 97 | .expect(HTTP_OK, javaProperties, done); 98 | }); 99 | 100 | it('acknowledges POST requests', function(done) { 101 | request(server) 102 | .post('/v1/conqueso/api/roles/search/properties') 103 | .send(conquesoProperties) 104 | .expect(HTTP_OK, '', done); 105 | }); 106 | 107 | it('acknowledges PUT requests', function(done) { 108 | request(server) 109 | .put('/v1/conqueso/api/roles/search/properties') 110 | .send(conquesoProperties) 111 | .expect(HTTP_OK, '', done); 112 | }); 113 | 114 | it('acknowledges OPTIONS requests', function(done) { 115 | request(server) 116 | .options('/v1/conqueso') 117 | .expect('Allow', 'GET,POST,PUT,OPTIONS') 118 | .expect(HTTP_OK, '', done); 119 | }); 120 | 121 | it('rejects DELETE requests', function(done) { 122 | request(server) 123 | .delete('/v1/conqueso') 124 | .expect('Allow', 'GET,POST,PUT,OPTIONS') 125 | .expect(HTTP_METHOD_NOT_ALLOWED, '', done); 126 | }); 127 | 128 | it('rejects TRACE requests', function(done) { 129 | request(server) 130 | .trace('/v1/conqueso') 131 | .expect('Allow', 'GET,POST,PUT,OPTIONS') 132 | .expect(HTTP_METHOD_NOT_ALLOWED, '', done); 133 | }); 134 | 135 | it('rejects HEAD requests', function(done) { 136 | request(server) 137 | .head('/v1/conqueso') 138 | .expect('Allow', 'GET,POST,PUT,OPTIONS') 139 | .expect(HTTP_METHOD_NOT_ALLOWED, '', done); 140 | }); 141 | }); 142 | 143 | // This is split out into a separate 'describe' group because of the way express binds ports 144 | describe('Conqueso API v1', function() { 145 | let server; 
146 | 147 | before(function() { 148 | server = makeServer(nestedProperties); 149 | }); 150 | it('emits properly flattened data', function(done) { 151 | request(server) 152 | .get('/v1/conqueso/api/roles') 153 | .set('Accept', 'text/plain') 154 | .expect('Content-Type', 'text/plain; charset=utf-8') 155 | .expect(HTTP_OK, nestedJavaProperties, done); 156 | }); 157 | 158 | it('retrieves a specific property if it exists', function(done) { 159 | request(server) 160 | .get('/v1/conqueso/api/roles/global/properties/food.name') 161 | .set('Accept', 'text/plain') 162 | .expect('Content-Type', 'text/plain; charset=utf-8') 163 | .expect(HTTP_OK, 'tacos', done); 164 | }); 165 | 166 | it('returns no data if a specific property does not exist', function(done) { 167 | request(server) 168 | .get('/v1/conqueso/api/roles/global/properties/food.gluten') 169 | .set('Accept', 'text/plain') 170 | .expect('Content-Type', 'text/plain; charset=utf-8') 171 | .expect(HTTP_OK, '', done); 172 | }); 173 | 174 | after(function(done) { 175 | server.close(done); 176 | }); 177 | }); 178 | 179 | describe('Conqueso API v1', function() { 180 | let consul = null, 181 | server = null; 182 | 183 | beforeEach(function(done) { 184 | consul = new Consul('consul'); 185 | consul.client = ConsulStub; 186 | 187 | consul.initialize().then(function() { 188 | server = makeServer({ 189 | properties: Promise.resolve(consul.properties) 190 | }); 191 | 192 | done(); 193 | }); 194 | }); 195 | 196 | afterEach(function(done) { 197 | consul.shutdown(); 198 | server.close(done); 199 | }); 200 | 201 | it('formats IP addresses for Consul services', function(done) { 202 | const expected = [ 203 | 'conqueso.postgresql.ips=10.0.0.2', 204 | 'conqueso.redis.ips=10.0.0.1', 205 | 'conqueso.consul.ips=10.0.0.1,10.0.0.2,10.0.0.3' 206 | ]; 207 | 208 | request(server) 209 | .get('/v1/conqueso/api/roles') 210 | .set('Accept', 'text/plain') 211 | .expect('Content-Type', 'text/plain; charset=utf-8') 212 | .expect((res) => { 213 | 
expect(res.text.split(/\n/g)).to.members(expected); 214 | }) 215 | .expect(HTTP_OK, done); 216 | }); 217 | 218 | it('removes reserved "instance" keyword from properties', function(done) { 219 | server.close(); 220 | 221 | server = makeServer({ 222 | properties: Promise.resolve({ 223 | instance: { 224 | food: 'tacos' 225 | }, 226 | gluten: 'free' 227 | }), 228 | on() {} 229 | }); 230 | 231 | request(server) 232 | .get('/v1/conqueso/api/roles') 233 | .set('Accept', 'text/plain') 234 | .expect('Content-Type', 'text/plain; charset=utf-8') 235 | .expect(HTTP_OK, 'gluten=free', done); 236 | }); 237 | }); 238 | -------------------------------------------------------------------------------- /test/consul.js: -------------------------------------------------------------------------------- 1 | 'use strict'; 2 | 3 | require('./lib/helpers'); 4 | 5 | const Stub = require('./lib/stub/consul'); 6 | const Consul = require('../dist/lib/source/consul'); 7 | 8 | const expect = require('chai').expect; 9 | 10 | describe('Consul', function() { 11 | it('instantiates a Consul Source with defaults', function() { 12 | const consul = new Consul('test'); 13 | 14 | // See https://github.com/silas/node-papi/blob/master/lib/client.js#L71 15 | expect(consul.client._opts.host).to.equal('127.0.0.1'); 16 | expect(consul.client._opts.port).to.equal(8500); 17 | expect(consul.client._opts.secure).to.equal(false); 18 | 19 | expect(consul.properties).to.be.empty; 20 | }); 21 | 22 | it('overrides defaults from constructor options', function() { 23 | const consul = new Consul('test', { 24 | host: '1.1.1.1', 25 | port: 1234, 26 | secure: true 27 | }); 28 | 29 | expect(consul.client._opts.host).to.equal('1.1.1.1'); 30 | expect(consul.client._opts.port).to.equal(1234); 31 | expect(consul.client._opts.secure).to.equal(true); 32 | }); 33 | 34 | it('sets up properties on initialize', function() { 35 | const consul = new Consul('test'); 36 | 37 | consul.client = Stub; 38 | 39 | return consul.initialize().then(() 
=> { 40 | expect(consul.state).to.equal(Consul.RUNNING); 41 | expect(consul.properties).to.eql({ 42 | consul: { 43 | consul: { 44 | cluster: 'consul', 45 | addresses: ['10.0.0.1', '10.0.0.2', '10.0.0.3'] 46 | }, 47 | redis: { 48 | cluster: 'redis', 49 | addresses: ['10.0.0.1'] 50 | }, 51 | postgresql: { 52 | cluster: 'postgresql', 53 | addresses: ['10.0.0.2'] 54 | } 55 | } 56 | }); 57 | }); 58 | }); 59 | 60 | it('handles errors safely', function() { 61 | const consul = new Consul('test'); 62 | 63 | consul.client = Stub; 64 | consul.client.health.service = (options, callback) => { 65 | callback(new Error('This is a test error!'), null); 66 | }; 67 | 68 | return consul.initialize().then(() => { 69 | expect(consul.state).to.equal(Consul.ERROR); 70 | expect(consul.properties).to.eql({}); 71 | }); 72 | }); 73 | }); 74 | -------------------------------------------------------------------------------- /test/core-api-v1.js: -------------------------------------------------------------------------------- 1 | 'use strict'; 2 | 3 | const request = require('supertest'); 4 | const Properties = require('../dist/lib/properties'); 5 | const Sources = require('../dist/lib/sources'); 6 | const S3 = require('../dist/lib/source/s3'); 7 | 8 | require('should'); 9 | 10 | const testServerPort = 3000; 11 | const HTTP_OK = 200; 12 | const HTTP_METHOD_NOT_ALLOWED = 405; 13 | 14 | const endpoints = { 15 | health: '/v1/health', 16 | status: '/v1/status' 17 | }; 18 | 19 | const expectedStatusResponse = { 20 | status: HTTP_OK, 21 | index: { 22 | ok: true, 23 | updated: null, 24 | interval: 60000, 25 | running: false, 26 | etag: null, 27 | state: 'CREATED', 28 | resource: 's3://test-bucket/index.json', 29 | name: 'index.json', 30 | type: 's3' 31 | }, 32 | indices: [{ 33 | ok: true, 34 | updated: null, 35 | interval: 60000, 36 | running: false, 37 | etag: null, 38 | state: 'CREATED', 39 | resource: 's3://test-bucket/index.json', 40 | name: 'index.json', 41 | type: 's3' 42 | }], 43 | sources: [{ 
44 | name: 'foo-bar-baz.json', 45 | type: 's3', 46 | status: 'okay', 47 | updated: null, 48 | etag: null, 49 | state: 'CREATED', 50 | resource: 's3://test-bucket/foo-bar-baz.json', 51 | ok: true, 52 | interval: 60000 53 | }, { 54 | name: 'foo-quiz-buzz.json', 55 | type: 's3', 56 | status: 'okay', 57 | updated: null, 58 | etag: null, 59 | state: 'CREATED', 60 | resource: 's3://test-bucket/foo-quiz-buzz.json', 61 | ok: true, 62 | interval: 60000 63 | }] 64 | }; 65 | 66 | const properties = new Properties(); 67 | 68 | properties.dynamic(new S3('foo-bar-baz.json', { 69 | bucket: 'test-bucket', 70 | path: 'foo-bar-baz.json' 71 | }), 'test'); 72 | 73 | properties.dynamic(new S3('foo-quiz-buzz.json', { 74 | bucket: 'test-bucket', 75 | path: 'foo-quiz-buzz.json' 76 | }), 'test'); 77 | 78 | const sources = new Sources(properties); 79 | 80 | sources.index(new S3('index.json', { 81 | bucket: 'test-bucket', 82 | path: 'index.json' 83 | })); 84 | 85 | /** 86 | * Create a new Express server for testing 87 | * 88 | * @return {http.Server} 89 | */ 90 | const makeServer = () => { 91 | const app = require('express')(); 92 | 93 | require('../dist/lib/control/v1/core').attach(app, sources); 94 | 95 | return app.listen(testServerPort); 96 | }; 97 | 98 | describe('Core API v1', () => { 99 | let server = null; 100 | 101 | beforeEach(() => { 102 | server = makeServer(); 103 | }); 104 | 105 | afterEach((done) => { 106 | server.close(done); 107 | }); 108 | 109 | for (const endpoint in endpoints) { 110 | if (!endpoints.hasOwnProperty(endpoint)) { 111 | continue; 112 | } 113 | 114 | it(`acknowledges GET requests to the ${endpoint} endpoint`, (done) => { 115 | request(server) 116 | .get(endpoints[endpoint]) 117 | .set('Accept', 'application/json') 118 | .expect('Content-Type', 'application/json; charset=utf-8') 119 | .expect(HTTP_OK) 120 | .end(done); 121 | }); 122 | 123 | it(`rejects all other request types to the ${endpoint} endpoint`, (done) => { 124 | request(server) 125 | 
.delete(endpoints[endpoint]) 126 | .expect('Allow', 'GET') 127 | .expect(HTTP_METHOD_NOT_ALLOWED); 128 | 129 | request(server) 130 | .put(endpoints[endpoint]) 131 | .expect('Allow', 'GET') 132 | .expect(HTTP_METHOD_NOT_ALLOWED); 133 | 134 | request(server) 135 | .post(endpoints[endpoint]) 136 | .expect('Allow', 'GET') 137 | .expect(HTTP_METHOD_NOT_ALLOWED) 138 | .end(done); 139 | }); 140 | } 141 | 142 | it('responds correctly to a request to the /status endpoint', (done) => { 143 | request(server) 144 | .get(endpoints.status) 145 | .set('Accept', 'application/json') 146 | .expect('Content-Type', 'application/json; charset=utf-8') 147 | .expect(HTTP_OK) 148 | .end((err, res) => { 149 | res.body.should.have.properties(expectedStatusResponse); 150 | res.body.should.have.property('uptime'); 151 | res.body.should.have.property('version'); 152 | done(); 153 | }); 154 | }); 155 | 156 | it('responds correctly to a request to the /health endpoint', (done) => { 157 | request(server) 158 | .get(endpoints.health) 159 | .set('Accept', 'application/json') 160 | .expect('Content-Type', 'application/json; charset=utf-8') 161 | .expect(HTTP_OK) 162 | .end((err, res) => { 163 | res.body.should.have.properties({status: HTTP_OK, plugins: {s3: expectedStatusResponse.sources.length}}); 164 | res.body.should.have.property('uptime'); 165 | res.body.should.have.property('version'); 166 | done(); 167 | }); 168 | }); 169 | 170 | it('returns a 500 if any source plugins fail'); 171 | 172 | it('returns a 429 if any source plugins have a warning'); 173 | }); 174 | -------------------------------------------------------------------------------- /test/data/config.json: -------------------------------------------------------------------------------- 1 | { 2 | "index": { 3 | "path": "homepage.json", 4 | "interval": 30, 5 | "region": "Neptune-99" 6 | }, 7 | "service": { 8 | "port": 1006, 9 | "hostname": "local" 10 | }, 11 | "log": { 12 | "level": "info" 13 | }, 14 | "consul": { 15 | "host": 
"196.198.10.10", 16 | "port": 8500, 17 | "secure": false 18 | }, 19 | "power": { 20 | "level": 9001, 21 | "interval": 1 22 | } 23 | } 24 | -------------------------------------------------------------------------------- /test/data/consul-catalog-services.json: -------------------------------------------------------------------------------- 1 | { 2 | "consul": [], 3 | "redis": [], 4 | "postgresql": [ 5 | "master", 6 | "slave" 7 | ] 8 | } 9 | -------------------------------------------------------------------------------- /test/data/consul-health-service.json: -------------------------------------------------------------------------------- 1 | { 2 | "consul": [{ 3 | "Node": { 4 | "Name": "redis", 5 | "Address": "10.0.0.1" 6 | }, 7 | "Service": { 8 | "Name": "consul" 9 | }, 10 | "Checks": [{ 11 | "Node": "redis", 12 | "CheckID": "serfHealth", 13 | "Status": "passing" 14 | }] 15 | }, { 16 | "Node": { 17 | "Name": "postgresql-master", 18 | "Address": "127.0.0.1" 19 | }, 20 | "Service": { 21 | "Name": "consul", 22 | "Address": "10.0.0.2" 23 | }, 24 | "Checks": [{ 25 | "Node": "postgresql-master", 26 | "CheckID": "serfHealth", 27 | "Status": "passing" 28 | }] 29 | }, { 30 | "Node": { 31 | "Name": "postgresql-slave", 32 | "Address": "127.0.0.1" 33 | }, 34 | "Service": { 35 | "Name": "consul", 36 | "Address": "10.0.0.3" 37 | }, 38 | "Checks": [{ 39 | "Node": "postgresql-master", 40 | "CheckID": "serfHealth", 41 | "Status": "passing" 42 | }] 43 | }], 44 | "redis": [{ 45 | "Node": { 46 | "Name": "redis", 47 | "Address": "10.0.0.1" 48 | }, 49 | "Service": { 50 | "Name": "redis" 51 | }, 52 | "Checks": [{ 53 | "Node": "redis", 54 | "CheckID": "serfHealth", 55 | "Status": "passing" 56 | }] 57 | }], 58 | "postgresql": [{ 59 | "Node": { 60 | "Name": "postgresql-master", 61 | "Address": "127.0.0.1" 62 | }, 63 | "Service": { 64 | "Name": "postgresql", 65 | "Address": "10.0.0.2" 66 | }, 67 | "Checks": [{ 68 | "Node": "postgresql-master", 69 | "CheckID": "service:postgresql", 70 | 
"Status": "passing" 71 | }, { 72 | "Node": "postgresql-master", 73 | "CheckID": "serfHealth", 74 | "Status": "passing" 75 | }] 76 | }, { 77 | "Node": { 78 | "Name": "postgresql-slave", 79 | "Address": "127.0.0.1" 80 | }, 81 | "Service": { 82 | "Name": "postgresql", 83 | "Address": "10.0.0.3" 84 | }, 85 | "Checks": [{ 86 | "Node": "postgresql-slave", 87 | "CheckID": "service:postgresql", 88 | "Status": "failing" 89 | }, { 90 | "Node": "postgresql-master", 91 | "CheckID": "serfHealth", 92 | "Status": "passing" 93 | }] 94 | }] 95 | } 96 | -------------------------------------------------------------------------------- /test/data/metadata-paths.json: -------------------------------------------------------------------------------- 1 | { 2 | "/latest/dynamic/": "instance-identity/\n", 3 | "/latest/meta-data/": "ami-id\nami-launch-index\nami-manifest-path\nblock-device-mapping/\nhostname\niam/\ninstance-action\ninstance-id\ninstance-type\nlocal-hostname\nlocal-ipv4\nmac\nmetrics/\nnetwork/\nplacement/\nprofile\npublic-hostname\npublic-ipv4\npublic-keys/\nreservation-id\nsecurity-groups\nservices/", 4 | "/latest/dynamic/instance-identity/": "document\nsignature\ndsa2048\npkcs7\n", 5 | "/latest/meta-data/placement/": "availability-zone", 6 | "/latest/meta-data/network/": "interfaces/", 7 | "/latest/meta-data/metrics/": "vhostmd", 8 | "/latest/meta-data/mac": "0e:9c:a1:fe:2f:ef", 9 | "/latest/meta-data/local-ipv4": "10.196.24.63", 10 | "/latest/meta-data/local-hostname": "ip-10-196-24-63.ec2.internal", 11 | "/latest/meta-data/instance-id": "i-aaaf2d1a", 12 | "/latest/meta-data/instance-type": "t2.small", 13 | "/latest/meta-data/instance-action": "none", 14 | "/latest/meta-data/hostname": "ip-10-196-24-63.ec2.internal", 15 | "/latest/meta-data/iam/": "info\nsecurity-credentials/", 16 | "/latest/meta-data/block-device-mapping/": "ami\nephemeral0\nephemeral1\nroot", 17 | "/latest/meta-data/ami-manifest-path": "(unknown)", 18 | "/latest/meta-data/ami-launch-index": "0", 19 | 
"/latest/meta-data/ami-id": "ami-bcbffad6", 20 | "/latest/meta-data/profile": "default-hvm", 21 | "/latest/meta-data/public-hostname": "ec2-1-2-3-4.compute-1.amazonaws.com", 22 | "/latest/meta-data/public-ipv4": "1.2.3.4", 23 | "/latest/meta-data/public-keys/": "0=fakekeys", 24 | "/latest/meta-data/reservation-id": "r-fake", 25 | "/latest/meta-data/security-groups": "fake-fake\nfoo-bar-baz", 26 | "/latest/meta-data/services/": "domain\npartition", 27 | "/latest/dynamic/instance-identity/document": "{\n \"privateIp\" : \"10.196.24.63\",\n \"devpayProductCodes\" : null,\n \"availabilityZone\" : \"us-east-1a\",\n \"version\" : \"2010-08-31\",\n \"instanceId\" : \"i-aaaf2d1a\",\n \"billingProducts\" : null,\n \"instanceType\" : \"t2.small\",\n \"accountId\" : \"000000000\",\n \"imageId\" : \"ami-bcbffad6\",\n \"pendingTime\" : \"2015-11-18T19:01:04Z\",\n \"kernelId\" : null,\n \"ramdiskId\" : null,\n \"architecture\" : \"x86_64\",\n \"region\" : \"us-east-1\"\n}", 28 | "/latest/dynamic/instance-identity/signature": "gbd9X9RJfapaoOPN9Cm2a8A5FGPOJQjLA+SObRInmNnTyj3d72hUqgZvBHz8dq4xLVAJHSyEC3JX\nbgYF8jVAtAY7hYQM9PzgJEeHg0XjXz6ynIFlbPakdRgo4Se3ISX1hM6qU/SwxEJj9omL2S2n6Nao\nmqDew5xlXIgutDYQwgQ=", 29 | "/latest/dynamic/instance-identity/dsa2048": "ssINvJ9x9+zH6Tmll2JVr2ISzjmAlbvQA6dTZtAi2eWGyX2Mewedb59wKVWXnwihNudAMEGniDYt\ndNmWInburTFGZ/AUxtqK1CY2/bO4cg4tNxsmipxpknEOpSbsrMsc6Dd+SIi1FuiI61FKMrrIvHOn\nrHd6SylWfhnMgS7txLThFxw3SE/UqIZW8BCbvvO+c/mWrboAx14kNFq6a/1QsP5+8j9Zk6+aAM/L\npI8Q47XnJxz6W0DkK09f6pOOI+UyHStThAZUSm93Pe6j3U/RvCZM7fRpG3T/gDGyU68sGtIgoqTb\noF/PVDQYOISFYMn+T+98mk2HTuv96PNWMAvhDQ==", 30 | "/latest/dynamic/instance-identity/pkcs7": 
"MIAGCSqGSIb3DQEHAqCAMIACAQExCzAJBgUrDgMCGgUAMIAGCSqGSIb3DQEHAaCAJIAEggGmewog\nICJwcml2YXRlSXAiIDogIjEwLjE5Ni4yNC42MyIsCiAgImRldnBheVByb2R1Y3RDb2RlcyIgOiBu\ndWxsLAogICJhdmFpbGFiaWxpdHlab25lIiA6ICJ1cy1lYXN0LTFhIiwKICAidmVyc2lvbiIgOiAi\nMjAxMC0wOC0zMSIsCiAgImluc3RhbmNlSWQiIDogImktYWFhZjJkMWEiLAogICJiaWxsaW5nUHJv\nZHVjdHMiIDogbnVsbCwKICAiaW5zdGFuY2VUeXBlIiA6ICJ0Mi5zbWFsbCIsCiAgImFjY291bnRJ\nZCIgOiAiNzE2NzU2MTk5NTYyIiwKICAiaW1hZ2VJZCIgOiAiYW1pLWJjYmZmYWQ2IiwKICAicGVu\nZGluZ1RpbWUiIDogIjIwMTUtMTEtMThUMTk6MDE6MDRaIiwKICAia2VybmVsSWQiIDogbnVsbCwK\nICAicmFtZGlza0lkIiA6IG51bGwsCiAgImFyY2hpdGVjdHVyZSIgOiAieDg2XzY0IiwKICAicmVn\naW9uIiA6ICJ1cy1lYXN0LTEiCn0AAAAAAAAxggEYMIIBFAIBATBpMFwxCzAJBgNVBAYTAlVTMRkw\nFwYDVQQIExBXYXNoaW5ndG9uIFN0YXRlMRAwDgYDVQQHEwdTZWF0dGxlMSAwHgYDVQQKExdBbWF6\nb24gV2ViIFNlcnZpY2VzIExMQwIJAJa6SNnlXhpnMAkGBSsOAwIaBQCgXTAYBgkqhkiG9w0BCQMx\nCwYJKoZIhvcNAQcBMBwGCSqGSIb3DQEJBTEPFw0xNTExMTgxOTAxMThaMCMGCSqGSIb3DQEJBDEW\nBBRl2oC56YzkPa83VvQzeoMUqMElUzAJBgcqhkjOOAQDBC8wLQIUC/Ab91UXE/K7obsWxdj3DNx2\nKEsCFQCkBRpQBr8yeJQAzUx3Kd8VhwGyhQAAAAAAAA==", 31 | "/latest/meta-data/placement/availability-zone": "us-east-1a", 32 | "/latest/meta-data/network/interfaces/": "macs/", 33 | "/latest/meta-data/metrics/vhostmd": "", 34 | "/latest/meta-data/iam/info": "{\n \"Code\" : \"Success\",\n \"LastUpdated\" : \"2016-06-06T20:42:46Z\",\n \"InstanceProfileArn\" : \"arn:aws:iam::716756199562:instance-profile/fake/fake/fake-fake\",\n \"InstanceProfileId\" : \"FAKE12344\"\n}", 35 | "/latest/meta-data/iam/security-credentials/": "fake-fake", 36 | "/latest/meta-data/block-device-mapping/ami": "/dev/sda1", 37 | "/latest/meta-data/block-device-mapping/ephemeral0": "sdb", 38 | "/latest/meta-data/block-device-mapping/ephemeral1": "sdc", 39 | "/latest/meta-data/block-device-mapping/root": "/dev/sda1", 40 | "/latest/meta-data/services/partition": "aws", 41 | "/latest/meta-data/services/domain": "amazonaws.com", 42 | "/latest/meta-data/network/interfaces/macs/": "0e:9c:a1:fe:2f:ef/", 43 | 
"/latest/meta-data/iam/security-credentials/fake-fake": "{\n \"Code\" : \"Success\",\n \"LastUpdated\" : \"2016-06-06T20:42:15Z\",\n \"Type\" : \"AWS-HMAC\",\n \"AccessKeyId\" : \"FAKE1234\",\n \"SecretAccessKey\" : \"foobarbazRandomBase64\",\n \"Token\" : \"nopenopenope=\",\n \"Expiration\" : \"2016-06-07T02:48:58Z\"\n}", 44 | "/latest/meta-data/network/interfaces/macs/0e:9c:a1:fe:2f:ef/": "device-number\ninterface-id\nipv4-associations/\nlocal-hostname\nlocal-ipv4s\nmac\nowner-id\npublic-hostname\npublic-ipv4s\nsecurity-group-ids\nsecurity-groups\nsubnet-id\nsubnet-ipv4-cidr-block\nvpc-id\nvpc-ipv4-cidr-block", 45 | "/latest/meta-data/network/interfaces/macs/0e:9c:a1:fe:2f:ef/vpc-id": "vpc-fake", 46 | "/latest/meta-data/network/interfaces/macs/0e:9c:a1:fe:2f:ef/vpc-ipv4-cidr-block": "10.196.0.0/18", 47 | "/latest/meta-data/network/interfaces/macs/0e:9c:a1:fe:2f:ef/subnet-ipv4-cidr-block": "10.196.24.0/25", 48 | "/latest/meta-data/network/interfaces/macs/0e:9c:a1:fe:2f:ef/subnet-id": "subnet-fake", 49 | "/latest/meta-data/network/interfaces/macs/0e:9c:a1:fe:2f:ef/security-groups": "fake-fake\nfoo-bar-baz", 50 | "/latest/meta-data/network/interfaces/macs/0e:9c:a1:fe:2f:ef/security-group-ids": "sg-fake1\nsg-fake2", 51 | "/latest/meta-data/network/interfaces/macs/0e:9c:a1:fe:2f:ef/public-ipv4s": "1.2.3.4", 52 | "/latest/meta-data/network/interfaces/macs/0e:9c:a1:fe:2f:ef/public-hostname": "ec2-1-2-3-4.compute-1.amazonaws.com", 53 | "/latest/meta-data/network/interfaces/macs/0e:9c:a1:fe:2f:ef/mac": "0e:9c:a1:fe:2f:ef", 54 | "/latest/meta-data/network/interfaces/macs/0e:9c:a1:fe:2f:ef/owner-id": "716756199562", 55 | "/latest/meta-data/network/interfaces/macs/0e:9c:a1:fe:2f:ef/local-ipv4s": "10.196.24.63", 56 | "/latest/meta-data/network/interfaces/macs/0e:9c:a1:fe:2f:ef/local-hostname": "ip-10-196-24-63.ec2.internal", 57 | "/latest/meta-data/network/interfaces/macs/0e:9c:a1:fe:2f:ef/ipv4-associations/": "1.2.3.4", 58 | 
"/latest/meta-data/network/interfaces/macs/0e:9c:a1:fe:2f:ef/device-number": "0", 59 | "/latest/meta-data/network/interfaces/macs/0e:9c:a1:fe:2f:ef/interface-id": "eni-fake", 60 | "/latest/meta-data/network/interfaces/macs/0e:9c:a1:fe:2f:ef/ipv4-associations/1.2.3.4": "10.196.24.63" 61 | } 62 | -------------------------------------------------------------------------------- /test/data/metadata-values.json: -------------------------------------------------------------------------------- 1 | { 2 | "dynamic/instance-identity/pkcs7": "MIAGCSqGSIb3DQEHAqCAMIACAQExCzAJBgUrDgMCGgUAMIAGCSqGSIb3DQEHAaCAJIAEggGmewog\nICJwcml2YXRlSXAiIDogIjEwLjE5Ni4yNC42MyIsCiAgImRldnBheVByb2R1Y3RDb2RlcyIgOiBu\ndWxsLAogICJhdmFpbGFiaWxpdHlab25lIiA6ICJ1cy1lYXN0LTFhIiwKICAidmVyc2lvbiIgOiAi\nMjAxMC0wOC0zMSIsCiAgImluc3RhbmNlSWQiIDogImktYWFhZjJkMWEiLAogICJiaWxsaW5nUHJv\nZHVjdHMiIDogbnVsbCwKICAiaW5zdGFuY2VUeXBlIiA6ICJ0Mi5zbWFsbCIsCiAgImFjY291bnRJ\nZCIgOiAiNzE2NzU2MTk5NTYyIiwKICAiaW1hZ2VJZCIgOiAiYW1pLWJjYmZmYWQ2IiwKICAicGVu\nZGluZ1RpbWUiIDogIjIwMTUtMTEtMThUMTk6MDE6MDRaIiwKICAia2VybmVsSWQiIDogbnVsbCwK\nICAicmFtZGlza0lkIiA6IG51bGwsCiAgImFyY2hpdGVjdHVyZSIgOiAieDg2XzY0IiwKICAicmVn\naW9uIiA6ICJ1cy1lYXN0LTEiCn0AAAAAAAAxggEYMIIBFAIBATBpMFwxCzAJBgNVBAYTAlVTMRkw\nFwYDVQQIExBXYXNoaW5ndG9uIFN0YXRlMRAwDgYDVQQHEwdTZWF0dGxlMSAwHgYDVQQKExdBbWF6\nb24gV2ViIFNlcnZpY2VzIExMQwIJAJa6SNnlXhpnMAkGBSsOAwIaBQCgXTAYBgkqhkiG9w0BCQMx\nCwYJKoZIhvcNAQcBMBwGCSqGSIb3DQEJBTEPFw0xNTExMTgxOTAxMThaMCMGCSqGSIb3DQEJBDEW\nBBRl2oC56YzkPa83VvQzeoMUqMElUzAJBgcqhkjOOAQDBC8wLQIUC/Ab91UXE/K7obsWxdj3DNx2\nKEsCFQCkBRpQBr8yeJQAzUx3Kd8VhwGyhQAAAAAAAA==", 3 | "dynamic/instance-identity/document": "{\n \"privateIp\" : \"10.196.24.63\",\n \"devpayProductCodes\" : null,\n \"availabilityZone\" : \"us-east-1a\",\n \"version\" : \"2010-08-31\",\n \"instanceId\" : \"i-aaaf2d1a\",\n \"billingProducts\" : null,\n \"instanceType\" : \"t2.small\",\n \"accountId\" : \"000000000\",\n \"imageId\" : \"ami-bcbffad6\",\n \"pendingTime\" : \"2015-11-18T19:01:04Z\",\n 
\"kernelId\" : null,\n \"ramdiskId\" : null,\n \"architecture\" : \"x86_64\",\n \"region\" : \"us-east-1\"\n}", 4 | "meta-data/public-hostname": "ec2-1-2-3-4.compute-1.amazonaws.com", 5 | "meta-data/public-ipv4": "1.2.3.4", 6 | "meta-data/reservation-id": "r-fake", 7 | "meta-data/security-groups": "fake-fake\nfoo-bar-baz", 8 | "meta-data/placement/availability-zone": "us-east-1a", 9 | "meta-data/network/interfaces/macs/0e:9c:a1:fe:2f:ef/local-hostname": "ip-10-196-24-63.ec2.internal", 10 | "meta-data/network/interfaces/macs/0e:9c:a1:fe:2f:ef/local-ipv4s": "10.196.24.63", 11 | "meta-data/network/interfaces/macs/0e:9c:a1:fe:2f:ef/mac": "0e:9c:a1:fe:2f:ef", 12 | "meta-data/network/interfaces/macs/0e:9c:a1:fe:2f:ef/owner-id": "716756199562", 13 | "meta-data/network/interfaces/macs/0e:9c:a1:fe:2f:ef/public-hostname": "ec2-1-2-3-4.compute-1.amazonaws.com", 14 | "meta-data/network/interfaces/macs/0e:9c:a1:fe:2f:ef/public-ipv4s": "1.2.3.4", 15 | "meta-data/network/interfaces/macs/0e:9c:a1:fe:2f:ef/security-group-ids": "sg-fake1\nsg-fake2", 16 | "meta-data/network/interfaces/macs/0e:9c:a1:fe:2f:ef/security-groups": "fake-fake\nfoo-bar-baz", 17 | "meta-data/network/interfaces/macs/0e:9c:a1:fe:2f:ef/subnet-id": "subnet-fake", 18 | "meta-data/network/interfaces/macs/0e:9c:a1:fe:2f:ef/subnet-ipv4-cidr-block": "10.196.24.0/25", 19 | "meta-data/network/interfaces/macs/0e:9c:a1:fe:2f:ef/vpc-id": "vpc-fake", 20 | "meta-data/network/interfaces/macs/0e:9c:a1:fe:2f:ef/vpc-ipv4-cidr-block": "10.196.0.0/18", 21 | "meta-data/network/interfaces/macs/0e:9c:a1:fe:2f:ef/ipv4-associations/1.2.3.4": "10.196.24.63", 22 | "meta-data/network/interfaces/macs/0e:9c:a1:fe:2f:ef/interface-id": "eni-fake", 23 | "meta-data/network/interfaces/macs/0e:9c:a1:fe:2f:ef/device-number": "0", 24 | "meta-data/mac": "0e:9c:a1:fe:2f:ef", 25 | "meta-data/local-ipv4": "10.196.24.63", 26 | "meta-data/local-hostname": "ip-10-196-24-63.ec2.internal", 27 | "meta-data/instance-type": "t2.small", 28 | 
"meta-data/instance-id": "i-aaaf2d1a", 29 | "meta-data/iam/security-credentials/fake-fake": "{\n \"Code\" : \"Success\",\n \"LastUpdated\" : \"2016-06-06T20:42:15Z\",\n \"Type\" : \"AWS-HMAC\",\n \"AccessKeyId\" : \"FAKE1234\",\n \"SecretAccessKey\" : \"foobarbazRandomBase64\",\n \"Token\" : \"nopenopenope=\",\n \"Expiration\" : \"2016-06-07T02:48:58Z\"\n}", 30 | "meta-data/hostname": "ip-10-196-24-63.ec2.internal", 31 | "meta-data/ami-id": "ami-bcbffad6" 32 | } 33 | -------------------------------------------------------------------------------- /test/data/s3/account/12345.json: -------------------------------------------------------------------------------- 1 | { 2 | "version": 1.0, 3 | "properties": { 4 | "global": "global", 5 | "account": 123456789010, 6 | "region": "region", 7 | "vpc_id": "vpc", 8 | "produce": "product", 9 | "stack": "stack", 10 | "service": "service", 11 | "version": "version", 12 | "asg": "asg", 13 | "space": "spaz", 14 | "maxCassandraConnects": 1 15 | } 16 | } 17 | -------------------------------------------------------------------------------- /test/data/s3/ami-bcbffad6.json: -------------------------------------------------------------------------------- 1 | { 2 | "version": 1.0, 3 | "properties": { 4 | "baz": 3, 5 | "quiz": 4 6 | } 7 | } 8 | -------------------------------------------------------------------------------- /test/data/s3/global.json: -------------------------------------------------------------------------------- 1 | { 2 | "version": 1.0, 3 | "properties": { 4 | "global": "global", 5 | "account": "account", 6 | "region": "region", 7 | "vpc_id": "vpc", 8 | "produce": "product", 9 | "stack": "stack", 10 | "service": "service", 11 | "version": "version", 12 | "asg": "asg", 13 | "foo": "bar", 14 | "test": true, 15 | "maxCassandraConnects": 0 16 | } 17 | } 18 | -------------------------------------------------------------------------------- /test/data/s3/index.json: 
-------------------------------------------------------------------------------- 1 | { 2 | "version": 1.0, 3 | "sources": [ 4 | { 5 | "name": "global", 6 | "type": "s3", 7 | "parameters": { 8 | "path": "global.json" 9 | } 10 | }, 11 | { 12 | "name": "account", 13 | "type": "s3", 14 | "parameters": { 15 | "path": "account/12345.json" 16 | } 17 | }, 18 | { 19 | "name": "ami", 20 | "type": "s3", 21 | "parameters": { 22 | "path": "{{ instance:ami-id }}.json" 23 | } 24 | }, 25 | { 26 | "name": "iam-role", 27 | "type": "s3", 28 | "parameters": { 29 | "path": "role/{{ instance:iam-role }}.json" 30 | } 31 | }, 32 | { 33 | "name": "name-tag", 34 | "type": "s3", 35 | "parameters": { 36 | "path": "{{ instance:tags:Name }}.json" 37 | } 38 | }, 39 | { 40 | "name": "consul", 41 | "type": "consul" 42 | } 43 | ] 44 | } 45 | -------------------------------------------------------------------------------- /test/data/s3/role/fake-fake.json: -------------------------------------------------------------------------------- 1 | { 2 | "version": 1.0, 3 | "properties": { 4 | "only.set.on.fake-fake": true 5 | } 6 | } 7 | -------------------------------------------------------------------------------- /test/lib/helpers.js: -------------------------------------------------------------------------------- 1 | 'use strict'; 2 | 3 | global.Log = new (require('winston').Logger)(); 4 | global.Config = require('nconf'); 5 | 6 | Config.defaults({ 7 | // The S3 Source module uses some Config parameters as defaults 8 | index: { 9 | path: 'index.json', 10 | interval: 30000, 11 | region: 'us-east-1', 12 | bucket: 'fake-default-bucket-for-testing' 13 | }, 14 | tokend: { 15 | host: '127.0.0.1', 16 | port: 4500, 17 | interval: 10 18 | }, 19 | consul: { 20 | host: '127.0.0.1', 21 | port: 8500, 22 | secure: false 23 | } 24 | }); 25 | -------------------------------------------------------------------------------- /test/lib/stub/consul.js: 
-------------------------------------------------------------------------------- 1 | 'use strict'; 2 | 3 | const EventEmitter = require('events').EventEmitter; 4 | 5 | const checks = require('../../data/consul-checks.json'); 6 | const nodes = require('../../data/consul-nodes.json'); 7 | const services = require('../../data/consul-catalog-services.json'); 8 | const health = require('../../data/consul-health-service.json'); 9 | 10 | class Watcher extends EventEmitter { 11 | constructor(data) { 12 | super(); 13 | this.data = data; 14 | } 15 | 16 | change() { 17 | this.emit('change', this.data); 18 | } 19 | 20 | error() { 21 | this.emit('error', new Error('This is a test error!')); 22 | } 23 | 24 | end() {} 25 | } 26 | exports.Watcher = Watcher; 27 | 28 | // Data mappings. These get passed as the `method` parameter of `watch` 29 | exports.health = { 30 | service: function service(options, callback) { 31 | const name = options.service; 32 | let results = health[name]; 33 | 34 | if (options.passing) { 35 | results = results.filter((node) => !!node.Checks.every((check) => check.Status === 'passing')); 36 | } 37 | 38 | setTimeout(function _() { 39 | callback(null, results); 40 | }, 150); 41 | }, 42 | state: checks 43 | }; 44 | 45 | // Method stubs 46 | exports.watch = function watch(options) { 47 | if (!options.method) { 48 | throw ReferenceError('No method provided for watcher!'); 49 | } 50 | 51 | return new Watcher(options.method); 52 | }; 53 | 54 | exports.catalog = { 55 | service: { 56 | list: function list(options, callback) { 57 | setTimeout(function _() { 58 | callback(null, services); 59 | }, 150); 60 | } 61 | }, 62 | node: { 63 | list: function list(callback) { 64 | // Simulate a little bit of network-service latency 65 | setTimeout(function _() { 66 | callback(null, nodes); 67 | }, 150); 68 | } 69 | } 70 | }; 71 | 72 | // Export some useful datasets to test against 73 | exports.data = { 74 | checks: { 75 | passing: checks.filter((check) => check.Status === 
'passing'), 76 | warning: checks.filter((check) => check.Status === 'warning'), 77 | critical: checks.filter((check) => check.Status === 'critical') 78 | } 79 | }; 80 | -------------------------------------------------------------------------------- /test/lib/stub/source.js: -------------------------------------------------------------------------------- 1 | 'use strict'; 2 | 3 | const Common = require('../../../dist/lib/source/common'); 4 | 5 | class Parser { 6 | constructor() { 7 | this.properties = {}; 8 | this.sources = []; 9 | } 10 | 11 | update(data) { 12 | this.properties = data.properties; 13 | this.sources = data.sources || []; 14 | } 15 | } 16 | 17 | class Stub extends Common(Parser) { 18 | constructor(name, opts) { 19 | // Inject defaults into options 20 | const options = Object.assign({ 21 | type: 'stub', 22 | delay: 250 + Math.floor(Math.random() * 250) 23 | }, opts); 24 | 25 | super('stub', options); 26 | this.delay = options.delay; 27 | this.name = name; 28 | this.properties = {}; 29 | } 30 | 31 | initialize() { 32 | const initialized = super.initialize(); 33 | 34 | // Simulate a network request 35 | setTimeout(() => { 36 | this._update({properties: this.properties}); 37 | }, this.delay); 38 | 39 | return initialized; 40 | } 41 | 42 | update(properties) { 43 | this._update({properties}); 44 | } 45 | 46 | error(err) { 47 | this._error(err || new Error('this is a stub error')); 48 | } 49 | 50 | recover() { 51 | this._update({properties: this.properties}); 52 | } 53 | } 54 | 55 | class NoExistStub extends Stub { 56 | constructor(properties, options) { 57 | super(properties, options); 58 | } 59 | 60 | initialize() { 61 | const initialized = super.initialize(); 62 | 63 | setImmediate(() => this._update(Common.NO_EXIST)); 64 | 65 | return initialized; 66 | } 67 | } 68 | 69 | class ErrorStub extends Stub { 70 | constructor(properties, options) { 71 | super(properties, options); 72 | } 73 | 74 | initialize() { 75 | const initialized = super.initialize(); 76 
| 77 | setImmediate(() => this._error(new Error('This is a test error'))); 78 | 79 | return initialized; 80 | } 81 | 82 | } 83 | 84 | class PollingStub extends Common.Polling(Parser) { 85 | constructor(properties, options) { 86 | super('polling-stub', options); 87 | this.properties = properties; 88 | } 89 | 90 | _fetch(callback) { 91 | setImmediate(() => callback(null, {properties: this.properties})); 92 | } 93 | } 94 | 95 | class IndexStub extends Common(Parser) { 96 | constructor(sources) { 97 | super('index', {}); 98 | this.sources = sources; 99 | } 100 | 101 | initialize() { 102 | const initialized = super.initialize(); 103 | 104 | setImmediate(() => this._update({sources: this.sources})); 105 | 106 | return initialized; 107 | } 108 | 109 | update(sources) { 110 | this._update({sources}); 111 | } 112 | 113 | error(err) { 114 | this._error(err || new Error('this is a stub error')); 115 | } 116 | 117 | recover() { 118 | this._update({sources: this.sources}); 119 | } 120 | } 121 | 122 | // Wrap a namespace around the common module. This exposes 123 | // constants and helpers as one would expect, but protects the namespace 124 | // of the cached Common module. 
125 | module.exports = class extends Common.Class {}; 126 | 127 | module.exports.Common = Common; 128 | module.exports.Stub = Stub; 129 | module.exports.NoExistStub = NoExistStub; 130 | module.exports.ErrorStub = ErrorStub; 131 | module.exports.PollingStub = PollingStub; 132 | module.exports.IndexStub = IndexStub; 133 | -------------------------------------------------------------------------------- /test/logger.js: -------------------------------------------------------------------------------- 1 | 'use strict'; 2 | 3 | require('should'); 4 | const Winston = require('winston'); 5 | 6 | class ConfigLike { 7 | constructor(data) { 8 | this.data = data; 9 | } 10 | 11 | get(str) { 12 | return this.data[str]; 13 | } 14 | } 15 | 16 | describe('Logging', () => { 17 | const config = new ConfigLike({ 18 | 'log:level': 'info', 19 | 'log:access:level': 'verbose' 20 | }); 21 | const log = require('../dist/lib/logger').attach(config.get('log:level')); 22 | 23 | it('returns a WINSTON object', () => { 24 | log.should.be.an.instanceOf(Winston.Logger); 25 | }); 26 | 27 | it('sets the log level correctly', () => { 28 | log.level.should.be.exactly('INFO'); 29 | }); 30 | 31 | describe('File logging', () => { 32 | const fileLog = require('../dist/lib/logger').attach('INFO', 'tmp.log'); 33 | 34 | fileLog.remove(Winston.transports.Console); 35 | 36 | it('writes to the correct file', (done) => { 37 | fileLog.on('logging', (transport, level, msg) => { 38 | transport.name.should.equal('file'); 39 | transport.filename.should.equal('tmp.log'); 40 | msg.should.equal('Test logging message'); 41 | done(); 42 | }); 43 | 44 | fileLog.log('INFO', 'Test logging message'); 45 | }); 46 | 47 | it('optionally logs to a file', () => { 48 | const logger = require('../dist/lib/logger').attach('info'); 49 | 50 | Object.keys(logger.transports).should.eql(['console']); 51 | }); 52 | 53 | it('displays a deprecation warning when instantiating a file logger', (done) => { 54 | process.on('deprecation', (err) => { 
55 | err.name.should.equal('DeprecationError'); 56 | err.namespace.should.equal('propsd'); 57 | err.message.should.equal('The file transport has been deprecated and will be removed in a later version'); 58 | done(); 59 | }); 60 | 61 | require('../dist/lib/logger').attach('info', 'tmp.log'); 62 | }); 63 | }); 64 | }); 65 | -------------------------------------------------------------------------------- /test/source-common.js: -------------------------------------------------------------------------------- 1 | 'use strict'; 2 | 3 | require('./lib/helpers'); 4 | 5 | const Source = require('./lib/stub/source'); 6 | const expect = require('chai').expect; 7 | const nock = require('nock'); 8 | 9 | describe('Source/Common', function () { 10 | before(function () { 11 | nock.disableNetConnect(); 12 | }); 13 | 14 | after(function () { 15 | nock.enableNetConnect(); 16 | }); 17 | 18 | it('sets configurable parameters from constructor options', function () { 19 | // Create some references to test against 20 | const testParser = {}; 21 | 22 | const stub = new Source.Stub('stub', { 23 | parser: testParser 24 | }); 25 | 26 | expect(stub.name).to.equal('stub'); 27 | expect(stub.type).to.equal('stub'); 28 | 29 | expect(stub.parser).to.equal(testParser); 30 | }); 31 | 32 | it('initialize returns a promise', function () { 33 | const source = new Source.Stub(); 34 | 35 | expect(source.initialize()).to.be.instanceOf(Promise); 36 | }); 37 | 38 | it('initialized promise resolves when a response is received', function () { 39 | const source = new Source.Stub(); 40 | 41 | return source.initialize() 42 | .then(() => { 43 | expect(source.state).to.equal(Source.RUNNING); 44 | }); 45 | }); 46 | 47 | it('initialized promise resolves when a NO_EXIST is received', function () { 48 | const source = new Source.NoExistStub(); 49 | 50 | return source.initialize().then(() => { 51 | expect(source.state).to.equal(Source.WAITING); 52 | }); 53 | }); 54 | 55 | it('initialized promise resolves when an error 
is received', function () { 56 | const source = new Source.ErrorStub(); 57 | 58 | return source.initialize().then(() => { 59 | expect(source.state).to.equal(Source.ERROR); 60 | }); 61 | }); 62 | 63 | it('should shuts down cleanly', function () { 64 | const source = new Source.Stub(); 65 | 66 | return source.initialize() 67 | .then(function () { 68 | expect(source.state).to.eql(Source.RUNNING); 69 | return Promise.resolve(source.shutdown()); 70 | }) 71 | .then(function () { 72 | expect(source.state).to.eql(Source.SHUTDOWN); 73 | expect(source._state).to.eql(null); 74 | }); 75 | }); 76 | 77 | it('handles and emits errors from the underlying resource', function (done) { 78 | const source = new Source.ErrorStub(); 79 | 80 | source.once('error', (err) => { 81 | expect(err).to.be.instanceOf(Error); 82 | expect(source.state).to.equal(Source.ERROR); 83 | 84 | source.once('update', () => { 85 | expect(source.state).to.equal(Source.RUNNING); 86 | done(); 87 | }); 88 | source._update({}); 89 | }); 90 | 91 | source.initialize(); 92 | }); 93 | 94 | it('fakes inheritance checks through the Source Factory methods', function () { 95 | expect(new Source.Stub()).to.be.instanceOf(Source.Common); 96 | expect(new Source.PollingStub()).to.be.instanceOf(Source.Common); 97 | expect(new Source.PollingStub()).to.be.instanceOf(Source.Common.Polling); 98 | }); 99 | 100 | it('clears properties and state on NO_EXIST when INITIALIZING', function () { 101 | const source = new Source.Stub({ key: 'value' }); 102 | 103 | source.state = Source.Common.INITIALIZING; 104 | source._state = 'non-null-value'; 105 | 106 | source._update(Source.Common.NO_EXIST); 107 | 108 | expect(source.state).to.eql(Source.Common.WAITING); 109 | expect(source.properties).to.eql({}); 110 | expect(source._state).to.eql(null); 111 | }); 112 | 113 | it('clears properties and state on NO_EXIST when RUNNING', function () { 114 | const source = new Source.Stub({ key: 'value' }); 115 | 116 | source.state = Source.Common.RUNNING; 
117 | source._state = 'non-null-value'; 118 | 119 | source._update(Source.Common.NO_EXIST); 120 | 121 | expect(source.state).to.eql(Source.Common.WAITING); 122 | expect(source.properties).to.eql({}); 123 | expect(source._state).to.eql(null); 124 | }); 125 | 126 | it('clears properties and state on NO_EXIST when WARNING', function () { 127 | const source = new Source.Stub({ key: 'value' }); 128 | 129 | source.state = Source.Common.WARNING; 130 | source._state = 'non-null-value'; 131 | 132 | source._update(Source.Common.NO_EXIST); 133 | 134 | expect(source.state).to.eql(Source.Common.WAITING); 135 | expect(source.properties).to.eql({}); 136 | expect(source._state).to.eql(null); 137 | }); 138 | 139 | it('clears properties and state on NO_EXIST when ERROR', function () { 140 | const source = new Source.Stub({ key: 'value' }); 141 | 142 | source.state = Source.Common.ERROR; 143 | source._state = 'non-null-value'; 144 | 145 | source._update(Source.Common.NO_EXIST); 146 | 147 | expect(source.state).to.eql(Source.Common.WAITING); 148 | expect(source.properties).to.eql({}); 149 | expect(source._state).to.eql(null); 150 | }); 151 | 152 | it('clears properties and state on NO_EXIST when WAITING', function () { 153 | const source = new Source.Stub({ key: 'value' }); 154 | 155 | source.state = Source.Common.WAITING; 156 | source._state = 'non-null-value'; 157 | 158 | source._update(Source.Common.NO_EXIST); 159 | 160 | expect(source.state).to.eql(Source.Common.WAITING); 161 | expect(source.properties).to.eql({}); 162 | expect(source._state).to.eql(null); 163 | }); 164 | 165 | describe('Polling', function () { 166 | it('sets an interval', function () { 167 | const stub = new Source.PollingStub({}, { 168 | interval: 42 169 | }); 170 | 171 | expect(stub.interval).to.equal(42); 172 | }); 173 | 174 | it('starts a timer when initialized', function (done) { 175 | const stub = new Source.PollingStub(); 176 | 177 | stub.initialize().then(() => { 178 | 
expect(stub._timer).to.be.an('object'); 179 | stub.shutdown(); 180 | 181 | done(); 182 | }); 183 | }); 184 | 185 | it('only creates one timer if initialized multiple times', function () { 186 | const stub = new Source.PollingStub(); 187 | 188 | return stub.initialize().then(() => { 189 | expect(stub._timer).to.be.an('object'); 190 | 191 | const firstTimer = stub._timer; 192 | 193 | return stub.initialize().then(() => { 194 | expect(stub._timer).to.equal(firstTimer); 195 | 196 | stub.shutdown(); 197 | }); 198 | }); 199 | }); 200 | 201 | it('clears its timer when shutdown', function () { 202 | const stub = new Source.PollingStub(); 203 | 204 | return stub.initialize().then(() => { 205 | expect(stub._timer).to.be.an('object'); 206 | 207 | stub.shutdown(); 208 | expect(stub._timer).to.equal(undefined); 209 | }); 210 | }); 211 | }); 212 | }); 213 | -------------------------------------------------------------------------------- /test/string-template.js: -------------------------------------------------------------------------------- 1 | 'use strict'; 2 | 3 | const should = require('should'); 4 | const StringTemplate = require('../dist/lib/string-template'); 5 | 6 | describe('StringTemplate', () => { 7 | let string = 'The {{jumper:adjective}} {{ jumper:color }} {{jumper:animal}}'; 8 | 9 | string += ' jumped {{ preposition }} the... What ever, I\'m hungery'; 10 | 11 | const expected = 'The quick red fox jumped over the... 
What ever, I\'m hungery'; 12 | const notATemplate = 'This isn\'t a template'; 13 | 14 | const undefinedSubstitution = 'This template references {{ an:invalid }} variable!'; 15 | 16 | const scope = { 17 | jumper: { 18 | adjective: 'quick', 19 | color: 'red', 20 | animal: 'fox' 21 | }, 22 | preposition: 'over' 23 | }; 24 | 25 | const template = new StringTemplate(string, scope); 26 | 27 | it('detects valid template strings', () => { 28 | StringTemplate.isTemplate(string).should.equal(true); 29 | }); 30 | 31 | it('detects invalid template strings', () => { 32 | StringTemplate.isTemplate(notATemplate).should.equal(false); 33 | }); 34 | 35 | it('toJSON converts to a string correctly', () => { 36 | template.toJSON().should.equal(expected); 37 | }); 38 | 39 | it('substitutes values into a template string correctly', () => { 40 | template.toString().should.equal(expected); 41 | }); 42 | 43 | it('throws a ReferenceError if a substitutes references an undefined variable', () => { 44 | const throwsAnError = new StringTemplate(undefinedSubstitution, scope); 45 | 46 | should.throws(() => throwsAnError.toString(), ReferenceError); 47 | }); 48 | 49 | it('correctly substitutes a template string if given valid properties', () => { 50 | const t = StringTemplate.coerce('{{foo:bar}}', {foo: {bar: 'baz'}}); 51 | 52 | t.should.equal('baz'); 53 | }); 54 | 55 | it('returns the original string if it\'s not a valid template', () => { 56 | const t = StringTemplate.coerce('foo:bar}}', {foo: 1}); 57 | 58 | t.should.equal('foo:bar}}'); 59 | }); 60 | 61 | it('iterates through a deep object and substitutes template values', () => { 62 | const _scope = { 63 | watermelon: 'test', 64 | yo: {lo: 'slap'} 65 | }; 66 | const _template = { 67 | value: 'this is a {{ watermelon }}', 68 | complex: Promise.resolve(), 69 | node: { 70 | list: ['of', { 71 | objects: '{{yo:lo}}' 72 | }] 73 | } 74 | }; 75 | 76 | const rendered = StringTemplate.render(_template, _scope); 77 | 78 | rendered.value.should.equal('this 
is a test'); 79 | rendered.complex.should.equal(_template.complex); 80 | rendered.node.list.should.containDeep(['of', {objects: 'slap'}]); 81 | }); 82 | }); 83 | -------------------------------------------------------------------------------- /test/tags.js: -------------------------------------------------------------------------------- 1 | 'use strict'; 2 | 3 | require('./lib/helpers'); 4 | 5 | const expect = require('chai').expect; 6 | const AWS = require('aws-sdk-mock'); 7 | const AWS_SDK = require('aws-sdk'); 8 | const Tags = require('../dist/lib/source/tags'); 9 | const Parser = require('../dist/lib/source/tags/parser'); 10 | const sinon = require('sinon'); 11 | const nock = require('nock'); 12 | 13 | const metadataPaths = require('./data/metadata-paths.json'); 14 | const tagValues = { 15 | Tags: [ 16 | { Key: 'Name', Value: 'service-name', ResourceType: 'instance', ResourceId: 'i-instanceid' }, 17 | { Key: 'Service', Value: 'service-type', ResourceType: 'instance', ResourceId: 'i-instanceid' }, 18 | { Key: 'This tag', Value: 'a value', ResourceType: 'instance', ResourceId: 'i-instanceid' }, 19 | { Key: 'Another tag', Value: 'some other value', ResourceType: 'instance', ResourceId: 'i-instanceid' } 20 | ] 21 | }; 22 | 23 | describe('Tags source plugin', function () { 24 | 25 | let metadataServiceSpy = sinon.spy(); 26 | let ec2Spy = sinon.spy(); 27 | 28 | before(function () { 29 | nock.disableNetConnect(); 30 | }); 31 | 32 | after(function () { 33 | nock.cleanAll(); 34 | nock.enableNetConnect(); 35 | }); 36 | 37 | beforeEach(function () { 38 | AWS.setSDKInstance(AWS_SDK); 39 | }); 40 | 41 | afterEach(function () { 42 | AWS.restore(); 43 | metadataServiceSpy.reset(); 44 | ec2Spy.reset(); 45 | }); 46 | 47 | 48 | 49 | it('should parses tags into a useful object', function () { 50 | const parser = new Parser(); 51 | 52 | parser.update(tagValues); 53 | expect(parser.properties.Name).to.be.a('string'); 54 | expect(parser.properties.Service).to.be.a('string'); 55 | 
expect(parser.properties['This tag']).to.be.a('string'); 56 | expect(parser.properties['Another tag']).to.be.a('string'); 57 | 58 | expect(parser.properties.Name).to.equal('service-name'); 59 | expect(parser.properties.Service).to.equal('service-type'); 60 | expect(parser.properties['This tag']).to.equal('a value'); 61 | expect(parser.properties['Another tag']).to.equal('some other value'); 62 | }); 63 | 64 | it('handles errors from the AWS Metadata SDK gracefully by not exposing the property', function () { 65 | AWS.mock('MetadataService', 'request', (path, callback) => { 66 | metadataServiceSpy(); 67 | callback(new Error('some error from the AWS SDK'), null); 68 | }); 69 | const source = new Tags({ 70 | interval: 100 71 | }); 72 | 73 | return source.initialize() 74 | .then(function () { 75 | expect(metadataServiceSpy.called).to.be.true; 76 | expect(source.properties).to.be.a('object'); 77 | expect(source.properties).to.be.empty; 78 | }); 79 | }); 80 | 81 | it('should handle errors from the AWS EC2 SDK gracefully by not exposing the property', function () { 82 | AWS.mock('MetadataService', 'request', (path, callback) => { 83 | metadataServiceSpy(); 84 | callback(null, metadataPaths[path]); 85 | }); 86 | 87 | AWS.mock('EC2', 'describeTags', (path, callback) => { 88 | ec2Spy(); 89 | callback(new Error('some error from the AWS SDK'), null); 90 | }); 91 | 92 | const source = new Tags({ 93 | interval: 100 94 | }); 95 | 96 | return source.initialize() 97 | .then(function () { 98 | expect(metadataServiceSpy.called).to.be.true; 99 | expect(ec2Spy.called).to.be.true; 100 | expect(source.properties).to.be.a('object'); 101 | expect(source.properties).to.be.empty; 102 | }); 103 | }); 104 | 105 | it('should periodically fetches tag data', function () { 106 | // this.timeout(2500); 107 | 108 | // Stub the AWS.MetadataService request method 109 | AWS.mock('MetadataService', 'request', (path, callback) => { 110 | metadataServiceSpy(); 111 | callback(null, metadataPaths[path]); 
112 | }); 113 | 114 | AWS.mock('EC2', 'describeTags', (path, callback) => { 115 | ec2Spy(); 116 | callback(null, tagValues); 117 | }); 118 | 119 | const source = new Tags({ 120 | interval: 100 121 | }); 122 | 123 | let ec2InitialCount; 124 | 125 | return source.initialize() 126 | .then(function () { 127 | ec2InitialCount = ec2Spy.callCount 128 | expect(source.properties.Name).to.be.a('string'); 129 | expect(source.properties.Service).to.be.a('string'); 130 | expect(source.properties['This tag']).to.be.a('string'); 131 | expect(source.properties['Another tag']).to.be.a('string'); 132 | 133 | expect(source.properties.Name).to.equal('service-name'); 134 | expect(source.properties.Service).to.equal('service-type'); 135 | expect(source.properties['This tag']).to.equal('a value'); 136 | expect(source.properties['Another tag']).to.equal('some other value'); 137 | }) 138 | .then(function () { 139 | return new Promise((resolve) => { setTimeout(resolve, 1000) }) 140 | }) 141 | .then(function () { 142 | expect(ec2Spy.callCount).to.be.above(ec2InitialCount); 143 | }); 144 | }); 145 | }); 146 | -------------------------------------------------------------------------------- /test/tokend-client.js: -------------------------------------------------------------------------------- 1 | 'use strict'; 2 | 3 | require('./lib/helpers'); 4 | 5 | const expect = require('chai').expect; 6 | const nock = require('nock'); 7 | const TokendClient = require('../dist/lib/transformers/tokend-client'); 8 | 9 | describe('TokendClient', function () { 10 | let _client = null; 11 | 12 | beforeEach(function () { 13 | nock.cleanAll(); 14 | nock.enableNetConnect(); 15 | if (_client) { 16 | _client.shutdown(); 17 | } 18 | }); 19 | 20 | afterEach(function () { 21 | nock.disableNetConnect(); 22 | if (_client) { 23 | _client.shutdown(); 24 | } 25 | }); 26 | 27 | it('finds Tokend on 127.0.0.1:4500 by default', function () { 28 | _client = new TokendClient(); 29 | 30 | 
expect(_client._host).to.equal('127.0.0.1'); 31 | expect(_client._port).to.equal(4500); 32 | }); 33 | 34 | it('allows Tokend to be found on a non-default host:port', function () { 35 | _client = new TokendClient({ 36 | host: 'token.d', 37 | port: 2600 38 | }); 39 | 40 | expect(_client._host).to.equal('token.d'); 41 | expect(_client._port).to.equal(2600); 42 | }); 43 | 44 | it('only calls Tokend once for each generic secret', function (done) { 45 | // Nock clears a response after it's requested. 46 | // Processing the same secret more than once will fail when tokend.done() is called. 47 | const tokend = nock('http://127.0.0.1:4500') 48 | .get('/v1/secret/default/kali/root/password') 49 | .reply(200, { 50 | plaintext: 'toor' 51 | }); 52 | 53 | _client = new TokendClient(); 54 | 55 | const secret1 = _client.get('/v1/secret/default/kali/root/password'); 56 | const secret2 = _client.get('/v1/secret/default/kali/root/password'); 57 | 58 | Promise.all([secret1, secret2]).then((secrets) => { 59 | secrets.forEach((secret) => { 60 | expect(secret).to.eql({ 61 | plaintext: 'toor' 62 | }); 63 | }); 64 | 65 | tokend.done(); 66 | done(); 67 | }).catch(done); 68 | }); 69 | 70 | it('emits "update" events when generic secrets in Tokend change', function (done) { 71 | // Nock clears a response after it's requested. 72 | const tokend = nock('http://127.0.0.1:4500') 73 | 74 | // First request comes from TokendClient.get call 75 | .get('/v1/secret/default/kali/root/password') 76 | .reply(200, { 77 | plaintext: 'toor' 78 | }) 79 | 80 | // Second request comes from timer to check for changes 81 | .get('/v1/secret/default/kali/root/password') 82 | .reply(200, { 83 | plaintext: 'myvoiceismypassword' 84 | }); 85 | 86 | _client = new TokendClient(); 87 | 88 | _client.initialize().then(() => { 89 | // First request will resolve with the original secret. 
90 | _client.get('/v1/secret/default/kali/root/password').then((originalSecret) => { 91 | expect(originalSecret).to.eql({ 92 | plaintext: 'toor' 93 | }); 94 | 95 | // "update" will have fired once from the initialization; watch for subsequent update polling. 96 | _client.once('update', () => { 97 | // Second request should resolve with the new secret. 98 | _client.get('/v1/secret/default/kali/root/password').then((updatedSecret) => { 99 | expect(updatedSecret).to.eql({ 100 | plaintext: 'myvoiceismypassword' 101 | }); 102 | 103 | tokend.done(); 104 | done(); 105 | }).catch(done); 106 | }); 107 | }).catch(done); 108 | }).catch(done); 109 | }); 110 | 111 | it('only calls Tokend once for each transit secret', function (done) { 112 | // Nock clears a response after it's requested. 113 | // Processing the same secret more than once will fail when tokend.done() is called. 114 | const tokend = nock('http://127.0.0.1:4500') 115 | .post('/v1/transit/default/decrypt', { 116 | key: 'kali', 117 | ciphertext: 'gbbe' 118 | }) 119 | .reply(200, { 120 | plaintext: 'toor' 121 | }); 122 | 123 | _client = new TokendClient(); 124 | 125 | const secret1 = _client.post('/v1/transit/default/decrypt', { key: 'kali', ciphertext: 'gbbe' }); 126 | const secret2 = _client.post('/v1/transit/default/decrypt', { key: 'kali', ciphertext: 'gbbe' }); 127 | 128 | Promise.all([secret1, secret2]).then((secrets) => { 129 | secrets.forEach((secret) => { 130 | expect(secret).to.eql({ 131 | plaintext: 'toor' 132 | }); 133 | }); 134 | 135 | tokend.done(); 136 | done(); 137 | }).catch(done); 138 | }); 139 | 140 | it('provides a method for clearing the request cache', function () { 141 | const tokend = nock('http://127.0.0.1:4500') 142 | .post('/v1/transit/default/decrypt', { 143 | key: 'kali', 144 | ciphertext: 'gbbe' 145 | }) 146 | .reply(200, { 147 | plaintext: 'toor' 148 | }); 149 | 150 | _client = new TokendClient(); 151 | 152 | const keyId = '/v1/transit/default/decrypt.kali.gbbe'; 153 | 154 | return 
_client.post('/v1/transit/default/decrypt', { key: 'kali', ciphertext: 'gbbe' }).then(() => { 155 | const postRequestQueue = _client._pendingPostRequests; 156 | 157 | expect(Object.keys(postRequestQueue).length).to.equal(1); 158 | expect(postRequestQueue[keyId]).to.be.an.instanceof(Promise); 159 | 160 | _client.clearCacheAtKey('POST', keyId); 161 | 162 | expect(Object.keys(postRequestQueue).length).to.equal(0); 163 | 164 | tokend.done(); 165 | }); 166 | }); 167 | 168 | it('throws an error if attempting to clear a non-existent cache', function () { 169 | const tokend = nock('http://127.0.0.1:4500') 170 | .post('/v1/transit/default/decrypt', { 171 | key: 'kali', 172 | ciphertext: 'gbbe' 173 | }) 174 | .reply(200, { 175 | plaintext: 'toor' 176 | }); 177 | 178 | _client = new TokendClient(); 179 | 180 | const keyId = '/v1/transit/default/decrypt.kali.gbbe'; 181 | 182 | return _client.post('/v1/transit/default/decrypt', { key: 'kali', ciphertext: 'gbbe' }).then(() => { 183 | expect(() => _client.clearCacheAtKey('HEAD', keyId)).to.throw(Error, 'A HEAD request does not map to an' + 184 | ' existing cache.'); 185 | tokend.done(); 186 | }); 187 | }); 188 | }); 189 | -------------------------------------------------------------------------------- /test/util.js: -------------------------------------------------------------------------------- 1 | 'use strict'; 2 | 3 | const should = require('should'); 4 | const util = require('../dist/lib/util'); 5 | 6 | describe('Util/getNestedProperty', function() { 7 | const getNestedProperty = util.getNestedProperty; 8 | const someObject = { 9 | some: { 10 | ips: [1, 2, 3, 4] 11 | }, 12 | cool: { 13 | tacos: { 14 | types: ['carnitas', 'al pastor', 'steak', 'chicken', 'barbacoa', 'lengua'], 15 | meats: ['pork', 'chicken', 'beef'], 16 | status: 'delicious' 17 | } 18 | }, 19 | object: { 20 | with: { 21 | nested: { 22 | keys: 'foobar' 23 | } 24 | } 25 | } 26 | }; 27 | 28 | it('retrieves a nested value', function() { 29 | 
getNestedProperty(someObject, ['cool', 'tacos', 'status']).should.equal('delicious'); 30 | getNestedProperty(someObject, 'object.with.nested.keys'.split('.')).should.equal('foobar'); 31 | }); 32 | 33 | it('returns the correct type when the nested path ends', function() { 34 | getNestedProperty(someObject, 'cool.tacos.types'.split('.')).should.be.an.Array(); 35 | getNestedProperty(someObject, 'object.with'.split('.')).should.be.an.Object(); 36 | }); 37 | 38 | it('retrieves a nested value at an arbitrary level', function() { 39 | getNestedProperty(someObject, ['cool']).should.eql({ 40 | tacos: { 41 | types: ['carnitas', 'al pastor', 'steak', 'chicken', 'barbacoa', 'lengua'], 42 | meats: ['pork', 'chicken', 'beef'], 43 | status: 'delicious' 44 | } 45 | }); 46 | 47 | getNestedProperty(someObject, 'object.with'.split('.')).should.eql({ 48 | nested: { 49 | keys: 'foobar' 50 | } 51 | }); 52 | }); 53 | 54 | it('throws a TypeError with info if a key doesn\'t exist', function() { 55 | should.throws(() => { 56 | getNestedProperty(someObject, 'cool.burgers.status'.split('.')); 57 | }, TypeError, `Key 'burgers' does not exist in object ${JSON.stringify(someObject.cool)}`); 58 | }); 59 | }); 60 | -------------------------------------------------------------------------------- /test/utils/s3-stub.js: -------------------------------------------------------------------------------- 1 | 'use strict'; 2 | 3 | const proxyquire = require('proxyquire'); 4 | 5 | module.exports = function generateS3Proxy(stubs) { 6 | return proxyquire('../../dist/lib/source/s3', { 7 | 'aws-sdk': { 8 | S3: function constructor() { 9 | return stubs; 10 | } 11 | } 12 | }); 13 | }; 14 | --------------------------------------------------------------------------------