├── .gitignore
├── .travis.yml
├── CONTRIBUTING.md
├── LICENSE
├── NOTES.md
├── NOTICE
├── README.md
├── apex
├── functions
│ ├── create_cfg
│ │ ├── .apexignore
│ │ └── main.go
│ ├── delete_cfg
│ │ ├── .apexignore
│ │ └── main.go
│ ├── delete_key
│ │ ├── .apexignore
│ │ └── main.go
│ ├── get_key
│ │ ├── .apexignore
│ │ └── main.go
│ ├── info_cfg
│ │ ├── .apexignore
│ │ └── main.go
│ ├── set_key
│ │ ├── .apexignore
│ │ └── main.go
│ └── update_cfg
│ │ ├── .apexignore
│ │ └── main.go
├── infrastructure
│ ├── dev
│ │ ├── main.tf
│ │ └── variables.tf
│ └── modules
│ │ ├── api_gateway
│ │ ├── api-gateway.tf
│ │ ├── api_gateway_body_mapping.template
│ │ └── variables.tf
│ │ ├── dynamodb
│ │ ├── dynamodb.tf
│ │ └── variables.tf
│ │ └── iam
│ │ ├── api-gateway-iam.tf
│ │ ├── iam.tf
│ │ └── outputs.tf
└── project.json
├── commands
├── commands.go
├── commands_test.go
├── util.go
└── util_test.go
├── config
├── config.go
├── status.go
└── status_test.go
├── docs
├── TODO.md
└── logo.png
├── main.go
├── main_test.go
├── server
├── main.go
└── v1.go
├── storage
├── README.md
├── dynamodb
│ └── dynamodb.go
├── mockdb
│ └── mock.go
├── storage.go
└── storage_test.go
└── version
└── version.go
/.gitignore:
--------------------------------------------------------------------------------
1 | discfg
2 | .discfg
3 | *.tfstate
4 | *.tfstate.backup
5 | .terraform
--------------------------------------------------------------------------------
/.travis.yml:
--------------------------------------------------------------------------------
1 | # https://docs.travis-ci.com/user/languages/go
2 | language: go
3 |
4 | go:
5 | - 1.6
6 |
7 | before_install:
8 | - go get github.com/golang/lint/golint
9 | # - go get github.com/mattn/goveralls
10 |
11 | matrix:
12 | fast_finish: true
13 |
14 | script:
15 | - go vet -x ./...
16 | - $HOME/gopath/bin/golint ./...
17 | - go test -v ./...
18 | # - go test -covermode=count -coverprofile=profile.cov .
19 |
20 | #after_script:
21 | # - $HOME/gopath/bin/goveralls -coverprofile=profile.cov -service=travis-ci
--------------------------------------------------------------------------------
/CONTRIBUTING.md:
--------------------------------------------------------------------------------
1 | # Contributing
2 |
3 | When contributing to this repository, please first discuss the change you wish to make via issue,
4 | email, or any other method with the owners of this repository before making a change.
5 |
6 | If you are working on an open issue, please be sure to test the code to the best of your ability.
7 | While test cases are not required as part of contributing, they are always helpful.
8 |
9 | Please note this project's simple code of conduct; please follow it in all your interactions.
10 |
11 | Also note that not every pull request or issue can always be addressed. The project maintainers
12 | reserve the right to ignore, close, or move any issue or pull request. Especially if requests
13 | are not in line with the goals of the tool and its authors. That said, feedback and suggestions
14 | are vital to any open-source project and are _always appreciated_.
15 |
16 | The simple reality of it is that we all have jobs and can't always be super responsive.
17 | Please do not take anything personally.
18 |
19 | ## Pull Request Process
20 |
21 | 1. Be sure to sign the [CLA](https://cla-assistant.io/tmaiaroto/discfg)
22 | 2. Ensure any code is accompanied by a test case should it make sense.
23 | 3. Ensure your contribution does not break any existing test cases.
24 | 4. If it makes sense, update the README.md with details of changes should there be a breaking
25 | change to the brief documentation within it or if new information makes sense.
26 | 5. Increase the version number in `version/version.go`. The versioning scheme we use is [SemVer](http://semver.org/).
27 | 6. You may merge the Pull Request in once you have the sign-off of another developer, or if you
28 | do not have permission to do that, you may request someone to merge it for you.
29 |
30 | ## Code of Conduct
31 |
32 | In the interest of fostering an open and welcoming environment, we as contributors and maintainers
33 | support a harassment-free experience for everyone, regardless of age, body size, disability, ethnicity,
34 | gender identity and expression, level of experience, nationality, personal appearance, race, religion,
35 | or sexual identity and orientation.
36 |
37 | Be excellent to each other. It's not that hard folks.
38 |
39 | ### Unacceptable Behavior
40 |
41 | Examples of unacceptable behavior by participants include:
42 |
43 | * The use of sexualized language or imagery and unwelcome sexual attention or
44 | advances
45 | * Trolling, insulting/derogatory comments, and personal or political attacks
46 | * Public or private harassment
47 | * Publishing others' private information, such as a physical or electronic
48 | address, without explicit permission
49 | * Other conduct which could reasonably be considered inappropriate in a
50 | professional setting
51 |
52 | ### Our Responsibilities & Enforcement
53 |
54 | Project maintainers have the right and responsibility to remove, edit, or
55 | reject comments, commits, code, wiki edits, issues, and other contributions
56 | that are not aligned to this Code of Conduct, or to ban temporarily or
57 | permanently any contributor for other behaviors that they deem inappropriate,
58 | threatening, offensive, or harmful.
59 |
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 |
2 | Apache License
3 | Version 2.0, January 2004
4 | http://www.apache.org/licenses/
5 |
6 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
7 |
8 | 1. Definitions.
9 |
10 | "License" shall mean the terms and conditions for use, reproduction,
11 | and distribution as defined by Sections 1 through 9 of this document.
12 |
13 | "Licensor" shall mean the copyright owner or entity authorized by
14 | the copyright owner that is granting the License.
15 |
16 | "Legal Entity" shall mean the union of the acting entity and all
17 | other entities that control, are controlled by, or are under common
18 | control with that entity. For the purposes of this definition,
19 | "control" means (i) the power, direct or indirect, to cause the
20 | direction or management of such entity, whether by contract or
21 | otherwise, or (ii) ownership of fifty percent (50%) or more of the
22 | outstanding shares, or (iii) beneficial ownership of such entity.
23 |
24 | "You" (or "Your") shall mean an individual or Legal Entity
25 | exercising permissions granted by this License.
26 |
27 | "Source" form shall mean the preferred form for making modifications,
28 | including but not limited to software source code, documentation
29 | source, and configuration files.
30 |
31 | "Object" form shall mean any form resulting from mechanical
32 | transformation or translation of a Source form, including but
33 | not limited to compiled object code, generated documentation,
34 | and conversions to other media types.
35 |
36 | "Work" shall mean the work of authorship, whether in Source or
37 | Object form, made available under the License, as indicated by a
38 | copyright notice that is included in or attached to the work
39 | (an example is provided in the Appendix below).
40 |
41 | "Derivative Works" shall mean any work, whether in Source or Object
42 | form, that is based on (or derived from) the Work and for which the
43 | editorial revisions, annotations, elaborations, or other modifications
44 | represent, as a whole, an original work of authorship. For the purposes
45 | of this License, Derivative Works shall not include works that remain
46 | separable from, or merely link (or bind by name) to the interfaces of,
47 | the Work and Derivative Works thereof.
48 |
49 | "Contribution" shall mean any work of authorship, including
50 | the original version of the Work and any modifications or additions
51 | to that Work or Derivative Works thereof, that is intentionally
52 | submitted to Licensor for inclusion in the Work by the copyright owner
53 | or by an individual or Legal Entity authorized to submit on behalf of
54 | the copyright owner. For the purposes of this definition, "submitted"
55 | means any form of electronic, verbal, or written communication sent
56 | to the Licensor or its representatives, including but not limited to
57 | communication on electronic mailing lists, source code control systems,
58 | and issue tracking systems that are managed by, or on behalf of, the
59 | Licensor for the purpose of discussing and improving the Work, but
60 | excluding communication that is conspicuously marked or otherwise
61 | designated in writing by the copyright owner as "Not a Contribution."
62 |
63 | "Contributor" shall mean Licensor and any individual or Legal Entity
64 | on behalf of whom a Contribution has been received by Licensor and
65 | subsequently incorporated within the Work.
66 |
67 | 2. Grant of Copyright License. Subject to the terms and conditions of
68 | this License, each Contributor hereby grants to You a perpetual,
69 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable
70 | copyright license to reproduce, prepare Derivative Works of,
71 | publicly display, publicly perform, sublicense, and distribute the
72 | Work and such Derivative Works in Source or Object form.
73 |
74 | 3. Grant of Patent License. Subject to the terms and conditions of
75 | this License, each Contributor hereby grants to You a perpetual,
76 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable
77 | (except as stated in this section) patent license to make, have made,
78 | use, offer to sell, sell, import, and otherwise transfer the Work,
79 | where such license applies only to those patent claims licensable
80 | by such Contributor that are necessarily infringed by their
81 | Contribution(s) alone or by combination of their Contribution(s)
82 | with the Work to which such Contribution(s) was submitted. If You
83 | institute patent litigation against any entity (including a
84 | cross-claim or counterclaim in a lawsuit) alleging that the Work
85 | or a Contribution incorporated within the Work constitutes direct
86 | or contributory patent infringement, then any patent licenses
87 | granted to You under this License for that Work shall terminate
88 | as of the date such litigation is filed.
89 |
90 | 4. Redistribution. You may reproduce and distribute copies of the
91 | Work or Derivative Works thereof in any medium, with or without
92 | modifications, and in Source or Object form, provided that You
93 | meet the following conditions:
94 |
95 | (a) You must give any other recipients of the Work or
96 | Derivative Works a copy of this License; and
97 |
98 | (b) You must cause any modified files to carry prominent notices
99 | stating that You changed the files; and
100 |
101 | (c) You must retain, in the Source form of any Derivative Works
102 | that You distribute, all copyright, patent, trademark, and
103 | attribution notices from the Source form of the Work,
104 | excluding those notices that do not pertain to any part of
105 | the Derivative Works; and
106 |
107 | (d) If the Work includes a "NOTICE" text file as part of its
108 | distribution, then any Derivative Works that You distribute must
109 | include a readable copy of the attribution notices contained
110 | within such NOTICE file, excluding those notices that do not
111 | pertain to any part of the Derivative Works, in at least one
112 | of the following places: within a NOTICE text file distributed
113 | as part of the Derivative Works; within the Source form or
114 | documentation, if provided along with the Derivative Works; or,
115 | within a display generated by the Derivative Works, if and
116 | wherever such third-party notices normally appear. The contents
117 | of the NOTICE file are for informational purposes only and
118 | do not modify the License. You may add Your own attribution
119 | notices within Derivative Works that You distribute, alongside
120 | or as an addendum to the NOTICE text from the Work, provided
121 | that such additional attribution notices cannot be construed
122 | as modifying the License.
123 |
124 | You may add Your own copyright statement to Your modifications and
125 | may provide additional or different license terms and conditions
126 | for use, reproduction, or distribution of Your modifications, or
127 | for any such Derivative Works as a whole, provided Your use,
128 | reproduction, and distribution of the Work otherwise complies with
129 | the conditions stated in this License.
130 |
131 | 5. Submission of Contributions. Unless You explicitly state otherwise,
132 | any Contribution intentionally submitted for inclusion in the Work
133 | by You to the Licensor shall be under the terms and conditions of
134 | this License, without any additional terms or conditions.
135 | Notwithstanding the above, nothing herein shall supersede or modify
136 | the terms of any separate license agreement you may have executed
137 | with Licensor regarding such Contributions.
138 |
139 | 6. Trademarks. This License does not grant permission to use the trade
140 | names, trademarks, service marks, or product names of the Licensor,
141 | except as required for reasonable and customary use in describing the
142 | origin of the Work and reproducing the content of the NOTICE file.
143 |
144 | 7. Disclaimer of Warranty. Unless required by applicable law or
145 | agreed to in writing, Licensor provides the Work (and each
146 | Contributor provides its Contributions) on an "AS IS" BASIS,
147 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
148 | implied, including, without limitation, any warranties or conditions
149 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
150 | PARTICULAR PURPOSE. You are solely responsible for determining the
151 | appropriateness of using or redistributing the Work and assume any
152 | risks associated with Your exercise of permissions under this License.
153 |
154 | 8. Limitation of Liability. In no event and under no legal theory,
155 | whether in tort (including negligence), contract, or otherwise,
156 | unless required by applicable law (such as deliberate and grossly
157 | negligent acts) or agreed to in writing, shall any Contributor be
158 | liable to You for damages, including any direct, indirect, special,
159 | incidental, or consequential damages of any character arising as a
160 | result of this License or out of the use or inability to use the
161 | Work (including but not limited to damages for loss of goodwill,
162 | work stoppage, computer failure or malfunction, or any and all
163 | other commercial damages or losses), even if such Contributor
164 | has been advised of the possibility of such damages.
165 |
166 | 9. Accepting Warranty or Additional Liability. While redistributing
167 | the Work or Derivative Works thereof, You may choose to offer,
168 | and charge a fee for, acceptance of support, warranty, indemnity,
169 | or other liability obligations and/or rights consistent with this
170 | License. However, in accepting such obligations, You may act only
171 | on Your own behalf and on Your sole responsibility, not on behalf
172 | of any other Contributor, and only if You agree to indemnify,
173 | defend, and hold each Contributor harmless for any liability
174 | incurred by, or claims asserted against, such Contributor by reason
175 | of your accepting any such warranty or additional liability.
176 |
177 | END OF TERMS AND CONDITIONS
178 |
179 | APPENDIX: How to apply the Apache License to your work.
180 |
181 | To apply the Apache License to your work, attach the following
182 | boilerplate notice, with the fields enclosed by brackets "[]"
183 | replaced with your own identifying information. (Don't include
184 | the brackets!) The text should be enclosed in the appropriate
185 | comment syntax for the file format. We also recommend that a
186 | file or class name and description of purpose be included on the
187 | same "printed page" as the copyright notice for easier
188 | identification within third-party archives.
189 |
190 | Copyright [yyyy] [name of copyright owner]
191 |
192 | Licensed under the Apache License, Version 2.0 (the "License");
193 | you may not use this file except in compliance with the License.
194 | You may obtain a copy of the License at
195 |
196 | http://www.apache.org/licenses/LICENSE-2.0
197 |
198 | Unless required by applicable law or agreed to in writing, software
199 | distributed under the License is distributed on an "AS IS" BASIS,
200 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
201 | See the License for the specific language governing permissions and
202 | limitations under the License.
--------------------------------------------------------------------------------
/NOTES.md:
--------------------------------------------------------------------------------
1 | Just some notes with thoughts to be figured out.
2 |
3 | ## Think about using DynamoDB streams.
4 | It says "near real time" ... So I'm a bit skeptical. Though it has a lot of convenience. It will show the old version of the item when modified.
5 | However, it only keeps history for 24hrs (or longer, but no guarantees). So it's not exactly going to be great for getting a whole history of modifications.
6 |
7 | DynamoDB streams conveniently integrate with Lambda though. So we could get the notifications pushed out in a very easy way by using them with the triggers feature.
8 |
9 | discfg does not have a listener like etcd, but it could have a notification service. Of course a notification service can still be built into discfg even
10 | without the use of streams and triggers.
11 |
12 | Might be an interesting configurable option.
13 |
14 | UPDATE on this: Definitely going to use Kinesis streams. It's really going to make for an interesting feature.
15 |
16 | ## JSON Support
17 |
18 | DynamoDB is supposed to support JSON and querying into objects.
19 | http://www.allthingsdistributed.com/2014/10/document-model-dynamodb.html
20 |
21 | How? Can it be used to set up a tree hierarchy for keys? Or would that mean one config document period? I'd rather have multiple items for the config
22 | because of size restrictions in DynamoDB. Though that was bumped up to 400kb apparently. Which is pretty darn big but still.
23 |
24 | But then I go and read: http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/DataFormat.html and it says JSON is used as a transport
25 | protocol only.
26 |
27 | UPDATE on this: Now storing binary data for so many reasons. JSON can be stored and Go handles with json.RawMessage() when possible.
28 | However, there was no querying within the JSON.
29 |
30 | ## Snowflakes
31 |
32 | One approach I thought about taking was to use snowflake ids. These are sequential (mostly) and that kept the use of DynamoDB as append only.
33 | Only new values would be added and the snowflake could be used as a RANGE key which would help distribute data well.
34 |
35 | The challenge here is some of the benefits of DynamoDB would have been lost. I would have needed to re-implement certain features that I would
36 | otherwise get for free.
37 |
38 | There would be more queries as a result and this would mean more DynamoDB utilization which would mean more cost. ...Which would go against
39 | the goals of the project.
40 |
41 | I love the idea of append only. I dislike updates. The cool thing is the query would sort by this RANGE key part and by limiting the results
42 | to just one, it would always return the latest. But at this point I don't want to think about things like expiring old items and making additional
43 | queries for conditional updates.
44 |
45 | UPDATE on this: I don't think this is worth the tradeoffs.
--------------------------------------------------------------------------------
/NOTICE:
--------------------------------------------------------------------------------
1 | discfg
2 | Copyright 2016 Tom Maiaroto
3 |
4 | This product includes software developed by Tom Maiaroto.
5 | (https://github.com/tmaiaroto/discfg).
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 |
2 |
3 | [](https://github.com/tmaiaroto/discfg/blob/master/LICENSE) [](http://godoc.org/github.com/tmaiaroto/discfg) [](https://travis-ci.org/tmaiaroto/discfg) [](https://goreportcard.com/report/github.com/tmaiaroto/discfg)
4 |
5 | A serverless and distributed (key/value) configuration tool built on top of Amazon Web Services. Specifically,
6 | it aims to use Lambda, DynamoDB, and API Gateway. Though it can be used with other services.
7 |
8 | It can install to your system as a binary, so managing configuration from any machine is simple from
9 | a command line. You can also work with configurations via RESTful API.
10 |
11 | ### Command Line Interface
12 |
13 | Assuming you built and/or installed the ```discfg``` binary and you have your AWS credentials under ```~/.aws```
14 | because you've used the AWS CLI tool before and configured them...
15 |
16 | ```
17 | ./discfg create mycfg
18 | ./discfg use mycfg
19 | ./discfg set mykey '{"json": "works"}'
20 | ./discfg set mykey -d file.json
21 | ```
22 |
23 | That first command creates a configuration for you (a table in DynamoDB - US East region by default).
24 | After that you can set keys from values passed to the CLI or from a file on disk. All values ultimately
25 | get stored as binary data, so you could even store (small - DynamoDB size limits) files if you really
26 | wanted to; images, maybe icons, for example.
27 |
28 | Note: If you did not want to call the ```use``` command or if you need to work with multiple configurations,
29 | you can always get and set keys by passing the configuration name. So the following ```set``` command is
30 | the same as the one above:
31 |
32 | ```
33 | ./discfg set mycfg mykey '{"json": "works"}'
34 | ```
35 |
36 | Also note that the slash is optional. All keys without a forward slash will have one prepended automatically.
37 | That is to say they will be at the root level. Now to retrieve this value:
38 |
39 | ```
40 | ./discfg get mykey
41 | ```
42 |
43 | To retrieve the value as a JSON response run (and jq is handy here; https://stedolan.github.io/jq):
44 |
45 | ```
46 | ./discfg get mykey -f json
47 | ```
48 |
49 | You should see something like this:
50 |
51 | ```
52 | {
53 | "action": "get",
54 | "item": {
55 | "version": 2,
56 | "key": "mykey",
57 | "value": {
58 | "foo": "test"
59 | }
60 | },
61 | "prevItem": {}
62 | }
63 | ```
64 |
65 | NOTE: You will only see ```prevItem``` populated upon an update. discfg does not store a history
66 | of item values.
67 |
68 | ### Serverless API
69 |
70 | The serverless API was built using the [Apex](http://apex.run/) framework along with [Terraform](https://www.terraform.io/).
71 | This leverages AWS Lambda and API Gateway. Assuming you have AWS CLI setup and then setup Apex
72 | and Terraform, you could then easily deploy discfg (from the `apex` directory) with the following.
73 |
74 | You'll first need the Apex go shim if you don't already have it:
75 |
76 | ```
77 | go get github.com/apex/go-apex
78 | ```
79 |
80 | Then you can setup the infrastructure and deploy with:
81 |
82 | ```
83 | apex infra get
84 | apex infra apply -var 'aws_account_id=XXXXX'
85 | apex deploy
86 | ```
87 |
88 | Where `XXXXX` has been replaced with your Amazon Web Services Account ID. Note that within the
89 | `infrastructure` directory, you'll find all the `.tf` files. Feel free to adjust those in the
90 | `variables.tf` to change simple things like the API name. You can also dig even deeper to change
91 | more complex things or course you can change things from the AWS web console once you've deployed
92 | the default provided.
93 |
94 | #### Example API Calls
95 |
96 | You'll of course prepend these URL paths with your AWS API Gateway API's base URL.
97 |
98 | **PUT /{name}/keys/{key}**
99 |
100 | Body
101 | ```
102 | any value
103 | ```
104 |
105 | Would set the provided value from the PUT body for the config name and key name passed
106 | in the API endpoint path parameters. There would then be a JSON response.
107 |
108 | **GET /{name}/keys/{key}**
109 |
110 | Would get the key value for the given key name and config name passed in the API endpoint
111 | path parameters. The response would be a JSON message.
112 |
113 |
114 | **PUT /{name}/cfg**
115 |
116 | Body
117 | ```
118 | {"WriteCapacityUnits": 2, "ReadCapacityUnits": 4}
119 | ```
120 |
121 | Would create a table in DynamoDB with the provided name in the API endpoint path and would
122 | also configure it with the given settings from the PUT body. In the case of DynamoDB these
123 | settings are the read and write capacity units (by default 1 write and 2 read).
124 |
125 |
126 | ### Running the API Server (on a server)
127 |
128 | While discfg is meant to be a tool for a "serverless" architecture, it doesn't mean you can't
129 | run it on your own server. Currently, there is no storage engine that would keep the data on
130 | the same server (and that defeats the whole purpose of being distributed), but the RESTful API
131 | can certainly be hosted on any server. So you can work with your configurations using JSON
132 | instead of just on the CLI or having to bring discfg into your own Go package.
133 |
134 | The API server can be on your local machine, or a remote server. Or both. The point is convenience.
135 |
136 | Currently, discfg has no authentication built in. _Do not run it exposed to the world._
137 | The point of relying on AWS is that Amazon provides you with the ability to control access.
138 | From the API server exposed through API Gateway to the DynamoDB database.
139 |
140 | You'll find the API server under the `server` directory. If you have the project cloned from
141 | the repo, you could simply go to that directory and run `go run main.go v1.go` to check it out.
142 | You'll ultimately want to build a binary and run it from wherever you need.
143 |
144 | It runs on port `8899` by default, but you can change that with a `--port` flag. Also note
145 | that discfg only uses AWS for storage engines right now so you should be sure to pay attention
146 | to the AWS region. It's `us-east-1` by default, but you can change that too with a `--region` flag.
147 |
148 | ## What prompted this tool?
149 |
150 | The need for a serverless application configuration. When dealing with AWS Lambda, state and
151 | configuration become a regular issue with no consistent solution. This ends up becoming a bit
152 | of boilerplate code in each of your functions.
153 |
154 | Discfg solves this need by making it very easy to work with key/value configuration data.
155 |
156 | Of course your application need not be serverless or run in AWS Lambda to benefit from discfg.
157 |
158 | Managing a bunch of environment variables isn't scalable. It's annoying and when you go to deploy
159 | or have a co-worker work on the project, it becomes a hassle. Even with tools like Docker. Things
160 | change and keeping on top of configuration changes is simply annoying with environment variables.
161 |
162 | Likewise, dropping an .ini or .json or .env file into a project is also not a terrific solution.
163 | Configuration files also become quickly dated and it still doesn't help much when you need to
164 | share configurations with others.
165 |
166 | Essentially, discfg is an interface around DynamoDB (and other distributed storage solutions).
167 | Opposed to some other configuration tools, it's not responsible for the distributed storage itself.
168 |
169 | Etcd was a big inspiration for this project. However, etcd is not "serverless" and so it requires one
170 | to set up and maintain a cluster of servers. This is a little less convenient, though the tool itself
171 | is also much faster. There's a trade off for convenience. Discfg was meant for higher level application
172 | use and so the performance factor wasn't a concern. The feature set of discfg also started to diverge
173 | from etcd as well. Discfg is simply a tool with a different use case.
174 |
--------------------------------------------------------------------------------
/apex/functions/create_cfg/.apexignore:
--------------------------------------------------------------------------------
1 | *.go
2 | event.json
--------------------------------------------------------------------------------
/apex/functions/create_cfg/main.go:
--------------------------------------------------------------------------------
1 | package main
2 |
3 | import (
4 | "encoding/json"
5 | "github.com/apex/go-apex"
6 | "github.com/tmaiaroto/discfg/commands"
7 | "github.com/tmaiaroto/discfg/config"
8 | "github.com/tmaiaroto/discfg/version"
9 | "os"
10 | )
11 |
12 | // To change these settings for DynamoDB, deploy with a different environment variable.
13 | // apex deploy -s DISCFG_REGION=us-west-1
14 | var discfgDBRegion = os.Getenv("DISCFG_REGION")
15 |
16 | // message is the JSON event passed to the Lambda.
17 | type message struct {
18 | Name string `json:"name"`
19 | // ...is actually the POST body (for now) but didn't want to call it "Value" in this function
20 | Settings string `json:"settings"`
21 | }
22 |
23 | var options = config.Options{StorageInterfaceName: "dynamodb", Version: version.Semantic} // Shared command options; the AWS region is filled in per invocation.
24 |
25 | func main() {
26 | // If not set for some reason, use us-east-1 by default.
27 | if discfgDBRegion == "" {
28 | discfgDBRegion = "us-east-1"
29 | }
30 |
31 | apex.HandleFunc(func(event json.RawMessage, ctx *apex.Context) (interface{}, error) {
32 | var m message
33 |
34 | if err := json.Unmarshal(event, &m); err != nil {
35 | return nil, err
36 | }
37 |
38 | options.Storage.AWS.Region = discfgDBRegion
39 |
40 | if m.Name != "" {
41 | options.CfgName = m.Name
42 | }
43 |
44 | // Settings arrive as a JSON string; decode into a generic map for CreateCfg.
45 | var settings map[string]interface{}
46 | err := json.Unmarshal([]byte(m.Settings), &settings)
47 | if err != nil {
48 | return nil, err
49 | }
50 |
51 | resp := commands.CreateCfg(options, settings)
52 |
53 | return commands.FormatJSONValue(resp), nil
54 | })
55 | }
--------------------------------------------------------------------------------
/apex/functions/delete_cfg/.apexignore:
--------------------------------------------------------------------------------
1 | *.go
2 | event.json
--------------------------------------------------------------------------------
/apex/functions/delete_cfg/main.go:
--------------------------------------------------------------------------------
1 | package main
2 |
3 | import (
4 | "encoding/json"
5 | "github.com/apex/go-apex"
6 | "github.com/tmaiaroto/discfg/commands"
7 | "github.com/tmaiaroto/discfg/config"
8 | "github.com/tmaiaroto/discfg/version"
9 | "os"
10 | )
11 |
12 | // To change these settings for DynamoDB, deploy with a different environment variable.
13 | // apex deploy -s DISCFG_REGION=us-west-1
14 | var discfgDBRegion = os.Getenv("DISCFG_REGION")
15 |
16 | // message is the JSON event passed to the Lambda.
17 | type message struct {
18 | Name string `json:"name"`
19 | }
20 |
21 | var options = config.Options{StorageInterfaceName: "dynamodb", Version: version.Semantic} // Shared command options; the AWS region is filled in per invocation.
22 |
23 | func main() {
24 | // If not set for some reason, use us-east-1 by default.
25 | if discfgDBRegion == "" {
26 | discfgDBRegion = "us-east-1"
27 | }
28 |
29 | apex.HandleFunc(func(event json.RawMessage, ctx *apex.Context) (interface{}, error) {
30 | var m message
31 |
32 | if err := json.Unmarshal(event, &m); err != nil {
33 | return nil, err
34 | }
35 |
36 | options.Storage.AWS.Region = discfgDBRegion
37 |
38 | if m.Name != "" {
39 | options.CfgName = m.Name
40 | }
41 |
42 | resp := commands.DeleteCfg(options)
43 |
44 | return commands.FormatJSONValue(resp), nil
45 | })
46 | }
47 |
--------------------------------------------------------------------------------
/apex/functions/delete_key/.apexignore:
--------------------------------------------------------------------------------
1 | *.go
2 | event.json
--------------------------------------------------------------------------------
/apex/functions/delete_key/main.go:
--------------------------------------------------------------------------------
1 | package main
2 |
3 | import (
4 | "encoding/json"
5 | "github.com/apex/go-apex"
6 | "github.com/tmaiaroto/discfg/commands"
7 | "github.com/tmaiaroto/discfg/config"
8 | "github.com/tmaiaroto/discfg/version"
9 | "os"
10 | )
11 |
// To change these settings for DynamoDB, deploy with a different environment variable.
// apex deploy -s DISCFG_REGION=us-west-1
// NOTE(review): an earlier comment said DISCFG_DB_REGION, but the code reads DISCFG_REGION.
var discfgDBRegion = os.Getenv("DISCFG_REGION")
var discfgDBTable = os.Getenv("DISCFG_TABLE")

// The JSON message passed to the Lambda (should include key, value, etc.)
type message struct {
	Name string `json:"name"`
	// Comes in as string, but needs to be converted to int64
	TTL string `json:"ttl"`
	Key string `json:"key"`
	Value string `json:"value"`
	Raw string `json:"raw"`
}

// Shared command options; region, config name, and key are filled in per invocation.
var options = config.Options{StorageInterfaceName: "dynamodb", Version: version.Semantic}
28 |
29 | func main() {
30 | // If not set for some reason, use us-east-1 by default.
31 | if discfgDBRegion == "" {
32 | discfgDBRegion = "us-east-1"
33 | }
34 |
35 | apex.HandleFunc(func(event json.RawMessage, ctx *apex.Context) (interface{}, error) {
36 | var m message
37 |
38 | if err := json.Unmarshal(event, &m); err != nil {
39 | return nil, err
40 | }
41 |
42 | options.Storage.AWS.Region = discfgDBRegion
43 | // Each discfg API can be configured with a default table name.
44 | options.CfgName = discfgDBTable
45 | // Overwritten by the message passed to the Lambda.
46 | if m.Name != "" {
47 | options.CfgName = m.Name
48 | }
49 | options.Key = m.Key
50 |
51 | resp := commands.DeleteKey(options)
52 |
53 | return commands.FormatJSONValue(resp), nil
54 | })
55 | }
56 |
--------------------------------------------------------------------------------
/apex/functions/get_key/.apexignore:
--------------------------------------------------------------------------------
1 | *.go
2 | event.json
--------------------------------------------------------------------------------
/apex/functions/get_key/main.go:
--------------------------------------------------------------------------------
1 | package main
2 |
3 | import (
4 | "encoding/json"
5 | "github.com/apex/go-apex"
6 | "github.com/tmaiaroto/discfg/commands"
7 | "github.com/tmaiaroto/discfg/config"
8 | "github.com/tmaiaroto/discfg/version"
9 | "os"
10 | "time"
11 | )
12 |
// To change these settings for DynamoDB, deploy with a different environment variable.
// apex deploy -s DISCFG_REGION=us-west-1
// NOTE(review): an earlier comment said DISCFG_DB_REGION, but the code reads DISCFG_REGION.
var discfgDBRegion = os.Getenv("DISCFG_REGION")
var discfgDBTable = os.Getenv("DISCFG_TABLE")

// The JSON message passed to the Lambda (should include key, value, etc.)
type message struct {
	Name string `json:"name"`
	// Comes in as string, but needs to be converted to int64
	TTL string `json:"ttl"`
	Key string `json:"key"`
	Value string `json:"value"`
	// NOTE(review): Raw is currently unused; the raw-passthrough branch below is commented out.
	Raw string `json:"raw"`
}

// Shared command options; region, config name, and key are filled in per invocation.
var options = config.Options{StorageInterfaceName: "dynamodb", Version: version.Semantic}
29 |
30 | func main() {
31 | // If not set for some reason, use us-east-1 by default.
32 | if discfgDBRegion == "" {
33 | discfgDBRegion = "us-east-1"
34 | }
35 |
36 | apex.HandleFunc(func(event json.RawMessage, ctx *apex.Context) (interface{}, error) {
37 | var m message
38 |
39 | if err := json.Unmarshal(event, &m); err != nil {
40 | return nil, err
41 | }
42 |
43 | options.Storage.AWS.Region = discfgDBRegion
44 | // Each discfg API can be configured with a default table name.
45 | options.CfgName = discfgDBTable
46 | // Overwritten by the message passed to the Lambda.
47 | if m.Name != "" {
48 | options.CfgName = m.Name
49 | }
50 | options.Key = m.Key
51 |
52 | resp := commands.GetKey(options)
53 |
54 | // Format the expiration time (if applicable). This prevents output like "0001-01-01T00:00:00Z" when empty
55 | // and allows for the time.RFC3339Nano format to be used whereas time.Time normally marshals to a different format.
56 | if resp.Item.TTL > 0 {
57 | resp.Item.OutputExpiration = resp.Item.Expiration.Format(time.RFC3339Nano)
58 | }
59 |
60 | // Just return the raw value for the given key if raw was passed as true
61 | // if m.Raw == "true" {
62 | // return resp.Item.Value, nil
63 | // }
64 |
65 | r := commands.FormatJSONValue(resp)
66 | return r, nil
67 | })
68 | }
69 |
--------------------------------------------------------------------------------
/apex/functions/info_cfg/.apexignore:
--------------------------------------------------------------------------------
1 | *.go
2 | event.json
--------------------------------------------------------------------------------
/apex/functions/info_cfg/main.go:
--------------------------------------------------------------------------------
1 | package main
2 |
3 | import (
4 | "encoding/json"
5 | "github.com/apex/go-apex"
6 | "github.com/tmaiaroto/discfg/commands"
7 | "github.com/tmaiaroto/discfg/config"
8 | "github.com/tmaiaroto/discfg/version"
9 | "os"
10 | )
11 |
// To change these settings for DynamoDB, deploy with a different environment variable.
// apex deploy -s DISCFG_REGION=us-west-1
// NOTE(review): an earlier comment said DISCFG_DB_REGION, but the code reads DISCFG_REGION.
var discfgDBRegion = os.Getenv("DISCFG_REGION")

// The JSON message passed to the Lambda.
type message struct {
	// Name is the discfg configuration (DynamoDB table) name to inspect.
	Name string `json:"name"`
}

// Shared command options; region and config name are filled in per invocation.
var options = config.Options{StorageInterfaceName: "dynamodb", Version: version.Semantic}
22 |
23 | func main() {
24 | // If not set for some reason, use us-east-1 by default.
25 | if discfgDBRegion == "" {
26 | discfgDBRegion = "us-east-1"
27 | }
28 |
29 | apex.HandleFunc(func(event json.RawMessage, ctx *apex.Context) (interface{}, error) {
30 | var m message
31 |
32 | if err := json.Unmarshal(event, &m); err != nil {
33 | return nil, err
34 | }
35 |
36 | options.Storage.AWS.Region = discfgDBRegion
37 |
38 | if m.Name != "" {
39 | options.CfgName = m.Name
40 | }
41 |
42 | resp := commands.Info(options)
43 |
44 | return commands.FormatJSONValue(resp), nil
45 | })
46 | }
47 |
--------------------------------------------------------------------------------
/apex/functions/set_key/.apexignore:
--------------------------------------------------------------------------------
1 | *.go
2 | event.json
--------------------------------------------------------------------------------
/apex/functions/set_key/main.go:
--------------------------------------------------------------------------------
1 | package main
2 |
3 | import (
4 | "encoding/json"
5 | "github.com/apex/go-apex"
6 | "github.com/tmaiaroto/discfg/commands"
7 | "github.com/tmaiaroto/discfg/config"
8 | "github.com/tmaiaroto/discfg/version"
9 | "os"
10 | "strconv"
11 | "time"
12 | )
13 |
// To change these settings for DynamoDB, deploy with a different environment variable.
// apex deploy -s DISCFG_REGION=us-west-1
// NOTE(review): an earlier comment said DISCFG_DB_REGION, but the code reads DISCFG_REGION.
var discfgDBRegion = os.Getenv("DISCFG_REGION")
var discfgDBTable = os.Getenv("DISCFG_TABLE")

// The JSON message passed to the Lambda (should include key, value, etc.)
type message struct {
	Name string `json:"name"`
	// Comes in as string, but needs to be converted to int64
	TTL string `json:"ttl"`
	Key string `json:"key"`
	Value string `json:"value"`
}

// Shared command options; region, config name, key, TTL, and value are filled in per invocation.
var options = config.Options{StorageInterfaceName: "dynamodb", Version: version.Semantic}
29 |
30 | func main() {
31 | // If not set for some reason, use us-east-1 by default.
32 | if discfgDBRegion == "" {
33 | discfgDBRegion = "us-east-1"
34 | }
35 |
36 | apex.HandleFunc(func(event json.RawMessage, ctx *apex.Context) (interface{}, error) {
37 | var m message
38 |
39 | if err := json.Unmarshal(event, &m); err != nil {
40 | return nil, err
41 | }
42 |
43 | options.Storage.AWS.Region = discfgDBRegion
44 | // The following are set automatically.
45 | // options.Storage.AWS.AccessKeyID = os.Getenv("AWS_ACCESS_KEY_ID")
46 | // options.Storage.AWS.SecretAccessKey = os.Getenv("AWS_SECRET_ACCESS_KEY")
47 | // options.Storage.AWS.SessionToken = os.Getenv("AWS_SESSION_TOKEN")
48 |
49 | // Each discfg API can be configured with a default table name.
50 | options.CfgName = discfgDBTable
51 | // Overwritten by the message passed to the Lambda.
52 | if m.Name != "" {
53 | options.CfgName = m.Name
54 | }
55 | // Comes from a path param from API Gateway.
56 | options.Key = m.Key
57 | // Comes from a querystring value from API Gateway, ie. ?ttl=300
58 | // Note: 0 is unlimited, no TTL.
59 | if m.TTL != "" {
60 | if ttl, err := strconv.ParseInt(m.TTL, 10, 64); err == nil {
61 | options.TTL = ttl
62 | }
63 | }
64 | // Ends up being the POST body from API Gateway.
65 | options.Value = []byte(m.Value)
66 |
67 | resp := commands.SetKey(options)
68 |
69 | // Format the expiration time (if applicable). This prevents output like "0001-01-01T00:00:00Z" when empty
70 | // and allows for the time.RFC3339Nano format to be used whereas time.Time normally marshals to a different format.
71 | if resp.Item.TTL > 0 {
72 | resp.Item.OutputExpiration = resp.Item.Expiration.Format(time.RFC3339Nano)
73 | }
74 |
75 | return commands.FormatJSONValue(resp), nil
76 | })
77 | }
78 |
--------------------------------------------------------------------------------
/apex/functions/update_cfg/.apexignore:
--------------------------------------------------------------------------------
1 | *.go
2 | event.json
--------------------------------------------------------------------------------
/apex/functions/update_cfg/main.go:
--------------------------------------------------------------------------------
1 | package main
2 |
3 | import (
4 | "encoding/json"
5 | "github.com/apex/go-apex"
6 | "github.com/tmaiaroto/discfg/commands"
7 | "github.com/tmaiaroto/discfg/config"
8 | "github.com/tmaiaroto/discfg/version"
9 | "os"
10 | )
11 |
// To change these settings for DynamoDB, deploy with a different environment variable.
// apex deploy -s DISCFG_REGION=us-west-1
// NOTE(review): an earlier comment said DISCFG_DB_REGION, but the code reads DISCFG_REGION.
var discfgDBRegion = os.Getenv("DISCFG_REGION")

// The JSON message passed to the Lambda.
type message struct {
	// Name is the discfg configuration (DynamoDB table) name to update.
	Name string `json:"name"`
	// ...is actually the POST body (for now) but didn't want to call it "Value" in this function
	Settings string `json:"settings"`
}

// Shared command options; region and config name are filled in per invocation.
var options = config.Options{StorageInterfaceName: "dynamodb", Version: version.Semantic}
24 |
25 | func main() {
26 | // If not set for some reason, use us-east-1 by default.
27 | if discfgDBRegion == "" {
28 | discfgDBRegion = "us-east-1"
29 | }
30 |
31 | apex.HandleFunc(func(event json.RawMessage, ctx *apex.Context) (interface{}, error) {
32 | var m message
33 |
34 | if err := json.Unmarshal(event, &m); err != nil {
35 | return nil, err
36 | }
37 |
38 | options.Storage.AWS.Region = discfgDBRegion
39 |
40 | if m.Name != "" {
41 | options.CfgName = m.Name
42 | }
43 |
44 | var settings map[string]interface{}
45 | err := json.Unmarshal([]byte(m.Settings), &settings)
46 | if err != nil {
47 | return nil, err
48 | }
49 |
50 | resp := commands.UpdateCfg(options, settings)
51 |
52 | return commands.FormatJSONValue(resp), nil
53 | })
54 | }
55 |
--------------------------------------------------------------------------------
/apex/infrastructure/dev/main.tf:
--------------------------------------------------------------------------------
# IAM roles/policies used by the Lambda functions and API Gateway.
module "iam" {
  source = "../modules/iam"
}

# The REST API wiring; its invoke role comes from the iam module above.
module "api_gateway" {
  source = "../modules/api_gateway"
  api_gateway_aws_account_id = "${var.aws_account_id}"
  api_gateway_aws_region = "${var.aws_region}"
  api_gateway_invoke_discfg_lambda_role_arn = "${module.iam.gateway_invoke_discfg_lambda_role_arn}"
  api_gateway_stage = "${var.api_stage}"
  api_gateway_api_name = "${var.api_name}"
}

# Note, eventually this can be set by Discfg commands via the API.
# For now, this will be the simplest way to get it going.
module "dynamodb" {
  source = "../modules/dynamodb"
  dynamodb_discfg_table = "${var.discfg_table}"
}

# NOTE(review): presumably consumed outside Terraform (e.g. the apex project) — confirm.
output "lambda_function_role_id" {
  value = "${module.iam.lambda_function_role_id}"
}
24 |
--------------------------------------------------------------------------------
/apex/infrastructure/dev/variables.tf:
--------------------------------------------------------------------------------
variable "aws_account_id" {}

variable "aws_region" {
    default = "us-east-1"
}

# NOTE(review): "stage" appears unused here — main.tf references var.api_stage
# instead. Confirm before removing.
variable "stage" {
    default = "dev"
}

# Name of the DynamoDB table backing the default discfg configuration.
variable "discfg_table" {
    default = "discfg"
}

variable "api_name" {
    default = "Discfg"
}

# API Gateway deployment stage name.
variable "api_stage" {
    default = "dev"
}
--------------------------------------------------------------------------------
/apex/infrastructure/modules/api_gateway/api-gateway.tf:
--------------------------------------------------------------------------------
# Creates the Discfg API
resource "aws_api_gateway_rest_api" "DiscfgAPI" {
    name = "${var.api_gateway_api_name}"
    description = "A simple distributed configuration service"
}

# Notes
# ----------
# General API endpoint is: /{name}/keys/{key}
# This allows for getting and setting key values for a given discfg name

# ------------------------------------------------ RESOURCE PATHS ------------------------------------------------
#
# Creates the /{name} API resource path (for config table name)
resource "aws_api_gateway_resource" "NameResource" {
    rest_api_id = "${aws_api_gateway_rest_api.DiscfgAPI.id}"
    parent_id = "${aws_api_gateway_rest_api.DiscfgAPI.root_resource_id}"
    path_part = "{name}"
}

# Creates the /{name}/keys API resource path (parent is {name})
resource "aws_api_gateway_resource" "KeysResource" {
    rest_api_id = "${aws_api_gateway_rest_api.DiscfgAPI.id}"
    parent_id = "${aws_api_gateway_resource.NameResource.id}"
    path_part = "keys"
}

# /{name}/keys/{key} resource path
resource "aws_api_gateway_resource" "KeysKeyResource" {
    rest_api_id = "${aws_api_gateway_rest_api.DiscfgAPI.id}"
    parent_id = "${aws_api_gateway_resource.KeysResource.id}"
    path_part = "{key}"
}

# /{name}/cfg resource path
resource "aws_api_gateway_resource" "CfgResource" {
    rest_api_id = "${aws_api_gateway_rest_api.DiscfgAPI.id}"
    parent_id = "${aws_api_gateway_resource.NameResource.id}"
    path_part = "cfg"
}
41 |
42 |
# --------------------------------------------------- METHODS ----------------------------------------------------
# ---- Method Execution
# ---- /{name}/keys/{key} PUT
#
# Creates the PUT method under /{name}/keys/{key}
resource "aws_api_gateway_method" "KeysPUTMethod" {
    rest_api_id = "${aws_api_gateway_rest_api.DiscfgAPI.id}"
    resource_id = "${aws_api_gateway_resource.KeysKeyResource.id}"
    http_method = "PUT"
    authorization = "NONE"
}
# Configures the integration for the Resource Method (in other words, what gets triggered)
# Client -> Method Request -> Integration Request -> *Integration*
# Invokes the discfg_set_key Lambda function.
resource "aws_api_gateway_integration" "KeysPUTIntegration" {
    rest_api_id = "${aws_api_gateway_rest_api.DiscfgAPI.id}"
    resource_id = "${aws_api_gateway_resource.KeysKeyResource.id}"
    http_method = "${aws_api_gateway_method.KeysPUTMethod.http_method}"
    type = "AWS"
    # Must be POST for invoking Lambda function
    integration_http_method = "POST"
    credentials = "${var.api_gateway_invoke_discfg_lambda_role_arn}"
    # http://docs.aws.amazon.com/apigateway/api-reference/resource/integration/#uri
    uri = "arn:aws:apigateway:${var.api_gateway_aws_region}:lambda:path/2015-03-31/functions/arn:aws:lambda:${var.api_gateway_aws_region}:${var.api_gateway_aws_account_id}:function:discfg_set_key/invocations"
    request_templates = {
        "application/json" = "${file("${path.module}/api_gateway_body_mapping.template")}"
    }
}
# Integration -> Integration Response -> *Method Response* -> Client
resource "aws_api_gateway_method_response" "KeysPUTMethod200" {
    rest_api_id = "${aws_api_gateway_rest_api.DiscfgAPI.id}"
    resource_id = "${aws_api_gateway_resource.KeysKeyResource.id}"
    http_method = "${aws_api_gateway_method.KeysPUTMethod.http_method}"
    status_code = "200"
}
# Integration -> *Integration Response* -> Method Response -> Client
resource "aws_api_gateway_integration_response" "KeysPUTIntegrationResponse" {
    rest_api_id = "${aws_api_gateway_rest_api.DiscfgAPI.id}"
    resource_id = "${aws_api_gateway_resource.KeysKeyResource.id}"
    http_method = "${aws_api_gateway_method.KeysPUTMethod.http_method}"
    status_code = "${aws_api_gateway_method_response.KeysPUTMethod200.status_code}"
}

# ---- Method Execution
# ---- /{name}/keys/{key} GET
#
# Creates the GET method under /{name}/keys/{key}
resource "aws_api_gateway_method" "KeysGETMethod" {
    rest_api_id = "${aws_api_gateway_rest_api.DiscfgAPI.id}"
    resource_id = "${aws_api_gateway_resource.KeysKeyResource.id}"
    http_method = "GET"
    authorization = "NONE"
}
# Configures the integration for the Resource Method (in other words, what gets triggered)
# Client -> Method Request -> Integration Request -> *Integration*
# Invokes the discfg_get_key Lambda function.
resource "aws_api_gateway_integration" "KeysGETIntegration" {
    rest_api_id = "${aws_api_gateway_rest_api.DiscfgAPI.id}"
    resource_id = "${aws_api_gateway_resource.KeysKeyResource.id}"
    http_method = "${aws_api_gateway_method.KeysGETMethod.http_method}"
    type = "AWS"
    # Must be POST for invoking Lambda function
    integration_http_method = "POST"
    credentials = "${var.api_gateway_invoke_discfg_lambda_role_arn}"
    # http://docs.aws.amazon.com/apigateway/api-reference/resource/integration/#uri
    uri = "arn:aws:apigateway:${var.api_gateway_aws_region}:lambda:path/2015-03-31/functions/arn:aws:lambda:${var.api_gateway_aws_region}:${var.api_gateway_aws_account_id}:function:discfg_get_key/invocations"
    request_templates = {
        "application/json" = "${file("${path.module}/api_gateway_body_mapping.template")}"
    }
}
# Integration -> Integration Response -> *Method Response* -> Client
resource "aws_api_gateway_method_response" "KeysGETMethod200" {
    rest_api_id = "${aws_api_gateway_rest_api.DiscfgAPI.id}"
    resource_id = "${aws_api_gateway_resource.KeysKeyResource.id}"
    http_method = "${aws_api_gateway_method.KeysGETMethod.http_method}"
    status_code = "200"
}
# Integration -> *Integration Response* -> Method Response -> Client
resource "aws_api_gateway_integration_response" "KeysGETIntegrationResponse" {
    rest_api_id = "${aws_api_gateway_rest_api.DiscfgAPI.id}"
    resource_id = "${aws_api_gateway_resource.KeysKeyResource.id}"
    http_method = "${aws_api_gateway_method.KeysGETMethod.http_method}"
    status_code = "${aws_api_gateway_method_response.KeysGETMethod200.status_code}"
}

# ---- Method Execution
# ---- /{name}/keys/{key} DELETE
#
# Creates the DELETE method under /{name}/keys/{key}
resource "aws_api_gateway_method" "KeysDELETEMethod" {
    rest_api_id = "${aws_api_gateway_rest_api.DiscfgAPI.id}"
    resource_id = "${aws_api_gateway_resource.KeysKeyResource.id}"
    http_method = "DELETE"
    authorization = "NONE"
}
# Configures the integration for the Resource Method (in other words, what gets triggered)
# Client -> Method Request -> Integration Request -> *Integration*
# Invokes the discfg_delete_key Lambda function.
resource "aws_api_gateway_integration" "KeysDELETEIntegration" {
    rest_api_id = "${aws_api_gateway_rest_api.DiscfgAPI.id}"
    resource_id = "${aws_api_gateway_resource.KeysKeyResource.id}"
    http_method = "${aws_api_gateway_method.KeysDELETEMethod.http_method}"
    type = "AWS"
    # Must be POST for invoking Lambda function
    integration_http_method = "POST"
    credentials = "${var.api_gateway_invoke_discfg_lambda_role_arn}"
    # http://docs.aws.amazon.com/apigateway/api-reference/resource/integration/#uri
    uri = "arn:aws:apigateway:${var.api_gateway_aws_region}:lambda:path/2015-03-31/functions/arn:aws:lambda:${var.api_gateway_aws_region}:${var.api_gateway_aws_account_id}:function:discfg_delete_key/invocations"
    request_templates = {
        "application/json" = "${file("${path.module}/api_gateway_body_mapping.template")}"
    }
}
# Integration -> Integration Response -> *Method Response* -> Client
resource "aws_api_gateway_method_response" "KeysDELETEMethod200" {
    rest_api_id = "${aws_api_gateway_rest_api.DiscfgAPI.id}"
    resource_id = "${aws_api_gateway_resource.KeysKeyResource.id}"
    http_method = "${aws_api_gateway_method.KeysDELETEMethod.http_method}"
    status_code = "200"
}
# Integration -> *Integration Response* -> Method Response -> Client
resource "aws_api_gateway_integration_response" "KeysDELETEIntegrationResponse" {
    rest_api_id = "${aws_api_gateway_rest_api.DiscfgAPI.id}"
    resource_id = "${aws_api_gateway_resource.KeysKeyResource.id}"
    http_method = "${aws_api_gateway_method.KeysDELETEMethod.http_method}"
    status_code = "${aws_api_gateway_method_response.KeysDELETEMethod200.status_code}"
}
166 |
# ---- Method Execution
# ---- /{name}/cfg PUT
# NOTE(review): these headers previously said /cfg/{name}, but the CfgResource
# path above is /{name}/cfg (parent {name}, path_part "cfg").
#
# Creates the PUT method under /{name}/cfg
resource "aws_api_gateway_method" "CfgPUTMethod" {
    rest_api_id = "${aws_api_gateway_rest_api.DiscfgAPI.id}"
    resource_id = "${aws_api_gateway_resource.CfgResource.id}"
    http_method = "PUT"
    authorization = "NONE"
}
# Configures the integration for the Resource Method (in other words, what gets triggered)
# Client -> Method Request -> Integration Request -> *Integration*
# Invokes the discfg_create_cfg Lambda function.
resource "aws_api_gateway_integration" "CfgPUTIntegration" {
    rest_api_id = "${aws_api_gateway_rest_api.DiscfgAPI.id}"
    resource_id = "${aws_api_gateway_resource.CfgResource.id}"
    http_method = "${aws_api_gateway_method.CfgPUTMethod.http_method}"
    type = "AWS"
    # Must be POST for invoking Lambda function
    integration_http_method = "POST"
    credentials = "${var.api_gateway_invoke_discfg_lambda_role_arn}"
    # http://docs.aws.amazon.com/apigateway/api-reference/resource/integration/#uri
    uri = "arn:aws:apigateway:${var.api_gateway_aws_region}:lambda:path/2015-03-31/functions/arn:aws:lambda:${var.api_gateway_aws_region}:${var.api_gateway_aws_account_id}:function:discfg_create_cfg/invocations"
    request_templates = {
        "application/json" = "${file("${path.module}/api_gateway_body_mapping.template")}"
    }
}
# Integration -> Integration Response -> *Method Response* -> Client
resource "aws_api_gateway_method_response" "CfgPUTMethod200" {
    rest_api_id = "${aws_api_gateway_rest_api.DiscfgAPI.id}"
    resource_id = "${aws_api_gateway_resource.CfgResource.id}"
    http_method = "${aws_api_gateway_method.CfgPUTMethod.http_method}"
    status_code = "200"
}
# Integration -> *Integration Response* -> Method Response -> Client
resource "aws_api_gateway_integration_response" "CfgPUTIntegrationResponse" {
    rest_api_id = "${aws_api_gateway_rest_api.DiscfgAPI.id}"
    resource_id = "${aws_api_gateway_resource.CfgResource.id}"
    http_method = "${aws_api_gateway_method.CfgPUTMethod.http_method}"
    status_code = "${aws_api_gateway_method_response.CfgPUTMethod200.status_code}"
}

# ---- Method Execution
# ---- /{name}/cfg PATCH
#
# Creates the PATCH method under /{name}/cfg
resource "aws_api_gateway_method" "CfgPATCHMethod" {
    rest_api_id = "${aws_api_gateway_rest_api.DiscfgAPI.id}"
    resource_id = "${aws_api_gateway_resource.CfgResource.id}"
    http_method = "PATCH"
    authorization = "NONE"
}
# Configures the integration for the Resource Method (in other words, what gets triggered)
# Client -> Method Request -> Integration Request -> *Integration*
# Invokes the discfg_update_cfg Lambda function.
resource "aws_api_gateway_integration" "CfgPATCHIntegration" {
    rest_api_id = "${aws_api_gateway_rest_api.DiscfgAPI.id}"
    resource_id = "${aws_api_gateway_resource.CfgResource.id}"
    http_method = "${aws_api_gateway_method.CfgPATCHMethod.http_method}"
    type = "AWS"
    # Must be POST for invoking Lambda function
    integration_http_method = "POST"
    credentials = "${var.api_gateway_invoke_discfg_lambda_role_arn}"
    # http://docs.aws.amazon.com/apigateway/api-reference/resource/integration/#uri
    uri = "arn:aws:apigateway:${var.api_gateway_aws_region}:lambda:path/2015-03-31/functions/arn:aws:lambda:${var.api_gateway_aws_region}:${var.api_gateway_aws_account_id}:function:discfg_update_cfg/invocations"
    request_templates = {
        "application/json" = "${file("${path.module}/api_gateway_body_mapping.template")}"
    }
}
# Integration -> Integration Response -> *Method Response* -> Client
resource "aws_api_gateway_method_response" "CfgPATCHMethod200" {
    rest_api_id = "${aws_api_gateway_rest_api.DiscfgAPI.id}"
    resource_id = "${aws_api_gateway_resource.CfgResource.id}"
    http_method = "${aws_api_gateway_method.CfgPATCHMethod.http_method}"
    status_code = "200"
}
# Integration -> *Integration Response* -> Method Response -> Client
resource "aws_api_gateway_integration_response" "CfgPATCHIntegrationResponse" {
    rest_api_id = "${aws_api_gateway_rest_api.DiscfgAPI.id}"
    resource_id = "${aws_api_gateway_resource.CfgResource.id}"
    http_method = "${aws_api_gateway_method.CfgPATCHMethod.http_method}"
    status_code = "${aws_api_gateway_method_response.CfgPATCHMethod200.status_code}"
}

# ---- Method Execution
# ---- /{name}/cfg DELETE
#
# Creates the DELETE method under /{name}/cfg
resource "aws_api_gateway_method" "CfgDELETEMethod" {
    rest_api_id = "${aws_api_gateway_rest_api.DiscfgAPI.id}"
    resource_id = "${aws_api_gateway_resource.CfgResource.id}"
    http_method = "DELETE"
    authorization = "NONE"
}
# Configures the integration for the Resource Method (in other words, what gets triggered)
# Client -> Method Request -> Integration Request -> *Integration*
# Invokes the discfg_delete_cfg Lambda function.
resource "aws_api_gateway_integration" "CfgDELETEIntegration" {
    rest_api_id = "${aws_api_gateway_rest_api.DiscfgAPI.id}"
    resource_id = "${aws_api_gateway_resource.CfgResource.id}"
    http_method = "${aws_api_gateway_method.CfgDELETEMethod.http_method}"
    type = "AWS"
    # Must be POST for invoking Lambda function
    integration_http_method = "POST"
    credentials = "${var.api_gateway_invoke_discfg_lambda_role_arn}"
    # http://docs.aws.amazon.com/apigateway/api-reference/resource/integration/#uri
    uri = "arn:aws:apigateway:${var.api_gateway_aws_region}:lambda:path/2015-03-31/functions/arn:aws:lambda:${var.api_gateway_aws_region}:${var.api_gateway_aws_account_id}:function:discfg_delete_cfg/invocations"
    request_templates = {
        "application/json" = "${file("${path.module}/api_gateway_body_mapping.template")}"
    }
}
# Integration -> Integration Response -> *Method Response* -> Client
resource "aws_api_gateway_method_response" "CfgDELETEMethod200" {
    rest_api_id = "${aws_api_gateway_rest_api.DiscfgAPI.id}"
    resource_id = "${aws_api_gateway_resource.CfgResource.id}"
    http_method = "${aws_api_gateway_method.CfgDELETEMethod.http_method}"
    status_code = "200"
}
# Integration -> *Integration Response* -> Method Response -> Client
resource "aws_api_gateway_integration_response" "CfgDELETEIntegrationResponse" {
    rest_api_id = "${aws_api_gateway_rest_api.DiscfgAPI.id}"
    resource_id = "${aws_api_gateway_resource.CfgResource.id}"
    http_method = "${aws_api_gateway_method.CfgDELETEMethod.http_method}"
    status_code = "${aws_api_gateway_method_response.CfgDELETEMethod200.status_code}"
}

# ---- Method Execution
# ---- /{name}/cfg OPTIONS
#
# Creates the OPTIONS method under /{name}/cfg
# NOTE(review): OPTIONS is mapped to the discfg_info_cfg Lambda rather than a
# CORS preflight response — confirm this is intentional.
resource "aws_api_gateway_method" "CfgOPTIONSMethod" {
    rest_api_id = "${aws_api_gateway_rest_api.DiscfgAPI.id}"
    resource_id = "${aws_api_gateway_resource.CfgResource.id}"
    http_method = "OPTIONS"
    authorization = "NONE"
}
# Configures the integration for the Resource Method (in other words, what gets triggered)
# Client -> Method Request -> Integration Request -> *Integration*
# Invokes the discfg_info_cfg Lambda function.
resource "aws_api_gateway_integration" "CfgOPTIONSIntegration" {
    rest_api_id = "${aws_api_gateway_rest_api.DiscfgAPI.id}"
    resource_id = "${aws_api_gateway_resource.CfgResource.id}"
    http_method = "${aws_api_gateway_method.CfgOPTIONSMethod.http_method}"
    type = "AWS"
    # Must be POST for invoking Lambda function
    integration_http_method = "POST"
    credentials = "${var.api_gateway_invoke_discfg_lambda_role_arn}"
    # http://docs.aws.amazon.com/apigateway/api-reference/resource/integration/#uri
    uri = "arn:aws:apigateway:${var.api_gateway_aws_region}:lambda:path/2015-03-31/functions/arn:aws:lambda:${var.api_gateway_aws_region}:${var.api_gateway_aws_account_id}:function:discfg_info_cfg/invocations"
    request_templates = {
        "application/json" = "${file("${path.module}/api_gateway_body_mapping.template")}"
    }
}
# Integration -> Integration Response -> *Method Response* -> Client
resource "aws_api_gateway_method_response" "CfgOPTIONSMethod200" {
    rest_api_id = "${aws_api_gateway_rest_api.DiscfgAPI.id}"
    resource_id = "${aws_api_gateway_resource.CfgResource.id}"
    http_method = "${aws_api_gateway_method.CfgOPTIONSMethod.http_method}"
    status_code = "200"
}
# Integration -> *Integration Response* -> Method Response -> Client
resource "aws_api_gateway_integration_response" "CfgOPTIONSIntegrationResponse" {
    rest_api_id = "${aws_api_gateway_rest_api.DiscfgAPI.id}"
    resource_id = "${aws_api_gateway_resource.CfgResource.id}"
    http_method = "${aws_api_gateway_method.CfgOPTIONSMethod.http_method}"
    status_code = "${aws_api_gateway_method_response.CfgOPTIONSMethod200.status_code}"
}
330 |
# -------------------------------------------------- DEPLOYMENT --------------------------------------------------
# Creates the API stage. depends_on lists every integration so the deployment
# is only created after all methods are fully wired up.
resource "aws_api_gateway_deployment" "stage" {
  depends_on = ["aws_api_gateway_integration.KeysPUTIntegration", "aws_api_gateway_integration.KeysGETIntegration", "aws_api_gateway_integration.KeysDELETEIntegration", "aws_api_gateway_integration.CfgPUTIntegration", "aws_api_gateway_integration.CfgPATCHIntegration", "aws_api_gateway_integration.CfgDELETEIntegration", "aws_api_gateway_integration.CfgOPTIONSIntegration"]

  rest_api_id = "${aws_api_gateway_rest_api.DiscfgAPI.id}"
  stage_name = "${var.api_gateway_stage}"
}
--------------------------------------------------------------------------------
/apex/infrastructure/modules/api_gateway/api_gateway_body_mapping.template:
--------------------------------------------------------------------------------
{
    "name": "$input.params('name')",
    "key": "$input.params('key')",
    "value": "$util.escapeJavaScript($input.body).replaceAll("\\'","'")",
    "ttl": "$input.params('ttl')",
    "settings": "$util.escapeJavaScript($input.body).replaceAll("\\'","'")",
    "raw": "$input.params('raw')"
}
--------------------------------------------------------------------------------
/apex/infrastructure/modules/api_gateway/variables.tf:
--------------------------------------------------------------------------------
# AWS account ID used by the module (presumably for building Lambda invocation ARNs — confirm against api-gateway.tf).
variable "api_gateway_aws_account_id" {}
# AWS region the API Gateway and Lambda functions live in.
variable "api_gateway_aws_region" {}
# ARN of the IAM role API Gateway assumes to invoke the discfg Lambda functions.
variable "api_gateway_invoke_discfg_lambda_role_arn" {}
# Stage name used for the deployment (e.g. "dev").
variable "api_gateway_stage" {}
# Display name of the REST API.
variable "api_gateway_api_name" {}
6 |
--------------------------------------------------------------------------------
/apex/infrastructure/modules/dynamodb/dynamodb.tf:
--------------------------------------------------------------------------------
# DynamoDB table backing discfg storage. Items are keyed on the string "key"
# attribute only (flat key/value layout, no range key).
resource "aws_dynamodb_table" "DiscfgTable" {
  name = "${var.dynamodb_discfg_table}"
  # Minimal provisioned throughput to keep costs low; raise for production load.
  read_capacity = 2
  write_capacity = 2
  hash_key = "key"
  attribute {
    name = "key"
    type = "S"
  }
}
--------------------------------------------------------------------------------
/apex/infrastructure/modules/dynamodb/variables.tf:
--------------------------------------------------------------------------------
1 | variable "dynamodb_discfg_table" {}
2 |
--------------------------------------------------------------------------------
/apex/infrastructure/modules/iam/api-gateway-iam.tf:
--------------------------------------------------------------------------------
1 | resource "aws_iam_role" "gateway_invoke_discfg_lambda" {
2 | name = "gateway_invoke_discfg_lambda"
3 | assume_role_policy = < 0 {
19 | _, err := storage.CreateConfig(opts, settings)
20 | if err != nil {
21 | resp.Error = err.Error()
22 | resp.Message = "Error creating the configuration"
23 | } else {
24 | resp.Message = "Successfully created the configuration"
25 | }
26 | } else {
27 | resp.Error = NotEnoughArgsMsg
28 | // TODO: Error code for this, message may not be necessary - is it worthwhile to try and figure out exactly which arguments were missing?
29 | // Maybe a future thing to do. I need to git er done right now.
30 | }
31 | return resp
32 | }
33 |
34 | // DeleteCfg deletes a configuration
35 | func DeleteCfg(opts config.Options) config.ResponseObject {
36 | resp := config.ResponseObject{
37 | Action: "delete cfg",
38 | }
39 | if len(opts.CfgName) > 0 {
40 | _, err := storage.DeleteConfig(opts)
41 | if err != nil {
42 | resp.Error = err.Error()
43 | resp.Message = "Error deleting the configuration"
44 | } else {
45 | resp.Message = "Successfully deleted the configuration"
46 | }
47 | } else {
48 | resp.Error = NotEnoughArgsMsg
49 | // TODO: Error code for this, message may not be necessary - is it worthwhile to try and figure out exactly which arguments were missing?
50 | // Maybe a future thing to do. I need to git er done right now.
51 | }
52 | return resp
53 | }
54 |
55 | // UpdateCfg updates a configuration's options/settings (if applicable, depends on the interface)
56 | func UpdateCfg(opts config.Options, settings map[string]interface{}) config.ResponseObject {
57 | resp := config.ResponseObject{
58 | Action: "update cfg",
59 | }
60 |
61 | // Note: For some storage engines, such as DynamoDB, it could take a while for changes to be reflected.
62 | if len(settings) > 0 {
63 | _, updateErr := storage.UpdateConfig(opts, settings)
64 | if updateErr != nil {
65 | resp.Error = updateErr.Error()
66 | resp.Message = "Error updating the configuration"
67 | } else {
68 | resp.Message = "Successfully updated the configuration"
69 | }
70 | } else {
71 | resp.Error = NotEnoughArgsMsg
72 | }
73 |
74 | return resp
75 | }
76 |
77 | // Use sets a discfg configuration to use for all future commands until unset (it is optional, but conveniently saves a CLI argument - kinda like MongoDB's use)
78 | func Use(opts config.Options) config.ResponseObject {
79 | resp := config.ResponseObject{
80 | Action: "use",
81 | }
82 | if len(opts.CfgName) > 0 {
83 | cc := []byte(opts.CfgName)
84 | err := ioutil.WriteFile(".discfg", cc, 0644)
85 | if err != nil {
86 | resp.Error = err.Error()
87 | } else {
88 | resp.Message = "Set current working discfg to " + opts.CfgName
89 | resp.CurrentDiscfg = opts.CfgName
90 | }
91 | } else {
92 | resp.Error = NotEnoughArgsMsg
93 | }
94 | return resp
95 | }
96 |
97 | // Which shows which discfg configuration is currently active for use
98 | func Which(opts config.Options) config.ResponseObject {
99 | resp := config.ResponseObject{
100 | Action: "which",
101 | }
102 | currentCfg := GetDiscfgNameFromFile()
103 | if currentCfg == "" {
104 | resp.Error = NoCurrentWorkingCfgMsg
105 | } else {
106 | resp.Message = "Current working configuration: " + currentCfg
107 | resp.CurrentDiscfg = currentCfg
108 | }
109 | return resp
110 | }
111 |
112 | // SetKey sets a key value for a given configuration
113 | func SetKey(opts config.Options) config.ResponseObject {
114 | resp := config.ResponseObject{
115 | Action: "set",
116 | }
117 | // Do not allow empty values to be set
118 | if opts.Value == nil {
119 | resp.Error = ValueRequiredMsg
120 | return resp
121 | }
122 |
123 | if opts.CfgName == "" {
124 | resp.Error = MissingCfgNameMsg
125 | return resp
126 | }
127 |
128 | key, keyErr := formatKeyName(opts.Key)
129 | if keyErr == nil {
130 | opts.Key = key
131 | storageResponse, err := storage.Update(opts)
132 | if err != nil {
133 | resp.Error = err.Error()
134 | resp.Message = "Error updating key value"
135 | } else {
136 | resp.Item.Key = key
137 | resp.Item.Value = opts.Value
138 | resp.Item.Version = 1
139 |
140 | // Only set PrevItem if there was a previous value
141 | if storageResponse.Value != nil {
142 | resp.PrevItem = storageResponse
143 | resp.PrevItem.Key = key
144 | // Update the current item's value if there was a previous version
145 | resp.Item.Version = resp.PrevItem.Version + 1
146 | }
147 | }
148 | } else {
149 | resp.Error = keyErr.Error()
150 | }
151 | return resp
152 | }
153 |
154 | // GetKey gets a key from a configuration
155 | func GetKey(opts config.Options) config.ResponseObject {
156 | resp := config.ResponseObject{
157 | Action: "get",
158 | }
159 | key, keyErr := formatKeyName(opts.Key)
160 | if keyErr == nil {
161 | opts.Key = key
162 | storageResponse, err := storage.Get(opts)
163 | if err != nil {
164 | resp.Error = err.Error()
165 | } else {
166 | resp.Item = storageResponse
167 | }
168 | } else {
169 | resp.Error = keyErr.Error()
170 | }
171 | return resp
172 | }
173 |
174 | // DeleteKey deletes a key from a configuration
175 | func DeleteKey(opts config.Options) config.ResponseObject {
176 | resp := config.ResponseObject{
177 | Action: "delete",
178 | }
179 | key, keyErr := formatKeyName(opts.Key)
180 | if keyErr == nil {
181 | opts.Key = key
182 | storageResponse, err := storage.Delete(opts)
183 | if err != nil {
184 | resp.Error = err.Error()
185 | resp.Message = "Error getting key value"
186 | } else {
187 | resp.Item = storageResponse
188 | resp.Item.Key = opts.Key
189 | resp.Item.Value = nil
190 | resp.Item.Version = storageResponse.Version + 1
191 | resp.PrevItem.Key = opts.Key
192 | resp.PrevItem.Version = storageResponse.Version
193 | resp.PrevItem.Value = storageResponse.Value
194 | // log.Println(storageResponse)
195 | }
196 | } else {
197 | resp.Error = NotEnoughArgsMsg
198 | }
199 | return resp
200 | }
201 |
202 | // Info about the configuration including global version/state and modified time
203 | func Info(opts config.Options) config.ResponseObject {
204 | resp := config.ResponseObject{
205 | Action: "info",
206 | }
207 |
208 | if opts.CfgName != "" {
209 | // Just get the root key
210 | opts.Key = "/"
211 |
212 | storageResponse, err := storage.Get(opts)
213 | if err != nil {
214 | resp.Error = err.Error()
215 | } else {
216 | // Debating putting the item value on here... (allowing users to store values on the config or "root")
217 | // resp.Item = storageResponse
218 | // Set the configuration version and modified time on the response
219 | // Item.CfgVersion and Item.CfgModifiedNanoseconds are not included in the JSON output
220 | resp.CfgVersion = storageResponse.CfgVersion
221 | resp.CfgModified = 0
222 | resp.CfgModifiedNanoseconds = storageResponse.CfgModifiedNanoseconds
223 | // Modified in seconds
224 | resp.CfgModified = storageResponse.CfgModifiedNanoseconds / int64(time.Second)
225 | // Modified parsed
226 | modified := time.Unix(0, storageResponse.CfgModifiedNanoseconds)
227 | resp.CfgModifiedParsed = modified.Format(time.RFC3339)
228 |
229 | // Set information about the storage engine
230 | resp.CfgStorage.InterfaceName = opts.StorageInterfaceName
231 | resp.CfgStorage.Name = storage.Name(opts)
232 | resp.CfgStorage.Options = storage.Options(opts)
233 |
234 | // Get the status (only applicable for some storage interfaces, such as DynamoDB)
235 | resp.CfgState, err = storage.ConfigState(opts)
236 | if err != nil {
237 | resp.Error = err.Error()
238 | } else {
239 | var buffer bytes.Buffer
240 | buffer.WriteString(opts.CfgName)
241 | if resp.CfgState != "" {
242 | buffer.WriteString(" (")
243 | buffer.WriteString(resp.CfgState)
244 | buffer.WriteString(")")
245 | }
246 | buffer.WriteString(" version ")
247 | buffer.WriteString(strconv.FormatInt(resp.CfgVersion, 10))
248 | buffer.WriteString(" last modified ")
249 | buffer.WriteString(modified.Format(time.RFC1123))
250 | resp.Message = buffer.String()
251 | buffer.Reset()
252 | }
253 | }
254 | } else {
255 | resp.Error = NotEnoughArgsMsg
256 | }
257 | return resp
258 | }
259 |
// Export a discfg to file in JSON format
// NOTE(review): not yet implemented; presumably args will carry the
// destination file path - confirm intended CLI shape before implementing.
func Export(opts config.Options, args []string) {
	// TODO
}
264 |
--------------------------------------------------------------------------------
/commands/commands_test.go:
--------------------------------------------------------------------------------
1 | package commands
2 |
3 | import (
4 | . "github.com/smartystreets/goconvey/convey"
5 | "github.com/tmaiaroto/discfg/config"
6 | "github.com/tmaiaroto/discfg/storage"
7 | "github.com/tmaiaroto/discfg/storage/mockdb"
8 | "io/ioutil"
9 | "log"
10 | "os"
11 | "testing"
12 | )
13 |
// TestCreateCfg is a placeholder (TODO: exercise CreateCfg via the mock shipper).
func TestCreateCfg(t *testing.T) {
}

// TestDeleteCfg is a placeholder (TODO).
func TestDeleteCfg(t *testing.T) {
}

// TestUpdateCfg is a placeholder (TODO).
func TestUpdateCfg(t *testing.T) {
}

// TestUse is a placeholder (TODO).
func TestUse(t *testing.T) {
}
23 |
// TestWhich covers both branches of Which: no .discfg file present, and a
// .discfg file containing a config name.
func TestWhich(t *testing.T) {
	Convey("Should return an error when no .discfg exists", t, func() {
		storage.RegisterShipper("mock", mockdb.MockShipper{})
		var opts = config.Options{StorageInterfaceName: "mock", Version: "0.0.0"}
		r := Which(opts)
		So(r.Action, ShouldEqual, "which")
		So(r.Error, ShouldEqual, NoCurrentWorkingCfgMsg)
	})

	Convey("Should return a ResponseObject with the current working config", t, func() {
		// Create a temporary .discfg file for this case; removed again below.
		_ = ioutil.WriteFile(".discfg", []byte("testcfg"), 0644)

		storage.RegisterShipper("mock", mockdb.MockShipper{})
		var opts = config.Options{StorageInterfaceName: "mock", Version: "0.0.0"}
		r := Which(opts)
		So(r.Action, ShouldEqual, "which")
		So(r.CurrentDiscfg, ShouldEqual, "testcfg")

		_ = os.Remove(".discfg")
	})
}
45 |
// TestSetKey verifies SetKey's input-validation guards (missing value, key
// name, and config name). The happy path is exercised indirectly elsewhere.
func TestSetKey(t *testing.T) {
	Convey("Should return a ResponseObject with an Error message if no value was provided", t, func() {
		storage.RegisterShipper("mock", mockdb.MockShipper{})
		var opts = config.Options{StorageInterfaceName: "mock", Version: "0.0.0"}
		r := SetKey(opts)
		So(r.Action, ShouldEqual, "set")
		So(r.Error, ShouldEqual, ValueRequiredMsg)
	})

	Convey("Should return a ResponseObject with an Error message if no key name was provided", t, func() {
		storage.RegisterShipper("mock", mockdb.MockShipper{})
		var opts = config.Options{StorageInterfaceName: "mock", Version: "0.0.0", CfgName: "mockcfg", Value: []byte("test")}
		r := SetKey(opts)
		So(r.Action, ShouldEqual, "set")
		So(r.Error, ShouldEqual, MissingKeyNameMsg)
	})

	Convey("Should return a ResponseObject with an Error message if no config name was provided", t, func() {
		storage.RegisterShipper("mock", mockdb.MockShipper{})
		var opts = config.Options{StorageInterfaceName: "mock", Version: "0.0.0", Value: []byte("test"), Key: "test"}
		r := SetKey(opts)
		So(r.Action, ShouldEqual, "set")
		So(r.Error, ShouldEqual, MissingCfgNameMsg)
	})
}
71 |
// TestGetKey exercises GetKey against the mock storage shipper: a plain value,
// an "encoded" value, and the missing-key-name validation error.
func TestGetKey(t *testing.T) {
	Convey("Should return a ResponseObject with the key value", t, func() {
		storage.RegisterShipper("mock", mockdb.MockShipper{})
		var opts = config.Options{StorageInterfaceName: "mock", Version: "0.0.0", CfgName: "mockcfg", Key: "initial"}
		r := GetKey(opts)
		So(r.Action, ShouldEqual, "get")
		So(r.Item.Version, ShouldEqual, int64(1))
		So(string(r.Item.Value.([]byte)), ShouldEqual, "initial value for test")
	})

	Convey("Should return a ResponseObject with the key value", t, func() {
		storage.RegisterShipper("mock", mockdb.MockShipper{})
		var opts = config.Options{StorageInterfaceName: "mock", Version: "0.0.0", CfgName: "mockcfg", Key: "encoded"}
		r := GetKey(opts)
		So(r.Action, ShouldEqual, "get")
		So(r.Item.Version, ShouldEqual, int64(1))
		//So(string(r.Item.Value.([]byte)), ShouldEqual, "initial value for test")
		// TODO(review): this only logs the formatted value; add a real assertion.
		log.Println(FormatJSONValue(r).Item.Value)
	})

	Convey("Should return a ResponseObject with an Error message if no key name was provided", t, func() {
		storage.RegisterShipper("mock", mockdb.MockShipper{})
		var opts = config.Options{StorageInterfaceName: "mock", Version: "0.0.0", CfgName: "mockcfg"}
		r := GetKey(opts)
		So(r.Action, ShouldEqual, "get")
		So(r.Error, ShouldEqual, MissingKeyNameMsg)
	})
}
100 |
// TestDeleteKey verifies DeleteKey's validation error. Note it expects
// NotEnoughArgsMsg (not MissingKeyNameMsg) - DeleteKey reports that message
// when the key name fails validation.
func TestDeleteKey(t *testing.T) {
	Convey("Should return a ResponseObject with an Error message if not enough arguments were provided", t, func() {
		storage.RegisterShipper("mock", mockdb.MockShipper{})
		var opts = config.Options{StorageInterfaceName: "mock", Version: "0.0.0"}
		r := DeleteKey(opts)
		So(r.Action, ShouldEqual, "delete")
		So(r.Error, ShouldEqual, NotEnoughArgsMsg)
	})
}
110 |
// TestInfo checks the config metadata Info returns from the mock shipper
// (version, state, modified timestamps) and the missing-name validation error.
func TestInfo(t *testing.T) {
	Convey("Should return a ResponseObject with info about the config", t, func() {
		storage.RegisterShipper("mock", mockdb.MockShipper{})
		var opts = config.Options{StorageInterfaceName: "mock", Version: "0.0.0", CfgName: "mockcfg"}
		r := Info(opts)
		So(r.Action, ShouldEqual, "info")
		So(r.CfgVersion, ShouldEqual, int64(4))
		So(r.CfgState, ShouldEqual, "ACTIVE")
		So(r.CfgModifiedNanoseconds, ShouldEqual, int64(1464675792991825937))
		So(r.CfgModified, ShouldEqual, int64(1464675792))
		// So(r.CfgModifiedParsed, ShouldEqual, "2016-05-30T23:23:12-07:00")
		// ^ System timezone running the tests could make this fail - doh!
		So(r.CfgModifiedParsed, ShouldHaveSameTypeAs, "string")
	})

	Convey("Should return a ResponseObject with an Error message if not enough arguments were provided", t, func() {
		storage.RegisterShipper("mock", mockdb.MockShipper{})
		var opts = config.Options{StorageInterfaceName: "mock", Version: "0.0.0"}
		r := Info(opts)
		So(r.Action, ShouldEqual, "info")
		So(r.Error, ShouldEqual, NotEnoughArgsMsg)
	})
}
134 |
// TestExport is a placeholder; Export itself is not yet implemented.
func TestExport(t *testing.T) {
}
137 |
--------------------------------------------------------------------------------
/commands/util.go:
--------------------------------------------------------------------------------
1 | // Package commands utilities and response structs, constants, etc.
2 | package commands
3 |
4 | import (
5 | //"encoding/base64"
6 | "encoding/json"
7 | "errors"
8 | "fmt"
9 | "regexp"
10 | //"strconv"
11 | //"github.com/pquerna/ffjson/ffjson"
12 | ct "github.com/daviddengcn/go-colortext"
13 | "github.com/tmaiaroto/discfg/config"
14 | "io/ioutil"
15 | "time"
16 | )
17 |
// TODO: Refactor: Change the following ...Msg constants and use config/status.go instead to centralize the error codes and messages.

// NotEnoughArgsMsg defines a message for input validation
const NotEnoughArgsMsg = "Not enough arguments passed. Run 'discfg help' for usage."

// ValueRequiredMsg defines a message for input validation
const ValueRequiredMsg = "A value is required. Run 'discfg help' for usage."

// DiscfgFileName defines the temporary filename used to hold the current working config name
const DiscfgFileName = ".discfg"

// NoCurrentWorkingCfgMsg defines a message for an error when a config name can not be found in a .discfg file
const NoCurrentWorkingCfgMsg = "No current working configuration has been set at this path."

// MissingKeyNameMsg defines a message for input validation when a key name was not passed
const MissingKeyNameMsg = "Missing key name"

// InvalidKeyNameMsg defines a message for input validation
const InvalidKeyNameMsg = "Invalid key name"

// MissingCfgNameMsg defines a message for input validation
const MissingCfgNameMsg = "Missing configuration name"
40 |
// Out formats a config.ResponseObject for suitable output
func Out(opts config.Options, resp config.ResponseObject) config.ResponseObject {
	// We've stored everything as binary data. But that can be many things.
	// A string, a number, or even JSON. We can check to see if it's something we can marshal to JSON.
	// If that fails, then we'll just return it as a string in the JSON response under the "value" key.
	//
	// If it isn't JSON, then return a base64 string.
	// TODO: Add Content-Type field of some sort so there's some context?
	//
	// TODO: Stuff like this will now be handled by an output interface.
	// ...and will also handle the content-type situation.
	// Output JSON, output Msgpack, output Protobuf? output whatever Content-Type.
	//
	// if resp.Item.Value != nil {
	// 	if !isJSON(string(resp.Item.Value)) {
	// 		// Return base64 when not JSON?
	// 		// b64Str := base64.StdEncoding.EncodeToString(resp.Item.Value)
	// 		//resp.Item.Value = []byte(strconv.Quote(b64Str))
	// 		resp.Item.Value = []byte(strconv.Quote(string(resp.Item.Value)))
	// 	}
	// 	// The output value is always raw JSON. It is not stored in the data store.
	// 	// It's simply for display.
	// 	resp.Item.OutputValue = json.RawMessage(resp.Item.Value)
	// }

	// // Same for the PrevItem if set
	// if resp.PrevItem.Value != nil {
	// 	if !isJSON(string(resp.PrevItem.Value)) {
	// 		resp.PrevItem.Value = []byte(strconv.Quote(string(resp.PrevItem.Value)))
	// 	}
	// 	resp.PrevItem.OutputValue = json.RawMessage(resp.PrevItem.Value)
	// }

	// Format the expiration time (if applicable). This prevents output like "0001-01-01T00:00:00Z" when empty
	// and allows for the time.RFC3339Nano format to be used whereas time.Time normally marshals to a different format.
	if resp.Item.TTL > 0 {
		resp.Item.OutputExpiration = resp.Item.Expiration.Format(time.RFC3339Nano)
	}

	switch opts.OutputFormat {
	case "json":
		o, _ := json.Marshal(&resp)
		// TODO: Benchmark this - is it faster?
		// o, _ := ffjson.Marshal(&resp)
		//
		// TODO: verbose mode here too? Shouldn't be in a situation where it can't be marshaled but who knows.
		// Always best to handle errors.
		// if(oErr) {
		// 	errorLabel("Error")
		// 	fmt.Print(oErr)
		// }
		fmt.Print(string(o))
	case "human":
		if resp.Error != "" {
			errorLabel(resp.Error)
		}
		if resp.Item.Value != nil {
			// The value should be a byte array, for the CLI we want a string.
			// NOTE(review): this type assertion panics if Value is not []byte - confirm all callers pass raw bytes here.
			fmt.Println(string(resp.Item.Value.([]byte)))
		} else {
			if resp.Message != "" {
				fmt.Println(resp.Message)
			}
		}
	}
	// Any unrecognized OutputFormat falls through silently; the response is still returned.
	return resp
}
108 |
109 | // Changes the color for error messages. Good for one line heading. Any lengthy response should probably not be colored with a red background.
110 | func errorLabel(message string) {
111 | ct.ChangeColor(ct.White, true, ct.Red, false)
112 | fmt.Print(message)
113 | ct.ResetColor()
114 | fmt.Println("")
115 | }
116 |
117 | // Changes the color for the messages to green for success.
118 | func successLabel(message string) {
119 | ct.Foreground(ct.Green, true)
120 | fmt.Print(message)
121 | ct.ResetColor()
122 | fmt.Println("")
123 | }
124 |
125 | // GetDiscfgNameFromFile simply returns the name of the set discfg name (TODO: will need to change as .discfg gets more complex).
126 | func GetDiscfgNameFromFile() string {
127 | name := ""
128 | currentCfg, err := ioutil.ReadFile(DiscfgFileName)
129 | if err == nil {
130 | name = string(currentCfg)
131 | }
132 | return name
133 | }
134 |
// substr returns the substring of s starting at rune position pos, at most
// length runes long. It operates on runes (not bytes) so multi-byte UTF-8
// characters are never split. Out-of-range positions and negative lengths are
// clamped instead of panicking (the previous version sliced unchecked).
func substr(s string, pos, length int) string {
	runes := []rune(s)
	if pos < 0 {
		pos = 0
	}
	if pos > len(runes) {
		pos = len(runes)
	}
	end := pos + length
	if end > len(runes) {
		end = len(runes)
	}
	if end < pos {
		end = pos
	}
	return string(runes[pos:end])
}
144 |
145 | // Checks and formats the key name.
146 | func formatKeyName(key string) (string, error) {
147 | var err error
148 | k := ""
149 | if len(key) > 0 {
150 | k = key
151 | } else {
152 | return "", errors.New(MissingKeyNameMsg)
153 | }
154 |
155 | // Ensure valid characters
156 | r, _ := regexp.Compile(`[\w\/\-]+$`)
157 | if !r.MatchString(k) {
158 | return "", errors.New(InvalidKeyNameMsg)
159 | }
160 |
161 | // Remove any trailing slashes (unless there's only one, the root).
162 | // NOTE: A tree structure is not yet supported. The user can define one, but there are no recursive features when getting/deleting.
163 | // This may come in a future version, for now the structure is flat. However, convention set by other tools (along with REST API endpoints)
164 | // makes using slashes a natural fit and discfg will assume they are being used. It could be thought of as a namespace.
165 | if len(k) > 1 {
166 | for k[len(k)-1:] == "/" {
167 | k = k[:len(k)-1]
168 | }
169 | }
170 |
171 | return k, err
172 | }
173 |
// isJSONString reports whether s is a valid JSON-encoded string value
// (e.g. `"hello"`), as opposed to a bare string or some other JSON type.
func isJSONString(s string) bool {
	var decoded string
	return json.Unmarshal([]byte(s), &decoded) == nil
}
// isJSON reports whether s parses as a JSON object (arrays and scalars do not count).
func isJSON(s string) bool {
	var obj map[string]interface{}
	err := json.Unmarshal([]byte(s), &obj)
	return err == nil
}
184 |
185 | // FormatJSONValue sets the Item Value (an interface{}) as a map[string]interface{} so it can be output as JSON.
186 | // The stored value could actually be JSON so it tries to Unmarshal. If it can't, it will just be a string value
187 | // in the response object which will already be JSON (ie. {"value": "the string value"}).
188 | func FormatJSONValue(resp config.ResponseObject) config.ResponseObject {
189 | // Don't attempt to Unmarshal or anything if the Value is empty. We wouldn't want to create a panic now.
190 | if resp.Item.Value != nil {
191 | resp.Item.Value = string(resp.Item.Value.([]byte))
192 |
193 | // TODO: Perhaps an option for storing/retrieving data...
194 | // The value could be base64 encoded, but it need not be.
195 | // val, err := base64.StdEncoding.DecodeString(resp.Item.Value.(string)) //`eyJ1cGRhdGVkIjogImZyaWRheSJ9`)
196 | // if err == nil && val != nil {
197 | // resp.Item.Value = string(val)
198 | // }
199 |
200 | // Try to unmarshal to map if JSON string
201 | var jsonData map[string]interface{}
202 | err := json.Unmarshal([]byte(resp.Item.Value.(string)), &jsonData)
203 | if err == nil {
204 | resp.Item.Value = jsonData
205 | }
206 | }
207 |
208 | // The previous value as well
209 | if resp.PrevItem.Value != nil {
210 | resp.PrevItem.Value = string(resp.PrevItem.Value.([]byte))
211 |
212 | // TODO: Perhaps an option for storing/retrieving data...
213 | // The value could be base64 encoded, but it need not be.
214 | // val, err := base64.StdEncoding.DecodeString(resp.PrevItem.Value.(string)) //`eyJ1cGRhdGVkIjogImZyaWRheSJ9`)
215 | // if err == nil && val != nil {
216 | // resp.PrevItem.Value = string(val)
217 | // }
218 |
219 | var jsonData map[string]interface{}
220 | err := json.Unmarshal([]byte(resp.PrevItem.Value.(string)), &jsonData)
221 | if err == nil {
222 | resp.PrevItem.Value = jsonData
223 | }
224 | }
225 |
226 | return resp
227 | }
228 |
--------------------------------------------------------------------------------
/commands/util_test.go:
--------------------------------------------------------------------------------
1 | package commands
2 |
3 | import (
4 | . "github.com/smartystreets/goconvey/convey"
5 | "github.com/tmaiaroto/discfg/config"
6 | "github.com/tmaiaroto/discfg/storage"
7 | "github.com/tmaiaroto/discfg/storage/mockdb"
8 | "io/ioutil"
9 | "os"
10 | "testing"
11 | )
12 |
// TestOut is a placeholder (TODO: Out writes to stdout, so testing it requires capturing output).
func TestOut(t *testing.T) {
}
// TestGetDiscfgNameFromFile covers both branches: a .discfg file present in
// the working directory, and no file at all.
func TestGetDiscfgNameFromFile(t *testing.T) {
	Convey("When a .discfg file is present", t, func() {
		Convey("The current working config name should be returned", func() {
			// Write a temporary .discfg for the test; removed again below.
			_ = ioutil.WriteFile(".discfg", []byte("testcfg"), 0644)

			c := GetDiscfgNameFromFile()
			So(c, ShouldEqual, "testcfg")

			_ = os.Remove(".discfg")
		})
	})

	Convey("When a .discfg file is not present", t, func() {
		Convey("An empty string should be returned", func() {
			ce := GetDiscfgNameFromFile()
			So(ce, ShouldEqual, "")
		})
	})
}
34 |
// TestFormatJSONValue checks that a plain (non-JSON) stored value comes back
// as a string after formatting. The base64 case is parked below until that
// behavior is implemented.
func TestFormatJSONValue(t *testing.T) {
	Convey("Should handle basic string values", t, func() {
		storage.RegisterShipper("mock", mockdb.MockShipper{})
		var opts = config.Options{StorageInterfaceName: "mock", Version: "0.0.0", CfgName: "mockcfg", Key: "initial"}
		r := GetKey(opts)
		rFormatted := FormatJSONValue(r)
		So(rFormatted.Item.Value.(string), ShouldEqual, "initial value for test")
	})

	// Not yet
	// Convey("Should handle base64 encoded string values", t, func() {
	// 	storage.RegisterShipper("mock", mockdb.MockShipper{})
	// 	var opts = config.Options{StorageInterfaceName: "mock", Version: "0.0.0", CfgName: "mockcfg", Key: "encoded"}
	// 	r := GetKey(opts)
	// 	rFormatted := FormatJSONValue(r)
	// 	mapValue := map[string]interface{}{"updated": "friday"}
	// 	So(rFormatted.Item.Value.(map[string]interface{}), ShouldResemble, mapValue)
	// })
}
54 |
--------------------------------------------------------------------------------
/config/config.go:
--------------------------------------------------------------------------------
1 | // Package config defines various structures including the configuration.
2 | package config
3 |
4 | import (
5 | //"encoding/json"
6 | "time"
7 | )
8 |
// Options needed by various functions (set by CLI commands, config files, other code, etc.)
type Options struct {
	// CfgName is the name of the discfg configuration being operated on.
	CfgName string
	// ConditionalValue - presumably used for conditional updates; confirm against the storage shippers.
	ConditionalValue string
	Recursive        bool
	// Key and Value for key-level operations (Value is raw bytes; see Item notes below).
	Key   string
	Value []byte
	// TTL in seconds - TODO confirm unit against the storage shippers.
	TTL int64
	// StorageInterfaceName selects the registered storage shipper (e.g. "mock", DynamoDB).
	StorageInterfaceName string
	// Storage options, for now AWS is the only supported storage
	Storage struct {
		AWS
	}
	Version      string
	OutputFormat string
}
25 |
// AWS credentials and options
type AWS struct {
	Region          string // AWS region, e.g. "us-east-1"
	AccessKeyID     string // explicit credentials (optional when a profile is used)
	SecretAccessKey string
	SessionToken    string // for temporary (STS) credentials
	CredProfile     string // named credentials profile, an alternative to explicit keys
}
34 |
// ResponseObject for output
type ResponseObject struct {
	// Action is the command that produced this response (e.g. "get", "set", "info").
	Action string `json:"action"`
	// Item is the current item; PrevItem holds the prior state for set/delete.
	Item     Item `json:"item,omitempty"`
	PrevItem Item `json:"prevItem,omitempty"`
	ErrorCode int `json:"errorCode,omitempty"`
	// CurrentDiscfg is the active working config name (set by "use"/"which").
	CurrentDiscfg string `json:"currentDiscfg,omitempty"`
	// Error message
	Error string `json:"error,omitempty"`
	// Message returned to the CLI
	Message string `json:"message,omitempty"`
	// Add this? Might be useful for troubleshooting, but users shouldn't really need to worry about it.
	// On the other hand, for things like DynamoDB, it's handy to know where the config stands in terms of scalability/capacity.
	// For things like S3, there's no real settings to care about (for the most part at least).
	// StorageResponse interface{} `json:"storageResponse,omitempty"`
	// Information about the config
	CfgVersion int64 `json:"cfgVersion,omitempty"`
	// In seconds since that's probably more common for people
	CfgModified int64 `json:"cfgModified,omitempty"`
	// In nanoseconds for the gophers like me who are snobby about time =)
	CfgModifiedNanoseconds int64 `json:"cfgModifiedNanoseconds,omitempty"`
	// A parsed date for humans to read
	CfgModifiedParsed string `json:"cfgModifiedParsed,omitempty"`
	// Configuration state (some storage engines, such as DynamoDB, have "active" and "updating" states)
	CfgState string `json:"cfgState,omitempty"`
	// Information about the configuration storage
	CfgStorage StorageInfo `json:"cfgStorage,omitempty"`
}
63 |
// StorageInfo holds information about the storage engine used for the configuration
type StorageInfo struct {
	Name          string                 `json:"name"`          // engine display name (from storage.Name)
	InterfaceName string                 `json:"interfaceName"` // registered shipper name (from Options.StorageInterfaceName)
	Options       map[string]interface{} `json:"options"`       // engine-specific options (from storage.Options)
}
70 |
71 | // NOTES ON ITEMS (somewhat similar to etcd's nodes):
72 | // Unlike etcd, there is no "index" key because discfg doesn't try to be a state machine like etcd.
73 | // The index there refers to some internal state of the entire system and certain actions advance that state.
74 | // discfg does not have a distributed lock system nor this sense of global state.
75 | //
76 | // However, it is useful for applications (and humans) to get a sense of change. So two thoughts:
77 | // 1. An "id" value using snowflake (so it's roughly sortable - the thought being sequential enough for discfg's needs)
78 | // 2. A "version" value that simply increments on each update
79 | //
80 | // If using snowflake ids, it would make sense to add those values as part of the index (RANGE). It would certainly
// help DynamoDB distribute the data...
82 | // See: http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/GuidelinesForTables.html#GuidelinesForTables.UniformWorkloa
83 | // And: http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/WorkingWithTables.html#WorkingWithTables.primary.key
84 | //
85 | // The challenge then here is there wouldn't be conditional updates by DynamoDB design. Those would need to be added
86 | // and it would require more queries. The database would be append only (which has its own benefits). Then there would
// eventually need to be some sort of expiration on old items. Since one of the goals of discfg is cost efficiency,
88 | // it doesn't make sense to keep old items around. Plus, going backwards in time is not a typical need for a configuration
89 | // service. The great thing about etcd's state here is the ability to watch for changes and should that HTTP connection
90 | // be interrupted, it could be resumed from a specific point. This is just one reason for that state index.
91 | //
92 | // discfg does not have this feature. There is no way to watch for a key update because discfg is not meant to run in
93 | // persistence. The data is of course, but the service is not. It's designed to run on demand CLI or AWS Lambda.
94 | // It's simply a different design decision in order to hit a goal. discfg's answer for this need would be to reach for
95 | // other AWS services to push notifications out (SNS), add to a message queue (SQS), etc.
96 | //
97 | // So with that in mind, a simple version is found on each item. While a bit naive, it's effective for many situations.
98 | // Not seen on this struct (for now), but stored in DynamoDB is also a list of the parent items (full paths).
99 | // This is for traversing needs.
100 | //
101 | // Another great piece of DynamoDB documentation with regard to counters and conditional writes can be found here:
102 | // http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/WorkingWithItems.html#WorkingWithItems.AtomicCounters
103 | //
104 | // Again, this highlights discfg's source of inspiration (etcd) and difference from it.
105 | //
106 | // TODO: Content-Type?
107 | // Value will be an interface{}, but stored as []byte in DynamoDB.
108 | // Other storage engines may convert to something else.
109 | // For now, all data is coming in as string. Either from the terminal or a RESTful API.
110 |
// Item defines the data structure around a key and its state.
type Item struct {
	// Version is a per-item counter incremented on each write (see the
	// atomic counter notes in the comments above).
	Version int64  `json:"version,omitempty"`
	Key     string `json:"key,omitempty"`
	//Value []byte `json:"value,omitempty"`
	// Value holds the key's data. Kept as interface{} so storage engines can
	// choose their own encoding; DynamoDB stores it as []byte.
	Value interface{} `json:"value,omitempty"`
	// OutputValue is a JSON-friendly representation of Value used when
	// formatting responses.
	OutputValue map[string]interface{} `json:"ovalue,omitempty"`

	// perfect for json, not good if some other value was stored
	//OutputValue map[string]interface{} `json:"value,omitempty"`
	// We really need interface{} for any type of data. []byte above is for DynamoDB specifically.
	// It could be ... yea. an interface{} too. converted to []byte for storing in dynamodb.
	//OutputValue interface{} `json:"value,omitempty"`
	// TTL is the time to live; 0 means no expiration. Presumably expressed in
	// seconds (the DynamoDB engine computes expiry as now + TTL seconds).
	TTL int64 `json:"ttl,omitempty"`
	// Expiration is the computed expiry instant; never serialized directly.
	Expiration time.Time `json:"-"`
	// OutputExpiration is the string form of Expiration used in responses.
	OutputExpiration string `json:"expiration,omitempty"`
	// For now, skip this. The original thinking was to have a tree like directory structure like etcd.
	// Though discfg has now deviated away from that to a flat key/value structure.
	// Items []Item `json:"items,omitepty"`
	// CfgVersion and CfgModifiedNanoseconds carry config-level state; internal
	// only, never serialized.
	CfgVersion             int64 `json:"-"`
	CfgModifiedNanoseconds int64 `json:"-"`
}
133 |
--------------------------------------------------------------------------------
/config/status.go:
--------------------------------------------------------------------------------
1 | package config
2 |
// discfg status codes (not unlike HTTP status codes, but different numbers)
const (
	// EcodeKeyNotFound = 100
	// StatusContinue mirrors HTTP 100 Continue.
	StatusContinue = 100
	// StatusSwitchingProtocols mirrors HTTP 101 Switching Protocols.
	StatusSwitchingProtocols = 101

	// StatusOK mirrors HTTP 200 OK.
	StatusOK = 200
	// StatusCreated mirrors HTTP 201 Created.
	StatusCreated = 201
	// StatusAccepted mirrors HTTP 202 Accepted.
	StatusAccepted = 202
)
13 |
// statusText maps discfg status codes to human-readable messages.
// Looked up by StatusText; unknown codes yield the empty string.
var statusText = map[int]string{
	// EcodeKeyNotFound: "Key not found",
	StatusContinue:           "Continue",
	StatusSwitchingProtocols: "Switching Protocols",

	StatusOK:       "OK",
	StatusCreated:  "Created",
	StatusAccepted: "Accepted",
}
23 |
24 | // StatusText returns a text for the discfg status code.
25 | // It returns the empty string if the code is unknown.
26 | func StatusText(code int) string {
27 | return statusText[code]
28 | }
29 |
--------------------------------------------------------------------------------
/config/status_test.go:
--------------------------------------------------------------------------------
1 | package config
2 |
3 | import (
4 | . "github.com/smartystreets/goconvey/convey"
5 | "testing"
6 | )
7 |
8 | func TestStatusText(t *testing.T) {
9 | Convey("Should return status text string for passed code int", t, func() {
10 | s := StatusText(100)
11 | So(s, ShouldEqual, "Continue")
12 | })
13 | }
14 |
--------------------------------------------------------------------------------
/docs/TODO.md:
--------------------------------------------------------------------------------
1 | Writing documentation is still something that needs to be done.
2 |
3 | I'm thinking of using Hugo to do it and then hosting through GitHub pages.
4 |
5 | https://gohugo.io/tutorials/github-pages-blog/
6 |
7 | Possibly add search? http://lunrjs.com/
--------------------------------------------------------------------------------
/docs/logo.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/tmaiaroto/discfg/8637ec6f4f816b3726ab067d89cace01017351b4/docs/logo.png
--------------------------------------------------------------------------------
/main.go:
--------------------------------------------------------------------------------
1 | package main
2 |
import (
	"bufio"
	"bytes"
	"encoding/json"
	"fmt"
	"io/ioutil"
	"os"
	"strings"
	"time"

	"github.com/spf13/cobra"
	"github.com/tmaiaroto/discfg/commands"
	"github.com/tmaiaroto/discfg/config"
	"github.com/tmaiaroto/discfg/version"
)
16 |
// Blank assignments keep the time and bytes imports from tripping the
// compiler while they are only used intermittently.
var _ time.Duration
var _ bytes.Buffer

// Options for the configuration; defaults to the DynamoDB storage engine.
var Options = config.Options{StorageInterfaceName: "dynamodb", Version: version.Semantic}

// dataFile for loading data for a key from file using the CLI (see the -d flag).
var dataFile = ""
25 |
// DiscfgCmd defines the parent discfg command.
var DiscfgCmd = &cobra.Command{
	Use:   "discfg",
	Short: "discfg is a distributed configuration service",
	Long:  `A distributed configuration service using Amazon Web Services.`,
	// Intentionally a no-op; running `discfg` bare relies on cobra's defaults.
	Run: func(cmd *cobra.Command, args []string) {},
}
33 |
// versionCmd displays the discfg version.
var versionCmd = &cobra.Command{
	Use:   "version",
	Short: "discfg version number",
	Long:  `Displays the version number for discfg`,
	Run: func(cmd *cobra.Command, args []string) {
		fmt.Println("discfg v" + Options.Version)
	},
}
43 |
// cfgCmd is the parent for configuration management subcommands
// (use, which, create, delete, update, info).
var cfgCmd = &cobra.Command{
	Use:   "cfg",
	Short: "manage discfg configurations",
	Long:  `Creates and deletes discfg configurations`,
	Run: func(cmd *cobra.Command, args []string) {
	},
}
// useCmd pins a config name for the current path so later commands can omit it.
var useCmd = &cobra.Command{
	Use:   "use",
	Short: "use a specific discfg",
	Long:  `For the current path, always use a specific discfg`,
	Run: func(cmd *cobra.Command, args []string) {
		// The config name argument is optional here.
		if len(args) > 0 {
			Options.CfgName = args[0]
		}
		resp := commands.Use(Options)
		commands.Out(Options, resp)
	},
}
// whichCmd reports which discfg is currently selected for use at the current path.
var whichCmd = &cobra.Command{
	Use:   "which",
	Short: "shows current discfg in use",
	Long:  `Shows which discfg is currently selected for use at the current path`,
	Run: func(cmd *cobra.Command, args []string) {
		resp := commands.Which(Options)
		commands.Out(Options, resp)
	},
}
72 | var createCfgCmd = &cobra.Command{
73 | Use: "create",
74 | Short: "create config",
75 | Long: `Creates a new discfg distributed configuration`,
76 | Run: func(cmd *cobra.Command, args []string) {
77 | var settings map[string]interface{}
78 | switch len(args) {
79 | case 1:
80 | settings = map[string]interface{}{}
81 | Options.CfgName = args[0]
82 | break
83 | case 2:
84 | Options.CfgName = args[0]
85 | if err := json.Unmarshal([]byte(args[0]), &settings); err != nil {
86 | commands.Out(Options, config.ResponseObject{Action: "create", Error: err.Error()})
87 | }
88 | }
89 |
90 | resp := commands.CreateCfg(Options, settings)
91 | commands.Out(Options, resp)
92 | },
93 | }
94 | var deleteCfgCmd = &cobra.Command{
95 | Use: "delete",
96 | Short: "delete config",
97 | Long: `Deletes a discfg distributed configuration`,
98 | Run: func(cmd *cobra.Command, args []string) {
99 | if len(args) > 0 {
100 | Options.CfgName = args[0]
101 | // Confirmation
102 | inputReader := bufio.NewReader(os.Stdin)
103 | cfgCmd.Print("Are you sure? [Y/n] ")
104 | input, _ := inputReader.ReadString('\n')
105 | if input != "Y\n" {
106 | DiscfgCmd.Println("Aborted")
107 | return
108 | }
109 | }
110 | resp := commands.DeleteCfg(Options)
111 | commands.Out(Options, resp)
112 | },
113 | }
114 | var updateCfgCmd = &cobra.Command{
115 | Use: "update",
116 | Short: "update config storage settings",
117 | Long: `Adjusts options for a config's storage engine`,
118 | Run: func(cmd *cobra.Command, args []string) {
119 | name := commands.GetDiscfgNameFromFile()
120 | Options.CfgName = name
121 | var settings map[string]interface{}
122 |
123 | switch len(args) {
124 | case 1:
125 | if err := json.Unmarshal([]byte(args[0]), &settings); err != nil {
126 | commands.Out(Options, config.ResponseObject{Action: "update", Error: err.Error()})
127 | }
128 | break
129 | case 2:
130 | Options.CfgName = args[0]
131 | if err := json.Unmarshal([]byte(args[0]), &settings); err != nil {
132 | commands.Out(Options, config.ResponseObject{Action: "update", Error: err.Error()})
133 | }
134 | }
135 | resp := commands.UpdateCfg(Options, settings)
136 | commands.Out(Options, resp)
137 | },
138 | }
// infoCmd shows information about the config including version and modified time.
var infoCmd = &cobra.Command{
	Use:   "info",
	Short: "config information",
	Long:  `Information about the config including version and modified time`,
	Run: func(cmd *cobra.Command, args []string) {
		setOptsFromArgs(args)
		resp := commands.Info(Options)
		commands.Out(Options, resp)
	},
}
149 |
// setCmd sets a key value for a given discfg.
// Positional arguments are interpreted by setOptsFromArgs.
var setCmd = &cobra.Command{
	Use:   "set",
	Short: "set key value",
	Long:  `Sets a key value for a given discfg`,
	Run: func(cmd *cobra.Command, args []string) {
		setOptsFromArgs(args)
		resp := commands.SetKey(Options)
		commands.Out(Options, resp)
	},
}
// getCmd gets a key value for a given discfg.
// Positional arguments are interpreted by setOptsFromArgs.
var getCmd = &cobra.Command{
	Use:   "get",
	Short: "get key value",
	Long:  `Gets a key value for a given discfg`,
	Run: func(cmd *cobra.Command, args []string) {
		setOptsFromArgs(args)
		resp := commands.GetKey(Options)
		commands.Out(Options, resp)
	},
}
// deleteCmd deletes a key for a given discfg.
// Positional arguments are interpreted by setOptsFromArgs.
var deleteCmd = &cobra.Command{
	Use:   "delete",
	Short: "delete key",
	Long:  `Deletes a key for a given discfg`,
	Run: func(cmd *cobra.Command, args []string) {
		setOptsFromArgs(args)
		resp := commands.DeleteKey(Options)
		commands.Out(Options, resp)
	},
}
// exportCmd exports the entire discfg to a file in JSON format.
var exportCmd = &cobra.Command{
	Use:   "export",
	Short: "export entire config",
	Long:  `Exports the entire discfg to a file in JSON format`,
	Run: func(cmd *cobra.Command, args []string) {
		commands.Export(Options, args)
	},
}
188 |
189 | func main() {
190 | // Set up commands
191 | DiscfgCmd.AddCommand(versionCmd)
192 | DiscfgCmd.PersistentFlags().StringVarP(&Options.OutputFormat, "format", "f", "human", "Output format for responses (human|json|slient)")
193 |
194 | // AWS options & credentials
195 | DiscfgCmd.PersistentFlags().StringVarP(&Options.Storage.AWS.Region, "region", "l", "us-east-1", "AWS Region to use")
196 | DiscfgCmd.PersistentFlags().StringVarP(&Options.Storage.AWS.AccessKeyID, "keyId", "k", "", "AWS Access Key ID")
197 | DiscfgCmd.PersistentFlags().StringVarP(&Options.Storage.AWS.SecretAccessKey, "secretKey", "s", "", "AWS Secret Access Key")
198 | DiscfgCmd.PersistentFlags().StringVarP(&Options.Storage.AWS.CredProfile, "credProfile", "p", "", "AWS Credentials Profile to use")
199 |
200 | // Additional options by some operations
201 | DiscfgCmd.PersistentFlags().StringVarP(&dataFile, "data", "d", "", "Data file to read for value")
202 | DiscfgCmd.PersistentFlags().StringVarP(&Options.ConditionalValue, "condition", "c", "", "Conditional operation value")
203 | DiscfgCmd.PersistentFlags().Int64VarP(&Options.TTL, "ttl", "t", 0, "Set a time to live for a key (0 is no TTL)")
204 |
205 | DiscfgCmd.AddCommand(cfgCmd, setCmd, getCmd, deleteCmd, infoCmd)
206 | cfgCmd.AddCommand(useCmd)
207 | cfgCmd.AddCommand(whichCmd)
208 | cfgCmd.AddCommand(createCfgCmd)
209 | cfgCmd.AddCommand(deleteCfgCmd)
210 | cfgCmd.AddCommand(updateCfgCmd)
211 | cfgCmd.AddCommand(infoCmd)
212 | DiscfgCmd.Execute()
213 | }
214 |
215 | // Takes positional command arguments and sets options from them (because some may be optional)
216 | func setOptsFromArgs(args []string) {
217 | // The user may have set a config name in a `.discfg` file, for convenience, to shorten the commands.
218 | // This will affect the positional arguments. The confusing part will be if a config name has been
219 | // set and then the user forgets and puts the config name in the positional arguments. To avoid
220 | // this, a check against the first argument and the config name is made...But that means setting
221 | // a key name the same as the config name requires 3 positional arguments.
222 | // I'm beginning to wonder if pulling this out was even worthwhile since some of it also depends
223 | // on the actual command.
224 | name := commands.GetDiscfgNameFromFile()
225 | if name != "" {
226 | Options.CfgName = name
227 | }
228 |
229 | switch len(args) {
230 | case 1:
231 | if Options.CfgName != "" && args[0] != Options.CfgName {
232 | Options.Key = args[0]
233 | } else {
234 | Options.CfgName = args[0]
235 | }
236 | break
237 | case 2:
238 | if Options.CfgName != "" && args[0] != Options.CfgName {
239 | Options.Key = args[0]
240 | Options.Value = []byte(args[1])
241 | } else {
242 | Options.CfgName = args[0]
243 | Options.Key = args[1]
244 | }
245 | break
246 | case 3:
247 | // 3 args always means a CfgName was passed. It couldn't mean anything else at this time.
248 | Options.CfgName = args[0]
249 | Options.Key = args[1]
250 | Options.Value = []byte(args[2])
251 | break
252 | }
253 |
254 | // A data file will overwrite Options.Value, even if set. Prefer the data file (if it can be read)
255 | // if both a value command line argument and a file path are specified.
256 | if dataFile != "" {
257 | b, err := ioutil.ReadFile(dataFile)
258 | if err == nil {
259 | Options.Value = b
260 | }
261 | }
262 | }
263 |
--------------------------------------------------------------------------------
/main_test.go:
--------------------------------------------------------------------------------
1 | package main
2 |
3 | import (
4 | . "github.com/smartystreets/goconvey/convey"
5 | "testing"
6 | )
7 |
// TestSetOptsFromArgs checks how positional CLI arguments populate the
// package-level Options (CfgName, Key, Value).
// NOTE(review): these expectations assume no `.discfg` file exists in the
// test working directory; otherwise GetDiscfgNameFromFile would pre-set
// CfgName and change how arguments are interpreted — confirm in CI.
func TestSetOptsFromArgs(t *testing.T) {
	Convey("When 1 argument is passed", t, func() {
		Convey("A CfgName option should be set, a Key and Value should not", func() {
			setOptsFromArgs([]string{"testCfg"})
			So(Options.CfgName, ShouldEqual, "testCfg")
			So(string(Options.Key), ShouldEqual, "")
			So(string(Options.Value), ShouldEqual, "")
		})
	})

	Convey("When 2 arguments are passed", t, func() {
		Convey("A CfgName and Key option should be set", func() {
			setOptsFromArgs([]string{"testCfg", "some key"})
			So(Options.CfgName, ShouldEqual, "testCfg")
			So(string(Options.Key), ShouldEqual, "some key")
			So(string(Options.Value), ShouldEqual, "")
		})
	})

	Convey("When 3 arguments are passed", t, func() {
		Convey("CfgName, Key, and Value options should be set", func() {
			setOptsFromArgs([]string{"testCfg", "some key", "some value"})
			So(Options.CfgName, ShouldEqual, "testCfg")
			So(Options.Key, ShouldEqual, "some key")
			So(string(Options.Value), ShouldEqual, "some value")
		})
	})
}
36 |
--------------------------------------------------------------------------------
/server/main.go:
--------------------------------------------------------------------------------
1 | package main
2 |
3 | import (
4 | // "flag"
5 | // "github.com/labstack/echo"
6 | // mw "github.com/labstack/echo/middleware"
7 | "github.com/tmaiaroto/discfg/config"
8 | "github.com/tmaiaroto/discfg/version"
9 | "log"
10 | )
11 |
// options holds the server's default configuration (DynamoDB storage engine).
var options = config.Options{StorageInterfaceName: "dynamodb", Version: version.Semantic}
13 |
// main is the HTTP server entry point. The echo-based server wiring is
// currently commented out; only debug logging is configured.
func main() {
	// TODO: remove this debug-friendly log configuration before release.
	log.SetFlags(log.Lshortfile | log.Ldate | log.Ltime)

	// port := *flag.String("port", "8899", "API port")
	// apiVersion := *flag.String("version", "v1", "API version")
	// region := *flag.String("region", "us-east-1", "AWS region")

	// options.Storage.AWS.Region = region

	// e := echo.New()

	// // Middleware
	// //e.Use(mw.Logger())
	// e.Use(mw.Gzip())
	// e.Use(mw.Recover())

	// // Routes
	// switch apiVersion {
	// default:
	// case "v1":
	// 	v1Routes(e)
	// }

	// // Start server
	// e.Run(":" + port)
}
41 |
--------------------------------------------------------------------------------
/server/v1.go:
--------------------------------------------------------------------------------
1 | // API Version 1
2 | package main
3 |
4 | // import (
5 | // "encoding/json"
6 | // //"encoding/base64"
7 | // "github.com/labstack/echo"
8 | // "github.com/tmaiaroto/discfg/commands"
9 | // "github.com/tmaiaroto/discfg/config"
10 | // //"github.com/ugorji/go/codec" // <-- may eventually be used as a different output format. have to see what echo supports.
11 | // "io/ioutil"
12 | // //"log"
13 | // "net/http"
14 | // )
15 |
16 | // // Set the routes for V1 API
17 | // func v1Routes(e *echo.Echo) {
18 | // e.Put("/v1/:name/keys/:key", v1SetKey)
19 | // e.Get("/v1/:name/keys/:key", v1GetKey)
20 | // e.Delete("/v1/:name/keys/:key", v1DeleteKey)
21 |
22 | // e.Put("/v1/:name/cfg", v1CreateCfg)
23 | // e.Delete("/v1/:name/cfg", v1DeleteCfg)
24 | // e.Patch("/v1/:name/cfg", v1PatchCfg)
25 | // e.Options("/v1/:name/cfg", v1OptionsCfg)
26 | // }
27 |
28 | // // Gets a key from discfg
29 | // func v1GetKey(c *echo.Context) error {
30 | // options.CfgName = c.Param("name")
31 | // options.Key = c.Param("key")
32 | // resp := commands.GetKey(options)
33 | // // Since this option is not needed for anything else, it's not held on the Options struct.
34 | // contentType := c.Query("type")
35 |
 36 | // // This is very awesome. Very interesting possibilities now.
37 | // switch contentType {
38 | // case "text", "text/plain", "string":
39 | // return c.String(http.StatusOK, string(resp.Item.Value.([]byte)))
40 | // break
41 | // // This one is going to be interesting. Weird? Bad practice? I don't know, but I dig it and it starts giving me wild ideas.
42 | // case "html", "text/html":
43 | // return c.HTML(http.StatusOK, string(resp.Item.Value.([]byte)))
44 | // break
45 | // // TODO:
46 | // //case "jsonp":
47 | // //break
48 | // case "json", "application/json":
49 | // resp = commands.FormatJSONValue(resp)
50 | // break
51 | // default:
52 | // resp = commands.FormatJSONValue(resp)
53 | // break
54 | // }
55 | // // default response
56 | // return c.JSON(http.StatusOK, resp)
57 | // }
58 |
59 | // // Sets a key in discfg
60 | // func v1SetKey(c *echo.Context) error {
61 | // options.CfgName = c.Param("name")
62 | // options.Key = c.Param("key")
63 | // resp := config.ResponseObject{
64 | // Action: "set",
65 | // }
66 |
67 | // // Allow the value to be passed via querystring param.
68 | // options.Value = []byte(c.Query("value"))
69 |
70 | // // Overwrite that if the request body passes a value that can be read, preferring that.
71 | // b, err := ioutil.ReadAll(c.Request().Body)
72 | // if err != nil {
73 | // // If reading the body failed and we don't have a value from the querystring parameter
74 | // // then we have a (potential) problem.
75 | // //
76 | // // Some data stores may be ok with an empty key value. DynamoDB is not. It will only
77 | // // return a ValidationException error. Plus, even if it was allowed, it would really
78 | // // confuse the user. Some random error reading the body of a request and poof, the data
79 | // // vanishes? That'd be terrible UX.
80 | // // log.Println(err)
81 | // resp.Error = err.Error()
82 | // resp.Message = "Something went wrong reading the body of the request."
83 | // // resp.ErrorCode = 500 <-- TODO: I need to come up with discfg specific error codes. Keep as const somewhere.
84 | // // return c.JSON(http.StatusOK, resp)
85 | // // Or maybe return an HTTP status message... This is outside discfg's concern. It's not an error message/code
86 | // // that would ever be seen from the CLI, right? Or maybe it would. Maybe a more generic, "error parsing key value" ...
87 | // } else if len(b) > 0 {
88 | // options.Value = b
89 | // }
90 |
91 | // resp = commands.SetKey(options)
92 |
93 | // return c.JSON(http.StatusOK, resp)
94 | // }
95 |
96 | // // Deletes a key in discfg
97 | // func v1DeleteKey(c *echo.Context) error {
98 | // options.CfgName = c.Param("name")
99 | // options.Key = c.Param("key")
100 | // return c.JSON(http.StatusOK, commands.DeleteKey(options))
101 | // }
102 |
103 | // // Creates a new configuration
104 | // func v1CreateCfg(c *echo.Context) error {
105 | // options.CfgName = c.Param("name")
106 | // resp := config.ResponseObject{
107 | // Action: "create cfg",
108 | // }
109 |
110 | // // Any settings to pass along to the storage interface (for example, ReadCapacityUnits and WriteCapacityUnits for DynamoDB).
111 | // var settings map[string]interface{}
112 | // b, err := ioutil.ReadAll(c.Request().Body)
113 | // if err != nil {
114 | // resp.Error = err.Error()
115 | // resp.Message = "Something went wrong reading the body of the request."
116 | // // resp.ErrorCode = 500 <-- TODO
117 | // } else if len(b) > 0 {
118 | // resp := config.ResponseObject{
119 | // Action: "create cfg",
120 | // }
121 | // //options.Value = b
122 | // if err := json.Unmarshal(b, &settings); err != nil {
123 | // resp.Error = err.Error()
124 | // resp.Message = "Something went wrong reading the body of the request."
125 | // return c.JSON(http.StatusOK, resp)
126 | // }
127 | // }
128 |
129 | // return c.JSON(http.StatusOK, commands.CreateCfg(options, settings))
130 | // }
131 |
132 | // // Deletes a configuration
133 | // func v1DeleteCfg(c *echo.Context) error {
134 | // options.CfgName = c.Param("name")
135 | // return c.JSON(http.StatusOK, commands.DeleteCfg(options))
136 | // }
137 |
138 | // // Sets options for a configuration
139 | // func v1PatchCfg(c *echo.Context) error {
140 | // options.CfgName = c.Param("name")
141 | // resp := config.ResponseObject{
142 | // Action: "info",
143 | // }
144 |
145 | // var settings map[string]interface{}
146 | // b, err := ioutil.ReadAll(c.Request().Body)
147 | // if err != nil {
148 | // resp.Error = err.Error()
149 | // resp.Message = "Something went wrong reading the body of the request."
150 | // // resp.ErrorCode = 500 <-- TODO
151 | // return c.JSON(http.StatusOK, resp)
152 | // } else if len(b) > 0 {
153 | // resp := config.ResponseObject{
154 | // Action: "update cfg",
155 | // }
156 | // //options.Value = b
157 | // if err := json.Unmarshal(b, &settings); err != nil {
158 | // resp.Error = err.Error()
159 | // resp.Message = "Something went wrong reading the body of the request."
160 | // return c.JSON(http.StatusOK, resp)
161 | // }
162 | // }
163 |
164 | // return c.JSON(http.StatusOK, commands.UpdateCfg(options, settings))
165 | // }
166 |
167 | // func v1OptionsCfg(c *echo.Context) error {
168 | // options.CfgName = c.Param("name")
169 | // // resp := config.ResponseObject{
170 | // // Action: "info",
171 | // // }
172 |
173 | // return c.JSON(http.StatusOK, commands.Info(options))
174 | // }
175 |
--------------------------------------------------------------------------------
/storage/README.md:
--------------------------------------------------------------------------------
1 | DynamoDB is the only intended storage solution for now. This directory/package exists mainly
2 | for file/code organization. It is a personal preference over having a ton of go files all in
3 | one directory. However, since it's being built with an interface, it does leave the door open
4 | for other storage options in the future.
5 |
6 | Would that be so strange? DynamoDB was decided upon for a few reasons, but it may not work
7 | for all cases. A different solution may work better. This leaves the door open.
8 |
9 | Also, configurations are meant to be shareable. If that's the case, then it's feasible that
10 | we would move a configuration from one storage solution to another.
11 |
12 | Maybe we'll use other data stores like Cockroach DB in the future. Who knows. DynamoDB was
13 | just the obvious choice given the initial goals of the project (which included cost).
14 |
15 | Or maybe it's just a local SQLite or file based storage. At that point it's not distributed...
16 | But maybe the tool can do a little more.
--------------------------------------------------------------------------------
/storage/dynamodb/dynamodb.go:
--------------------------------------------------------------------------------
1 | package database
2 |
3 | import (
4 | "bytes"
5 | "encoding/gob"
6 | "github.com/aws/aws-sdk-go/aws"
7 | "github.com/aws/aws-sdk-go/aws/credentials"
8 | "github.com/aws/aws-sdk-go/aws/session"
9 | "github.com/aws/aws-sdk-go/service/dynamodb"
10 | "github.com/fatih/structs"
11 | "github.com/tmaiaroto/discfg/config"
12 | "os"
13 | "strconv"
14 | "time"
15 | )
16 |
// DynamoDB implements the Shipper interface.
// The type is stateless; each method builds a fresh service client via Svc.
type DynamoDB struct {
}
20 |
// Name simply returns the display name for this shipper. It might return version info too from a database,
// so the config options are present should it need to connect for that.
func (db DynamoDB) Name(opts config.Options) string {
	return "DynamoDB"
}
26 |
27 | // Options returns misc. settings and options for the datastore. For DynamoDB this is going to be the
28 | // read and write capacity units, but anything like that would be found here. Up to the discretion of
29 | // the interface.
30 | func (db DynamoDB) Options(opts config.Options) map[string]interface{} {
31 | svc := Svc(opts)
32 |
33 | params := &dynamodb.DescribeTableInput{
34 | TableName: aws.String(opts.CfgName), // Required
35 | }
36 | resp, err := svc.DescribeTable(params)
37 | if err == nil {
38 | m := structs.Map(resp)
39 | return m
40 | }
41 | return map[string]interface{}{}
42 | }
43 |
// Svc configures the DynamoDB service to use. Credentials are resolved in
// increasing priority: shared credentials file, then environment variables
// (if the file lookup failed), then explicit keys from the config options.
func Svc(opts config.Options) *dynamodb.DynamoDB {
	awsConfig := &aws.Config{Region: aws.String(opts.Storage.AWS.Region)}

	// If a session was passed... (AWS Lambda does this)
	if opts.Storage.AWS.SessionToken != "" {
		os.Setenv("AWS_SESSION_TOKEN", opts.Storage.AWS.SessionToken)
	}

	// Look in a variety of places for AWS credentials. First, try the credentials file set by AWS CLI tool.
	// Note the empty string instructs to look under default file path (different based on OS).
	// This file can have multiple profiles and a default profile will be used unless otherwise configured.
	// See: https://godoc.org/github.com/aws/aws-sdk-go/aws/credentials#SharedCredentialsProvider
	creds := credentials.NewSharedCredentials("", opts.Storage.AWS.CredProfile)
	// Get() is called only to probe whether the shared file yields credentials;
	// its value is discarded and the error decides the fallback below.
	_, err := creds.Get()
	// If that failed, try environment variables.
	if err != nil {
		// The following are checked:
		// Access Key ID: AWS_ACCESS_KEY_ID or AWS_ACCESS_KEY
		// Secret Access Key: AWS_SECRET_ACCESS_KEY or AWS_SECRET_KEY
		creds = credentials.NewEnvCredentials()
	}

	// If credentials were passed via config, then use those. They will take priority over other methods.
	if opts.Storage.AWS.AccessKeyID != "" && opts.Storage.AWS.SecretAccessKey != "" {
		creds = credentials.NewStaticCredentials(opts.Storage.AWS.AccessKeyID, opts.Storage.AWS.SecretAccessKey, "")
	}
	awsConfig.Credentials = creds

	return dynamodb.New(session.New(awsConfig))
}
75 |
76 | // CreateConfig creates a new table for a configuration
77 | func (db DynamoDB) CreateConfig(opts config.Options, settings map[string]interface{}) (interface{}, error) {
78 | svc := Svc(opts)
79 | wu := int64(1)
80 | ru := int64(2)
81 | if val, ok := settings["WriteCapacityUnits"]; ok {
82 | wu = int64(val.(float64))
83 | }
84 | if val, ok := settings["ReadCapacityUnits"]; ok {
85 | ru = int64(val.(float64))
86 | }
87 |
88 | // Must be at least 1
89 | if wu < 1 {
90 | wu = int64(1)
91 | }
92 | // Also must be at least 1, default to 2
93 | if ru < 1 {
94 | ru = int64(2)
95 | }
96 |
97 | params := &dynamodb.CreateTableInput{
98 | AttributeDefinitions: []*dynamodb.AttributeDefinition{
99 | {
100 | AttributeName: aws.String("key"),
101 | AttributeType: aws.String("S"),
102 | },
103 | // {
104 | // AttributeName: aws.String("id"),
105 | // AttributeType: aws.String("N"),
106 | // },
107 | // // More values...
108 | },
109 | KeySchema: []*dynamodb.KeySchemaElement{
110 | // One is required, but we use both a HASH (key name) and a RANGE (Snowflake).
111 | {
112 | AttributeName: aws.String("key"),
113 | KeyType: aws.String("HASH"),
114 | },
115 | // {
116 | // AttributeName: aws.String("id"),
117 | // KeyType: aws.String("RANGE"),
118 | // },
119 | },
120 | // Hard to estimate really. Should be passed along via command line when creating a new config.
121 | // Along with the table name. This will let people choose. Though it's kinda annoying someone must
122 | // think about this...
123 | // http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/ProvisionedThroughputIntro.html
124 | ProvisionedThroughput: &dynamodb.ProvisionedThroughput{ // Required
125 | ReadCapacityUnits: aws.Int64(ru), // Required
126 | WriteCapacityUnits: aws.Int64(wu), // Required
127 | },
128 | TableName: aws.String(opts.CfgName), // Required
129 |
130 | }
131 | response, err := svc.CreateTable(params)
132 | // TODO: Convey this somehow?
133 | // if err == nil {
134 | // tableStatus := *response.TableDescription.TableStatus
135 | // if tableStatus != "CREATING" && tableStatus != "ACTIVE" {
136 | // err = errors.New("Something went wrong creating tables")
137 | // }
138 | // }
139 | return response, err
140 | }
141 |
// DeleteConfig deletes a configuration (removing the DynamoDB table and all data within it).
// Returns the raw DeleteTable output along with any error from the AWS SDK.
func (db DynamoDB) DeleteConfig(opts config.Options) (interface{}, error) {
	svc := Svc(opts)
	params := &dynamodb.DeleteTableInput{
		TableName: aws.String(opts.CfgName), // Required
	}
	return svc.DeleteTable(params)
}
150 |
151 | // UpdateConfig updates a configuration (DyanmoDB can have its read and write capacity units adjusted as needed)
152 | // Note: Adjusting the read capacity is fast, adjusting write capacity takes longer.
153 | func (db DynamoDB) UpdateConfig(opts config.Options, settings map[string]interface{}) (interface{}, error) {
154 | svc := Svc(opts)
155 | wu := int64(1)
156 | ru := int64(2)
157 | if val, ok := settings["WriteCapacityUnits"]; ok {
158 | wu = int64(val.(float64))
159 | }
160 | if val, ok := settings["ReadCapacityUnits"]; ok {
161 | ru = int64(val.(float64))
162 | }
163 | // Must be at least 1
164 | if wu < 1 {
165 | wu = int64(1)
166 | }
167 | // Also must be at least 1, default to 2
168 | if ru < 1 {
169 | ru = int64(2)
170 | }
171 |
172 | params := &dynamodb.UpdateTableInput{
173 | TableName: aws.String(opts.CfgName), // Required
174 | ProvisionedThroughput: &dynamodb.ProvisionedThroughput{
175 | ReadCapacityUnits: aws.Int64(ru), // Required
176 | WriteCapacityUnits: aws.Int64(wu), // Required
177 | },
178 | // Not for now. Mainly because only one operation per UpdateTable() call. Makes it annoying.
179 | // StreamSpecification: &dynamodb.StreamSpecification{
180 | // StreamEnabled: aws.Bool(true),
181 | // StreamViewType: aws.String("StreamViewType"),
182 | // },
183 | }
184 | return svc.UpdateTable(params)
185 | }
186 |
187 | // ConfigState returns the DynamoDB table state
188 | func (db DynamoDB) ConfigState(opts config.Options) (string, error) {
189 | svc := Svc(opts)
190 | status := ""
191 |
192 | params := &dynamodb.DescribeTableInput{
193 | TableName: aws.String(opts.CfgName), // Required
194 | }
195 | resp, err := svc.DescribeTable(params)
196 | if err != nil {
197 | // Print the error, cast err to awserr.Error to get the Code and
198 | // Message from an error.
199 | //fmt.Println(err.Error())
200 | status = *resp.Table.TableStatus
201 | }
202 | return status, err
203 | }
204 |
205 | // Update a key in DynamoDB
206 | func (db DynamoDB) Update(opts config.Options) (config.Item, error) {
207 | var err error
208 | svc := Svc(opts)
209 | item := config.Item{Key: opts.Key}
210 |
211 | ttlString := strconv.FormatInt(opts.TTL, 10)
212 | expires := time.Now().Add(time.Duration(opts.TTL) * time.Second)
213 | expiresInt := expires.UnixNano()
214 | expiresString := strconv.FormatInt(expiresInt, 10)
215 | // If no TTL was passed in the options, set 0. Anything 0 is indefinite in these cases.
216 | if opts.TTL == 0 {
217 | expiresString = "0"
218 | }
219 |
220 | // DynamoDB type cheat sheet:
221 | // B: []byte("some bytes")
222 | // BOOL: aws.Bool(true)
223 | // BS: [][]byte{[]byte("bytes and bytes")}
224 | // L: []*dynamodb.AttributeValue{{...recursive values...}}
225 | // M: map[string]*dynamodb.AttributeValue{"key": {...recursive...} }
226 | // N: aws.String("number")
227 | // NS: []*String{aws.String("number"), aws.String("number")}
228 | // NULL: aws.Bool(true)
229 | // S: aws.String("string")
230 | // SS: []*string{aws.String("string"), aws.String("string")}
231 |
232 | // If always putting new items, there's no conditional update.
233 | // But the only way to update is to make the items have a HASH only index instead of HASH + RANGE.
234 |
235 | params := &dynamodb.UpdateItemInput{
236 | Key: map[string]*dynamodb.AttributeValue{
237 | "key": {
238 | S: aws.String(opts.Key),
239 | },
240 | },
241 | TableName: aws.String(opts.CfgName),
242 | // KEY and VALUE are reserved words so the query needs to dereference them
243 | ExpressionAttributeNames: map[string]*string{
244 | //"#k": aws.String("key"),
245 | "#v": aws.String("value"),
246 | // If TTL is a reserved word in DynamoDB...Then why doesn't it seem to have a TTL feature??
247 | "#t": aws.String("ttl"),
248 | },
249 | ExpressionAttributeValues: map[string]*dynamodb.AttributeValue{
250 | // value
251 | ":value": {
252 | //B: []byte(opts.Value), // <-- sure, if all we ever stored as strings.
253 | B: opts.Value,
254 | },
255 | // TTL
256 | ":ttl": {
257 | N: aws.String(ttlString),
258 | },
259 | // Expiration timestamp
260 | ":expires": {
261 | N: aws.String(expiresString),
262 | },
263 | // version increment
264 | ":i": {
265 | N: aws.String("1"),
266 | },
267 | },
268 | //ReturnConsumedCapacity: aws.String("TOTAL"),
269 | //ReturnItemCollectionMetrics: aws.String("ReturnItemCollectionMetrics"),
270 | ReturnValues: aws.String("ALL_OLD"),
271 | UpdateExpression: aws.String("SET #v = :value, #t = :ttl, expires = :expires ADD version :i"),
272 | }
273 |
274 | // Conditional write operation (CAS)
275 | if opts.ConditionalValue != "" {
276 | params.ExpressionAttributeValues[":condition"] = &dynamodb.AttributeValue{B: []byte(opts.ConditionalValue)}
277 | params.ConditionExpression = aws.String("#v = :condition")
278 | }
279 |
280 | response, err := svc.UpdateItem(params)
281 | if err == nil {
282 | // The old values
283 | if val, ok := response.Attributes["value"]; ok {
284 | item.Value = val.B
285 | item.Version, _ = strconv.ParseInt(*response.Attributes["version"].N, 10, 64)
286 | }
287 | }
288 |
289 | return item, err
290 | }
291 |
// Get a key in DynamoDB. Queries for the latest item stored under opts.Key
// and maps its attributes onto a config.Item. If the item carries a TTL and
// has expired, an empty item is returned and the stored record is deleted.
// Note: the error from that cleanup Delete is discarded (best-effort).
func (db DynamoDB) Get(opts config.Options) (config.Item, error) {
	var err error
	svc := Svc(opts)
	item := config.Item{Key: opts.Key}

	params := &dynamodb.QueryInput{
		TableName: aws.String(opts.CfgName),

		// KEY and VALUE are reserved words so the query needs to dereference them
		ExpressionAttributeNames: map[string]*string{
			"#k": aws.String("key"),
		},
		ExpressionAttributeValues: map[string]*dynamodb.AttributeValue{
			":key": {
				S: aws.String(opts.Key),
			},
		},
		KeyConditionExpression: aws.String("#k = :key"),
		// TODO: Return more? It's nice to have a history now whereas previously I thought I might now have one...But what's the use?
		Limit: aws.Int64(1),

		// INDEXES | TOTAL | NONE (not required - not even sure if I need to worry about it)
		ReturnConsumedCapacity: aws.String("TOTAL"),
		// Important: This needs to be false so it returns results in descending order. If it's true (the default), it's sorted in the
		// order values were stored. So the first item stored for the key ever would be returned...But the latest item is needed.
		ScanIndexForward: aws.Bool(false),
		// http://docs.aws.amazon.com/amazondynamodb/latest/APIReference/API_Query.html#DDB-Query-request-Select
		Select: aws.String("ALL_ATTRIBUTES"),
	}
	response, err := svc.Query(params)

	if err == nil {
		// Print the error, cast err to awserr.Error to get the Code and
		// Message from an error.
		//fmt.Println(err.Error())

		if len(response.Items) > 0 {
			// Every field should now be checked because it's possible to have a response without a value or version.
			// For example, the root key "/" may only hold information about the config version and modified time.
			// It may not have a set value and therefore it also won't have a relative version either.
			// TODO: Maybe it should? We can always version it as 1 even if empty value. Perhaps also an empty string value...
			// But the update config version would need to have a compare for an empty value. See if DynamoDB can do that.
			// For now, just check the existence of keys in the map.
			if val, ok := response.Items[0]["value"]; ok {
				item.Value = val.B
			}
			if val, ok := response.Items[0]["version"]; ok {
				item.Version, _ = strconv.ParseInt(*val.N, 10, 64)
			}

			// Expiration/TTL (only set if > 0)
			if val, ok := response.Items[0]["ttl"]; ok {
				ttl, _ := strconv.ParseInt(*val.N, 10, 64)
				if ttl > 0 {
					item.TTL = ttl
				}
			}
			// "expires" is stored as a stringified nanosecond Unix timestamp.
			if val, ok := response.Items[0]["expires"]; ok {
				expiresNano, _ := strconv.ParseInt(*val.N, 10, 64)
				if expiresNano > 0 {
					item.Expiration = time.Unix(0, expiresNano)
				}
			}

			// If cfgVersion and cfgModified are set because it's the root key "/" then set those too.
			// This is only returned for the root key. no sense in making a separate get function because operations like
			// exporting would then require more queries than necessary. However, it won't be displayed in the item's JSON output.
			if val, ok := response.Items[0]["cfgVersion"]; ok {
				item.CfgVersion, _ = strconv.ParseInt(*val.N, 10, 64)
			}
			if val, ok := response.Items[0]["cfgModified"]; ok {
				item.CfgModifiedNanoseconds, _ = strconv.ParseInt(*val.N, 10, 64)
			}
		}

		// Check the TTL
		if item.TTL > 0 {
			// If expired, return an empty item
			if item.Expiration.UnixNano() < time.Now().UnixNano() {
				item = config.Item{Key: opts.Key}
				// Delete the now expired item
				// NOTE: This does mean waiting on another DynamoDB request and that technically means slower performance in these situations, but is it a conern?
				// A goroutine doesn't help because there's not guarantee there's time for it to complete.
				db.Delete(opts)
			}
		}
	}

	return item, err
}
383 |
384 | // Deprecated or at best delayed...
385 | func getChildren(svc *dynamodb.DynamoDB, opts config.Options) ([]config.Item, error) {
386 | var err error
387 | items := []config.Item{}
388 |
389 | // TODO
390 | // params := &dynamodb.QueryInput{
391 | // TableName: aws.String(opts.CfgName),
392 |
393 | // // KEY and VALUE are reserved words so the query needs to dereference them
394 | // ExpressionAttributeNames: map[string]*string{
395 | // "#k": aws.String("key"),
396 | // },
397 | // ExpressionAttributeValues: map[string]*dynamodb.AttributeValue{
398 | // ":key": {
399 | // S: aws.String(opts.Key),
400 | // },
401 | // },
402 | // KeyConditionExpression: aws.String("#k = :key"),
403 | // // TODO: Return more? It's nice to have a history now whereas previously I thought I might now have one...But what's the use?
404 | // Limit: aws.Int64(1),
405 |
406 | // // INDEXES | TOTAL | NONE (not required - not even sure if I need to worry about it)
407 | // ReturnConsumedCapacity: aws.String("TOTAL"),
408 | // // Important: This needs to be false so it returns results in descending order. If it's true (the default), it's sorted in the
409 | // // order values were stored. So the first item stored for the key ever would be returned...But the latest item is needed.
410 | // ScanIndexForward: aws.Bool(false),
411 | // // http://docs.aws.amazon.com/amazondynamodb/latest/APIReference/API_Query.html#DDB-Query-request-Select
412 | // Select: aws.String("ALL_ATTRIBUTES"),
413 | // }
414 | // response, err := svc.Query(params)
415 |
416 | return items, err
417 | }
418 |
419 | // Delete a key in DynamoDB
420 | func (db DynamoDB) Delete(opts config.Options) (config.Item, error) {
421 | var err error
422 | svc := Svc(opts)
423 | item := config.Item{Key: opts.Key}
424 |
425 | params := &dynamodb.DeleteItemInput{
426 | Key: map[string]*dynamodb.AttributeValue{
427 | "key": {
428 | S: aws.String(opts.Key),
429 | },
430 | },
431 | TableName: aws.String(opts.CfgName),
432 | ReturnValues: aws.String("ALL_OLD"),
433 | // TODO: think about this for statistics
434 | // INDEXES | TOTAL | NONE
435 | //ReturnConsumedCapacity: aws.String("ReturnConsumedCapacity"),
436 | }
437 |
438 | // Conditional delete operation
439 | if opts.ConditionalValue != "" {
440 | // Alias value since it's a reserved word
441 | params.ExpressionAttributeNames = make(map[string]*string)
442 | params.ExpressionAttributeNames["#v"] = aws.String("value")
443 | // Set the condition expression value and compare
444 | params.ExpressionAttributeValues = make(map[string]*dynamodb.AttributeValue)
445 | params.ExpressionAttributeValues[":condition"] = &dynamodb.AttributeValue{B: []byte(opts.ConditionalValue)}
446 | params.ConditionExpression = aws.String("#v = :condition")
447 | }
448 |
449 | response, err := svc.DeleteItem(params)
450 | if err == nil {
451 | if len(response.Attributes) > 0 {
452 | item.Value = response.Attributes["value"].B
453 | item.Version, _ = strconv.ParseInt(*response.Attributes["version"].N, 10, 64)
454 | }
455 | }
456 |
457 | return item, err
458 | }
459 |
460 | // UpdateConfigVersion updates the configuration's global version and modified timestamp (fields unique to the root key "/")
461 | func (db DynamoDB) UpdateConfigVersion(opts config.Options) error {
462 | svc := Svc(opts)
463 | now := time.Now()
464 | params := &dynamodb.UpdateItemInput{
465 | Key: map[string]*dynamodb.AttributeValue{
466 | "key": {
467 | S: aws.String("/"),
468 | },
469 | },
470 | TableName: aws.String(opts.CfgName),
471 | ExpressionAttributeNames: map[string]*string{
472 | "#m": aws.String("cfgModified"),
473 | },
474 | ExpressionAttributeValues: map[string]*dynamodb.AttributeValue{
475 | // modified timestamp (DynamoDB has no date type)
476 | ":modified": {
477 | N: aws.String(strconv.FormatInt(now.UnixNano(), 10)),
478 | },
479 | // version increment
480 | ":i": {
481 | N: aws.String("1"),
482 | },
483 | },
484 | ReturnValues: aws.String("NONE"),
485 | UpdateExpression: aws.String("SET #m = :modified ADD cfgVersion :i"),
486 | }
487 | _, err := svc.UpdateItem(params)
488 | return err
489 | }
490 |
// getBytes serializes an arbitrary value into a byte slice using encoding/gob
// so it can be stored in DynamoDB. interface{} -> []byte
// It returns nil and the encoder's error if the value cannot be gob-encoded.
// DEPRECATED
func getBytes(v interface{}) ([]byte, error) {
	buf := new(bytes.Buffer)
	if err := gob.NewEncoder(buf).Encode(v); err != nil {
		return nil, err
	}
	return buf.Bytes(), nil
}
502 |
--------------------------------------------------------------------------------
/storage/mockdb/mock.go:
--------------------------------------------------------------------------------
// Package mockdb provides a mock storage Shipper interface for tests.
2 | package mockdb
3 |
4 | import (
5 | "errors"
6 | "github.com/tmaiaroto/discfg/config"
7 | )
8 |
9 | // MockCfg is just a map of mock records within a mock config.
10 | var MockCfg = map[string]map[string]config.Item{
11 | "mockcfg": {
12 | "/": config.Item{
13 | Key: "/",
14 | Value: []byte("Mock configuration"),
15 | CfgVersion: int64(4),
16 | CfgModifiedNanoseconds: int64(1464675792991825937),
17 | },
18 | "initial": config.Item{
19 | Key: "initial",
20 | Value: []byte("initial value for test"),
21 | Version: int64(1),
22 | },
23 | "initial_second": config.Item{
24 | Key: "initial_second",
25 | Value: []byte("a second initial value for test"),
26 | Version: int64(3),
27 | },
28 | "json_value": config.Item{
29 | Key: "initial_second",
30 | Value: []byte(`{"json": "string", "num": 4}`),
31 | Version: int64(3),
32 | },
33 | "encoded": config.Item{
34 | Key: "encoded",
35 | Value: []byte(`eyJ1cGRhdGVkIjogImZyaWRheSJ9`),
36 | Version: int64(1),
37 | },
38 | },
39 | }
40 |
41 | // Version int64 `json:"version,omitempty"`
42 | // Key string `json:"key,omitempty"`
43 | // Value interface{} `json:"value,omitempty"`
44 | // TTL int64 `json:"ttl,omitempty"`
45 | // Expiration time.Time `json:"-"`
46 | // OutputExpiration string `json:"expiration,omitempty"`
47 | // CfgVersion int64 `json:"-"`
48 | // CfgModifiedNanoseconds int64 `json:"-"`
49 |
// MockShipper struct implements the Shipper interface for testing purposes.
// Its methods read and write the package-level MockCfg map instead of
// talking to a real storage engine, so it carries no state of its own.
type MockShipper struct {
}
53 |
54 | // Name returns the name for the interface
55 | func (m MockShipper) Name(opts config.Options) string {
56 | return "Mock Storage Engine"
57 | }
58 |
59 | // Options returns various settings and options for the shipper
60 | func (m MockShipper) Options(opts config.Options) map[string]interface{} {
61 | return map[string]interface{}{"example": "option"}
62 | }
63 |
64 | // CreateConfig creates a config
65 | func (m MockShipper) CreateConfig(opts config.Options, settings map[string]interface{}) (interface{}, error) {
66 | var err error
67 | return "", err
68 | }
69 |
70 | // DeleteConfig deletes a config
71 | func (m MockShipper) DeleteConfig(opts config.Options) (interface{}, error) {
72 | var err error
73 | return "", err
74 | }
75 |
76 | // UpdateConfig updates a config
77 | func (m MockShipper) UpdateConfig(opts config.Options, settings map[string]interface{}) (interface{}, error) {
78 | var err error
79 | return "", err
80 | }
81 |
82 | // ConfigState returns the state of the config
83 | func (m MockShipper) ConfigState(opts config.Options) (string, error) {
84 | var err error
85 | return "ACTIVE", err
86 | }
87 |
88 | // Update a Item (record)
89 | func (m MockShipper) Update(opts config.Options) (config.Item, error) {
90 | var err error
91 | if val, ok := MockCfg[opts.CfgName][opts.Key]; ok {
92 | val.Version++
93 | } else {
94 | MockCfg[opts.CfgName][opts.Key] = config.Item{
95 | Key: opts.Key,
96 | Value: opts.Value,
97 | Version: int64(1),
98 | }
99 | }
100 | return MockCfg[opts.CfgName][opts.Key], err
101 | }
102 |
103 | // Get a Item (record)
104 | func (m MockShipper) Get(opts config.Options) (config.Item, error) {
105 | var err error
106 | return MockCfg[opts.CfgName][opts.Key], err
107 | }
108 |
109 | // Delete a Item (record)
110 | func (m MockShipper) Delete(opts config.Options) (config.Item, error) {
111 | var err error
112 | defer delete(MockCfg[opts.CfgName], opts.Key)
113 | return MockCfg[opts.CfgName][opts.Key], err
114 | }
115 |
116 | // UpdateConfigVersion updates the incremental counter/state of a configuration and should be called on each change
117 | func (m MockShipper) UpdateConfigVersion(opts config.Options) error {
118 | var err error
119 | if opts.CfgName != "" {
120 | n := MockCfg[opts.CfgName]["/"]
121 | n.CfgVersion++
122 | MockCfg[opts.CfgName]["/"] = n
123 | } else {
124 | err = errors.New("Interface Error: No config name passed.")
125 | }
126 | return err
127 | }
128 |
--------------------------------------------------------------------------------
/storage/storage.go:
--------------------------------------------------------------------------------
1 | // Package storage contains the very important Shipper interface which is responsible for working with storage engines.
2 | package storage
3 |
4 | import (
5 | "errors"
6 | "github.com/tmaiaroto/discfg/config"
7 | ddb "github.com/tmaiaroto/discfg/storage/dynamodb"
8 | )
9 |
// Shipper can send information into a database or log etc. While DynamoDB is the planned data store,
// who knows what will happen in the future. A simple interface never hurts.
// Config-level operations (CreateConfig, DeleteConfig, UpdateConfig, ConfigState)
// act on a whole configuration; Update/Get/Delete act on individual keys;
// UpdateConfigVersion bumps the config-wide version stored at the root key.
type Shipper interface {
	CreateConfig(config.Options, map[string]interface{}) (interface{}, error)
	DeleteConfig(config.Options) (interface{}, error)
	UpdateConfig(config.Options, map[string]interface{}) (interface{}, error)
	ConfigState(config.Options) (string, error)
	Update(config.Options) (config.Item, error)
	Get(config.Options) (config.Item, error)
	Delete(config.Options) (config.Item, error)
	UpdateConfigVersion(config.Options) error
	Name(config.Options) string
	Options(config.Options) map[string]interface{}
}
24 |
// ShipperResult contains errors and other information.
type ShipperResult struct {
	// Interface is the name of the shipper that produced the result.
	Interface string `json:"interface"`
	// Error holds any error returned by the shipper operation.
	Error error `json:"error"`
}
30 |
// Error message constants, reduce repetition.
// errMsgInvalidShipper is returned by every package-level wrapper when
// opts.StorageInterfaceName does not match a registered shipper.
const (
	errMsgInvalidShipper = "Invalid shipper interface."
)
35 |
// A map of all Shipper interfaces available for use (with some defaults).
// Keyed by the name passed via config.Options.StorageInterfaceName;
// extend it with RegisterShipper.
var shippers = map[string]Shipper{
	"dynamodb": ddb.DynamoDB{},
}
40 |
// RegisterShipper allows anyone importing discfg into their own project to register new shippers or overwrite the defaults.
// NOTE(review): the shippers map is mutated without synchronization, so this
// should be called during setup, before concurrent use — confirm callers.
func RegisterShipper(name string, shipper Shipper) {
	shippers[name] = shipper
}
45 |
// ListShippers returns the list of available shippers.
// The live internal map is returned (not a copy), so callers mutating it
// affect the registry.
func ListShippers() map[string]Shipper {
	return shippers
}
50 |
51 | // Name returns the pretty display name for the shipper
52 | func Name(opts config.Options) string {
53 | if s, ok := shippers[opts.StorageInterfaceName]; ok {
54 | return s.Name(opts)
55 | }
56 | return ""
57 | }
58 |
59 | // Options returns various settings and options for the shipper
60 | func Options(opts config.Options) map[string]interface{} {
61 | if s, ok := shippers[opts.StorageInterfaceName]; ok {
62 | return s.Options(opts)
63 | }
64 | return map[string]interface{}{}
65 | }
66 |
67 | // CreateConfig creates a new configuration returning success true/false along with any response and error.
68 | func CreateConfig(opts config.Options, settings map[string]interface{}) (interface{}, error) {
69 | if s, ok := shippers[opts.StorageInterfaceName]; ok {
70 | return s.CreateConfig(opts, settings)
71 | }
72 | return nil, errors.New(errMsgInvalidShipper)
73 | }
74 |
75 | // DeleteConfig deletes an existing configuration
76 | func DeleteConfig(opts config.Options) (interface{}, error) {
77 | if s, ok := shippers[opts.StorageInterfaceName]; ok {
78 | return s.DeleteConfig(opts)
79 | }
80 | return nil, errors.New(errMsgInvalidShipper)
81 | }
82 |
83 | // UpdateConfig updates the options/settings for a configuration (may not be implementd by each interface)
84 | func UpdateConfig(opts config.Options, settings map[string]interface{}) (interface{}, error) {
85 | if s, ok := shippers[opts.StorageInterfaceName]; ok {
86 | return s.UpdateConfig(opts, settings)
87 | }
88 | return nil, errors.New(errMsgInvalidShipper)
89 | }
90 |
91 | // ConfigState returns the config state (just a simple string message, could be "ACTIVE" for example)
92 | func ConfigState(opts config.Options) (string, error) {
93 | // TODO: May get more elaborate and have codes for this too, but will probably always have a string message
94 | if s, ok := shippers[opts.StorageInterfaceName]; ok {
95 | return s.ConfigState(opts)
96 | }
97 | return "", errors.New(errMsgInvalidShipper)
98 | }
99 |
100 | // Update a key value in the configuration
101 | func Update(opts config.Options) (config.Item, error) {
102 | var item config.Item
103 | if s, ok := shippers[opts.StorageInterfaceName]; ok {
104 | err := UpdateConfigVersion(opts)
105 | if err != nil {
106 | return item, err
107 | }
108 | return s.Update(opts)
109 | }
110 | return item, errors.New(errMsgInvalidShipper)
111 | }
112 |
113 | // Get a key value in the configuration
114 | func Get(opts config.Options) (config.Item, error) {
115 | var item config.Item
116 | if s, ok := shippers[opts.StorageInterfaceName]; ok {
117 | return s.Get(opts)
118 | }
119 | return item, errors.New(errMsgInvalidShipper)
120 | }
121 |
122 | // Delete a key value in the configuration
123 | func Delete(opts config.Options) (config.Item, error) {
124 | var item config.Item
125 | if s, ok := shippers[opts.StorageInterfaceName]; ok {
126 | err := UpdateConfigVersion(opts)
127 | if err != nil {
128 | return item, err
129 | }
130 | return s.Delete(opts)
131 | }
132 | return item, errors.New(errMsgInvalidShipper)
133 | }
134 |
135 | // UpdateConfigVersion updates the global discfg config version and modified timestamp (on the root key "/")
136 | func UpdateConfigVersion(opts config.Options) error {
137 | // Technically, this modified timestamp won't be accurate. The config would have changed already by this point.
138 | // TODO: Perhaps pass a timestamp to this function to get a little closer
139 | if s, ok := shippers[opts.StorageInterfaceName]; ok {
140 | return s.UpdateConfigVersion(opts)
141 | }
142 | return errors.New(errMsgInvalidShipper)
143 | }
144 |
--------------------------------------------------------------------------------
/storage/storage_test.go:
--------------------------------------------------------------------------------
1 | package storage
2 |
3 | import (
4 | . "github.com/smartystreets/goconvey/convey"
5 | "github.com/tmaiaroto/discfg/config"
6 | "github.com/tmaiaroto/discfg/storage/mockdb"
7 | "testing"
8 | )
9 |
// TestRegisterShipper verifies that a registered shipper lands in the
// package-level shippers map under its given name.
func TestRegisterShipper(t *testing.T) {
	Convey("A new Shipper should be available for use once set", t, func() {
		RegisterShipper("mock", mockdb.MockShipper{})
		So(shippers["mock"], ShouldHaveSameTypeAs, mockdb.MockShipper{})
	})
}
16 |
// TestListShippers verifies that the shipper registry is non-empty
// (the dynamodb default is always present).
func TestListShippers(t *testing.T) {
	Convey("Shippers should be returned", t, func() {
		shippers := ListShippers()
		So(shippers, ShouldNotBeEmpty)
	})
}
23 |
// TestCreateConfig verifies that an unknown shipper name is rejected.
func TestCreateConfig(t *testing.T) {
	// Convey("A new Shipper should be available for use once set", t, func() {
	// 	RegisterShipper("mock", mockdb.MockShipper{})
	// 	So(shippers["mock"], ShouldHaveSameTypeAs, mockdb.MockShipper{})
	// })

	Convey("A valid Shipper must be used", t, func() {
		_, err := CreateConfig(config.Options{StorageInterfaceName: ""}, map[string]interface{}{})
		So(err.Error(), ShouldEqual, errMsgInvalidShipper)
	})
}
35 |
// TestDeleteConfig verifies that an unknown shipper name is rejected.
func TestDeleteConfig(t *testing.T) {
	Convey("A valid Shipper must be used", t, func() {
		_, err := DeleteConfig(config.Options{StorageInterfaceName: ""})
		So(err.Error(), ShouldEqual, errMsgInvalidShipper)
	})
}
42 |
// TestUpdateConfig verifies that an unknown shipper name is rejected.
func TestUpdateConfig(t *testing.T) {
	Convey("A valid Shipper must be used", t, func() {
		_, err := UpdateConfig(config.Options{StorageInterfaceName: ""}, map[string]interface{}{})
		So(err.Error(), ShouldEqual, errMsgInvalidShipper)
	})
}
49 |
// TestConfigState verifies that an unknown shipper name is rejected.
func TestConfigState(t *testing.T) {
	Convey("A valid Shipper must be used", t, func() {
		_, err := ConfigState(config.Options{StorageInterfaceName: ""})
		So(err.Error(), ShouldEqual, errMsgInvalidShipper)
	})
}
56 |
// TestUpdate verifies that updating via the mock shipper creates a new item
// at version 1, and that an unknown shipper name is rejected.
// NOTE: the successful Update also bumps MockCfg's "/" CfgVersion (4 -> 5)
// through UpdateConfigVersion; TestUpdateConfigVersion below relies on this
// ordering, so these tests are order-dependent within the file.
func TestUpdate(t *testing.T) {
	Convey("Should return with updated item value and version", t, func() {
		RegisterShipper("mock", mockdb.MockShipper{})
		opts := config.Options{
			StorageInterfaceName: "mock",
			CfgName:              "mockcfg",
			Key:                  "testKey",
			Value:                []byte("testValue"),
		}
		resp, err := Update(opts)
		So(string(resp.Value.([]byte)), ShouldEqual, "testValue")
		So(resp.Version, ShouldEqual, int64(1))
		So(err, ShouldBeNil)
	})

	Convey("A valid Shipper must be used", t, func() {
		opts := config.Options{
			StorageInterfaceName: "invalid",
			CfgName:              "mockcfg",
			Key:                  "testKey",
			Value:                []byte("testValue"),
		}
		_, err := Update(opts)
		So(err.Error(), ShouldEqual, errMsgInvalidShipper)
	})
}
83 |
// TestGet verifies that a stored mock item is returned by key, and that an
// unknown shipper name is rejected.
func TestGet(t *testing.T) {
	Convey("A Shipper should get a key value, returning the item", t, func() {
		RegisterShipper("mock", mockdb.MockShipper{})
		opts := config.Options{
			StorageInterfaceName: "mock",
			CfgName:              "mockcfg",
			Key:                  "initial",
		}
		item, err := Get(opts)

		So(string(item.Value.([]byte)), ShouldEqual, "initial value for test")
		So(err, ShouldBeNil)
	})

	Convey("A valid Shipper must be used", t, func() {
		_, err := Get(config.Options{StorageInterfaceName: ""})
		So(err.Error(), ShouldEqual, errMsgInvalidShipper)
	})
}
103 |
// TestDelete verifies that deleting via the mock shipper returns the removed
// item and actually removes it from MockCfg, and that an unknown shipper
// name is rejected.
// NOTE: the successful Delete also bumps MockCfg's "/" CfgVersion (5 -> 6)
// through UpdateConfigVersion; TestUpdateConfigVersion relies on this.
func TestDelete(t *testing.T) {
	Convey("A Shipper should delete a key value and return the deleted item", t, func() {
		RegisterShipper("mock", mockdb.MockShipper{})
		opts := config.Options{
			StorageInterfaceName: "mock",
			CfgName:              "mockcfg",
			Key:                  "initial_second",
		}
		item, err := Delete(opts)

		So(string(item.Value.([]byte)), ShouldEqual, "a second initial value for test")
		So(err, ShouldBeNil)

		// The key is gone, so the map lookup yields the zero-value Item.
		So(mockdb.MockCfg[opts.CfgName]["initial_second"], ShouldResemble, config.Item{})
	})

	Convey("A valid Shipper must be used", t, func() {
		_, err := Delete(config.Options{StorageInterfaceName: ""})
		So(err.Error(), ShouldEqual, errMsgInvalidShipper)
	})
}
125 |
126 | func TestUpdateConfigVersion(t *testing.T) {
127 | Convey("The CfgVersion field on the Item should update", t, func() {
128 | RegisterShipper("mock", mockdb.MockShipper{})
129 | _ = UpdateConfigVersion(config.Options{StorageInterfaceName: "", CfgName: "mockcfg"})
130 | //item, _ := Get(config.Options{StorageInterfaceName: "", CfgName: "mockcfg", Key: "/"})
131 | // The initial value is 4 and TestUpdate changed it to 5, so this should now be 6.
132 | // I couldn't get Go Convey's Reset() to work. Well it "worked" but it returned ??? when running it in TestUpdate()
133 | // and I couldn't get the mockdb.MockCfg to change.
134 | So(mockdb.MockCfg["mockcfg"]["/"].CfgVersion, ShouldEqual, int64(6))
135 | })
136 |
137 | Convey("A valid Shipper must be used", t, func() {
138 | err := UpdateConfigVersion(config.Options{StorageInterfaceName: ""})
139 | So(err.Error(), ShouldEqual, errMsgInvalidShipper)
140 | })
141 | }
142 |
--------------------------------------------------------------------------------
/version/version.go:
--------------------------------------------------------------------------------
1 | // Package version provides a single location for the version used by many components
2 | package version
3 |
// Semantic defines a semver string for discfg (MAJOR.MINOR.PATCH per semver.org).
const Semantic = "0.11.1"
6 |
--------------------------------------------------------------------------------