├── .gitignore
├── Dockerfile
├── Gopkg.lock
├── Gopkg.toml
├── LICENSE
├── README.md
├── build.sh
├── doc
├── tc
│ ├── tc.md
│ └── tc_demo.sh
└── tools
│ ├── iftop.md
│ └── netperf.md
├── example.yaml
├── generate.sh
├── logo
├── container_node.png
├── graph.png
└── logo.jpeg
├── main.go
├── scripts
├── bashrc
├── gobuild.sh
└── show_eth0.sh
└── vendor
└── gopkg.in
└── yaml.v2
├── .travis.yml
├── LICENSE
├── LICENSE.libyaml
├── NOTICE
├── README.md
├── apic.go
├── decode.go
├── emitterc.go
├── encode.go
├── go.mod
├── parserc.go
├── readerc.go
├── resolve.go
├── scannerc.go
├── sorter.go
├── writerc.go
├── yaml.go
├── yamlh.go
└── yamlprivateh.go
/.gitignore:
--------------------------------------------------------------------------------
1 | .*swp
2 | Session.vim
3 |
--------------------------------------------------------------------------------
/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM ubuntu
2 | RUN apt-get update
3 | #RUN apt-get install -y build-essential git
4 | RUN apt-get install -y wget iftop iproute2 netcat-openbsd dstat mtr net-tools sendip tcpreplay netperf iperf iperf3 fping iputils-ping tcpdump iptraf
5 | RUN apt-get install -y graphviz
6 | RUN echo 'bind "\C-n":history-search-backward' >> ~/.bashrc
7 |
--------------------------------------------------------------------------------
/Gopkg.lock:
--------------------------------------------------------------------------------
1 | # This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'.
2 |
3 |
4 | [[projects]]
5 | name = "gopkg.in/yaml.v2"
6 | packages = ["."]
7 | revision = "5420a8b6744d3b0345ab293f6fcba19c978f1183"
8 | version = "v2.2.1"
9 |
10 | [solve-meta]
11 | analyzer-name = "dep"
12 | analyzer-version = 1
13 | inputs-digest = "ce4cfa8aa3eb29f45e7ba341fdeac9820969e663181e81bddfc4a3aa2d5169bb"
14 | solver-name = "gps-cdcl"
15 | solver-version = 1
16 |
--------------------------------------------------------------------------------
/Gopkg.toml:
--------------------------------------------------------------------------------
1 | # Gopkg.toml example
2 | #
3 | # Refer to https://github.com/golang/dep/blob/master/docs/Gopkg.toml.md
4 | # for detailed Gopkg.toml documentation.
5 | #
6 | # required = ["github.com/user/thing/cmd/thing"]
7 | # ignored = ["github.com/user/project/pkgX", "bitbucket.org/user/project/pkgA/pkgY"]
8 | #
9 | # [[constraint]]
10 | # name = "github.com/user/project"
11 | # version = "1.0.0"
12 | #
13 | # [[constraint]]
14 | # name = "github.com/user/project2"
15 | # branch = "dev"
16 | # source = "github.com/myfork/project2"
17 | #
18 | # [[override]]
19 | # name = "github.com/x/y"
20 | # version = "2.4.0"
21 | #
22 | # [prune]
23 | # non-go = false
24 | # go-tests = true
25 | # unused-packages = true
26 |
27 |
28 | [[constraint]]
29 | name = "gopkg.in/yaml.v2"
30 | version = "2.2.1"
31 |
32 | [prune]
33 | go-tests = true
34 | unused-packages = true
35 |
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | Apache License
2 | Version 2.0, January 2004
3 | http://www.apache.org/licenses/
4 |
5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
6 |
7 | 1. Definitions.
8 |
9 | "License" shall mean the terms and conditions for use, reproduction,
10 | and distribution as defined by Sections 1 through 9 of this document.
11 |
12 | "Licensor" shall mean the copyright owner or entity authorized by
13 | the copyright owner that is granting the License.
14 |
15 | "Legal Entity" shall mean the union of the acting entity and all
16 | other entities that control, are controlled by, or are under common
17 | control with that entity. For the purposes of this definition,
18 | "control" means (i) the power, direct or indirect, to cause the
19 | direction or management of such entity, whether by contract or
20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the
21 | outstanding shares, or (iii) beneficial ownership of such entity.
22 |
23 | "You" (or "Your") shall mean an individual or Legal Entity
24 | exercising permissions granted by this License.
25 |
26 | "Source" form shall mean the preferred form for making modifications,
27 | including but not limited to software source code, documentation
28 | source, and configuration files.
29 |
30 | "Object" form shall mean any form resulting from mechanical
31 | transformation or translation of a Source form, including but
32 | not limited to compiled object code, generated documentation,
33 | and conversions to other media types.
34 |
35 | "Work" shall mean the work of authorship, whether in Source or
36 | Object form, made available under the License, as indicated by a
37 | copyright notice that is included in or attached to the work
38 | (an example is provided in the Appendix below).
39 |
40 | "Derivative Works" shall mean any work, whether in Source or Object
41 | form, that is based on (or derived from) the Work and for which the
42 | editorial revisions, annotations, elaborations, or other modifications
43 | represent, as a whole, an original work of authorship. For the purposes
44 | of this License, Derivative Works shall not include works that remain
45 | separable from, or merely link (or bind by name) to the interfaces of,
46 | the Work and Derivative Works thereof.
47 |
48 | "Contribution" shall mean any work of authorship, including
49 | the original version of the Work and any modifications or additions
50 | to that Work or Derivative Works thereof, that is intentionally
51 | submitted to Licensor for inclusion in the Work by the copyright owner
52 | or by an individual or Legal Entity authorized to submit on behalf of
53 | the copyright owner. For the purposes of this definition, "submitted"
54 | means any form of electronic, verbal, or written communication sent
55 | to the Licensor or its representatives, including but not limited to
56 | communication on electronic mailing lists, source code control systems,
57 | and issue tracking systems that are managed by, or on behalf of, the
58 | Licensor for the purpose of discussing and improving the Work, but
59 | excluding communication that is conspicuously marked or otherwise
60 | designated in writing by the copyright owner as "Not a Contribution."
61 |
62 | "Contributor" shall mean Licensor and any individual or Legal Entity
63 | on behalf of whom a Contribution has been received by Licensor and
64 | subsequently incorporated within the Work.
65 |
66 | 2. Grant of Copyright License. Subject to the terms and conditions of
67 | this License, each Contributor hereby grants to You a perpetual,
68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable
69 | copyright license to reproduce, prepare Derivative Works of,
70 | publicly display, publicly perform, sublicense, and distribute the
71 | Work and such Derivative Works in Source or Object form.
72 |
73 | 3. Grant of Patent License. Subject to the terms and conditions of
74 | this License, each Contributor hereby grants to You a perpetual,
75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable
76 | (except as stated in this section) patent license to make, have made,
77 | use, offer to sell, sell, import, and otherwise transfer the Work,
78 | where such license applies only to those patent claims licensable
79 | by such Contributor that are necessarily infringed by their
80 | Contribution(s) alone or by combination of their Contribution(s)
81 | with the Work to which such Contribution(s) was submitted. If You
82 | institute patent litigation against any entity (including a
83 | cross-claim or counterclaim in a lawsuit) alleging that the Work
84 | or a Contribution incorporated within the Work constitutes direct
85 | or contributory patent infringement, then any patent licenses
86 | granted to You under this License for that Work shall terminate
87 | as of the date such litigation is filed.
88 |
89 | 4. Redistribution. You may reproduce and distribute copies of the
90 | Work or Derivative Works thereof in any medium, with or without
91 | modifications, and in Source or Object form, provided that You
92 | meet the following conditions:
93 |
94 | (a) You must give any other recipients of the Work or
95 | Derivative Works a copy of this License; and
96 |
97 | (b) You must cause any modified files to carry prominent notices
98 | stating that You changed the files; and
99 |
100 | (c) You must retain, in the Source form of any Derivative Works
101 | that You distribute, all copyright, patent, trademark, and
102 | attribution notices from the Source form of the Work,
103 | excluding those notices that do not pertain to any part of
104 | the Derivative Works; and
105 |
106 | (d) If the Work includes a "NOTICE" text file as part of its
107 | distribution, then any Derivative Works that You distribute must
108 | include a readable copy of the attribution notices contained
109 | within such NOTICE file, excluding those notices that do not
110 | pertain to any part of the Derivative Works, in at least one
111 | of the following places: within a NOTICE text file distributed
112 | as part of the Derivative Works; within the Source form or
113 | documentation, if provided along with the Derivative Works; or,
114 | within a display generated by the Derivative Works, if and
115 | wherever such third-party notices normally appear. The contents
116 | of the NOTICE file are for informational purposes only and
117 | do not modify the License. You may add Your own attribution
118 | notices within Derivative Works that You distribute, alongside
119 | or as an addendum to the NOTICE text from the Work, provided
120 | that such additional attribution notices cannot be construed
121 | as modifying the License.
122 |
123 | You may add Your own copyright statement to Your modifications and
124 | may provide additional or different license terms and conditions
125 | for use, reproduction, or distribution of Your modifications, or
126 | for any such Derivative Works as a whole, provided Your use,
127 | reproduction, and distribution of the Work otherwise complies with
128 | the conditions stated in this License.
129 |
130 | 5. Submission of Contributions. Unless You explicitly state otherwise,
131 | any Contribution intentionally submitted for inclusion in the Work
132 | by You to the Licensor shall be under the terms and conditions of
133 | this License, without any additional terms or conditions.
134 | Notwithstanding the above, nothing herein shall supersede or modify
135 | the terms of any separate license agreement you may have executed
136 | with Licensor regarding such Contributions.
137 |
138 | 6. Trademarks. This License does not grant permission to use the trade
139 | names, trademarks, service marks, or product names of the Licensor,
140 | except as required for reasonable and customary use in describing the
141 | origin of the Work and reproducing the content of the NOTICE file.
142 |
143 | 7. Disclaimer of Warranty. Unless required by applicable law or
144 | agreed to in writing, Licensor provides the Work (and each
145 | Contributor provides its Contributions) on an "AS IS" BASIS,
146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
147 | implied, including, without limitation, any warranties or conditions
148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
149 | PARTICULAR PURPOSE. You are solely responsible for determining the
150 | appropriateness of using or redistributing the Work and assume any
151 | risks associated with Your exercise of permissions under this License.
152 |
153 | 8. Limitation of Liability. In no event and under no legal theory,
154 | whether in tort (including negligence), contract, or otherwise,
155 | unless required by applicable law (such as deliberate and grossly
156 | negligent acts) or agreed to in writing, shall any Contributor be
157 | liable to You for damages, including any direct, indirect, special,
158 | incidental, or consequential damages of any character arising as a
159 | result of this License or out of the use or inability to use the
160 | Work (including but not limited to damages for loss of goodwill,
161 | work stoppage, computer failure or malfunction, or any and all
162 | other commercial damages or losses), even if such Contributor
163 | has been advised of the possibility of such damages.
164 |
165 | 9. Accepting Warranty or Additional Liability. While redistributing
166 | the Work or Derivative Works thereof, You may choose to offer,
167 | and charge a fee for, acceptance of support, warranty, indemnity,
168 | or other liability obligations and/or rights consistent with this
169 | License. However, in accepting such obligations, You may act only
170 | on Your own behalf and on Your sole responsibility, not on behalf
171 | of any other Contributor, and only if You agree to indemnify,
172 | defend, and hold each Contributor harmless for any liability
173 | incurred by, or claims asserted against, such Contributor by reason
174 | of your accepting any such warranty or additional liability.
175 |
176 | END OF TERMS AND CONDITIONS
177 |
178 | APPENDIX: How to apply the Apache License to your work.
179 |
180 | To apply the Apache License to your work, attach the following
181 | boilerplate notice, with the fields enclosed by brackets "[]"
182 | replaced with your own identifying information. (Don't include
183 | the brackets!) The text should be enclosed in the appropriate
184 | comment syntax for the file format. We also recommend that a
185 | file or class name and description of purpose be included on the
186 | same "printed page" as the copyright notice for easier
187 | identification within third-party archives.
188 |
189 | Copyright [yyyy] [name of copyright owner]
190 |
191 | Licensed under the Apache License, Version 2.0 (the "License");
192 | you may not use this file except in compliance with the License.
193 | You may obtain a copy of the License at
194 |
195 | http://www.apache.org/licenses/LICENSE-2.0
196 |
197 | Unless required by applicable law or agreed to in writing, software
198 | distributed under the License is distributed on an "AS IS" BASIS,
199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
200 | See the License for the specific language governing permissions and
201 | limitations under the License.
202 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 |
2 |
3 | # GNTE
4 | GNTE(Global Network Topology Emulator) is a docker-based all-in-one unstable global network emulator. It emulates functionality such as random delay and packet loss.
5 |
6 | ## Before Use
7 | Install docker
8 |
9 | ## Build and Run
10 | ### 1. build docker image
11 | Clone this repo and run `build.sh`. There should be an image named `gnte` in your docker environment.
12 |
13 | ### 2. modify network definition file
14 | Edit ```example.yaml``` to fit your requirements. The rules of this file are described in the bottom section.
15 |
16 | ### 3. generate and launch network emulator
17 | Run the following command:
18 |
19 | ```
20 | ./generate.sh
21 | ```
22 |
23 | Once all CovenantSQL testnet containers are running, you can use `docker ps -a` to see all container nodes:
24 |
25 |
26 | You can also find a graph of the network in `graph.png` under your root folder:
27 |
28 |
29 | ### 4. stop random one node or all nodes
30 | Run the following command:
31 |
32 | ```
33 | ./generate.sh stopone filter
34 | ```
35 |
36 | will stop one random node. If filter is not empty, it will stop a node whose name contains filter.
37 |
38 | And there is also a command to stop all:
39 |
40 | ```
41 | ./generate.sh stopall filter
42 | ```
43 |
44 | Also a command for restart all stopped nodes:
45 |
46 | ```
47 | ./generate.sh startall
48 | ```
49 |
50 | ### 5. run your own program in testnet
51 | Containers are referenced by group_name+ip. For example, given containers 10.250.1.2 and 10.250.8.2, you can run `docker exec -it china10.250.1.2 ping 10.250.8.2` to test the connection between these two networks.
52 |
53 | You can replace the `cmd` in the group section of yaml to run your own command.
54 | The `scripts` dir will be "volumed" to containers.
55 | `docker run --rm -it -v $DIR/scripts:/scripts` you can put your own binaries or scripts here and put your self defined `scripts/my.yaml`:
56 |
57 | ```yaml
58 | group:
59 | -
60 | name: china
61 | nodes:
62 | -
63 | ip: 10.250.1.2
64 | cmd: "cd /scripts && ./YourBin args"
65 | -
66 | ip: 10.250.1.3
67 | cmd: "cd /scripts && ./YourBin args"
68 | delay: "100ms 10ms 30%"
69 | loss: "1% 10%"
70 | -
71 | name: us
72 | nodes:
73 | -
74 | ip: 10.250.2.2
75 | cmd: "cd /scripts && ./YourBin args"
76 | -
77 | ip: 10.250.2.3
78 | cmd: "cd /scripts && ./YourBin args"
79 | delay: "1000ms 10ms 30%"
80 | loss: "1% 10%"
81 |
82 | network:
83 | -
84 | groups:
85 | - china
86 | - us
87 | delay: "200ms 10ms 1%"
88 | corrupt: "0.2%"
89 | rate: "10mbit"
90 |
91 | ```
92 |
93 | and run
94 | ```bash
95 | ./generate.sh scripts/my.yaml
96 | ```
97 |
98 | ### 6. [optional] clean network
99 | Run `./scripts/clean.sh`
100 |
101 | ## Modify Network Definition
102 | A sample network description is provided in `example.yaml`, which you can edit directly.
103 |
104 | ### sample
105 | ```yaml
106 | # Only support 10.250.0.2 ~ 10.250.254.254
107 | group:
108 | -
109 | name: china
110 | nodes:
111 | -
112 | ip: 10.250.1.2/32
113 | cmd: "ping -c3 g.cn"
114 | -
115 | ip: 10.250.2.2/32
116 | cmd: "ping -c3 g.cn"
117 | -
118 | ip: 10.250.3.2/32
119 | cmd: "ping -c3 g.cn"
120 | -
121 | ip: 10.250.4.2/32
122 | cmd: "ping -c3 g.cn"
123 | delay: "100ms 10ms 30%"
124 | loss: "1% 10%"
125 | -
126 | name: eu
127 | nodes:
128 | -
129 | ip: 10.250.5.2/32
130 | cmd: "ping -c3 g.cn"
131 | -
132 | ip: 10.250.6.2/32
133 | cmd: "ping -c3 g.cn"
134 | -
135 | ip: 10.250.7.2/32
136 | cmd: "ping -c3 g.cn"
137 | delay: "10ms 5ms 30%"
138 | loss: "1% 10%"
139 | -
140 | name: jpn
141 | nodes:
142 | -
143 | ip: 10.250.8.2/32
144 | cmd: "ping -c3 g.cn"
145 | -
146 | ip: 10.250.9.2/32
147 | cmd: "ping -c3 g.cn"
148 | delay: "100ms 10ms 30%"
149 | duplicate: "1%"
150 | rate: "100mbit"
151 |
152 | network:
153 | -
154 | groups:
155 | - china
156 | - eu
157 | delay: "200ms 10ms 1%"
158 | corrupt: "0.2%"
159 | rate: "10mbit"
160 |
161 | -
162 | groups:
163 | - china
164 | - jpn
165 | delay: "100ms 10ms 1%"
166 | rate: "10mbit"
167 |
168 | -
169 | groups:
170 | - jpn
171 | - eu
172 | delay: "30ms 5ms 1%"
173 | rate: "100mbit"
174 |
175 | ```
176 |
177 | ## Description
178 | The network definition contains two sections: group and network. Group defines ips and describes network info between them. Network describes network info between groups.
179 |
180 | ### group
181 | - **name**: unique name of the group
182 |
183 | - **nodes**: list of node in the network.
184 |
185 | #### node
186 | - **ip**: Node IP must be between "10.250.0.2 ~ 10.250.254.254" and written in CIDR format, eg. `10.250.1.2/32`.
187 |
188 | - **cmd**: Node command to run. Blocking or Non-blocking are both ok.
189 |
190 | - **network params**:
191 | The following 6 tc network limit parameters are supported:
192 | ```
193 | delay
194 | loss
195 | duplicate
196 | corrupt
197 | reorder
198 | rate
199 | ```
200 | The values of these parameters are exactly like those of the `tc` command.
201 |
202 | * `delay: "100ms 10ms 30%"` means 100ms delay in network and 30% packets +-10ms.
203 | * `duplicate: "1%"` means 1% packets is duplicated.
204 | * `rate: "100mbit"` means network transmit rate is 100mbit.
205 | * `corrupt: "0.2%"` means 0.2% packets are randomly modified.
206 |
207 | ### network
208 | - **groups**: list of group names
209 |
210 | - **network params**: same as group
211 |
--------------------------------------------------------------------------------
/build.sh:
--------------------------------------------------------------------------------
#!/bin/sh

# Build the GNTE docker image (tagged "gnte") from the local Dockerfile.
docker build -t gnte .
4 |
--------------------------------------------------------------------------------
/doc/tc/tc.md:
--------------------------------------------------------------------------------
1 | ## Traffic Control Usage
2 | ---
3 |
4 | #### Show current settings
5 |
6 | tc qdisc show dev eth0
7 |
8 | ---
9 |
10 | #### Del all settings
11 |
12 | tc qdisc del dev eth0 root
13 |
14 | ---
15 |
16 | #### Use netem: delay, loss, corrupt, duplicate
17 |
18 | tc qdisc add dev eth0 root netem delay 100ms 3ms
19 |
20 | tc qdisc add dev eth0 root netem delay 100ms 10ms distribution normal
21 |
22 | tc qdisc add dev eth0 root netem loss 5%
23 |
24 | tc qdisc change dev eth0 root netem corrupt 5% duplicate 1%
25 |
26 | ---
27 |
28 | #### Use tbf
29 |
30 | tc qdisc add dev eth0 root tbf rate 1mbit burst 32kbit latency 400ms
31 |
32 | > * tbf: use the token buffer filter to manipulate traffic rates
33 | > * rate: sustained maximum rate
34 | > * burst: maximum allowed burst
35 | > * latency: packets with higher latency get dropped
36 |
37 | tc qdisc add dev eth0 root tbf rate 1mbit burst 10kb latency 70ms peakrate 2mbit minburst 1540
38 |
39 | ---
40 |
41 | #### References:
42 |
43 | [1] https://netbeez.net/blog/how-to-use-the-linux-traffic-control/
44 | [2] https://www.cyberciti.biz/faq/linux-traffic-shaping-using-tc-to-control-http-traffic/
45 | [3] http://lartc.org/howto/lartc.qdisc.html
46 | [4] http://manpages.ubuntu.com/manpages/xenial/man8/tc.8.html
47 | [5] https://unix.stackexchange.com/questions/100785/bucket-size-in-tbf
48 |
--------------------------------------------------------------------------------
/doc/tc/tc_demo.sh:
--------------------------------------------------------------------------------
# Demo: per-peer traffic shaping with an HTB class tree and netem/tbf leaves.

# Wipe any existing qdisc configuration on eth0.
tc qdisc del dev eth0 root

#tc qdisc add dev eth0 root handle 1: htb default 10
#tc class add dev eth0 parent 1: classid 1:30 htb rate 10mbps
#tc class add dev eth0 parent 1: classid 1:1 htb rate 10mbps
#tc class add dev eth0 parent 1: classid 1:2 htb rate 10mbps
#tc filter add dev eth0 protocol ip parent 1:0 prio 1 u32 match ip dst 172.17.0.3/0 flowid 1:1
#tc filter add dev eth0 protocol ip parent 1:0 prio 1 u32 match ip src 172.17.0.3/0 flowid 1:2

# Root HTB qdisc; unclassified traffic goes to class 1:10.
tc qdisc add dev eth0 root handle 1: htb default 10

# One parent class (1:1) with four child classes, all rated 10mbps.
tc class add dev eth0 parent 1: classid 1:1 htb rate 10mbps
tc class add dev eth0 parent 1:1 classid 1:10 htb rate 10mbps
tc class add dev eth0 parent 1:1 classid 1:20 htb rate 10mbps
tc class add dev eth0 parent 1:1 classid 1:30 htb rate 10mbps
tc class add dev eth0 parent 1:1 classid 1:40 htb rate 10mbps

# Leaf qdiscs: delay on 1:10, 1mbit/2mbit tbf limits on 1:20/1:30,
# stochastic fair queuing on 1:40.
tc qdisc add dev eth0 parent 1:10 handle 10: netem delay 100ms 5ms
tc qdisc add dev eth0 parent 1:20 handle 20: tbf rate 1mbit burst 32kbit latency 100ms
tc qdisc add dev eth0 parent 1:30 handle 30: tbf rate 2mbit burst 32kbit latency 100ms
tc qdisc add dev eth0 parent 1:40 handle 40: sfq

# Steer traffic to/from 172.17.0.3 into class 1:20 (1mbit)...
tc filter add dev eth0 protocol ip parent 1:0 prio 1 u32 match ip src 172.17.0.3/32 flowid 1:20
tc filter add dev eth0 protocol ip parent 1:0 prio 1 u32 match ip dst 172.17.0.3/32 flowid 1:20

# ...and traffic to/from 172.17.0.4 into class 1:30 (2mbit).
tc filter add dev eth0 protocol ip parent 1:0 prio 1 u32 match ip src 172.17.0.4/32 flowid 1:30
tc filter add dev eth0 protocol ip parent 1:0 prio 1 u32 match ip dst 172.17.0.4/32 flowid 1:30
--------------------------------------------------------------------------------
/doc/tools/iftop.md:
--------------------------------------------------------------------------------
1 | ## iftop Usage
2 | ---
3 |
4 | iftop -nNBP
5 |
6 | -n don't do hostname lookups
7 | -N don't convert port numbers to services
8 | -B Display bandwidth in bytes
9 | -P show ports as well as hosts
10 |
--------------------------------------------------------------------------------
/doc/tools/netperf.md:
--------------------------------------------------------------------------------
1 | ## Netperf Usage
2 | ---
3 | *Run First*
4 |
5 | Server
6 |
7 | netserver -4
8 |
9 | ---
10 |
11 | netperf -H 172.17.0.2 -l 10 -f M
12 |
13 | -H host
14 | -l test duration (>0 secs) (<0 bytes|trans)
15 | -f output units: 'M' means 10^6 Bytes
16 |
17 | ---
18 |
19 | netperf -H 172.17.0.2 -l 5 -f g -- -m 1024 -M 1024 -s 1024
20 |
21 | -f output units: 'g' means 10^9 bps
22 | -m send size for both side
23 | -M recv size for both side
24 | -s send & recv socket buff size for *local* side
25 | -S send & recv socket buff size for *remote* side
26 |
--------------------------------------------------------------------------------
/example.yaml:
--------------------------------------------------------------------------------
1 | # Only support 10.250.0.2 ~ 10.250.254.254
2 | group:
3 | -
4 | name: china
5 | nodes:
6 | -
7 | ip: 10.250.1.2/32
8 | cmd: "ping -c3 g.cn"
9 | -
10 | ip: 10.250.2.2/32
11 | cmd: "ping -c3 g.cn"
12 | -
13 | ip: 10.250.3.2/32
14 | cmd: "ping -c3 g.cn"
15 | -
16 | ip: 10.250.4.2/32
17 | cmd: "ping -c3 g.cn"
18 | delay: "100ms 10ms 30%"
19 | loss: "1% 10%"
20 | -
21 | name: eu
22 | nodes:
23 | -
24 | ip: 10.250.5.2/32
25 | cmd: "ping -c3 g.cn"
26 | -
27 | ip: 10.250.6.2/32
28 | cmd: "ping -c3 g.cn"
29 | -
30 | ip: 10.250.7.2/32
31 | cmd: "ping -c3 g.cn"
32 | delay: "10ms 5ms 30%"
33 | loss: "1% 10%"
34 | -
35 | name: jpn
36 | nodes:
37 | -
38 | ip: 10.250.8.2/32
39 | cmd: "ping -c3 g.cn"
40 | -
41 | ip: 10.250.9.2/32
42 | cmd: "ping -c3 g.cn"
43 | delay: "100ms 10ms 30%"
44 | duplicate: "1%"
45 | rate: "100mbit"
46 |
47 | network:
48 | -
49 | groups:
50 | - china
51 | - eu
52 | delay: "200ms 10ms 1%"
53 | corrupt: "0.2%"
54 | rate: "10mbit"
55 |
56 | -
57 | groups:
58 | - china
59 | - jpn
60 | delay: "100ms 10ms 1%"
61 | rate: "10mbit"
62 |
63 | -
64 | groups:
65 | - jpn
66 | - eu
67 | delay: "30ms 5ms 1%"
68 | rate: "100mbit"
69 |
70 |
--------------------------------------------------------------------------------
/generate.sh:
--------------------------------------------------------------------------------
#!/bin/bash

param=$1   # subcommand: stopone | stopall | startall, or a yaml path / empty for a full generate run
filter=$2  # optional container-name substring used by stopone/stopall
5 |
# generate: clean any previous emulated network, rebuild the generator
# inside a golang container, then launch the generated network.
generate() {

	SRC="/go/src/github.com/CovenantSQL/GNTE"
	export DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
	CLEAN=$DIR/scripts/clean.sh

	# Tear down the previous run (clean.sh is generated by a prior run).
	if [ -f "$CLEAN" ];then
		"$CLEAN"
		rm -rf "$CLEAN"
	fi

	docker pull golang:1.11-stretch
	# Run the same image that was pulled: the old command ran the bare
	# "golang" tag, so the pinned 1.11-stretch image was never used.
	docker run --rm -v "$DIR":"$SRC" golang:1.11-stretch "$SRC/scripts/gobuild.sh" $param

	"$DIR/scripts/launch.sh"
}
22 |
# get_containers: print the names of containers on the testnet network,
# optionally restricted to names containing $filter.
get_containers() {
	# $filter must be quoted: an unquoted [ -n $filter ] collapses to
	# [ -n ], which is always true, so the name-filter branch was taken
	# even when no filter was supplied.
	if [ -n "$filter" ]; then
		containers="$(docker ps --format '{{.Names}}' --filter 'network=CovenantSQL_testnet' --filter name="$filter")"
	else
		containers="$(docker ps --format '{{.Names}}' --filter 'network=CovenantSQL_testnet')"
	fi
	echo $containers
}
31 |
# stopone: stop one randomly chosen testnet container (honouring $filter).
stopone() {
	containers=`get_containers`
	for i in $containers; do
		array=("${array[@]}" $i)
	done
	len=${#array[@]}
	if [ 0 -eq $len ]; then
		return
	fi
	# Use bash's $RANDOM: the previous `date +%s` source returned the
	# same index for every call within the same second, so repeated
	# invocations kept stopping the same node.
	((rand=RANDOM%len))
	echo "Stopping ${array[$rand]}"
	docker stop ${array[$rand]}
}
46 |
# stopall: stop every testnet container (honouring $filter).
stopall() {
	for name in $(get_containers); do
		echo "Stopping $name"
		docker stop "$name"
	done
}
54 |
# startall: restart every testnet container that was previously stopped.
startall() {
	containers="$(docker ps --format '{{.Names}}' --filter 'network=CovenantSQL_testnet' --filter status=exited)"

	for name in $containers; do
		echo "Starting $name"
		docker start "$name"
	done
}
63 |
# Dispatch on the first CLI argument; anything unrecognised (no argument,
# or a yaml path) falls through to a full generate-and-launch run.
case $param in
"stopone")
stopone
;;
'stopall')
stopall
;;
'startall')
startall
;;
*)
echo "Generate GNTE and running"
generate
;;
esac
79 |
--------------------------------------------------------------------------------
/logo/container_node.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/CovenantSQL/GNTE/50eb300fdf61ade6999a44db25c64640f58e35cc/logo/container_node.png
--------------------------------------------------------------------------------
/logo/graph.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/CovenantSQL/GNTE/50eb300fdf61ade6999a44db25c64640f58e35cc/logo/graph.png
--------------------------------------------------------------------------------
/logo/logo.jpeg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/CovenantSQL/GNTE/50eb300fdf61ade6999a44db25c64640f58e35cc/logo/logo.jpeg
--------------------------------------------------------------------------------
/main.go:
--------------------------------------------------------------------------------
1 | package main
2 |
3 | import (
4 | "fmt"
5 | "io/ioutil"
6 | "os"
7 | "strconv"
8 | "strings"
9 |
10 | yaml "gopkg.in/yaml.v2"
11 | )
12 |
// tcParams groups the six tc(8) netem/tbf limit parameters supported by
// the yaml schema. Values are passed through verbatim to tc, e.g.
// Delay "100ms 10ms 30%" or Rate "100mbit".
// NOTE(review): not referenced by the visible code — group and network
// repeat these fields instead of embedding this type; presumably kept
// for documentation/symmetry — confirm before removing.
type tcParams struct {
	Delay     string `yaml:"delay"`
	Loss      string `yaml:"loss"`
	Duplicate string `yaml:"duplicate"`
	Rate      string `yaml:"rate"`
	Corrupt   string `yaml:"corrupt"`
	Reorder   string `yaml:"reorder"`
}
21 |
// node is one emulated host: the container's address (CIDR form, e.g.
// "10.250.1.2/32") and the command run inside the container.
type node struct {
	IP  string `yaml:"ip"`
	CMD string `yaml:"cmd"`
}
26 |
// group is a named set of nodes plus the tc limits applied to traffic
// between members of the same group. The limit fields mirror tcParams
// and are passed verbatim to tc.
type group struct {
	Name  string `yaml:"name"`
	Nodes []node `yaml:"nodes"`

	Delay     string `yaml:"delay"`
	Loss      string `yaml:"loss"`
	Duplicate string `yaml:"duplicate"`
	Rate      string `yaml:"rate"`
	Corrupt   string `yaml:"corrupt"`
	Reorder   string `yaml:"reorder"`
}
38 |
// network describes the tc limits applied to traffic between the listed
// groups (inter-group links). The limit fields mirror tcParams and are
// passed verbatim to tc.
type network struct {
	Groups []string `yaml:"groups"`

	Delay     string `yaml:"delay"`
	Loss      string `yaml:"loss"`
	Duplicate string `yaml:"duplicate"`
	Rate      string `yaml:"rate"`
	Corrupt   string `yaml:"corrupt"`
	Reorder   string `yaml:"reorder"`
}
49 |
// root is the top-level yaml document: the "group" and "network"
// sections of the network definition file (see example.yaml).
type root struct {
	Group   []group   `yaml:"group"`
	Network []network `yaml:"network"`
}
54 |
// keyInArray reports whether key occurs in array.
// A nil or empty slice yields false.
func keyInArray(key string, array []string) bool {
	found := false
	for _, candidate := range array {
		if candidate == key {
			found = true
			break
		}
	}
	return found
}
63 |
// paramsCount returns the number of whitespace-separated tokens in str.
// It is used to decide whether a tc value carries extra arguments, e.g.
// a delay of "100ms 10ms 30%" (3 params) needs "distribution normal".
//
// strings.Fields is used so runs of whitespace between tokens do not
// produce phantom empty parameters: the previous Split-on-single-space
// version counted "a  b" as 3. (For an all-whitespace string this now
// returns 0 instead of 1; callers only compare the result with > 1 on
// non-empty values, so they are unaffected.)
func paramsCount(str string) int {
	return len(strings.Fields(str))
}
70 |
// processOneNode builds the complete shell script (as a slice of lines) that
// configures traffic control on eth0 for one container node, then runs the
// node's command and blocks forever so the container stays alive.
//
// Parameters:
//   node      - the node to generate rules for; only IP and CMD are read.
//   groupName - name of the group this node belongs to.
//   r         - the whole parsed yaml topology (all groups and networks).
//   nodemap   - set of node pairs whose tc filter has already been emitted,
//               keyed by the concatenation of the two node IP strings (both
//               orders are inserted), shared across calls so each pair is
//               filtered only once.
//
// Returns the script lines, starting with the shebang and tc initialization
// and ending with the node command plus a blocking tail.
func processOneNode(node *node, groupName string, r root, nodemap map[string]bool) []string {

	var tcRules []string

	//add init rules
	tcRules = append(tcRules, "#!/bin/sh\n")
	tcRules = append(tcRules, "#"+node.IP)
	// Reset eth0, then install an HTB root qdisc; traffic not matched by any
	// filter falls through to class 1:2 (10gbps, effectively unshaped, with
	// stochastic fair queueing).
	tcRules = append(tcRules, "tc qdisc del dev eth0 root")
	tcRules = append(tcRules, "tc qdisc add dev eth0 root handle 1: htb default 2")
	tcRules = append(tcRules, "tc class add dev eth0 parent 1: classid 1:2 htb rate 10gbps")
	tcRules = append(tcRules, "tc qdisc add dev eth0 parent 1:2 handle 2: sfq")

	// Every group (the node's own and each foreign one) gets its own HTB
	// class / netem qdisc pair, numbered 1:10, 1:11, ... in yaml order.
	tcIndex := 10
	//3. build tc tree for this node
	for _, group := range r.Group {
		if group.Name == groupName { //local group
			rule := "tc class add dev eth0 parent 1: classid 1:" + strconv.Itoa(tcIndex) + " htb"
			if group.Rate != "" {
				rule = rule + " rate " + group.Rate
			} else {
				rule = rule + " rate 10gbps"
			}
			tcRules = append(tcRules, rule)

			// 3.1 parse other node in same group
			rule = "tc qdisc add dev eth0 parent 1:" + strconv.Itoa(tcIndex) + " handle " + strconv.Itoa(tcIndex) + ": netem"
			if group.Delay != "" {
				// Two fields (e.g. "100ms 10ms") mean a jitter value follows
				// the mean, which netem spreads with a normal distribution.
				if paramsCount(group.Delay) > 1 {
					rule = rule + " delay " + group.Delay + " distribution normal"
				} else {
					rule = rule + " delay " + group.Delay
				}
			}
			if group.Corrupt != "" {
				rule = rule + " corrupt " + group.Corrupt
			}
			if group.Duplicate != "" {
				rule = rule + " duplicate " + group.Duplicate
			}
			if group.Loss != "" {
				rule = rule + " loss " + group.Loss
			}
			if group.Reorder != "" {
				rule = rule + " reorder " + group.Reorder
			}
			tcRules = append(tcRules, rule)
			for _, otherNode := range group.Nodes {
				if otherNode.IP == node.IP {
					continue
				}

				//find if has pair in node-othernode
				if pair, ok := nodemap[node.IP+otherNode.IP]; ok && pair {
					continue
				}

				//3.2 build tc leaf for inner-group
				rule = "tc filter add dev eth0 protocol ip parent 1:0 prio 1 u32 match ip dst " + otherNode.IP + " flowid 1:" + strconv.Itoa(tcIndex)
				tcRules = append(tcRules, rule)

				//set pair in node-othernode
				nodemap[node.IP+otherNode.IP] = true
				nodemap[otherNode.IP+node.IP] = true
			}

		} else { //other group
			//3.3 parse node in other group
			// find network first: only emit a class/qdisc when some network
			// connects this node's group with the foreign group.
			for _, network := range r.Network {
				if keyInArray(group.Name, network.Groups) && keyInArray(groupName, network.Groups) {
					rule := "tc class add dev eth0 parent 1: classid 1:" + strconv.Itoa(tcIndex) + " htb"
					if network.Rate != "" {
						rule = rule + " rate " + network.Rate
					} else {
						rule = rule + " rate 10gbps"
					}
					tcRules = append(tcRules, rule)

					rule = "tc qdisc add dev eth0 parent 1:" + strconv.Itoa(tcIndex) + " handle " + strconv.Itoa(tcIndex) + ": netem"
					if network.Delay != "" {
						if paramsCount(network.Delay) > 1 {
							rule = rule + " delay " + network.Delay + " distribution normal"
						} else {
							rule = rule + " delay " + network.Delay
						}
					}
					if network.Corrupt != "" {
						rule = rule + " corrupt " + network.Corrupt
					}
					if network.Duplicate != "" {
						rule = rule + " duplicate " + network.Duplicate
					}
					if network.Loss != "" {
						rule = rule + " loss " + network.Loss
					}
					if network.Reorder != "" {
						rule = rule + " reorder " + network.Reorder
					}
					tcRules = append(tcRules, rule)
				}
			}

			//3.4 build tc leaf for group-connection
			// NOTE(review): filters are emitted for every foreign node even
			// when no network connects the groups above — presumably such
			// traffic then just hits the default class; confirm intended.
			for _, otherNode := range group.Nodes {
				//find if has pair in node-othernode
				if pair, ok := nodemap[node.IP+otherNode.IP]; ok && pair {
					continue
				}

				rule := "tc filter add dev eth0 protocol ip parent 1:0 prio 1 u32 match ip dst " + otherNode.IP + " flowid 1:" + strconv.Itoa(tcIndex)
				tcRules = append(tcRules, rule)

				//set pair in node-othernode
				nodemap[node.IP+otherNode.IP] = true
				nodemap[otherNode.IP+node.IP] = true
			}
		}

		// Next group gets the next class id, whether or not rules were emitted.
		tcIndex = tcIndex + 1
	}

	// Trailer: install the shared shell prompt, run the node command, then
	// block forever so the docker container does not exit.
	tcRules = append(tcRules, "cat /scripts/bashrc >> ~/.bashrc")
	tcRules = append(tcRules, ". ~/.bashrc")
	tcRules = append(tcRules, node.CMD)
	tcRules = append(tcRules, "tail -f /dev/null")
	return tcRules
}
198 |
// printRules prints every rule on its own line, framed by one blank line
// before and after.
func printRules(rules []string) {
	fmt.Println()
	for i := range rules {
		fmt.Println(rules[i])
	}
	fmt.Println()
}
206 |
207 | func printTcScript(rules []string, node *node, groupName string) {
208 | ip := strings.Split(node.IP, "/")[0]
209 | fmt.Println(ip)
210 |
211 | var data []byte
212 | rulestr := strings.Join(rules, "\n")
213 | data = []byte(rulestr + "\n")
214 |
215 | // fmt.Println(rulestr)
216 | err := ioutil.WriteFile("scripts/"+groupName+ip+".sh", data, 0777)
217 | if err != nil {
218 | fmt.Println(err)
219 | }
220 | }
221 |
222 | func printDockerScript(r root) {
223 | launchFile, err := os.OpenFile(
224 | "scripts/launch.sh",
225 | os.O_WRONLY|os.O_CREATE|os.O_TRUNC,
226 | 0777,
227 | )
228 | if err != nil {
229 | fmt.Println(err)
230 | }
231 | defer launchFile.Close()
232 |
233 | cleanFile, err := os.OpenFile(
234 | "scripts/clean.sh",
235 | os.O_WRONLY|os.O_CREATE|os.O_TRUNC,
236 | 0777,
237 | )
238 | if err != nil {
239 | fmt.Println(err)
240 | }
241 | defer cleanFile.Close()
242 |
243 | var launchFileData, cleanFileData []string
244 | launchFileData = append(launchFileData, "#!/bin/bash -x\n")
245 | launchFileData = append(launchFileData, "docker network create --subnet=10.250.0.1/16 CovenantSQL_testnet")
246 | cleanFileData = append(cleanFileData, "#!/bin/bash -x\n")
247 |
248 | for _, group := range r.Group {
249 | for _, node := range group.Nodes {
250 | ip := strings.Split(node.IP, "/")[0]
251 | launchFileData = append(launchFileData, "echo starting "+group.Name+ip)
252 | launchFileData = append(launchFileData, "docker run -d --net CovenantSQL_testnet --ip "+ip+" -h "+group.Name+ip+
253 | " -v $DIR/scripts:/scripts --cap-add=NET_ADMIN --name "+group.Name+ip+" gnte /scripts/"+group.Name+ip+".sh")
254 |
255 | cleanFileData = append(cleanFileData, "docker rm -f "+group.Name+ip)
256 | }
257 | }
258 | cleanFileData = append(cleanFileData, "docker network rm CovenantSQL_testnet")
259 |
260 | // run dot convertion
261 | // dot -Tpng graph.gv -o graph.png
262 | launchFileData = append(launchFileData, "docker run --rm -v $DIR/scripts:/scripts gnte dot -Tpng scripts/graph.gv -o scripts/graph.png")
263 | launchFileData = append(launchFileData, "mv -f $DIR/scripts/graph.png $DIR/graph.png")
264 |
265 | launchFileByte := []byte(strings.Join(launchFileData, "\n") + "\n")
266 | _, err = launchFile.Write(launchFileByte)
267 | if err != nil {
268 | fmt.Println(err)
269 | }
270 |
271 | cleanFileByte := []byte(strings.Join(cleanFileData, "\n") + "\n")
272 | _, err = cleanFile.Write(cleanFileByte)
273 | if err != nil {
274 | fmt.Println(err)
275 | }
276 | }
277 |
278 | func printGraphScript(r root) {
279 |
280 | gvFile, err := os.OpenFile(
281 | "scripts/graph.gv",
282 | os.O_WRONLY|os.O_CREATE|os.O_TRUNC,
283 | 0666,
284 | )
285 | if err != nil {
286 | fmt.Println(err)
287 | }
288 | defer gvFile.Close()
289 |
290 | var gvFileData []string
291 | gvFileData = append(gvFileData, "digraph G {")
292 | gvFileData = append(gvFileData, " compound=true;")
293 |
294 | for _, group := range r.Group {
295 | gvFileData = append(gvFileData, " subgraph cluster_"+group.Name+" {")
296 | gvFileData = append(gvFileData, " label = "+group.Name+";")
297 | gvFileData = append(gvFileData, " style = rounded;")
298 | for i := 0; i < len(group.Nodes); i++ {
299 | for j := i + 1; j < len(group.Nodes); j++ {
300 | //"10.2.1.1/16" -> "10.3.1.1/20" [arrowhead=none, arrowtail=none, label="delay\n 100ms ±10ms 30%"];
301 | arrowinfo := " \"" + group.Nodes[i].IP + "\" -> \"" + group.Nodes[j].IP +
302 | "\" [arrowhead=none, arrowtail=none, label=\""
303 | if group.Delay != "" {
304 | arrowinfo = arrowinfo + "delay " + group.Delay + `\n`
305 | }
306 | if group.Corrupt != "" {
307 | arrowinfo = arrowinfo + "corrupt" + group.Corrupt + `\n`
308 | }
309 | if group.Duplicate != "" {
310 | arrowinfo = arrowinfo + "duplicate " + group.Duplicate + `\n`
311 | }
312 | if group.Loss != "" {
313 | arrowinfo = arrowinfo + "loss " + group.Loss + `\n`
314 | }
315 | if group.Reorder != "" {
316 | arrowinfo = arrowinfo + "reorder " + group.Reorder + `\n`
317 | }
318 |
319 | if group.Rate != "" {
320 | arrowinfo = arrowinfo + "rate " + group.Rate + `\n`
321 | }
322 | arrowinfo = arrowinfo + "\"];"
323 | gvFileData = append(gvFileData, arrowinfo)
324 | }
325 | }
326 | gvFileData = append(gvFileData, " }")
327 | }
328 |
329 | for _, network := range r.Network {
330 | //parse two group pair
331 | for i := 0; i < len(network.Groups); i++ {
332 | for j := i + 1; j < len(network.Groups); j++ {
333 | var groupNodei, groupNodej string
334 | // get group ip
335 | for _, group := range r.Group {
336 | if group.Name == network.Groups[i] {
337 | groupNodei = group.Nodes[0].IP
338 | } else if group.Name == network.Groups[j] {
339 | groupNodej = group.Nodes[0].IP
340 | }
341 | }
342 | arrowinfo := " \"" + groupNodei + "\" -> \"" + groupNodej +
343 | "\"\n [ltail=cluster_" + network.Groups[i] + ", lhead=cluster_" + network.Groups[j] +
344 | ", arrowhead=none, arrowtail=none,\n label=\""
345 | if network.Delay != "" {
346 | arrowinfo = arrowinfo + "delay " + network.Delay + `\n`
347 | }
348 | if network.Corrupt != "" {
349 | arrowinfo = arrowinfo + "corrupt" + network.Corrupt + `\n`
350 | }
351 | if network.Duplicate != "" {
352 | arrowinfo = arrowinfo + "duplicate " + network.Duplicate + `\n`
353 | }
354 | if network.Loss != "" {
355 | arrowinfo = arrowinfo + "loss " + network.Loss + `\n`
356 | }
357 | if network.Reorder != "" {
358 | arrowinfo = arrowinfo + "reorder " + network.Reorder + `\n`
359 | }
360 |
361 | if network.Rate != "" {
362 | arrowinfo = arrowinfo + "rate " + network.Rate + `\n`
363 | }
364 | arrowinfo = arrowinfo + "\"];"
365 |
366 | gvFileData = append(gvFileData, arrowinfo)
367 | }
368 | }
369 | }
370 |
371 | gvFileData = append(gvFileData, "}")
372 | gvFileByte := []byte(strings.Join(gvFileData, "\n") + "\n")
373 | _, err = gvFile.Write(gvFileByte)
374 | if err != nil {
375 | fmt.Println(err)
376 | }
377 | }
378 |
379 | /*
380 | 1. read yaml from file
381 | 2. select one node
382 | 3. build tc tree for this node
383 | 3.1 parse other node in same group
384 | 3.2 build tc leaf for inner-group
385 | 3.3 parse node in other group
386 | 3.4 build tc leaf for group-connection
387 | 4. print tc tree
388 | */
389 | func main() {
390 | r := root{}
391 | //TODO 1. read yaml from specific file
392 | configPath := ""
393 |
394 | if len(os.Args) > 1 {
395 | configPath = os.Args[1]
396 | } else {
397 | configPath = "example.yaml"
398 | }
399 |
400 | data, err := ioutil.ReadFile(configPath)
401 | if err != nil {
402 | fmt.Println(err)
403 | }
404 |
405 | err = yaml.Unmarshal(data, &r)
406 | if err != nil {
407 | fmt.Println(err)
408 | }
409 |
410 | nodemap := make(map[string]bool)
411 | //2. select one node
412 | for _, group := range r.Group {
413 | for _, node := range group.Nodes {
414 | tcRules := processOneNode(&node, group.Name, r, nodemap)
415 | //4. print tc tree
416 | printTcScript(tcRules, &node, group.Name)
417 | }
418 | }
419 | printDockerScript(r)
420 |
421 | printGraphScript(r)
422 | }
423 |
--------------------------------------------------------------------------------
/scripts/bashrc:
--------------------------------------------------------------------------------
1 | PS1="\$([[ \$? != 0 ]] && echo \"\[\033[1;37m\][\[\033[1;31m\]X\[\033[1;37m\]]\")\[\033[1;36m\]\u\[\033[1;32m\]@\[\033[1;34m\]\H\[\033[1;31m\]:\[\033[1;35m\]\w \[\033[1;36m\]\$(/bin/ls -1 | /usr/bin/wc -l | /bin/sed \"s: ::g\")\[\033[1;33m\]> \[\033[0m\]"
2 |
--------------------------------------------------------------------------------
/scripts/gobuild.sh:
--------------------------------------------------------------------------------
#!/bin/sh
# Run the generator from inside the GOPATH checkout, forwarding all arguments.
# Fixes: abort if the cd fails instead of running in the wrong directory, and
# use "$@" so arguments containing spaces are forwarded intact ($* re-splits).
cd /go/src/github.com/CovenantSQL/GNTE || exit 1
go run main.go "$@"
4 |
--------------------------------------------------------------------------------
/scripts/show_eth0.sh:
--------------------------------------------------------------------------------
#!/bin/sh
# Show tc qdisc statistics for eth0; pass -w to refresh every second.
# Fix: quote $1 — unquoted, [ -z $1 ] breaks (or misbehaves) when the
# argument contains spaces, and [ $1 = '-w' ] errors out when $1 is empty
# under shells that reach it.

if [ -z "$1" ]
then
    tc -s qdisc show dev eth0
elif [ "$1" = '-w' ]
then
    watch -n 1 tc -s qdisc show dev eth0
fi
10 |
--------------------------------------------------------------------------------
/vendor/gopkg.in/yaml.v2/.travis.yml:
--------------------------------------------------------------------------------
1 | language: go
2 |
3 | go:
4 | - 1.4
5 | - 1.5
6 | - 1.6
7 | - 1.7
8 | - 1.8
9 | - 1.9
10 | - tip
11 |
12 | go_import_path: gopkg.in/yaml.v2
13 |
--------------------------------------------------------------------------------
/vendor/gopkg.in/yaml.v2/LICENSE:
--------------------------------------------------------------------------------
1 | Apache License
2 | Version 2.0, January 2004
3 | http://www.apache.org/licenses/
4 |
5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
6 |
7 | 1. Definitions.
8 |
9 | "License" shall mean the terms and conditions for use, reproduction,
10 | and distribution as defined by Sections 1 through 9 of this document.
11 |
12 | "Licensor" shall mean the copyright owner or entity authorized by
13 | the copyright owner that is granting the License.
14 |
15 | "Legal Entity" shall mean the union of the acting entity and all
16 | other entities that control, are controlled by, or are under common
17 | control with that entity. For the purposes of this definition,
18 | "control" means (i) the power, direct or indirect, to cause the
19 | direction or management of such entity, whether by contract or
20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the
21 | outstanding shares, or (iii) beneficial ownership of such entity.
22 |
23 | "You" (or "Your") shall mean an individual or Legal Entity
24 | exercising permissions granted by this License.
25 |
26 | "Source" form shall mean the preferred form for making modifications,
27 | including but not limited to software source code, documentation
28 | source, and configuration files.
29 |
30 | "Object" form shall mean any form resulting from mechanical
31 | transformation or translation of a Source form, including but
32 | not limited to compiled object code, generated documentation,
33 | and conversions to other media types.
34 |
35 | "Work" shall mean the work of authorship, whether in Source or
36 | Object form, made available under the License, as indicated by a
37 | copyright notice that is included in or attached to the work
38 | (an example is provided in the Appendix below).
39 |
40 | "Derivative Works" shall mean any work, whether in Source or Object
41 | form, that is based on (or derived from) the Work and for which the
42 | editorial revisions, annotations, elaborations, or other modifications
43 | represent, as a whole, an original work of authorship. For the purposes
44 | of this License, Derivative Works shall not include works that remain
45 | separable from, or merely link (or bind by name) to the interfaces of,
46 | the Work and Derivative Works thereof.
47 |
48 | "Contribution" shall mean any work of authorship, including
49 | the original version of the Work and any modifications or additions
50 | to that Work or Derivative Works thereof, that is intentionally
51 | submitted to Licensor for inclusion in the Work by the copyright owner
52 | or by an individual or Legal Entity authorized to submit on behalf of
53 | the copyright owner. For the purposes of this definition, "submitted"
54 | means any form of electronic, verbal, or written communication sent
55 | to the Licensor or its representatives, including but not limited to
56 | communication on electronic mailing lists, source code control systems,
57 | and issue tracking systems that are managed by, or on behalf of, the
58 | Licensor for the purpose of discussing and improving the Work, but
59 | excluding communication that is conspicuously marked or otherwise
60 | designated in writing by the copyright owner as "Not a Contribution."
61 |
62 | "Contributor" shall mean Licensor and any individual or Legal Entity
63 | on behalf of whom a Contribution has been received by Licensor and
64 | subsequently incorporated within the Work.
65 |
66 | 2. Grant of Copyright License. Subject to the terms and conditions of
67 | this License, each Contributor hereby grants to You a perpetual,
68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable
69 | copyright license to reproduce, prepare Derivative Works of,
70 | publicly display, publicly perform, sublicense, and distribute the
71 | Work and such Derivative Works in Source or Object form.
72 |
73 | 3. Grant of Patent License. Subject to the terms and conditions of
74 | this License, each Contributor hereby grants to You a perpetual,
75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable
76 | (except as stated in this section) patent license to make, have made,
77 | use, offer to sell, sell, import, and otherwise transfer the Work,
78 | where such license applies only to those patent claims licensable
79 | by such Contributor that are necessarily infringed by their
80 | Contribution(s) alone or by combination of their Contribution(s)
81 | with the Work to which such Contribution(s) was submitted. If You
82 | institute patent litigation against any entity (including a
83 | cross-claim or counterclaim in a lawsuit) alleging that the Work
84 | or a Contribution incorporated within the Work constitutes direct
85 | or contributory patent infringement, then any patent licenses
86 | granted to You under this License for that Work shall terminate
87 | as of the date such litigation is filed.
88 |
89 | 4. Redistribution. You may reproduce and distribute copies of the
90 | Work or Derivative Works thereof in any medium, with or without
91 | modifications, and in Source or Object form, provided that You
92 | meet the following conditions:
93 |
94 | (a) You must give any other recipients of the Work or
95 | Derivative Works a copy of this License; and
96 |
97 | (b) You must cause any modified files to carry prominent notices
98 | stating that You changed the files; and
99 |
100 | (c) You must retain, in the Source form of any Derivative Works
101 | that You distribute, all copyright, patent, trademark, and
102 | attribution notices from the Source form of the Work,
103 | excluding those notices that do not pertain to any part of
104 | the Derivative Works; and
105 |
106 | (d) If the Work includes a "NOTICE" text file as part of its
107 | distribution, then any Derivative Works that You distribute must
108 | include a readable copy of the attribution notices contained
109 | within such NOTICE file, excluding those notices that do not
110 | pertain to any part of the Derivative Works, in at least one
111 | of the following places: within a NOTICE text file distributed
112 | as part of the Derivative Works; within the Source form or
113 | documentation, if provided along with the Derivative Works; or,
114 | within a display generated by the Derivative Works, if and
115 | wherever such third-party notices normally appear. The contents
116 | of the NOTICE file are for informational purposes only and
117 | do not modify the License. You may add Your own attribution
118 | notices within Derivative Works that You distribute, alongside
119 | or as an addendum to the NOTICE text from the Work, provided
120 | that such additional attribution notices cannot be construed
121 | as modifying the License.
122 |
123 | You may add Your own copyright statement to Your modifications and
124 | may provide additional or different license terms and conditions
125 | for use, reproduction, or distribution of Your modifications, or
126 | for any such Derivative Works as a whole, provided Your use,
127 | reproduction, and distribution of the Work otherwise complies with
128 | the conditions stated in this License.
129 |
130 | 5. Submission of Contributions. Unless You explicitly state otherwise,
131 | any Contribution intentionally submitted for inclusion in the Work
132 | by You to the Licensor shall be under the terms and conditions of
133 | this License, without any additional terms or conditions.
134 | Notwithstanding the above, nothing herein shall supersede or modify
135 | the terms of any separate license agreement you may have executed
136 | with Licensor regarding such Contributions.
137 |
138 | 6. Trademarks. This License does not grant permission to use the trade
139 | names, trademarks, service marks, or product names of the Licensor,
140 | except as required for reasonable and customary use in describing the
141 | origin of the Work and reproducing the content of the NOTICE file.
142 |
143 | 7. Disclaimer of Warranty. Unless required by applicable law or
144 | agreed to in writing, Licensor provides the Work (and each
145 | Contributor provides its Contributions) on an "AS IS" BASIS,
146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
147 | implied, including, without limitation, any warranties or conditions
148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
149 | PARTICULAR PURPOSE. You are solely responsible for determining the
150 | appropriateness of using or redistributing the Work and assume any
151 | risks associated with Your exercise of permissions under this License.
152 |
153 | 8. Limitation of Liability. In no event and under no legal theory,
154 | whether in tort (including negligence), contract, or otherwise,
155 | unless required by applicable law (such as deliberate and grossly
156 | negligent acts) or agreed to in writing, shall any Contributor be
157 | liable to You for damages, including any direct, indirect, special,
158 | incidental, or consequential damages of any character arising as a
159 | result of this License or out of the use or inability to use the
160 | Work (including but not limited to damages for loss of goodwill,
161 | work stoppage, computer failure or malfunction, or any and all
162 | other commercial damages or losses), even if such Contributor
163 | has been advised of the possibility of such damages.
164 |
165 | 9. Accepting Warranty or Additional Liability. While redistributing
166 | the Work or Derivative Works thereof, You may choose to offer,
167 | and charge a fee for, acceptance of support, warranty, indemnity,
168 | or other liability obligations and/or rights consistent with this
169 | License. However, in accepting such obligations, You may act only
170 | on Your own behalf and on Your sole responsibility, not on behalf
171 | of any other Contributor, and only if You agree to indemnify,
172 | defend, and hold each Contributor harmless for any liability
173 | incurred by, or claims asserted against, such Contributor by reason
174 | of your accepting any such warranty or additional liability.
175 |
176 | END OF TERMS AND CONDITIONS
177 |
178 | APPENDIX: How to apply the Apache License to your work.
179 |
180 | To apply the Apache License to your work, attach the following
181 | boilerplate notice, with the fields enclosed by brackets "{}"
182 | replaced with your own identifying information. (Don't include
183 | the brackets!) The text should be enclosed in the appropriate
184 | comment syntax for the file format. We also recommend that a
185 | file or class name and description of purpose be included on the
186 | same "printed page" as the copyright notice for easier
187 | identification within third-party archives.
188 |
189 | Copyright {yyyy} {name of copyright owner}
190 |
191 | Licensed under the Apache License, Version 2.0 (the "License");
192 | you may not use this file except in compliance with the License.
193 | You may obtain a copy of the License at
194 |
195 | http://www.apache.org/licenses/LICENSE-2.0
196 |
197 | Unless required by applicable law or agreed to in writing, software
198 | distributed under the License is distributed on an "AS IS" BASIS,
199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
200 | See the License for the specific language governing permissions and
201 | limitations under the License.
202 |
--------------------------------------------------------------------------------
/vendor/gopkg.in/yaml.v2/LICENSE.libyaml:
--------------------------------------------------------------------------------
1 | The following files were ported to Go from C files of libyaml, and thus
2 | are still covered by their original copyright and license:
3 |
4 | apic.go
5 | emitterc.go
6 | parserc.go
7 | readerc.go
8 | scannerc.go
9 | writerc.go
10 | yamlh.go
11 | yamlprivateh.go
12 |
13 | Copyright (c) 2006 Kirill Simonov
14 |
15 | Permission is hereby granted, free of charge, to any person obtaining a copy of
16 | this software and associated documentation files (the "Software"), to deal in
17 | the Software without restriction, including without limitation the rights to
18 | use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
19 | of the Software, and to permit persons to whom the Software is furnished to do
20 | so, subject to the following conditions:
21 |
22 | The above copyright notice and this permission notice shall be included in all
23 | copies or substantial portions of the Software.
24 |
25 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
26 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
27 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
28 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
29 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
30 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
31 | SOFTWARE.
32 |
--------------------------------------------------------------------------------
/vendor/gopkg.in/yaml.v2/NOTICE:
--------------------------------------------------------------------------------
1 | Copyright 2011-2016 Canonical Ltd.
2 |
3 | Licensed under the Apache License, Version 2.0 (the "License");
4 | you may not use this file except in compliance with the License.
5 | You may obtain a copy of the License at
6 |
7 | http://www.apache.org/licenses/LICENSE-2.0
8 |
9 | Unless required by applicable law or agreed to in writing, software
10 | distributed under the License is distributed on an "AS IS" BASIS,
11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | See the License for the specific language governing permissions and
13 | limitations under the License.
14 |
--------------------------------------------------------------------------------
/vendor/gopkg.in/yaml.v2/README.md:
--------------------------------------------------------------------------------
1 | # YAML support for the Go language
2 |
3 | Introduction
4 | ------------
5 |
6 | The yaml package enables Go programs to comfortably encode and decode YAML
7 | values. It was developed within [Canonical](https://www.canonical.com) as
8 | part of the [juju](https://juju.ubuntu.com) project, and is based on a
9 | pure Go port of the well-known [libyaml](http://pyyaml.org/wiki/LibYAML)
10 | C library to parse and generate YAML data quickly and reliably.
11 |
12 | Compatibility
13 | -------------
14 |
15 | The yaml package supports most of YAML 1.1 and 1.2, including support for
16 | anchors, tags, map merging, etc. Multi-document unmarshalling is not yet
17 | implemented, and base-60 floats from YAML 1.1 are purposefully not
18 | supported since they're a poor design and are gone in YAML 1.2.
19 |
20 | Installation and usage
21 | ----------------------
22 |
23 | The import path for the package is *gopkg.in/yaml.v2*.
24 |
25 | To install it, run:
26 |
27 | go get gopkg.in/yaml.v2
28 |
29 | API documentation
30 | -----------------
31 |
32 | If opened in a browser, the import path itself leads to the API documentation:
33 |
34 | * [https://gopkg.in/yaml.v2](https://gopkg.in/yaml.v2)
35 |
36 | API stability
37 | -------------
38 |
39 | The package API for yaml v2 will remain stable as described in [gopkg.in](https://gopkg.in).
40 |
41 |
42 | License
43 | -------
44 |
45 | The yaml package is licensed under the Apache License 2.0. Please see the LICENSE file for details.
46 |
47 |
48 | Example
49 | -------
50 |
51 | ```Go
52 | package main
53 |
54 | import (
55 | "fmt"
56 | "log"
57 |
58 | "gopkg.in/yaml.v2"
59 | )
60 |
61 | var data = `
62 | a: Easy!
63 | b:
64 | c: 2
65 | d: [3, 4]
66 | `
67 |
68 | // Note: struct fields must be public in order for unmarshal to
69 | // correctly populate the data.
70 | type T struct {
71 | A string
72 | B struct {
73 | RenamedC int `yaml:"c"`
74 | D []int `yaml:",flow"`
75 | }
76 | }
77 |
78 | func main() {
79 | t := T{}
80 |
81 | err := yaml.Unmarshal([]byte(data), &t)
82 | if err != nil {
83 | log.Fatalf("error: %v", err)
84 | }
85 | fmt.Printf("--- t:\n%v\n\n", t)
86 |
87 | d, err := yaml.Marshal(&t)
88 | if err != nil {
89 | log.Fatalf("error: %v", err)
90 | }
91 | fmt.Printf("--- t dump:\n%s\n\n", string(d))
92 |
93 | m := make(map[interface{}]interface{})
94 |
95 | err = yaml.Unmarshal([]byte(data), &m)
96 | if err != nil {
97 | log.Fatalf("error: %v", err)
98 | }
99 | fmt.Printf("--- m:\n%v\n\n", m)
100 |
101 | d, err = yaml.Marshal(&m)
102 | if err != nil {
103 | log.Fatalf("error: %v", err)
104 | }
105 | fmt.Printf("--- m dump:\n%s\n\n", string(d))
106 | }
107 | ```
108 |
109 | This example will generate the following output:
110 |
111 | ```
112 | --- t:
113 | {Easy! {2 [3 4]}}
114 |
115 | --- t dump:
116 | a: Easy!
117 | b:
118 | c: 2
119 | d: [3, 4]
120 |
121 |
122 | --- m:
123 | map[a:Easy! b:map[c:2 d:[3 4]]]
124 |
125 | --- m dump:
126 | a: Easy!
127 | b:
128 | c: 2
129 | d:
130 | - 3
131 | - 4
132 | ```
133 |
134 |
--------------------------------------------------------------------------------
/vendor/gopkg.in/yaml.v2/apic.go:
--------------------------------------------------------------------------------
1 | package yaml
2 |
3 | import (
4 | "io"
5 | )
6 |
7 | func yaml_insert_token(parser *yaml_parser_t, pos int, token *yaml_token_t) {
8 | //fmt.Println("yaml_insert_token", "pos:", pos, "typ:", token.typ, "head:", parser.tokens_head, "len:", len(parser.tokens))
9 |
10 | // Check if we can move the queue at the beginning of the buffer.
11 | if parser.tokens_head > 0 && len(parser.tokens) == cap(parser.tokens) {
12 | if parser.tokens_head != len(parser.tokens) {
13 | copy(parser.tokens, parser.tokens[parser.tokens_head:])
14 | }
15 | parser.tokens = parser.tokens[:len(parser.tokens)-parser.tokens_head]
16 | parser.tokens_head = 0
17 | }
18 | parser.tokens = append(parser.tokens, *token)
19 | if pos < 0 {
20 | return
21 | }
22 | copy(parser.tokens[parser.tokens_head+pos+1:], parser.tokens[parser.tokens_head+pos:])
23 | parser.tokens[parser.tokens_head+pos] = *token
24 | }
25 |
26 | // Create a new parser object.
27 | func yaml_parser_initialize(parser *yaml_parser_t) bool {
28 | *parser = yaml_parser_t{
29 | raw_buffer: make([]byte, 0, input_raw_buffer_size),
30 | buffer: make([]byte, 0, input_buffer_size),
31 | }
32 | return true
33 | }
34 |
35 | // Destroy a parser object.
36 | func yaml_parser_delete(parser *yaml_parser_t) {
37 | *parser = yaml_parser_t{}
38 | }
39 |
40 | // String read handler.
41 | func yaml_string_read_handler(parser *yaml_parser_t, buffer []byte) (n int, err error) {
42 | if parser.input_pos == len(parser.input) {
43 | return 0, io.EOF
44 | }
45 | n = copy(buffer, parser.input[parser.input_pos:])
46 | parser.input_pos += n
47 | return n, nil
48 | }
49 |
50 | // Reader read handler.
51 | func yaml_reader_read_handler(parser *yaml_parser_t, buffer []byte) (n int, err error) {
52 | return parser.input_reader.Read(buffer)
53 | }
54 |
55 | // Set a string input.
56 | func yaml_parser_set_input_string(parser *yaml_parser_t, input []byte) {
57 | if parser.read_handler != nil {
58 | panic("must set the input source only once")
59 | }
60 | parser.read_handler = yaml_string_read_handler
61 | parser.input = input
62 | parser.input_pos = 0
63 | }
64 |
65 | // Set a file input.
66 | func yaml_parser_set_input_reader(parser *yaml_parser_t, r io.Reader) {
67 | if parser.read_handler != nil {
68 | panic("must set the input source only once")
69 | }
70 | parser.read_handler = yaml_reader_read_handler
71 | parser.input_reader = r
72 | }
73 |
74 | // Set the source encoding.
75 | func yaml_parser_set_encoding(parser *yaml_parser_t, encoding yaml_encoding_t) {
76 | if parser.encoding != yaml_ANY_ENCODING {
77 | panic("must set the encoding only once")
78 | }
79 | parser.encoding = encoding
80 | }
81 |
82 | // Create a new emitter object.
83 | func yaml_emitter_initialize(emitter *yaml_emitter_t) {
84 | *emitter = yaml_emitter_t{
85 | buffer: make([]byte, output_buffer_size),
86 | raw_buffer: make([]byte, 0, output_raw_buffer_size),
87 | states: make([]yaml_emitter_state_t, 0, initial_stack_size),
88 | events: make([]yaml_event_t, 0, initial_queue_size),
89 | }
90 | }
91 |
92 | // Destroy an emitter object.
93 | func yaml_emitter_delete(emitter *yaml_emitter_t) {
94 | *emitter = yaml_emitter_t{}
95 | }
96 |
97 | // String write handler.
98 | func yaml_string_write_handler(emitter *yaml_emitter_t, buffer []byte) error {
99 | *emitter.output_buffer = append(*emitter.output_buffer, buffer...)
100 | return nil
101 | }
102 |
103 | // yaml_writer_write_handler uses emitter.output_writer to write the
104 | // emitted text.
105 | func yaml_writer_write_handler(emitter *yaml_emitter_t, buffer []byte) error {
106 | _, err := emitter.output_writer.Write(buffer)
107 | return err
108 | }
109 |
110 | // Set a string output.
111 | func yaml_emitter_set_output_string(emitter *yaml_emitter_t, output_buffer *[]byte) {
112 | if emitter.write_handler != nil {
113 | panic("must set the output target only once")
114 | }
115 | emitter.write_handler = yaml_string_write_handler
116 | emitter.output_buffer = output_buffer
117 | }
118 |
119 | // Set a file output.
120 | func yaml_emitter_set_output_writer(emitter *yaml_emitter_t, w io.Writer) {
121 | if emitter.write_handler != nil {
122 | panic("must set the output target only once")
123 | }
124 | emitter.write_handler = yaml_writer_write_handler
125 | emitter.output_writer = w
126 | }
127 |
128 | // Set the output encoding.
129 | func yaml_emitter_set_encoding(emitter *yaml_emitter_t, encoding yaml_encoding_t) {
130 | if emitter.encoding != yaml_ANY_ENCODING {
131 | panic("must set the output encoding only once")
132 | }
133 | emitter.encoding = encoding
134 | }
135 |
136 | // Set the canonical output style.
137 | func yaml_emitter_set_canonical(emitter *yaml_emitter_t, canonical bool) {
138 | emitter.canonical = canonical
139 | }
140 |
141 | //// Set the indentation increment.
142 | func yaml_emitter_set_indent(emitter *yaml_emitter_t, indent int) {
143 | if indent < 2 || indent > 9 {
144 | indent = 2
145 | }
146 | emitter.best_indent = indent
147 | }
148 |
149 | // Set the preferred line width.
150 | func yaml_emitter_set_width(emitter *yaml_emitter_t, width int) {
151 | if width < 0 {
152 | width = -1
153 | }
154 | emitter.best_width = width
155 | }
156 |
157 | // Set if unescaped non-ASCII characters are allowed.
158 | func yaml_emitter_set_unicode(emitter *yaml_emitter_t, unicode bool) {
159 | emitter.unicode = unicode
160 | }
161 |
162 | // Set the preferred line break character.
163 | func yaml_emitter_set_break(emitter *yaml_emitter_t, line_break yaml_break_t) {
164 | emitter.line_break = line_break
165 | }
166 |
167 | ///*
168 | // * Destroy a token object.
169 | // */
170 | //
171 | //YAML_DECLARE(void)
172 | //yaml_token_delete(yaml_token_t *token)
173 | //{
174 | // assert(token); // Non-NULL token object expected.
175 | //
176 | // switch (token.type)
177 | // {
178 | // case YAML_TAG_DIRECTIVE_TOKEN:
179 | // yaml_free(token.data.tag_directive.handle);
180 | // yaml_free(token.data.tag_directive.prefix);
181 | // break;
182 | //
183 | // case YAML_ALIAS_TOKEN:
184 | // yaml_free(token.data.alias.value);
185 | // break;
186 | //
187 | // case YAML_ANCHOR_TOKEN:
188 | // yaml_free(token.data.anchor.value);
189 | // break;
190 | //
191 | // case YAML_TAG_TOKEN:
192 | // yaml_free(token.data.tag.handle);
193 | // yaml_free(token.data.tag.suffix);
194 | // break;
195 | //
196 | // case YAML_SCALAR_TOKEN:
197 | // yaml_free(token.data.scalar.value);
198 | // break;
199 | //
200 | // default:
201 | // break;
202 | // }
203 | //
204 | // memset(token, 0, sizeof(yaml_token_t));
205 | //}
206 | //
207 | ///*
208 | // * Check if a string is a valid UTF-8 sequence.
209 | // *
210 | // * Check 'reader.c' for more details on UTF-8 encoding.
211 | // */
212 | //
213 | //static int
214 | //yaml_check_utf8(yaml_char_t *start, size_t length)
215 | //{
216 | // yaml_char_t *end = start+length;
217 | // yaml_char_t *pointer = start;
218 | //
219 | // while (pointer < end) {
220 | // unsigned char octet;
221 | // unsigned int width;
222 | // unsigned int value;
223 | // size_t k;
224 | //
225 | // octet = pointer[0];
226 | // width = (octet & 0x80) == 0x00 ? 1 :
227 | // (octet & 0xE0) == 0xC0 ? 2 :
228 | // (octet & 0xF0) == 0xE0 ? 3 :
229 | // (octet & 0xF8) == 0xF0 ? 4 : 0;
230 | // value = (octet & 0x80) == 0x00 ? octet & 0x7F :
231 | // (octet & 0xE0) == 0xC0 ? octet & 0x1F :
232 | // (octet & 0xF0) == 0xE0 ? octet & 0x0F :
233 | // (octet & 0xF8) == 0xF0 ? octet & 0x07 : 0;
234 | // if (!width) return 0;
235 | // if (pointer+width > end) return 0;
236 | // for (k = 1; k < width; k ++) {
237 | // octet = pointer[k];
238 | // if ((octet & 0xC0) != 0x80) return 0;
239 | // value = (value << 6) + (octet & 0x3F);
240 | // }
241 | // if (!((width == 1) ||
242 | // (width == 2 && value >= 0x80) ||
243 | // (width == 3 && value >= 0x800) ||
244 | // (width == 4 && value >= 0x10000))) return 0;
245 | //
246 | // pointer += width;
247 | // }
248 | //
249 | // return 1;
250 | //}
251 | //
252 |
253 | // Create STREAM-START.
254 | func yaml_stream_start_event_initialize(event *yaml_event_t, encoding yaml_encoding_t) {
255 | *event = yaml_event_t{
256 | typ: yaml_STREAM_START_EVENT,
257 | encoding: encoding,
258 | }
259 | }
260 |
261 | // Create STREAM-END.
262 | func yaml_stream_end_event_initialize(event *yaml_event_t) {
263 | *event = yaml_event_t{
264 | typ: yaml_STREAM_END_EVENT,
265 | }
266 | }
267 |
268 | // Create DOCUMENT-START.
269 | func yaml_document_start_event_initialize(
270 | event *yaml_event_t,
271 | version_directive *yaml_version_directive_t,
272 | tag_directives []yaml_tag_directive_t,
273 | implicit bool,
274 | ) {
275 | *event = yaml_event_t{
276 | typ: yaml_DOCUMENT_START_EVENT,
277 | version_directive: version_directive,
278 | tag_directives: tag_directives,
279 | implicit: implicit,
280 | }
281 | }
282 |
283 | // Create DOCUMENT-END.
284 | func yaml_document_end_event_initialize(event *yaml_event_t, implicit bool) {
285 | *event = yaml_event_t{
286 | typ: yaml_DOCUMENT_END_EVENT,
287 | implicit: implicit,
288 | }
289 | }
290 |
291 | ///*
292 | // * Create ALIAS.
293 | // */
294 | //
295 | //YAML_DECLARE(int)
296 | //yaml_alias_event_initialize(event *yaml_event_t, anchor *yaml_char_t)
297 | //{
298 | // mark yaml_mark_t = { 0, 0, 0 }
299 | // anchor_copy *yaml_char_t = NULL
300 | //
301 | // assert(event) // Non-NULL event object is expected.
302 | // assert(anchor) // Non-NULL anchor is expected.
303 | //
304 | // if (!yaml_check_utf8(anchor, strlen((char *)anchor))) return 0
305 | //
306 | // anchor_copy = yaml_strdup(anchor)
307 | // if (!anchor_copy)
308 | // return 0
309 | //
310 | // ALIAS_EVENT_INIT(*event, anchor_copy, mark, mark)
311 | //
312 | // return 1
313 | //}
314 |
315 | // Create SCALAR.
316 | func yaml_scalar_event_initialize(event *yaml_event_t, anchor, tag, value []byte, plain_implicit, quoted_implicit bool, style yaml_scalar_style_t) bool {
317 | *event = yaml_event_t{
318 | typ: yaml_SCALAR_EVENT,
319 | anchor: anchor,
320 | tag: tag,
321 | value: value,
322 | implicit: plain_implicit,
323 | quoted_implicit: quoted_implicit,
324 | style: yaml_style_t(style),
325 | }
326 | return true
327 | }
328 |
329 | // Create SEQUENCE-START.
330 | func yaml_sequence_start_event_initialize(event *yaml_event_t, anchor, tag []byte, implicit bool, style yaml_sequence_style_t) bool {
331 | *event = yaml_event_t{
332 | typ: yaml_SEQUENCE_START_EVENT,
333 | anchor: anchor,
334 | tag: tag,
335 | implicit: implicit,
336 | style: yaml_style_t(style),
337 | }
338 | return true
339 | }
340 |
341 | // Create SEQUENCE-END.
342 | func yaml_sequence_end_event_initialize(event *yaml_event_t) bool {
343 | *event = yaml_event_t{
344 | typ: yaml_SEQUENCE_END_EVENT,
345 | }
346 | return true
347 | }
348 |
349 | // Create MAPPING-START.
350 | func yaml_mapping_start_event_initialize(event *yaml_event_t, anchor, tag []byte, implicit bool, style yaml_mapping_style_t) {
351 | *event = yaml_event_t{
352 | typ: yaml_MAPPING_START_EVENT,
353 | anchor: anchor,
354 | tag: tag,
355 | implicit: implicit,
356 | style: yaml_style_t(style),
357 | }
358 | }
359 |
360 | // Create MAPPING-END.
361 | func yaml_mapping_end_event_initialize(event *yaml_event_t) {
362 | *event = yaml_event_t{
363 | typ: yaml_MAPPING_END_EVENT,
364 | }
365 | }
366 |
367 | // Destroy an event object.
368 | func yaml_event_delete(event *yaml_event_t) {
369 | *event = yaml_event_t{}
370 | }
371 |
372 | ///*
373 | // * Create a document object.
374 | // */
375 | //
376 | //YAML_DECLARE(int)
377 | //yaml_document_initialize(document *yaml_document_t,
378 | // version_directive *yaml_version_directive_t,
379 | // tag_directives_start *yaml_tag_directive_t,
380 | // tag_directives_end *yaml_tag_directive_t,
381 | // start_implicit int, end_implicit int)
382 | //{
383 | // struct {
384 | // error yaml_error_type_t
385 | // } context
386 | // struct {
387 | // start *yaml_node_t
388 | // end *yaml_node_t
389 | // top *yaml_node_t
390 | // } nodes = { NULL, NULL, NULL }
391 | // version_directive_copy *yaml_version_directive_t = NULL
392 | // struct {
393 | // start *yaml_tag_directive_t
394 | // end *yaml_tag_directive_t
395 | // top *yaml_tag_directive_t
396 | // } tag_directives_copy = { NULL, NULL, NULL }
397 | // value yaml_tag_directive_t = { NULL, NULL }
398 | // mark yaml_mark_t = { 0, 0, 0 }
399 | //
400 | // assert(document) // Non-NULL document object is expected.
401 | // assert((tag_directives_start && tag_directives_end) ||
402 | // (tag_directives_start == tag_directives_end))
403 | // // Valid tag directives are expected.
404 | //
405 | // if (!STACK_INIT(&context, nodes, INITIAL_STACK_SIZE)) goto error
406 | //
407 | // if (version_directive) {
408 | // version_directive_copy = yaml_malloc(sizeof(yaml_version_directive_t))
409 | // if (!version_directive_copy) goto error
410 | // version_directive_copy.major = version_directive.major
411 | // version_directive_copy.minor = version_directive.minor
412 | // }
413 | //
414 | // if (tag_directives_start != tag_directives_end) {
415 | // tag_directive *yaml_tag_directive_t
416 | // if (!STACK_INIT(&context, tag_directives_copy, INITIAL_STACK_SIZE))
417 | // goto error
418 | // for (tag_directive = tag_directives_start
419 | // tag_directive != tag_directives_end; tag_directive ++) {
420 | // assert(tag_directive.handle)
421 | // assert(tag_directive.prefix)
422 | // if (!yaml_check_utf8(tag_directive.handle,
423 | // strlen((char *)tag_directive.handle)))
424 | // goto error
425 | // if (!yaml_check_utf8(tag_directive.prefix,
426 | // strlen((char *)tag_directive.prefix)))
427 | // goto error
428 | // value.handle = yaml_strdup(tag_directive.handle)
429 | // value.prefix = yaml_strdup(tag_directive.prefix)
430 | // if (!value.handle || !value.prefix) goto error
431 | // if (!PUSH(&context, tag_directives_copy, value))
432 | // goto error
433 | // value.handle = NULL
434 | // value.prefix = NULL
435 | // }
436 | // }
437 | //
438 | // DOCUMENT_INIT(*document, nodes.start, nodes.end, version_directive_copy,
439 | // tag_directives_copy.start, tag_directives_copy.top,
440 | // start_implicit, end_implicit, mark, mark)
441 | //
442 | // return 1
443 | //
444 | //error:
445 | // STACK_DEL(&context, nodes)
446 | // yaml_free(version_directive_copy)
447 | // while (!STACK_EMPTY(&context, tag_directives_copy)) {
448 | // value yaml_tag_directive_t = POP(&context, tag_directives_copy)
449 | // yaml_free(value.handle)
450 | // yaml_free(value.prefix)
451 | // }
452 | // STACK_DEL(&context, tag_directives_copy)
453 | // yaml_free(value.handle)
454 | // yaml_free(value.prefix)
455 | //
456 | // return 0
457 | //}
458 | //
459 | ///*
460 | // * Destroy a document object.
461 | // */
462 | //
463 | //YAML_DECLARE(void)
464 | //yaml_document_delete(document *yaml_document_t)
465 | //{
466 | // struct {
467 | // error yaml_error_type_t
468 | // } context
469 | // tag_directive *yaml_tag_directive_t
470 | //
471 | // context.error = YAML_NO_ERROR // Eliminate a compiler warning.
472 | //
473 | // assert(document) // Non-NULL document object is expected.
474 | //
475 | // while (!STACK_EMPTY(&context, document.nodes)) {
476 | // node yaml_node_t = POP(&context, document.nodes)
477 | // yaml_free(node.tag)
478 | // switch (node.type) {
479 | // case YAML_SCALAR_NODE:
480 | // yaml_free(node.data.scalar.value)
481 | // break
482 | // case YAML_SEQUENCE_NODE:
483 | // STACK_DEL(&context, node.data.sequence.items)
484 | // break
485 | // case YAML_MAPPING_NODE:
486 | // STACK_DEL(&context, node.data.mapping.pairs)
487 | // break
488 | // default:
489 | // assert(0) // Should not happen.
490 | // }
491 | // }
492 | // STACK_DEL(&context, document.nodes)
493 | //
494 | // yaml_free(document.version_directive)
495 | // for (tag_directive = document.tag_directives.start
496 | // tag_directive != document.tag_directives.end
497 | // tag_directive++) {
498 | // yaml_free(tag_directive.handle)
499 | // yaml_free(tag_directive.prefix)
500 | // }
501 | // yaml_free(document.tag_directives.start)
502 | //
503 | // memset(document, 0, sizeof(yaml_document_t))
504 | //}
505 | //
506 | ///**
507 | // * Get a document node.
508 | // */
509 | //
510 | //YAML_DECLARE(yaml_node_t *)
511 | //yaml_document_get_node(document *yaml_document_t, index int)
512 | //{
513 | // assert(document) // Non-NULL document object is expected.
514 | //
515 | // if (index > 0 && document.nodes.start + index <= document.nodes.top) {
516 | // return document.nodes.start + index - 1
517 | // }
518 | // return NULL
519 | //}
520 | //
521 | ///**
522 | // * Get the root object.
523 | // */
524 | //
525 | //YAML_DECLARE(yaml_node_t *)
526 | //yaml_document_get_root_node(document *yaml_document_t)
527 | //{
528 | // assert(document) // Non-NULL document object is expected.
529 | //
530 | // if (document.nodes.top != document.nodes.start) {
531 | // return document.nodes.start
532 | // }
533 | // return NULL
534 | //}
535 | //
536 | ///*
537 | // * Add a scalar node to a document.
538 | // */
539 | //
540 | //YAML_DECLARE(int)
541 | //yaml_document_add_scalar(document *yaml_document_t,
542 | // tag *yaml_char_t, value *yaml_char_t, length int,
543 | // style yaml_scalar_style_t)
544 | //{
545 | // struct {
546 | // error yaml_error_type_t
547 | // } context
548 | // mark yaml_mark_t = { 0, 0, 0 }
549 | // tag_copy *yaml_char_t = NULL
550 | // value_copy *yaml_char_t = NULL
551 | // node yaml_node_t
552 | //
553 | // assert(document) // Non-NULL document object is expected.
554 | // assert(value) // Non-NULL value is expected.
555 | //
556 | // if (!tag) {
557 | // tag = (yaml_char_t *)YAML_DEFAULT_SCALAR_TAG
558 | // }
559 | //
560 | // if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error
561 | // tag_copy = yaml_strdup(tag)
562 | // if (!tag_copy) goto error
563 | //
564 | // if (length < 0) {
565 | // length = strlen((char *)value)
566 | // }
567 | //
568 | // if (!yaml_check_utf8(value, length)) goto error
569 | // value_copy = yaml_malloc(length+1)
570 | // if (!value_copy) goto error
571 | // memcpy(value_copy, value, length)
572 | // value_copy[length] = '\0'
573 | //
574 | // SCALAR_NODE_INIT(node, tag_copy, value_copy, length, style, mark, mark)
575 | // if (!PUSH(&context, document.nodes, node)) goto error
576 | //
577 | // return document.nodes.top - document.nodes.start
578 | //
579 | //error:
580 | // yaml_free(tag_copy)
581 | // yaml_free(value_copy)
582 | //
583 | // return 0
584 | //}
585 | //
586 | ///*
587 | // * Add a sequence node to a document.
588 | // */
589 | //
590 | //YAML_DECLARE(int)
591 | //yaml_document_add_sequence(document *yaml_document_t,
592 | // tag *yaml_char_t, style yaml_sequence_style_t)
593 | //{
594 | // struct {
595 | // error yaml_error_type_t
596 | // } context
597 | // mark yaml_mark_t = { 0, 0, 0 }
598 | // tag_copy *yaml_char_t = NULL
599 | // struct {
600 | // start *yaml_node_item_t
601 | // end *yaml_node_item_t
602 | // top *yaml_node_item_t
603 | // } items = { NULL, NULL, NULL }
604 | // node yaml_node_t
605 | //
606 | // assert(document) // Non-NULL document object is expected.
607 | //
608 | // if (!tag) {
609 | // tag = (yaml_char_t *)YAML_DEFAULT_SEQUENCE_TAG
610 | // }
611 | //
612 | // if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error
613 | // tag_copy = yaml_strdup(tag)
614 | // if (!tag_copy) goto error
615 | //
616 | // if (!STACK_INIT(&context, items, INITIAL_STACK_SIZE)) goto error
617 | //
618 | // SEQUENCE_NODE_INIT(node, tag_copy, items.start, items.end,
619 | // style, mark, mark)
620 | // if (!PUSH(&context, document.nodes, node)) goto error
621 | //
622 | // return document.nodes.top - document.nodes.start
623 | //
624 | //error:
625 | // STACK_DEL(&context, items)
626 | // yaml_free(tag_copy)
627 | //
628 | // return 0
629 | //}
630 | //
631 | ///*
632 | // * Add a mapping node to a document.
633 | // */
634 | //
635 | //YAML_DECLARE(int)
636 | //yaml_document_add_mapping(document *yaml_document_t,
637 | // tag *yaml_char_t, style yaml_mapping_style_t)
638 | //{
639 | // struct {
640 | // error yaml_error_type_t
641 | // } context
642 | // mark yaml_mark_t = { 0, 0, 0 }
643 | // tag_copy *yaml_char_t = NULL
644 | // struct {
645 | // start *yaml_node_pair_t
646 | // end *yaml_node_pair_t
647 | // top *yaml_node_pair_t
648 | // } pairs = { NULL, NULL, NULL }
649 | // node yaml_node_t
650 | //
651 | // assert(document) // Non-NULL document object is expected.
652 | //
653 | // if (!tag) {
654 | // tag = (yaml_char_t *)YAML_DEFAULT_MAPPING_TAG
655 | // }
656 | //
657 | // if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error
658 | // tag_copy = yaml_strdup(tag)
659 | // if (!tag_copy) goto error
660 | //
661 | // if (!STACK_INIT(&context, pairs, INITIAL_STACK_SIZE)) goto error
662 | //
663 | // MAPPING_NODE_INIT(node, tag_copy, pairs.start, pairs.end,
664 | // style, mark, mark)
665 | // if (!PUSH(&context, document.nodes, node)) goto error
666 | //
667 | // return document.nodes.top - document.nodes.start
668 | //
669 | //error:
670 | // STACK_DEL(&context, pairs)
671 | // yaml_free(tag_copy)
672 | //
673 | // return 0
674 | //}
675 | //
676 | ///*
677 | // * Append an item to a sequence node.
678 | // */
679 | //
680 | //YAML_DECLARE(int)
681 | //yaml_document_append_sequence_item(document *yaml_document_t,
682 | // sequence int, item int)
683 | //{
684 | // struct {
685 | // error yaml_error_type_t
686 | // } context
687 | //
688 | // assert(document) // Non-NULL document is required.
689 | // assert(sequence > 0
690 | // && document.nodes.start + sequence <= document.nodes.top)
691 | // // Valid sequence id is required.
692 | // assert(document.nodes.start[sequence-1].type == YAML_SEQUENCE_NODE)
693 | // // A sequence node is required.
694 | // assert(item > 0 && document.nodes.start + item <= document.nodes.top)
695 | // // Valid item id is required.
696 | //
697 | // if (!PUSH(&context,
698 | // document.nodes.start[sequence-1].data.sequence.items, item))
699 | // return 0
700 | //
701 | // return 1
702 | //}
703 | //
704 | ///*
705 | // * Append a pair of a key and a value to a mapping node.
706 | // */
707 | //
708 | //YAML_DECLARE(int)
709 | //yaml_document_append_mapping_pair(document *yaml_document_t,
710 | // mapping int, key int, value int)
711 | //{
712 | // struct {
713 | // error yaml_error_type_t
714 | // } context
715 | //
716 | // pair yaml_node_pair_t
717 | //
718 | // assert(document) // Non-NULL document is required.
719 | // assert(mapping > 0
720 | // && document.nodes.start + mapping <= document.nodes.top)
721 | // // Valid mapping id is required.
722 | // assert(document.nodes.start[mapping-1].type == YAML_MAPPING_NODE)
723 | // // A mapping node is required.
724 | // assert(key > 0 && document.nodes.start + key <= document.nodes.top)
725 | // // Valid key id is required.
726 | // assert(value > 0 && document.nodes.start + value <= document.nodes.top)
727 | // // Valid value id is required.
728 | //
729 | // pair.key = key
730 | // pair.value = value
731 | //
732 | // if (!PUSH(&context,
733 | // document.nodes.start[mapping-1].data.mapping.pairs, pair))
734 | // return 0
735 | //
736 | // return 1
737 | //}
738 | //
739 | //
740 |
--------------------------------------------------------------------------------
/vendor/gopkg.in/yaml.v2/decode.go:
--------------------------------------------------------------------------------
1 | package yaml
2 |
3 | import (
4 | "encoding"
5 | "encoding/base64"
6 | "fmt"
7 | "io"
8 | "math"
9 | "reflect"
10 | "strconv"
11 | "time"
12 | )
13 |
// Node kinds produced by the parser, stored in node.kind.
const (
	documentNode = 1 << iota
	mappingNode
	sequenceNode
	scalarNode
	aliasNode
)
21 |
// node is one element of the intermediate tree that the parser builds from
// the libyaml event stream, before the decoder turns it into Go values.
type node struct {
	kind int
	line, column int
	tag string
	// For an alias node, alias holds the resolved alias.
	alias *node
	value string
	implicit bool
	children []*node
	// anchors maps anchor names to their nodes; populated only on
	// document nodes (see parser.document and parser.anchor).
	anchors map[string]*node
}
33 |
34 | // ----------------------------------------------------------------------------
35 | // Parser, produces a node tree out of a libyaml event stream.
36 |
// parser wraps the low-level libyaml parser state together with the
// currently buffered event and the document node under construction.
type parser struct {
	parser yaml_parser_t
	event yaml_event_t
	doc *node
	// doneInit is true once the STREAM-START event has been consumed.
	doneInit bool
}
43 |
44 | func newParser(b []byte) *parser {
45 | p := parser{}
46 | if !yaml_parser_initialize(&p.parser) {
47 | panic("failed to initialize YAML emitter")
48 | }
49 | if len(b) == 0 {
50 | b = []byte{'\n'}
51 | }
52 | yaml_parser_set_input_string(&p.parser, b)
53 | return &p
54 | }
55 |
56 | func newParserFromReader(r io.Reader) *parser {
57 | p := parser{}
58 | if !yaml_parser_initialize(&p.parser) {
59 | panic("failed to initialize YAML emitter")
60 | }
61 | yaml_parser_set_input_reader(&p.parser, r)
62 | return &p
63 | }
64 |
65 | func (p *parser) init() {
66 | if p.doneInit {
67 | return
68 | }
69 | p.expect(yaml_STREAM_START_EVENT)
70 | p.doneInit = true
71 | }
72 |
// destroy releases the buffered event (if any) and the underlying libyaml
// parser state.
func (p *parser) destroy() {
	if p.event.typ != yaml_NO_EVENT {
		yaml_event_delete(&p.event)
	}
	yaml_parser_delete(&p.parser)
}
79 |
// expect consumes an event from the event stream and
// checks that it's of the expected type. Mismatches and attempts to read
// past STREAM-END abort via p.fail/failf.
func (p *parser) expect(e yaml_event_type_t) {
	if p.event.typ == yaml_NO_EVENT {
		// Nothing buffered by a prior peek; pull the next event.
		if !yaml_parser_parse(&p.parser, &p.event) {
			p.fail()
		}
	}
	if p.event.typ == yaml_STREAM_END_EVENT {
		failf("attempted to go past the end of stream; corrupted value?")
	}
	if p.event.typ != e {
		p.parser.problem = fmt.Sprintf("expected %s event but got %s", e, p.event.typ)
		p.fail()
	}
	// Consume the event and mark the buffer empty for the next peek/expect.
	yaml_event_delete(&p.event)
	p.event.typ = yaml_NO_EVENT
}
98 |
99 | // peek peeks at the next event in the event stream,
100 | // puts the results into p.event and returns the event type.
101 | func (p *parser) peek() yaml_event_type_t {
102 | if p.event.typ != yaml_NO_EVENT {
103 | return p.event.typ
104 | }
105 | if !yaml_parser_parse(&p.parser, &p.event) {
106 | p.fail()
107 | }
108 | return p.event.typ
109 | }
110 |
// fail aborts decoding with the parser's current problem, prefixed with the
// most specific line number available (problem mark, then context mark).
func (p *parser) fail() {
	var where string
	var line int
	if p.parser.problem_mark.line != 0 {
		line = p.parser.problem_mark.line
		// Scanner errors don't iterate line before returning error
		if p.parser.error == yaml_SCANNER_ERROR {
			line++
		}
	} else if p.parser.context_mark.line != 0 {
		line = p.parser.context_mark.line
	}
	if line != 0 {
		where = "line " + strconv.Itoa(line) + ": "
	}
	var msg string
	if len(p.parser.problem) > 0 {
		msg = p.parser.problem
	} else {
		msg = "unknown problem parsing YAML content"
	}
	failf("%s%s", where, msg)
}
134 |
135 | func (p *parser) anchor(n *node, anchor []byte) {
136 | if anchor != nil {
137 | p.doc.anchors[string(anchor)] = n
138 | }
139 | }
140 |
141 | func (p *parser) parse() *node {
142 | p.init()
143 | switch p.peek() {
144 | case yaml_SCALAR_EVENT:
145 | return p.scalar()
146 | case yaml_ALIAS_EVENT:
147 | return p.alias()
148 | case yaml_MAPPING_START_EVENT:
149 | return p.mapping()
150 | case yaml_SEQUENCE_START_EVENT:
151 | return p.sequence()
152 | case yaml_DOCUMENT_START_EVENT:
153 | return p.document()
154 | case yaml_STREAM_END_EVENT:
155 | // Happens when attempting to decode an empty buffer.
156 | return nil
157 | default:
158 | panic("attempted to parse unknown event: " + p.event.typ.String())
159 | }
160 | }
161 |
162 | func (p *parser) node(kind int) *node {
163 | return &node{
164 | kind: kind,
165 | line: p.event.start_mark.line,
166 | column: p.event.start_mark.column,
167 | }
168 | }
169 |
170 | func (p *parser) document() *node {
171 | n := p.node(documentNode)
172 | n.anchors = make(map[string]*node)
173 | p.doc = n
174 | p.expect(yaml_DOCUMENT_START_EVENT)
175 | n.children = append(n.children, p.parse())
176 | p.expect(yaml_DOCUMENT_END_EVENT)
177 | return n
178 | }
179 |
180 | func (p *parser) alias() *node {
181 | n := p.node(aliasNode)
182 | n.value = string(p.event.anchor)
183 | n.alias = p.doc.anchors[n.value]
184 | if n.alias == nil {
185 | failf("unknown anchor '%s' referenced", n.value)
186 | }
187 | p.expect(yaml_ALIAS_EVENT)
188 | return n
189 | }
190 |
191 | func (p *parser) scalar() *node {
192 | n := p.node(scalarNode)
193 | n.value = string(p.event.value)
194 | n.tag = string(p.event.tag)
195 | n.implicit = p.event.implicit
196 | p.anchor(n, p.event.anchor)
197 | p.expect(yaml_SCALAR_EVENT)
198 | return n
199 | }
200 |
201 | func (p *parser) sequence() *node {
202 | n := p.node(sequenceNode)
203 | p.anchor(n, p.event.anchor)
204 | p.expect(yaml_SEQUENCE_START_EVENT)
205 | for p.peek() != yaml_SEQUENCE_END_EVENT {
206 | n.children = append(n.children, p.parse())
207 | }
208 | p.expect(yaml_SEQUENCE_END_EVENT)
209 | return n
210 | }
211 |
212 | func (p *parser) mapping() *node {
213 | n := p.node(mappingNode)
214 | p.anchor(n, p.event.anchor)
215 | p.expect(yaml_MAPPING_START_EVENT)
216 | for p.peek() != yaml_MAPPING_END_EVENT {
217 | n.children = append(n.children, p.parse(), p.parse())
218 | }
219 | p.expect(yaml_MAPPING_END_EVENT)
220 | return n
221 | }
222 |
223 | // ----------------------------------------------------------------------------
224 | // Decoder, unmarshals a node into a provided value.
225 |
// decoder unmarshals a parsed node tree into Go values, accumulating type
// mismatches in terrors instead of failing on the first one.
type decoder struct {
	doc *node
	// aliases tracks alias nodes currently being expanded, to detect
	// anchors whose value contains themselves.
	aliases map[*node]bool
	mapType reflect.Type
	terrors []string
	strict bool
}
233 |
// Reflection types the decoder compares against repeatedly.
var (
	mapItemType = reflect.TypeOf(MapItem{})
	durationType = reflect.TypeOf(time.Duration(0))
	defaultMapType = reflect.TypeOf(map[interface{}]interface{}{})
	ifaceType = defaultMapType.Elem()
	timeType = reflect.TypeOf(time.Time{})
	ptrTimeType = reflect.TypeOf(&time.Time{})
)
242 |
243 | func newDecoder(strict bool) *decoder {
244 | d := &decoder{mapType: defaultMapType, strict: strict}
245 | d.aliases = make(map[*node]bool)
246 | return d
247 | }
248 |
249 | func (d *decoder) terror(n *node, tag string, out reflect.Value) {
250 | if n.tag != "" {
251 | tag = n.tag
252 | }
253 | value := n.value
254 | if tag != yaml_SEQ_TAG && tag != yaml_MAP_TAG {
255 | if len(value) > 10 {
256 | value = " `" + value[:7] + "...`"
257 | } else {
258 | value = " `" + value + "`"
259 | }
260 | }
261 | d.terrors = append(d.terrors, fmt.Sprintf("line %d: cannot unmarshal %s%s into %s", n.line+1, shortTag(tag), value, out.Type()))
262 | }
263 |
// callUnmarshaler invokes the custom Unmarshaler u on node n. The function
// passed to UnmarshalYAML decodes n into the value the Unmarshaler supplies,
// handing over any type errors produced by that decode as a *TypeError.
func (d *decoder) callUnmarshaler(n *node, u Unmarshaler) (good bool) {
	terrlen := len(d.terrors)
	err := u.UnmarshalYAML(func(v interface{}) (err error) {
		defer handleErr(&err)
		d.unmarshal(n, reflect.ValueOf(v))
		if len(d.terrors) > terrlen {
			// Move the type errors from this decode off the
			// decoder's list and into the returned TypeError.
			issues := d.terrors[terrlen:]
			d.terrors = d.terrors[:terrlen]
			return &TypeError{issues}
		}
		return nil
	})
	if e, ok := err.(*TypeError); ok {
		// The Unmarshaler returned (or passed through) type errors:
		// record them and report failure instead of aborting.
		d.terrors = append(d.terrors, e.Errors...)
		return false
	}
	if err != nil {
		fail(err)
	}
	return true
}
285 |
// d.prepare initializes and dereferences pointers and calls UnmarshalYAML
// if a value is found to implement it.
// It returns the initialized and dereferenced out value, whether
// unmarshalling was already done by UnmarshalYAML, and if so whether
// its types unmarshalled appropriately.
//
// If n holds a null value, prepare returns before doing anything.
func (d *decoder) prepare(n *node, out reflect.Value) (newout reflect.Value, unmarshaled, good bool) {
	if n.tag == yaml_NULL_TAG || n.kind == scalarNode && n.tag == "" && (n.value == "null" || n.value == "~" || n.value == "" && n.implicit) {
		return out, false, false
	}
	again := true
	for again {
		again = false
		if out.Kind() == reflect.Ptr {
			if out.IsNil() {
				// Allocate the pointee so decoding has a target.
				out.Set(reflect.New(out.Type().Elem()))
			}
			out = out.Elem()
			// Dereferencing may expose another pointer or an
			// Unmarshaler, so loop once more.
			again = true
		}
		if out.CanAddr() {
			if u, ok := out.Addr().Interface().(Unmarshaler); ok {
				good = d.callUnmarshaler(n, u)
				return out, true, good
			}
		}
	}
	return out, false, false
}
316 |
// unmarshal decodes node n into out, dispatching on the node kind. It
// reports whether the value was decoded into out successfully.
func (d *decoder) unmarshal(n *node, out reflect.Value) (good bool) {
	switch n.kind {
	case documentNode:
		return d.document(n, out)
	case aliasNode:
		return d.alias(n, out)
	}
	// prepare may replace out (pointer dereference) or complete the whole
	// decode itself via a custom Unmarshaler; note out is shadowed here.
	out, unmarshaled, good := d.prepare(n, out)
	if unmarshaled {
		return good
	}
	switch n.kind {
	case scalarNode:
		good = d.scalar(n, out)
	case mappingNode:
		good = d.mapping(n, out)
	case sequenceNode:
		good = d.sequence(n, out)
	default:
		panic("internal error: unknown node kind: " + strconv.Itoa(n.kind))
	}
	return good
}
340 |
341 | func (d *decoder) document(n *node, out reflect.Value) (good bool) {
342 | if len(n.children) == 1 {
343 | d.doc = n
344 | d.unmarshal(n.children[0], out)
345 | return true
346 | }
347 | return false
348 | }
349 |
// alias decodes through the node the alias resolves to. d.aliases marks
// aliases currently being expanded so self-referential anchors fail
// instead of recursing forever.
func (d *decoder) alias(n *node, out reflect.Value) (good bool) {
	if d.aliases[n] {
		// TODO this could actually be allowed in some circumstances.
		failf("anchor '%s' value contains itself", n.value)
	}
	d.aliases[n] = true
	good = d.unmarshal(n.alias, out)
	delete(d.aliases, n)
	return good
}
360 |
361 | var zeroValue reflect.Value
362 |
363 | func resetMap(out reflect.Value) {
364 | for _, k := range out.MapKeys() {
365 | out.SetMapIndex(k, zeroValue)
366 | }
367 | }
368 |
// scalar decodes the scalar node n into out. It resolves the node's
// tag/value pair (decoding !!binary base64 payloads), handles null,
// tries an exact type match and encoding.TextUnmarshaler, then falls
// back to kind-directed conversions. Returns true on success;
// otherwise a type error is recorded via terror and false returned.
func (d *decoder) scalar(n *node, out reflect.Value) bool {
	var tag string
	var resolved interface{}
	if n.tag == "" && !n.implicit {
		// Explicitly quoted scalar: always a plain string, skip resolution.
		tag = yaml_STR_TAG
		resolved = n.value
	} else {
		tag, resolved = resolve(n.tag, n.value)
		if tag == yaml_BINARY_TAG {
			data, err := base64.StdEncoding.DecodeString(resolved.(string))
			if err != nil {
				failf("!!binary value contains invalid base64 data")
			}
			resolved = string(data)
		}
	}
	if resolved == nil {
		// Null scalar: zero the destination. An unaddressable map
		// cannot be replaced wholesale, so clear it entry by entry.
		if out.Kind() == reflect.Map && !out.CanAddr() {
			resetMap(out)
		} else {
			out.Set(reflect.Zero(out.Type()))
		}
		return true
	}
	if resolvedv := reflect.ValueOf(resolved); out.Type() == resolvedv.Type() {
		// We've resolved to exactly the type we want, so use that.
		out.Set(resolvedv)
		return true
	}
	// Perhaps we can use the value as a TextUnmarshaler to
	// set its value.
	if out.CanAddr() {
		u, ok := out.Addr().Interface().(encoding.TextUnmarshaler)
		if ok {
			var text []byte
			if tag == yaml_BINARY_TAG {
				text = []byte(resolved.(string))
			} else {
				// We let any value be unmarshaled into TextUnmarshaler.
				// That might be more lax than we'd like, but the
				// TextUnmarshaler itself should bowl out any dubious values.
				text = []byte(n.value)
			}
			err := u.UnmarshalText(text)
			if err != nil {
				fail(err)
			}
			return true
		}
	}
	// Kind-directed conversion: each case only succeeds when the
	// resolved value fits the destination without loss (overflow and
	// sign checks below), otherwise we fall through to terror.
	switch out.Kind() {
	case reflect.String:
		if tag == yaml_BINARY_TAG {
			out.SetString(resolved.(string))
			return true
		}
		if resolved != nil {
			out.SetString(n.value)
			return true
		}
	case reflect.Interface:
		if resolved == nil {
			out.Set(reflect.Zero(out.Type()))
		} else if tag == yaml_TIMESTAMP_TAG {
			// It looks like a timestamp but for backward compatibility
			// reasons we set it as a string, so that code that unmarshals
			// timestamp-like values into interface{} will continue to
			// see a string and not a time.Time.
			// TODO(v3) Drop this.
			out.Set(reflect.ValueOf(n.value))
		} else {
			out.Set(reflect.ValueOf(resolved))
		}
		return true
	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
		switch resolved := resolved.(type) {
		case int:
			if !out.OverflowInt(int64(resolved)) {
				out.SetInt(int64(resolved))
				return true
			}
		case int64:
			if !out.OverflowInt(resolved) {
				out.SetInt(resolved)
				return true
			}
		case uint64:
			if resolved <= math.MaxInt64 && !out.OverflowInt(int64(resolved)) {
				out.SetInt(int64(resolved))
				return true
			}
		case float64:
			if resolved <= math.MaxInt64 && !out.OverflowInt(int64(resolved)) {
				out.SetInt(int64(resolved))
				return true
			}
		case string:
			// Strings are accepted for integer fields only when the
			// destination is a time.Duration, e.g. "1m30s".
			if out.Type() == durationType {
				d, err := time.ParseDuration(resolved)
				if err == nil {
					out.SetInt(int64(d))
					return true
				}
			}
		}
	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
		switch resolved := resolved.(type) {
		case int:
			if resolved >= 0 && !out.OverflowUint(uint64(resolved)) {
				out.SetUint(uint64(resolved))
				return true
			}
		case int64:
			if resolved >= 0 && !out.OverflowUint(uint64(resolved)) {
				out.SetUint(uint64(resolved))
				return true
			}
		case uint64:
			if !out.OverflowUint(uint64(resolved)) {
				out.SetUint(uint64(resolved))
				return true
			}
		case float64:
			if resolved <= math.MaxUint64 && !out.OverflowUint(uint64(resolved)) {
				out.SetUint(uint64(resolved))
				return true
			}
		}
	case reflect.Bool:
		switch resolved := resolved.(type) {
		case bool:
			out.SetBool(resolved)
			return true
		}
	case reflect.Float32, reflect.Float64:
		switch resolved := resolved.(type) {
		case int:
			out.SetFloat(float64(resolved))
			return true
		case int64:
			out.SetFloat(float64(resolved))
			return true
		case uint64:
			out.SetFloat(float64(resolved))
			return true
		case float64:
			out.SetFloat(resolved)
			return true
		}
	case reflect.Struct:
		if resolvedv := reflect.ValueOf(resolved); out.Type() == resolvedv.Type() {
			out.Set(resolvedv)
			return true
		}
	case reflect.Ptr:
		if out.Type().Elem() == reflect.TypeOf(resolved) {
			// TODO DOes this make sense? When is out a Ptr except when decoding a nil value?
			elem := reflect.New(out.Type().Elem())
			elem.Elem().Set(reflect.ValueOf(resolved))
			out.Set(elem)
			return true
		}
	}
	d.terror(n, tag, out)
	return false
}
535 |
536 | func settableValueOf(i interface{}) reflect.Value {
537 | v := reflect.ValueOf(i)
538 | sv := reflect.New(v.Type()).Elem()
539 | sv.Set(v)
540 | return sv
541 | }
542 |
// sequence decodes the sequence node n into out, which must be a
// slice, an array of matching length, or an interface (decoded as a
// generic []interface{}). Children that fail to decode are skipped
// rather than aborting the whole sequence.
func (d *decoder) sequence(n *node, out reflect.Value) (good bool) {
	l := len(n.children)

	var iface reflect.Value
	switch out.Kind() {
	case reflect.Slice:
		out.Set(reflect.MakeSlice(out.Type(), l, l))
	case reflect.Array:
		if l != out.Len() {
			failf("invalid array: want %d elements but got %d", out.Len(), l)
		}
	case reflect.Interface:
		// No type hints. Will have to use a generic sequence.
		iface = out
		out = settableValueOf(make([]interface{}, l))
	default:
		d.terror(n, yaml_SEQ_TAG, out)
		return false
	}
	et := out.Type().Elem()

	// j counts children that actually decoded, so failures leave no
	// gaps; the slice is trimmed to j afterwards.
	j := 0
	for i := 0; i < l; i++ {
		e := reflect.New(et).Elem()
		if ok := d.unmarshal(n.children[i], e); ok {
			out.Index(j).Set(e)
			j++
		}
	}
	if out.Kind() != reflect.Array {
		out.Set(out.Slice(0, j))
	}
	if iface.IsValid() {
		// Copy the generic slice back into the interface destination.
		iface.Set(out)
	}
	return true
}
580 |
// mapping decodes the mapping node n into out: a struct, a []MapItem
// slice (order-preserving), a map, or an interface (which receives a
// map of d.mapType). Merge keys ("<<") are expanded via merge, and
// unhashable key kinds (map/slice) are rejected.
func (d *decoder) mapping(n *node, out reflect.Value) (good bool) {
	switch out.Kind() {
	case reflect.Struct:
		return d.mappingStruct(n, out)
	case reflect.Slice:
		return d.mappingSlice(n, out)
	case reflect.Map:
		// okay
	case reflect.Interface:
		if d.mapType.Kind() == reflect.Map {
			iface := out
			out = reflect.MakeMap(d.mapType)
			iface.Set(out)
		} else {
			// d.mapType is a MapSlice type: decode preserving order.
			slicev := reflect.New(d.mapType).Elem()
			if !d.mappingSlice(n, slicev) {
				return false
			}
			out.Set(slicev)
			return true
		}
	default:
		d.terror(n, yaml_MAP_TAG, out)
		return false
	}
	outt := out.Type()
	kt := outt.Key()
	et := outt.Elem()

	// Propagate this map type to nested interface{} values while
	// decoding children; restored before returning.
	mapType := d.mapType
	if outt.Key() == ifaceType && outt.Elem() == ifaceType {
		d.mapType = outt
	}

	if out.IsNil() {
		out.Set(reflect.MakeMap(outt))
	}
	l := len(n.children)
	// Children alternate: key, value, key, value, ...
	for i := 0; i < l; i += 2 {
		if isMerge(n.children[i]) {
			d.merge(n.children[i+1], out)
			continue
		}
		k := reflect.New(kt).Elem()
		if d.unmarshal(n.children[i], k) {
			kkind := k.Kind()
			if kkind == reflect.Interface {
				kkind = k.Elem().Kind()
			}
			if kkind == reflect.Map || kkind == reflect.Slice {
				failf("invalid map key: %#v", k.Interface())
			}
			e := reflect.New(et).Elem()
			if d.unmarshal(n.children[i+1], e) {
				d.setMapIndex(n.children[i+1], out, k, e)
			}
		}
	}
	d.mapType = mapType
	return true
}
642 |
643 | func (d *decoder) setMapIndex(n *node, out, k, v reflect.Value) {
644 | if d.strict && out.MapIndex(k) != zeroValue {
645 | d.terrors = append(d.terrors, fmt.Sprintf("line %d: key %#v already set in map", n.line+1, k.Interface()))
646 | return
647 | }
648 | out.SetMapIndex(k, v)
649 | }
650 |
// mappingSlice decodes mapping node n into a []MapItem slice, keeping
// the key order from the document (yaml.MapSlice destinations).
func (d *decoder) mappingSlice(n *node, out reflect.Value) (good bool) {
	outt := out.Type()
	if outt.Elem() != mapItemType {
		d.terror(n, yaml_MAP_TAG, out)
		return false
	}

	// Nested maps decoded through interface{} should preserve order
	// too, so propagate this type while decoding children.
	mapType := d.mapType
	d.mapType = outt

	var slice []MapItem
	var l = len(n.children)
	for i := 0; i < l; i += 2 {
		if isMerge(n.children[i]) {
			d.merge(n.children[i+1], out)
			continue
		}
		item := MapItem{}
		k := reflect.ValueOf(&item.Key).Elem()
		if d.unmarshal(n.children[i], k) {
			v := reflect.ValueOf(&item.Value).Elem()
			// Only a fully decoded key/value pair is appended.
			if d.unmarshal(n.children[i+1], v) {
				slice = append(slice, item)
			}
		}
	}
	out.Set(reflect.ValueOf(slice))
	d.mapType = mapType
	return true
}
681 |
// mappingStruct decodes mapping node n into the struct out using the
// cached field info from getStructInfo. Keys without a matching field
// go into an inline map field when one exists; in strict mode,
// duplicate and unknown keys are recorded as type errors.
func (d *decoder) mappingStruct(n *node, out reflect.Value) (good bool) {
	sinfo, err := getStructInfo(out.Type())
	if err != nil {
		panic(err)
	}
	// Reusable settable string for decoding each key.
	name := settableValueOf("")
	l := len(n.children)

	var inlineMap reflect.Value
	var elemType reflect.Type
	if sinfo.InlineMap != -1 {
		inlineMap = out.Field(sinfo.InlineMap)
		inlineMap.Set(reflect.New(inlineMap.Type()).Elem())
		elemType = inlineMap.Type().Elem()
	}

	// Track fields already set so strict mode can flag duplicates.
	var doneFields []bool
	if d.strict {
		doneFields = make([]bool, len(sinfo.FieldsList))
	}
	for i := 0; i < l; i += 2 {
		ni := n.children[i]
		if isMerge(ni) {
			d.merge(n.children[i+1], out)
			continue
		}
		if !d.unmarshal(ni, name) {
			continue
		}
		if info, ok := sinfo.FieldsMap[name.String()]; ok {
			if d.strict {
				if doneFields[info.Id] {
					d.terrors = append(d.terrors, fmt.Sprintf("line %d: field %s already set in type %s", ni.line+1, name.String(), out.Type()))
					continue
				}
				doneFields[info.Id] = true
			}
			var field reflect.Value
			if info.Inline == nil {
				field = out.Field(info.Num)
			} else {
				field = out.FieldByIndex(info.Inline)
			}
			d.unmarshal(n.children[i+1], field)
		} else if sinfo.InlineMap != -1 {
			if inlineMap.IsNil() {
				inlineMap.Set(reflect.MakeMap(inlineMap.Type()))
			}
			value := reflect.New(elemType).Elem()
			d.unmarshal(n.children[i+1], value)
			d.setMapIndex(n.children[i+1], inlineMap, name, value)
		} else if d.strict {
			d.terrors = append(d.terrors, fmt.Sprintf("line %d: field %s not found in type %s", ni.line+1, name.String(), out.Type()))
		}
	}
	return true
}
739 |
// failWantMap aborts decoding: the value of a "<<" merge key must be
// a mapping or a sequence of mappings.
func failWantMap() {
	failf("map merge requires map or sequence of maps as the value")
}
743 |
// merge applies the value n of a YAML merge key ("<<") into out. The
// value may be a mapping, an alias to a mapping, or a sequence of
// either; anything else aborts via failWantMap.
func (d *decoder) merge(n *node, out reflect.Value) {
	switch n.kind {
	case mappingNode:
		d.unmarshal(n, out)
	case aliasNode:
		// Validate the alias target is a mapping before merging it.
		an, ok := d.doc.anchors[n.value]
		if ok && an.kind != mappingNode {
			failWantMap()
		}
		d.unmarshal(n, out)
	case sequenceNode:
		// Step backwards as earlier nodes take precedence.
		for i := len(n.children) - 1; i >= 0; i-- {
			ni := n.children[i]
			if ni.kind == aliasNode {
				an, ok := d.doc.anchors[ni.value]
				if ok && an.kind != mappingNode {
					failWantMap()
				}
			} else if ni.kind != mappingNode {
				failWantMap()
			}
			d.unmarshal(ni, out)
		}
	default:
		failWantMap()
	}
}
772 |
773 | func isMerge(n *node) bool {
774 | return n.kind == scalarNode && n.value == "<<" && (n.implicit == true || n.tag == yaml_MERGE_TAG)
775 | }
776 |
--------------------------------------------------------------------------------
/vendor/gopkg.in/yaml.v2/encode.go:
--------------------------------------------------------------------------------
1 | package yaml
2 |
3 | import (
4 | "encoding"
5 | "fmt"
6 | "io"
7 | "reflect"
8 | "regexp"
9 | "sort"
10 | "strconv"
11 | "strings"
12 | "time"
13 | "unicode/utf8"
14 | )
15 |
// encoder translates Go values into libyaml emitter events. Output is
// collected into out or written straight to an io.Writer, depending on
// which constructor configured the emitter.
type encoder struct {
	emitter yaml_emitter_t
	event yaml_event_t
	out []byte
	// flow requests flow style for the next mapping/sequence emitted;
	// it is reset as soon as it is consumed.
	flow bool
	// doneInit holds whether the initial stream_start_event has been
	// emitted.
	doneInit bool
}
25 |
// newEncoder returns an encoder that accumulates its output in e.out.
func newEncoder() *encoder {
	e := &encoder{}
	yaml_emitter_initialize(&e.emitter)
	yaml_emitter_set_output_string(&e.emitter, &e.out)
	yaml_emitter_set_unicode(&e.emitter, true)
	return e
}
33 |
// newEncoderWithWriter returns an encoder that streams its output
// directly to w instead of buffering it in e.out.
func newEncoderWithWriter(w io.Writer) *encoder {
	e := &encoder{}
	yaml_emitter_initialize(&e.emitter)
	yaml_emitter_set_output_writer(&e.emitter, w)
	yaml_emitter_set_unicode(&e.emitter, true)
	return e
}
41 |
// init emits the stream start event exactly once per encoder; later
// calls are no-ops.
func (e *encoder) init() {
	if e.doneInit {
		return
	}
	yaml_stream_start_event_initialize(&e.event, yaml_UTF8_ENCODING)
	e.emit()
	e.doneInit = true
}
50 |
// finish emits the stream end event, closing the YAML stream.
func (e *encoder) finish() {
	e.emitter.open_ended = false
	yaml_stream_end_event_initialize(&e.event)
	e.emit()
}
56 |
// destroy releases the emitter's internal resources.
func (e *encoder) destroy() {
	yaml_emitter_delete(&e.emitter)
}
60 |
// emit feeds the pending e.event to the emitter, aborting the encode
// via must on failure.
func (e *encoder) emit() {
	// This will internally delete the e.event value.
	e.must(yaml_emitter_emit(&e.emitter, &e.event))
}
65 |
66 | func (e *encoder) must(ok bool) {
67 | if !ok {
68 | msg := e.emitter.problem
69 | if msg == "" {
70 | msg = "unknown problem generating YAML content"
71 | }
72 | failf("%s", msg)
73 | }
74 | }
75 |
// marshalDoc emits in as a complete YAML document, wrapping the value
// between document start/end events and initializing the stream on
// first use.
func (e *encoder) marshalDoc(tag string, in reflect.Value) {
	e.init()
	yaml_document_start_event_initialize(&e.event, nil, nil, true)
	e.emit()
	e.marshal(tag, in)
	yaml_document_end_event_initialize(&e.event, true)
	e.emit()
}
84 |
// marshal emits the event stream for the value in. Interface-based
// special cases (Marshaler, TextMarshaler, time.Time, nil) are
// handled first; the result is then dispatched on its reflect.Kind.
func (e *encoder) marshal(tag string, in reflect.Value) {
	if !in.IsValid() || in.Kind() == reflect.Ptr && in.IsNil() {
		e.nilv()
		return
	}
	iface := in.Interface()
	switch m := iface.(type) {
	case time.Time, *time.Time:
		// Although time.Time implements TextMarshaler,
		// we don't want to treat it as a string for YAML
		// purposes because YAML has special support for
		// timestamps.
	case Marshaler:
		v, err := m.MarshalYAML()
		if err != nil {
			fail(err)
		}
		if v == nil {
			e.nilv()
			return
		}
		// Re-dispatch on whatever the custom marshaler produced.
		in = reflect.ValueOf(v)
	case encoding.TextMarshaler:
		text, err := m.MarshalText()
		if err != nil {
			fail(err)
		}
		in = reflect.ValueOf(string(text))
	case nil:
		e.nilv()
		return
	}
	switch in.Kind() {
	case reflect.Interface:
		e.marshal(tag, in.Elem())
	case reflect.Map:
		e.mapv(tag, in)
	case reflect.Ptr:
		if in.Type() == ptrTimeType {
			e.timev(tag, in.Elem())
		} else {
			e.marshal(tag, in.Elem())
		}
	case reflect.Struct:
		if in.Type() == timeType {
			e.timev(tag, in)
		} else {
			e.structv(tag, in)
		}
	case reflect.Slice, reflect.Array:
		if in.Type().Elem() == mapItemType {
			// yaml.MapSlice: emit as an order-preserving mapping.
			e.itemsv(tag, in)
		} else {
			e.slicev(tag, in)
		}
	case reflect.String:
		e.stringv(tag, in)
	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
		if in.Type() == durationType {
			// Durations render as their String form, e.g. "1m30s".
			e.stringv(tag, reflect.ValueOf(iface.(time.Duration).String()))
		} else {
			e.intv(tag, in)
		}
	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
		e.uintv(tag, in)
	case reflect.Float32, reflect.Float64:
		e.floatv(tag, in)
	case reflect.Bool:
		e.boolv(tag, in)
	default:
		panic("cannot marshal type: " + in.Type().String())
	}
}
158 |
// mapv emits the map in as a YAML mapping with its keys in sorted
// order for deterministic output.
func (e *encoder) mapv(tag string, in reflect.Value) {
	e.mappingv(tag, func() {
		keys := keyList(in.MapKeys())
		sort.Sort(keys)
		for _, k := range keys {
			e.marshal("", k)
			e.marshal("", in.MapIndex(k))
		}
	})
}
169 |
// itemsv emits a []MapItem (yaml.MapSlice) as a mapping, preserving
// the slice's key order.
func (e *encoder) itemsv(tag string, in reflect.Value) {
	e.mappingv(tag, func() {
		slice := in.Convert(reflect.TypeOf([]MapItem{})).Interface().([]MapItem)
		for _, item := range slice {
			e.marshal("", reflect.ValueOf(item.Key))
			e.marshal("", reflect.ValueOf(item.Value))
		}
	})
}
179 |
// structv emits the struct in as a mapping using the cached field
// info; omitempty fields that are zero are skipped, and an inline map
// field, if present, is emitted (sorted) after the named fields.
func (e *encoder) structv(tag string, in reflect.Value) {
	sinfo, err := getStructInfo(in.Type())
	if err != nil {
		panic(err)
	}
	e.mappingv(tag, func() {
		for _, info := range sinfo.FieldsList {
			var value reflect.Value
			if info.Inline == nil {
				value = in.Field(info.Num)
			} else {
				value = in.FieldByIndex(info.Inline)
			}
			if info.OmitEmpty && isZero(value) {
				continue
			}
			e.marshal("", reflect.ValueOf(info.Key))
			e.flow = info.Flow
			e.marshal("", value)
		}
		if sinfo.InlineMap >= 0 {
			m := in.Field(sinfo.InlineMap)
			if m.Len() > 0 {
				e.flow = false
				keys := keyList(m.MapKeys())
				sort.Sort(keys)
				for _, k := range keys {
					// A key colliding with a real struct field would
					// produce a duplicate mapping key in the output.
					if _, found := sinfo.FieldsMap[k.String()]; found {
						panic(fmt.Sprintf("Can't have key %q in inlined map; conflicts with struct field", k.String()))
					}
					e.marshal("", k)
					e.flow = false
					e.marshal("", m.MapIndex(k))
				}
			}
		}
	})
}
218 |
// mappingv emits mapping start/end events around f, which emits the
// key/value pairs. A pending flow request selects flow style and is
// consumed (reset) here.
func (e *encoder) mappingv(tag string, f func()) {
	implicit := tag == ""
	style := yaml_BLOCK_MAPPING_STYLE
	if e.flow {
		e.flow = false
		style = yaml_FLOW_MAPPING_STYLE
	}
	yaml_mapping_start_event_initialize(&e.event, nil, []byte(tag), implicit, style)
	e.emit()
	f()
	yaml_mapping_end_event_initialize(&e.event)
	e.emit()
}
232 |
// slicev emits the slice or array in as a YAML sequence; a pending
// flow request selects flow style and is consumed here.
func (e *encoder) slicev(tag string, in reflect.Value) {
	implicit := tag == ""
	style := yaml_BLOCK_SEQUENCE_STYLE
	if e.flow {
		e.flow = false
		style = yaml_FLOW_SEQUENCE_STYLE
	}
	e.must(yaml_sequence_start_event_initialize(&e.event, nil, []byte(tag), implicit, style))
	e.emit()
	n := in.Len()
	for i := 0; i < n; i++ {
		e.marshal("", in.Index(i))
	}
	e.must(yaml_sequence_end_event_initialize(&e.event))
	e.emit()
}
249 |
// isBase60Float returns whether s is in base 60 notation as defined
// in YAML 1.1.
//
// The base 60 float notation in YAML 1.1 is a terrible idea and is
// unsupported in YAML 1.2 and by this package, but these should be
// marshalled quoted for the time being for compatibility with other
// parsers.
func isBase60Float(s string) (result bool) {
	// Fast path: a candidate must start with a sign or a digit and
	// contain at least one colon; anything else cannot possibly match.
	if len(s) == 0 {
		return false
	}
	switch c := s[0]; {
	case c == '+', c == '-':
	case c >= '0' && c <= '9':
	default:
		return false
	}
	if strings.IndexByte(s, ':') < 0 {
		return false
	}
	// Slow path: defer to the full regular expression.
	return base60float.MatchString(s)
}

// From http://yaml.org/type/float.html, except the regular expression there
// is bogus. In practice parsers do not enforce the "\.[0-9_]*" suffix.
var base60float = regexp.MustCompile(`^[-+]?[0-9][0-9_]*(?::[0-5]?[0-9])+(?:\.[0-9_]*)?$`)
271 |
// stringv emits the string in as a scalar, choosing a tag and style
// that survive a round trip: invalid UTF-8 becomes base64 !!binary,
// strings that would resolve to another type are quoted, and
// multi-line strings use literal block style.
func (e *encoder) stringv(tag string, in reflect.Value) {
	var style yaml_scalar_style_t
	s := in.String()
	canUsePlain := true
	switch {
	case !utf8.ValidString(s):
		if tag == yaml_BINARY_TAG {
			failf("explicitly tagged !!binary data must be base64-encoded")
		}
		if tag != "" {
			failf("cannot marshal invalid UTF-8 data as %s", shortTag(tag))
		}
		// It can't be encoded directly as YAML so use a binary tag
		// and encode it as base64.
		tag = yaml_BINARY_TAG
		s = encodeBase64(s)
	case tag == "":
		// Check to see if it would resolve to a specific
		// tag when encoded unquoted. If it doesn't,
		// there's no need to quote it.
		rtag, _ := resolve("", s)
		canUsePlain = rtag == yaml_STR_TAG && !isBase60Float(s)
	}
	// Note: it's possible for user code to emit invalid YAML
	// if they explicitly specify a tag and a string containing
	// text that's incompatible with that tag.
	switch {
	case strings.Contains(s, "\n"):
		style = yaml_LITERAL_SCALAR_STYLE
	case canUsePlain:
		style = yaml_PLAIN_SCALAR_STYLE
	default:
		style = yaml_DOUBLE_QUOTED_SCALAR_STYLE
	}
	e.emitScalar(s, "", tag, style)
}
308 |
309 | func (e *encoder) boolv(tag string, in reflect.Value) {
310 | var s string
311 | if in.Bool() {
312 | s = "true"
313 | } else {
314 | s = "false"
315 | }
316 | e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE)
317 | }
318 |
// intv emits a signed integer as a plain decimal scalar.
func (e *encoder) intv(tag string, in reflect.Value) {
	s := strconv.FormatInt(in.Int(), 10)
	e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE)
}
323 |
// uintv emits an unsigned integer as a plain decimal scalar.
func (e *encoder) uintv(tag string, in reflect.Value) {
	s := strconv.FormatUint(in.Uint(), 10)
	e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE)
}
328 |
// timev emits a time.Time as an RFC 3339 timestamp scalar with
// nanosecond precision.
func (e *encoder) timev(tag string, in reflect.Value) {
	t := in.Interface().(time.Time)
	s := t.Format(time.RFC3339Nano)
	e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE)
}
334 |
// floatv emits a float as a plain scalar, using the shortest exact
// representation for the value's own precision and YAML spellings for
// infinities and NaN (".inf", "-.inf", ".nan").
func (e *encoder) floatv(tag string, in reflect.Value) {
	// Issue #352: When formatting, use the precision of the underlying value
	precision := 64
	if in.Kind() == reflect.Float32 {
		precision = 32
	}

	s := strconv.FormatFloat(in.Float(), 'g', -1, precision)
	switch s {
	case "+Inf":
		s = ".inf"
	case "-Inf":
		s = "-.inf"
	case "NaN":
		s = ".nan"
	}
	e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE)
}
353 |
// nilv emits an explicit "null" scalar.
func (e *encoder) nilv() {
	e.emitScalar("null", "", "", yaml_PLAIN_SCALAR_STYLE)
}
357 |
// emitScalar emits a single scalar event. An empty tag marks the
// scalar as implicit so no tag is written to the output.
func (e *encoder) emitScalar(value, anchor, tag string, style yaml_scalar_style_t) {
	implicit := tag == ""
	e.must(yaml_scalar_event_initialize(&e.event, []byte(anchor), []byte(tag), []byte(value), implicit, implicit, style))
	e.emit()
}
363 |
--------------------------------------------------------------------------------
/vendor/gopkg.in/yaml.v2/go.mod:
--------------------------------------------------------------------------------
1 | module "gopkg.in/yaml.v2"
2 |
3 | require (
4 | "gopkg.in/check.v1" v0.0.0-20161208181325-20d25e280405
5 | )
6 |
--------------------------------------------------------------------------------
/vendor/gopkg.in/yaml.v2/readerc.go:
--------------------------------------------------------------------------------
1 | package yaml
2 |
3 | import (
4 | "io"
5 | )
6 |
// Set the reader error and return 0.
//
// yaml_parser_set_reader_error records the problem details on the
// parser and always returns false, so callers can `return` it directly.
func yaml_parser_set_reader_error(parser *yaml_parser_t, problem string, offset int, value int) bool {
	parser.error = yaml_READER_ERROR
	parser.problem = problem
	parser.problem_offset = offset
	parser.problem_value = value
	return false
}
15 |
// Byte order marks recognized at the start of the input stream
// (UTF-8, UTF-16 little-endian, UTF-16 big-endian).
const (
	bom_UTF8 = "\xef\xbb\xbf"
	bom_UTF16LE = "\xff\xfe"
	bom_UTF16BE = "\xfe\xff"
)
22 |
// Determine the input stream encoding by checking the BOM symbol. If no BOM is
// found, the UTF-8 encoding is assumed. Return 1 on success, 0 on failure.
func yaml_parser_determine_encoding(parser *yaml_parser_t) bool {
	// Ensure that we had enough bytes in the raw buffer.
	for !parser.eof && len(parser.raw_buffer)-parser.raw_buffer_pos < 3 {
		if !yaml_parser_update_raw_buffer(parser) {
			return false
		}
	}

	// Determine the encoding.
	buf := parser.raw_buffer
	pos := parser.raw_buffer_pos
	avail := len(buf) - pos
	// A recognized BOM is consumed (position/offset advanced) so it
	// never reaches the scanner.
	if avail >= 2 && buf[pos] == bom_UTF16LE[0] && buf[pos+1] == bom_UTF16LE[1] {
		parser.encoding = yaml_UTF16LE_ENCODING
		parser.raw_buffer_pos += 2
		parser.offset += 2
	} else if avail >= 2 && buf[pos] == bom_UTF16BE[0] && buf[pos+1] == bom_UTF16BE[1] {
		parser.encoding = yaml_UTF16BE_ENCODING
		parser.raw_buffer_pos += 2
		parser.offset += 2
	} else if avail >= 3 && buf[pos] == bom_UTF8[0] && buf[pos+1] == bom_UTF8[1] && buf[pos+2] == bom_UTF8[2] {
		parser.encoding = yaml_UTF8_ENCODING
		parser.raw_buffer_pos += 3
		parser.offset += 3
	} else {
		// No BOM: default to UTF-8.
		parser.encoding = yaml_UTF8_ENCODING
	}
	return true
}
54 |
// Update the raw buffer.
//
// yaml_parser_update_raw_buffer compacts unread bytes to the front of
// the raw buffer and refills the remainder from the read handler. EOF
// is recorded on the parser rather than treated as an error; only a
// genuine read error returns false.
func yaml_parser_update_raw_buffer(parser *yaml_parser_t) bool {
	size_read := 0

	// Return if the raw buffer is full.
	if parser.raw_buffer_pos == 0 && len(parser.raw_buffer) == cap(parser.raw_buffer) {
		return true
	}

	// Return on EOF.
	if parser.eof {
		return true
	}

	// Move the remaining bytes in the raw buffer to the beginning.
	if parser.raw_buffer_pos > 0 && parser.raw_buffer_pos < len(parser.raw_buffer) {
		copy(parser.raw_buffer, parser.raw_buffer[parser.raw_buffer_pos:])
	}
	parser.raw_buffer = parser.raw_buffer[:len(parser.raw_buffer)-parser.raw_buffer_pos]
	parser.raw_buffer_pos = 0

	// Call the read handler to fill the buffer.
	size_read, err := parser.read_handler(parser, parser.raw_buffer[len(parser.raw_buffer):cap(parser.raw_buffer)])
	parser.raw_buffer = parser.raw_buffer[:len(parser.raw_buffer)+size_read]
	if err == io.EOF {
		parser.eof = true
	} else if err != nil {
		return yaml_parser_set_reader_error(parser, "input error: "+err.Error(), parser.offset, -1)
	}
	return true
}
86 |
87 | // Ensure that the buffer contains at least `length` characters.
88 | // Return true on success, false on failure.
89 | //
90 | // The length is supposed to be significantly less that the buffer size.
91 | func yaml_parser_update_buffer(parser *yaml_parser_t, length int) bool {
92 | if parser.read_handler == nil {
93 | panic("read handler must be set")
94 | }
95 |
96 | // [Go] This function was changed to guarantee the requested length size at EOF.
97 | // The fact we need to do this is pretty awful, but the description above implies
98 | // for that to be the case, and there are tests
99 |
100 | // If the EOF flag is set and the raw buffer is empty, do nothing.
101 | if parser.eof && parser.raw_buffer_pos == len(parser.raw_buffer) {
102 | // [Go] ACTUALLY! Read the documentation of this function above.
103 | // This is just broken. To return true, we need to have the
104 | // given length in the buffer. Not doing that means every single
105 | // check that calls this function to make sure the buffer has a
106 | // given length is Go) panicking; or C) accessing invalid memory.
107 | //return true
108 | }
109 |
110 | // Return if the buffer contains enough characters.
111 | if parser.unread >= length {
112 | return true
113 | }
114 |
115 | // Determine the input encoding if it is not known yet.
116 | if parser.encoding == yaml_ANY_ENCODING {
117 | if !yaml_parser_determine_encoding(parser) {
118 | return false
119 | }
120 | }
121 |
122 | // Move the unread characters to the beginning of the buffer.
123 | buffer_len := len(parser.buffer)
124 | if parser.buffer_pos > 0 && parser.buffer_pos < buffer_len {
125 | copy(parser.buffer, parser.buffer[parser.buffer_pos:])
126 | buffer_len -= parser.buffer_pos
127 | parser.buffer_pos = 0
128 | } else if parser.buffer_pos == buffer_len {
129 | buffer_len = 0
130 | parser.buffer_pos = 0
131 | }
132 |
133 | // Open the whole buffer for writing, and cut it before returning.
134 | parser.buffer = parser.buffer[:cap(parser.buffer)]
135 |
136 | // Fill the buffer until it has enough characters.
137 | first := true
138 | for parser.unread < length {
139 |
140 | // Fill the raw buffer if necessary.
141 | if !first || parser.raw_buffer_pos == len(parser.raw_buffer) {
142 | if !yaml_parser_update_raw_buffer(parser) {
143 | parser.buffer = parser.buffer[:buffer_len]
144 | return false
145 | }
146 | }
147 | first = false
148 |
149 | // Decode the raw buffer.
150 | inner:
151 | for parser.raw_buffer_pos != len(parser.raw_buffer) {
152 | var value rune
153 | var width int
154 |
155 | raw_unread := len(parser.raw_buffer) - parser.raw_buffer_pos
156 |
157 | // Decode the next character.
158 | switch parser.encoding {
159 | case yaml_UTF8_ENCODING:
160 | // Decode a UTF-8 character. Check RFC 3629
161 | // (http://www.ietf.org/rfc/rfc3629.txt) for more details.
162 | //
163 | // The following table (taken from the RFC) is used for
164 | // decoding.
165 | //
166 | // Char. number range | UTF-8 octet sequence
167 | // (hexadecimal) | (binary)
168 | // --------------------+------------------------------------
169 | // 0000 0000-0000 007F | 0xxxxxxx
170 | // 0000 0080-0000 07FF | 110xxxxx 10xxxxxx
171 | // 0000 0800-0000 FFFF | 1110xxxx 10xxxxxx 10xxxxxx
172 | // 0001 0000-0010 FFFF | 11110xxx 10xxxxxx 10xxxxxx 10xxxxxx
173 | //
174 | // Additionally, the characters in the range 0xD800-0xDFFF
175 | // are prohibited as they are reserved for use with UTF-16
176 | // surrogate pairs.
177 |
178 | // Determine the length of the UTF-8 sequence.
179 | octet := parser.raw_buffer[parser.raw_buffer_pos]
180 | switch {
181 | case octet&0x80 == 0x00:
182 | width = 1
183 | case octet&0xE0 == 0xC0:
184 | width = 2
185 | case octet&0xF0 == 0xE0:
186 | width = 3
187 | case octet&0xF8 == 0xF0:
188 | width = 4
189 | default:
190 | // The leading octet is invalid.
191 | return yaml_parser_set_reader_error(parser,
192 | "invalid leading UTF-8 octet",
193 | parser.offset, int(octet))
194 | }
195 |
196 | // Check if the raw buffer contains an incomplete character.
197 | if width > raw_unread {
198 | if parser.eof {
199 | return yaml_parser_set_reader_error(parser,
200 | "incomplete UTF-8 octet sequence",
201 | parser.offset, -1)
202 | }
203 | break inner
204 | }
205 |
206 | // Decode the leading octet.
207 | switch {
208 | case octet&0x80 == 0x00:
209 | value = rune(octet & 0x7F)
210 | case octet&0xE0 == 0xC0:
211 | value = rune(octet & 0x1F)
212 | case octet&0xF0 == 0xE0:
213 | value = rune(octet & 0x0F)
214 | case octet&0xF8 == 0xF0:
215 | value = rune(octet & 0x07)
216 | default:
217 | value = 0
218 | }
219 |
220 | // Check and decode the trailing octets.
221 | for k := 1; k < width; k++ {
222 | octet = parser.raw_buffer[parser.raw_buffer_pos+k]
223 |
224 | // Check if the octet is valid.
225 | if (octet & 0xC0) != 0x80 {
226 | return yaml_parser_set_reader_error(parser,
227 | "invalid trailing UTF-8 octet",
228 | parser.offset+k, int(octet))
229 | }
230 |
231 | // Decode the octet.
232 | value = (value << 6) + rune(octet&0x3F)
233 | }
234 |
235 | // Check the length of the sequence against the value.
236 | switch {
237 | case width == 1:
238 | case width == 2 && value >= 0x80:
239 | case width == 3 && value >= 0x800:
240 | case width == 4 && value >= 0x10000:
241 | default:
242 | return yaml_parser_set_reader_error(parser,
243 | "invalid length of a UTF-8 sequence",
244 | parser.offset, -1)
245 | }
246 |
247 | // Check the range of the value.
248 | if value >= 0xD800 && value <= 0xDFFF || value > 0x10FFFF {
249 | return yaml_parser_set_reader_error(parser,
250 | "invalid Unicode character",
251 | parser.offset, int(value))
252 | }
253 |
254 | case yaml_UTF16LE_ENCODING, yaml_UTF16BE_ENCODING:
255 | var low, high int
256 | if parser.encoding == yaml_UTF16LE_ENCODING {
257 | low, high = 0, 1
258 | } else {
259 | low, high = 1, 0
260 | }
261 |
262 | // The UTF-16 encoding is not as simple as one might
263 | // naively think. Check RFC 2781
264 | // (http://www.ietf.org/rfc/rfc2781.txt).
265 | //
266 | // Normally, two subsequent bytes describe a Unicode
267 | // character. However a special technique (called a
268 | // surrogate pair) is used for specifying character
269 | // values larger than 0xFFFF.
270 | //
271 | // A surrogate pair consists of two pseudo-characters:
272 | // high surrogate area (0xD800-0xDBFF)
273 | // low surrogate area (0xDC00-0xDFFF)
274 | //
275 | // The following formulas are used for decoding
276 | // and encoding characters using surrogate pairs:
277 | //
278 | // U = U' + 0x10000 (0x01 00 00 <= U <= 0x10 FF FF)
279 | // U' = yyyyyyyyyyxxxxxxxxxx (0 <= U' <= 0x0F FF FF)
280 | // W1 = 110110yyyyyyyyyy
281 | // W2 = 110111xxxxxxxxxx
282 | //
283 | // where U is the character value, W1 is the high surrogate
284 | // area, W2 is the low surrogate area.
285 |
286 | // Check for incomplete UTF-16 character.
287 | if raw_unread < 2 {
288 | if parser.eof {
289 | return yaml_parser_set_reader_error(parser,
290 | "incomplete UTF-16 character",
291 | parser.offset, -1)
292 | }
293 | break inner
294 | }
295 |
296 | // Get the character.
297 | value = rune(parser.raw_buffer[parser.raw_buffer_pos+low]) +
298 | (rune(parser.raw_buffer[parser.raw_buffer_pos+high]) << 8)
299 |
300 | // Check for unexpected low surrogate area.
301 | if value&0xFC00 == 0xDC00 {
302 | return yaml_parser_set_reader_error(parser,
303 | "unexpected low surrogate area",
304 | parser.offset, int(value))
305 | }
306 |
307 | // Check for a high surrogate area.
308 | if value&0xFC00 == 0xD800 {
309 | width = 4
310 |
311 | // Check for incomplete surrogate pair.
312 | if raw_unread < 4 {
313 | if parser.eof {
314 | return yaml_parser_set_reader_error(parser,
315 | "incomplete UTF-16 surrogate pair",
316 | parser.offset, -1)
317 | }
318 | break inner
319 | }
320 |
321 | // Get the next character.
322 | value2 := rune(parser.raw_buffer[parser.raw_buffer_pos+low+2]) +
323 | (rune(parser.raw_buffer[parser.raw_buffer_pos+high+2]) << 8)
324 |
325 | // Check for a low surrogate area.
326 | if value2&0xFC00 != 0xDC00 {
327 | return yaml_parser_set_reader_error(parser,
328 | "expected low surrogate area",
329 | parser.offset+2, int(value2))
330 | }
331 |
332 | // Generate the value of the surrogate pair.
333 | value = 0x10000 + ((value & 0x3FF) << 10) + (value2 & 0x3FF)
334 | } else {
335 | width = 2
336 | }
337 |
338 | default:
339 | panic("impossible")
340 | }
341 |
342 | // Check if the character is in the allowed range:
343 | // #x9 | #xA | #xD | [#x20-#x7E] (8 bit)
344 | // | #x85 | [#xA0-#xD7FF] | [#xE000-#xFFFD] (16 bit)
345 | // | [#x10000-#x10FFFF] (32 bit)
346 | switch {
347 | case value == 0x09:
348 | case value == 0x0A:
349 | case value == 0x0D:
350 | case value >= 0x20 && value <= 0x7E:
351 | case value == 0x85:
352 | case value >= 0xA0 && value <= 0xD7FF:
353 | case value >= 0xE000 && value <= 0xFFFD:
354 | case value >= 0x10000 && value <= 0x10FFFF:
355 | default:
356 | return yaml_parser_set_reader_error(parser,
357 | "control characters are not allowed",
358 | parser.offset, int(value))
359 | }
360 |
361 | // Move the raw pointers.
362 | parser.raw_buffer_pos += width
363 | parser.offset += width
364 |
365 | // Finally put the character into the buffer.
366 | if value <= 0x7F {
367 | // 0000 0000-0000 007F . 0xxxxxxx
368 | parser.buffer[buffer_len+0] = byte(value)
369 | buffer_len += 1
370 | } else if value <= 0x7FF {
371 | // 0000 0080-0000 07FF . 110xxxxx 10xxxxxx
372 | parser.buffer[buffer_len+0] = byte(0xC0 + (value >> 6))
373 | parser.buffer[buffer_len+1] = byte(0x80 + (value & 0x3F))
374 | buffer_len += 2
375 | } else if value <= 0xFFFF {
376 | // 0000 0800-0000 FFFF . 1110xxxx 10xxxxxx 10xxxxxx
377 | parser.buffer[buffer_len+0] = byte(0xE0 + (value >> 12))
378 | parser.buffer[buffer_len+1] = byte(0x80 + ((value >> 6) & 0x3F))
379 | parser.buffer[buffer_len+2] = byte(0x80 + (value & 0x3F))
380 | buffer_len += 3
381 | } else {
382 | // 0001 0000-0010 FFFF . 11110xxx 10xxxxxx 10xxxxxx 10xxxxxx
383 | parser.buffer[buffer_len+0] = byte(0xF0 + (value >> 18))
384 | parser.buffer[buffer_len+1] = byte(0x80 + ((value >> 12) & 0x3F))
385 | parser.buffer[buffer_len+2] = byte(0x80 + ((value >> 6) & 0x3F))
386 | parser.buffer[buffer_len+3] = byte(0x80 + (value & 0x3F))
387 | buffer_len += 4
388 | }
389 |
390 | parser.unread++
391 | }
392 |
393 | // On EOF, put NUL into the buffer and return.
394 | if parser.eof {
395 | parser.buffer[buffer_len] = 0
396 | buffer_len++
397 | parser.unread++
398 | break
399 | }
400 | }
401 | // [Go] Read the documentation of this function above. To return true,
402 | // we need to have the given length in the buffer. Not doing that means
403 | // every single check that calls this function to make sure the buffer
404 | // has a given length is Go) panicking; or C) accessing invalid memory.
405 | // This happens here due to the EOF above breaking early.
406 | for buffer_len < length {
407 | parser.buffer[buffer_len] = 0
408 | buffer_len++
409 | }
410 | parser.buffer = parser.buffer[:buffer_len]
411 | return true
412 | }
413 |
--------------------------------------------------------------------------------
/vendor/gopkg.in/yaml.v2/resolve.go:
--------------------------------------------------------------------------------
1 | package yaml
2 |
3 | import (
4 | "encoding/base64"
5 | "math"
6 | "regexp"
7 | "strconv"
8 | "strings"
9 | "time"
10 | )
11 |
// resolveMapItem associates a decoded Go value with its YAML tag for
// one of the fixed plain-scalar spellings kept in resolveMap.
type resolveMapItem struct {
	value interface{}
	tag   string
}

// resolveTable classifies the first byte of a plain scalar into a
// resolution hint ('S' sign, 'D' digit, 'M' map candidate, '.' float
// candidate); populated by init below.
var resolveTable = make([]byte, 256)

// resolveMap maps exact scalar spellings ("true", "~", ".inf", "<<",
// ...) to their resolved value and tag; populated by init below.
var resolveMap = make(map[string]resolveMapItem)
19 |
20 | func init() {
21 | t := resolveTable
22 | t[int('+')] = 'S' // Sign
23 | t[int('-')] = 'S'
24 | for _, c := range "0123456789" {
25 | t[int(c)] = 'D' // Digit
26 | }
27 | for _, c := range "yYnNtTfFoO~" {
28 | t[int(c)] = 'M' // In map
29 | }
30 | t[int('.')] = '.' // Float (potentially in map)
31 |
32 | var resolveMapList = []struct {
33 | v interface{}
34 | tag string
35 | l []string
36 | }{
37 | {true, yaml_BOOL_TAG, []string{"y", "Y", "yes", "Yes", "YES"}},
38 | {true, yaml_BOOL_TAG, []string{"true", "True", "TRUE"}},
39 | {true, yaml_BOOL_TAG, []string{"on", "On", "ON"}},
40 | {false, yaml_BOOL_TAG, []string{"n", "N", "no", "No", "NO"}},
41 | {false, yaml_BOOL_TAG, []string{"false", "False", "FALSE"}},
42 | {false, yaml_BOOL_TAG, []string{"off", "Off", "OFF"}},
43 | {nil, yaml_NULL_TAG, []string{"", "~", "null", "Null", "NULL"}},
44 | {math.NaN(), yaml_FLOAT_TAG, []string{".nan", ".NaN", ".NAN"}},
45 | {math.Inf(+1), yaml_FLOAT_TAG, []string{".inf", ".Inf", ".INF"}},
46 | {math.Inf(+1), yaml_FLOAT_TAG, []string{"+.inf", "+.Inf", "+.INF"}},
47 | {math.Inf(-1), yaml_FLOAT_TAG, []string{"-.inf", "-.Inf", "-.INF"}},
48 | {"<<", yaml_MERGE_TAG, []string{"<<"}},
49 | }
50 |
51 | m := resolveMap
52 | for _, item := range resolveMapList {
53 | for _, s := range item.l {
54 | m[s] = resolveMapItem{item.v, item.tag}
55 | }
56 | }
57 | }
58 |
// longTagPrefix is the URI prefix shared by all standard YAML tags.
const longTagPrefix = "tag:yaml.org,2002:"

// shortTag converts a full "tag:yaml.org,2002:xxx" tag into its
// conventional "!!xxx" shorthand; any other tag is returned unchanged.
func shortTag(tag string) string {
	// TODO This can easily be made faster and produce less garbage.
	if suffix := strings.TrimPrefix(tag, longTagPrefix); suffix != tag {
		return "!!" + suffix
	}
	return tag
}
68 |
69 | func longTag(tag string) string {
70 | if strings.HasPrefix(tag, "!!") {
71 | return longTagPrefix + tag[2:]
72 | }
73 | return tag
74 | }
75 |
76 | func resolvableTag(tag string) bool {
77 | switch tag {
78 | case "", yaml_STR_TAG, yaml_BOOL_TAG, yaml_INT_TAG, yaml_FLOAT_TAG, yaml_NULL_TAG, yaml_TIMESTAMP_TAG:
79 | return true
80 | }
81 | return false
82 | }
83 |
84 | var yamlStyleFloat = regexp.MustCompile(`^[-+]?[0-9]*\.?[0-9]+([eE][-+][0-9]+)?$`)
85 |
86 | func resolve(tag string, in string) (rtag string, out interface{}) {
87 | if !resolvableTag(tag) {
88 | return tag, in
89 | }
90 |
91 | defer func() {
92 | switch tag {
93 | case "", rtag, yaml_STR_TAG, yaml_BINARY_TAG:
94 | return
95 | case yaml_FLOAT_TAG:
96 | if rtag == yaml_INT_TAG {
97 | switch v := out.(type) {
98 | case int64:
99 | rtag = yaml_FLOAT_TAG
100 | out = float64(v)
101 | return
102 | case int:
103 | rtag = yaml_FLOAT_TAG
104 | out = float64(v)
105 | return
106 | }
107 | }
108 | }
109 | failf("cannot decode %s `%s` as a %s", shortTag(rtag), in, shortTag(tag))
110 | }()
111 |
112 | // Any data is accepted as a !!str or !!binary.
113 | // Otherwise, the prefix is enough of a hint about what it might be.
114 | hint := byte('N')
115 | if in != "" {
116 | hint = resolveTable[in[0]]
117 | }
118 | if hint != 0 && tag != yaml_STR_TAG && tag != yaml_BINARY_TAG {
119 | // Handle things we can lookup in a map.
120 | if item, ok := resolveMap[in]; ok {
121 | return item.tag, item.value
122 | }
123 |
124 | // Base 60 floats are a bad idea, were dropped in YAML 1.2, and
125 | // are purposefully unsupported here. They're still quoted on
126 | // the way out for compatibility with other parser, though.
127 |
128 | switch hint {
129 | case 'M':
130 | // We've already checked the map above.
131 |
132 | case '.':
133 | // Not in the map, so maybe a normal float.
134 | floatv, err := strconv.ParseFloat(in, 64)
135 | if err == nil {
136 | return yaml_FLOAT_TAG, floatv
137 | }
138 |
139 | case 'D', 'S':
140 | // Int, float, or timestamp.
141 | // Only try values as a timestamp if the value is unquoted or there's an explicit
142 | // !!timestamp tag.
143 | if tag == "" || tag == yaml_TIMESTAMP_TAG {
144 | t, ok := parseTimestamp(in)
145 | if ok {
146 | return yaml_TIMESTAMP_TAG, t
147 | }
148 | }
149 |
150 | plain := strings.Replace(in, "_", "", -1)
151 | intv, err := strconv.ParseInt(plain, 0, 64)
152 | if err == nil {
153 | if intv == int64(int(intv)) {
154 | return yaml_INT_TAG, int(intv)
155 | } else {
156 | return yaml_INT_TAG, intv
157 | }
158 | }
159 | uintv, err := strconv.ParseUint(plain, 0, 64)
160 | if err == nil {
161 | return yaml_INT_TAG, uintv
162 | }
163 | if yamlStyleFloat.MatchString(plain) {
164 | floatv, err := strconv.ParseFloat(plain, 64)
165 | if err == nil {
166 | return yaml_FLOAT_TAG, floatv
167 | }
168 | }
169 | if strings.HasPrefix(plain, "0b") {
170 | intv, err := strconv.ParseInt(plain[2:], 2, 64)
171 | if err == nil {
172 | if intv == int64(int(intv)) {
173 | return yaml_INT_TAG, int(intv)
174 | } else {
175 | return yaml_INT_TAG, intv
176 | }
177 | }
178 | uintv, err := strconv.ParseUint(plain[2:], 2, 64)
179 | if err == nil {
180 | return yaml_INT_TAG, uintv
181 | }
182 | } else if strings.HasPrefix(plain, "-0b") {
183 | intv, err := strconv.ParseInt("-" + plain[3:], 2, 64)
184 | if err == nil {
185 | if true || intv == int64(int(intv)) {
186 | return yaml_INT_TAG, int(intv)
187 | } else {
188 | return yaml_INT_TAG, intv
189 | }
190 | }
191 | }
192 | default:
193 | panic("resolveTable item not yet handled: " + string(rune(hint)) + " (with " + in + ")")
194 | }
195 | }
196 | return yaml_STR_TAG, in
197 | }
198 |
// encodeBase64 returns s encoded as standard base64, broken into
// lines of at most 70 characters when the encoding is long enough to
// wrap. When wrapping occurs, every line — including the last — is
// terminated by '\n'; a short encoding is returned as a single bare
// line.
func encodeBase64(s string) string {
	const lineLen = 70
	encoded := make([]byte, base64.StdEncoding.EncodedLen(len(s)))
	base64.StdEncoding.Encode(encoded, []byte(s))
	if len(encoded) < lineLen {
		// Fits on one line: no separators at all.
		return string(encoded)
	}
	var b strings.Builder
	b.Grow(len(encoded) + len(encoded)/lineLen + 1)
	for start := 0; start < len(encoded); start += lineLen {
		end := start + lineLen
		if end > len(encoded) {
			end = len(encoded)
		}
		b.Write(encoded[start:end])
		b.WriteByte('\n')
	}
	return b.String()
}
223 |
// allowedTimestampFormats lists the time.Parse layouts accepted for
// YAML timestamps. This is a subset of the formats allowed by the
// regular expression defined at http://yaml.org/type/timestamp.html.
var allowedTimestampFormats = []string{
	"2006-1-2T15:4:5.999999999Z07:00", // RFC3339Nano with short date fields.
	"2006-1-2t15:4:5.999999999Z07:00", // RFC3339Nano with short date fields and lower-case "t".
	"2006-1-2 15:4:5.999999999",       // space separated with no time zone
	"2006-1-2",                        // date only
	// Notable exception: time.Parse cannot handle: "2001-12-14 21:59:43.10 -5"
	// from the set of examples.
}

// parseTimestamp parses s as a timestamp string and
// returns the timestamp and reports whether it succeeded.
// Timestamp formats are defined at http://yaml.org/type/timestamp.html
func parseTimestamp(s string) (time.Time, bool) {
	// TODO write code to check all the formats supported by
	// http://yaml.org/type/timestamp.html instead of using time.Parse.

	// Quick rejection: every supported layout starts with a four-digit
	// year followed by '-', so skip time.Parse for anything else.
	digits := 0
	for digits < len(s) && s[digits] >= '0' && s[digits] <= '9' {
		digits++
	}
	if digits != 4 || digits == len(s) || s[digits] != '-' {
		return time.Time{}, false
	}
	for _, layout := range allowedTimestampFormats {
		if t, err := time.Parse(layout, s); err == nil {
			return t, true
		}
	}
	return time.Time{}, false
}
259 |
--------------------------------------------------------------------------------
/vendor/gopkg.in/yaml.v2/sorter.go:
--------------------------------------------------------------------------------
1 | package yaml
2 |
3 | import (
4 | "reflect"
5 | "unicode"
6 | )
7 |
// keyList sorts map keys so encoded output is deterministic.
type keyList []reflect.Value

func (l keyList) Len() int      { return len(l) }
func (l keyList) Swap(i, j int) { l[i], l[j] = l[j], l[i] }

// Less orders two keys: numeric/bool keys sort before and among each
// other by value; mixed kinds fall back to reflect.Kind order; string
// pairs use a "natural" comparison in which embedded digit runs are
// compared as numbers (so "a9" < "a10") and letters sort after
// non-letters at a differing position.
func (l keyList) Less(i, j int) bool {
	a := l[i]
	b := l[j]
	ak := a.Kind()
	bk := b.Kind()
	// Unwrap interfaces and non-nil pointers so we compare the values
	// they refer to rather than the wrappers.
	for (ak == reflect.Interface || ak == reflect.Ptr) && !a.IsNil() {
		a = a.Elem()
		ak = a.Kind()
	}
	for (bk == reflect.Interface || bk == reflect.Ptr) && !b.IsNil() {
		b = b.Elem()
		bk = b.Kind()
	}
	af, aok := keyFloat(a)
	bf, bok := keyFloat(b)
	if aok && bok {
		// Both numeric (or bool): by value first, then by kind, then
		// by exact numeric comparison to break float-precision ties.
		if af != bf {
			return af < bf
		}
		if ak != bk {
			return ak < bk
		}
		return numLess(a, b)
	}
	if ak != reflect.String || bk != reflect.String {
		return ak < bk
	}
	ar, br := []rune(a.String()), []rune(b.String())
	for i := 0; i < len(ar) && i < len(br); i++ {
		if ar[i] == br[i] {
			continue
		}
		al := unicode.IsLetter(ar[i])
		bl := unicode.IsLetter(br[i])
		if al && bl {
			return ar[i] < br[i]
		}
		if al || bl {
			// Letters sort after non-letters (digits, punctuation).
			return bl
		}
		var ai, bi int
		var an, bn int64
		if ar[i] == '0' || br[i] == '0' {
			// NOTE(review): when one side has a leading zero here, this
			// scans the shared digit prefix already consumed; finding a
			// non-zero digit seeds both accumulators to 1, apparently so
			// that digits mid-number keep their magnitude comparison
			// consistent — confirm against upstream go-yaml tests.
			for j := i-1; j >= 0 && unicode.IsDigit(ar[j]); j-- {
				if ar[j] != '0' {
					an = 1
					bn = 1
					break
				}
			}
		}
		// Accumulate the full digit runs starting at the differing
		// position and compare them as integers.
		for ai = i; ai < len(ar) && unicode.IsDigit(ar[ai]); ai++ {
			an = an*10 + int64(ar[ai]-'0')
		}
		for bi = i; bi < len(br) && unicode.IsDigit(br[bi]); bi++ {
			bn = bn*10 + int64(br[bi]-'0')
		}
		if an != bn {
			return an < bn
		}
		if ai != bi {
			// Same numeric value, different run length (leading zeros):
			// the shorter run sorts first.
			return ai < bi
		}
		return ar[i] < br[i]
	}
	// One string is a prefix of the other: the shorter sorts first.
	return len(ar) < len(br)
}
79 |
80 | // keyFloat returns a float value for v if it is a number/bool
81 | // and whether it is a number/bool or not.
82 | func keyFloat(v reflect.Value) (f float64, ok bool) {
83 | switch v.Kind() {
84 | case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
85 | return float64(v.Int()), true
86 | case reflect.Float32, reflect.Float64:
87 | return v.Float(), true
88 | case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
89 | return float64(v.Uint()), true
90 | case reflect.Bool:
91 | if v.Bool() {
92 | return 1, true
93 | }
94 | return 0, true
95 | }
96 | return 0, false
97 | }
98 |
99 | // numLess returns whether a < b.
100 | // a and b must necessarily have the same kind.
101 | func numLess(a, b reflect.Value) bool {
102 | switch a.Kind() {
103 | case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
104 | return a.Int() < b.Int()
105 | case reflect.Float32, reflect.Float64:
106 | return a.Float() < b.Float()
107 | case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
108 | return a.Uint() < b.Uint()
109 | case reflect.Bool:
110 | return !a.Bool() && b.Bool()
111 | }
112 | panic("not a number")
113 | }
114 |
--------------------------------------------------------------------------------
/vendor/gopkg.in/yaml.v2/writerc.go:
--------------------------------------------------------------------------------
1 | package yaml
2 |
// Set the writer error and return false.
// yaml_emitter_set_writer_error records problem on the emitter and
// flags it with yaml_WRITER_ERROR. It always returns false so callers
// can write `return yaml_emitter_set_writer_error(...)` directly.
func yaml_emitter_set_writer_error(emitter *yaml_emitter_t, problem string) bool {
	emitter.error = yaml_WRITER_ERROR
	emitter.problem = problem
	return false
}
9 |
// Flush the output buffer.
// yaml_emitter_flush hands the buffered bytes to the configured
// write_handler and resets the buffer position. It returns true on
// success (including the no-op case of an empty buffer); on a write
// failure it records a writer error on the emitter and returns false.
// A missing write handler is a programming error and panics.
func yaml_emitter_flush(emitter *yaml_emitter_t) bool {
	if emitter.write_handler == nil {
		panic("write handler not set")
	}

	// Check if the buffer is empty.
	if emitter.buffer_pos == 0 {
		return true
	}

	if err := emitter.write_handler(emitter, emitter.buffer[:emitter.buffer_pos]); err != nil {
		return yaml_emitter_set_writer_error(emitter, "write error: "+err.Error())
	}
	// Only reset the buffer once the handler accepted all the bytes.
	emitter.buffer_pos = 0
	return true
}
27 |
--------------------------------------------------------------------------------
/vendor/gopkg.in/yaml.v2/yaml.go:
--------------------------------------------------------------------------------
1 | // Package yaml implements YAML support for the Go language.
2 | //
3 | // Source code and other details for the project are available at GitHub:
4 | //
5 | // https://github.com/go-yaml/yaml
6 | //
7 | package yaml
8 |
9 | import (
10 | "errors"
11 | "fmt"
12 | "io"
13 | "reflect"
14 | "strings"
15 | "sync"
16 | )
17 |
// MapSlice encodes and decodes as a YAML map.
// The order of keys is preserved when encoding and decoding.
type MapSlice []MapItem

// MapItem is an item in a MapSlice.
type MapItem struct {
	Key, Value interface{}
}

// The Unmarshaler interface may be implemented by types to customize their
// behavior when being unmarshaled from a YAML document. The UnmarshalYAML
// method receives a function that may be called to unmarshal the original
// YAML value into a field or variable. It is safe to call the unmarshal
// function parameter more than once if necessary.
type Unmarshaler interface {
	UnmarshalYAML(unmarshal func(interface{}) error) error
}

// The Marshaler interface may be implemented by types to customize their
// behavior when being marshaled into a YAML document. The returned value
// is marshaled in place of the original value implementing Marshaler.
//
// If an error is returned by MarshalYAML, the marshaling procedure stops
// and returns with the provided error.
type Marshaler interface {
	MarshalYAML() (interface{}, error)
}
45 |
// Unmarshal decodes the first document found within the in byte slice
// and assigns decoded values into the out value.
//
// Maps and pointers (to a struct, string, int, etc) are accepted as out
// values. If an internal pointer within a struct is not initialized,
// the yaml package will initialize it if necessary for unmarshalling
// the provided data. The out parameter must not be nil.
//
// The type of the decoded values should be compatible with the respective
// values in out. If one or more values cannot be decoded due to a type
// mismatches, decoding continues partially until the end of the YAML
// content, and a *yaml.TypeError is returned with details for all
// missed values.
//
// Struct fields are only unmarshalled if they are exported (have an
// upper case first letter), and are unmarshalled using the field name
// lowercased as the default key. Custom keys may be defined via the
// "yaml" name in the field tag: the content preceding the first comma
// is used as the key, and the following comma-separated options are
// used to tweak the marshalling process (see Marshal).
// Conflicting names result in a runtime error.
//
// For example:
//
//     type T struct {
//         F int `yaml:"a,omitempty"`
//         B int
//     }
//     var t T
//     yaml.Unmarshal([]byte("a: 1\nb: 2"), &t)
//
// See the documentation of Marshal for the format of tags and a list of
// supported tag options.
//
func Unmarshal(in []byte, out interface{}) (err error) {
	// Non-strict decoding: unknown fields and duplicate keys are tolerated.
	return unmarshal(in, out, false)
}

// UnmarshalStrict is like Unmarshal except that any fields that are found
// in the data that do not have corresponding struct members, or mapping
// keys that are duplicates, will result in
// an error.
func UnmarshalStrict(in []byte, out interface{}) (err error) {
	return unmarshal(in, out, true)
}
91 |
// A Decoder reads and decodes YAML values from an input stream.
type Decoder struct {
	strict bool    // reject unknown fields and duplicate keys (see UnmarshalStrict)
	parser *parser // incremental parser over the underlying reader
}

// NewDecoder returns a new decoder that reads from r.
//
// The decoder introduces its own buffering and may read
// data from r beyond the YAML values requested.
func NewDecoder(r io.Reader) *Decoder {
	return &Decoder{
		parser: newParserFromReader(r),
	}
}

// SetStrict sets whether strict decoding behaviour is enabled when
// decoding items in the data (see UnmarshalStrict). By default, decoding is not strict.
func (dec *Decoder) SetStrict(strict bool) {
	dec.strict = strict
}
113 |
114 | // Decode reads the next YAML-encoded value from its input
115 | // and stores it in the value pointed to by v.
116 | //
117 | // See the documentation for Unmarshal for details about the
118 | // conversion of YAML into a Go value.
119 | func (dec *Decoder) Decode(v interface{}) (err error) {
120 | d := newDecoder(dec.strict)
121 | defer handleErr(&err)
122 | node := dec.parser.parse()
123 | if node == nil {
124 | return io.EOF
125 | }
126 | out := reflect.ValueOf(v)
127 | if out.Kind() == reflect.Ptr && !out.IsNil() {
128 | out = out.Elem()
129 | }
130 | d.unmarshal(node, out)
131 | if len(d.terrors) > 0 {
132 | return &TypeError{d.terrors}
133 | }
134 | return nil
135 | }
136 |
137 | func unmarshal(in []byte, out interface{}, strict bool) (err error) {
138 | defer handleErr(&err)
139 | d := newDecoder(strict)
140 | p := newParser(in)
141 | defer p.destroy()
142 | node := p.parse()
143 | if node != nil {
144 | v := reflect.ValueOf(out)
145 | if v.Kind() == reflect.Ptr && !v.IsNil() {
146 | v = v.Elem()
147 | }
148 | d.unmarshal(node, v)
149 | }
150 | if len(d.terrors) > 0 {
151 | return &TypeError{d.terrors}
152 | }
153 | return nil
154 | }
155 |
// Marshal serializes the value provided into a YAML document. The structure
// of the generated document will reflect the structure of the value itself.
// Maps and pointers (to struct, string, int, etc) are accepted as the in value.
//
// Struct fields are only marshalled if they are exported (have an upper case
// first letter), and are marshalled using the field name lowercased as the
// default key. Custom keys may be defined via the "yaml" name in the field
// tag: the content preceding the first comma is used as the key, and the
// following comma-separated options are used to tweak the marshalling process.
// Conflicting names result in a runtime error.
//
// The field tag format accepted is:
//
//     `(...) yaml:"[<key>][,<flag1>[,<flag2>]]" (...)`
//
// The following flags are currently supported:
//
//     omitempty    Only include the field if it's not set to the zero
//                  value for the type or to empty slices or maps.
//                  Zero valued structs will be omitted if all their public
//                  fields are zero, unless they implement an IsZero
//                  method (see the IsZeroer interface type), in which
//                  case the field will be included if that method returns true.
//
//     flow         Marshal using a flow style (useful for structs,
//                  sequences and maps).
//
//     inline       Inline the field, which must be a struct or a map,
//                  causing all of its fields or keys to be processed as if
//                  they were part of the outer struct. For maps, keys must
//                  not conflict with the yaml keys of other struct fields.
//
// In addition, if the key is "-", the field is ignored.
//
// For example:
//
//     type T struct {
//         F int `yaml:"a,omitempty"`
//         B int
//     }
//     yaml.Marshal(&T{B: 2}) // Returns "b: 2\n"
//     yaml.Marshal(&T{F: 1}) // Returns "a: 1\nb: 0\n"
//
func Marshal(in interface{}) (out []byte, err error) {
	defer handleErr(&err)
	e := newEncoder()
	defer e.destroy()
	e.marshalDoc("", reflect.ValueOf(in))
	e.finish()
	out = e.out
	return
}
208 |
// An Encoder writes YAML values to an output stream.
type Encoder struct {
	encoder *encoder // underlying emitter state; flushed by Close
}

// NewEncoder returns a new encoder that writes to w.
// The Encoder should be closed after use to flush all data
// to w.
func NewEncoder(w io.Writer) *Encoder {
	return &Encoder{
		encoder: newEncoderWithWriter(w),
	}
}

// Encode writes the YAML encoding of v to the stream.
// If multiple items are encoded to the stream, the
// second and subsequent document will be preceded
// with a "---" document separator, but the first will not.
//
// See the documentation for Marshal for details about the conversion of Go
// values to YAML.
func (e *Encoder) Encode(v interface{}) (err error) {
	// Errors raised via fail/failf inside marshalDoc are converted
	// back into a returned error by handleErr.
	defer handleErr(&err)
	e.encoder.marshalDoc("", reflect.ValueOf(v))
	return nil
}

// Close closes the encoder by writing any remaining data.
// It does not write a stream terminating string "...".
func (e *Encoder) Close() (err error) {
	defer handleErr(&err)
	e.encoder.finish()
	return nil
}
243 |
244 | func handleErr(err *error) {
245 | if v := recover(); v != nil {
246 | if e, ok := v.(yamlError); ok {
247 | *err = e.err
248 | } else {
249 | panic(v)
250 | }
251 | }
252 | }
253 |
// yamlError wraps an error for transport through panic/recover; only
// panics carrying this type are converted back into returned errors
// by handleErr — anything else propagates as a real panic.
type yamlError struct {
	err error
}

// fail aborts the current operation by panicking with the given
// error wrapped in yamlError (recovered by handleErr).
func fail(err error) {
	panic(yamlError{err})
}

// failf is like fail but formats the message, prefixing it with
// "yaml: " as all errors from this package are.
func failf(format string, args ...interface{}) {
	panic(yamlError{fmt.Errorf("yaml: "+format, args...)})
}

// A TypeError is returned by Unmarshal when one or more fields in
// the YAML document cannot be properly decoded into the requested
// types. When this error is returned, the value is still
// unmarshaled partially.
type TypeError struct {
	Errors []string
}

func (e *TypeError) Error() string {
	return fmt.Sprintf("yaml: unmarshal errors:\n %s", strings.Join(e.Errors, "\n "))
}
277 |
// --------------------------------------------------------------------------
// Maintain a mapping of keys to structure field indexes

// The code in this section was copied from mgo/bson.

// structInfo holds details for the serialization of fields of
// a given struct.
type structInfo struct {
	// FieldsMap indexes fields by their YAML key for decoding lookups.
	FieldsMap map[string]fieldInfo
	// FieldsList preserves declaration order for deterministic encoding.
	FieldsList []fieldInfo

	// InlineMap is the number of the field in the struct that
	// contains an ,inline map, or -1 if there's none.
	InlineMap int
}

// fieldInfo describes how one struct field maps to a YAML key.
type fieldInfo struct {
	Key       string // YAML key (tag name or lowercased field name)
	Num       int    // field index within its immediate struct
	OmitEmpty bool   // ",omitempty" flag
	Flow      bool   // ",flow" flag
	// Id holds the unique field identifier, so we can cheaply
	// check for field duplicates without maintaining an extra map.
	Id int

	// Inline holds the field index if the field is part of an inlined struct.
	Inline []int
}

// structMap caches the computed structInfo per struct type; guarded
// by fieldMapMutex since types may be analyzed concurrently.
var structMap = make(map[reflect.Type]*structInfo)
var fieldMapMutex sync.RWMutex
309 |
310 | func getStructInfo(st reflect.Type) (*structInfo, error) {
311 | fieldMapMutex.RLock()
312 | sinfo, found := structMap[st]
313 | fieldMapMutex.RUnlock()
314 | if found {
315 | return sinfo, nil
316 | }
317 |
318 | n := st.NumField()
319 | fieldsMap := make(map[string]fieldInfo)
320 | fieldsList := make([]fieldInfo, 0, n)
321 | inlineMap := -1
322 | for i := 0; i != n; i++ {
323 | field := st.Field(i)
324 | if field.PkgPath != "" && !field.Anonymous {
325 | continue // Private field
326 | }
327 |
328 | info := fieldInfo{Num: i}
329 |
330 | tag := field.Tag.Get("yaml")
331 | if tag == "" && strings.Index(string(field.Tag), ":") < 0 {
332 | tag = string(field.Tag)
333 | }
334 | if tag == "-" {
335 | continue
336 | }
337 |
338 | inline := false
339 | fields := strings.Split(tag, ",")
340 | if len(fields) > 1 {
341 | for _, flag := range fields[1:] {
342 | switch flag {
343 | case "omitempty":
344 | info.OmitEmpty = true
345 | case "flow":
346 | info.Flow = true
347 | case "inline":
348 | inline = true
349 | default:
350 | return nil, errors.New(fmt.Sprintf("Unsupported flag %q in tag %q of type %s", flag, tag, st))
351 | }
352 | }
353 | tag = fields[0]
354 | }
355 |
356 | if inline {
357 | switch field.Type.Kind() {
358 | case reflect.Map:
359 | if inlineMap >= 0 {
360 | return nil, errors.New("Multiple ,inline maps in struct " + st.String())
361 | }
362 | if field.Type.Key() != reflect.TypeOf("") {
363 | return nil, errors.New("Option ,inline needs a map with string keys in struct " + st.String())
364 | }
365 | inlineMap = info.Num
366 | case reflect.Struct:
367 | sinfo, err := getStructInfo(field.Type)
368 | if err != nil {
369 | return nil, err
370 | }
371 | for _, finfo := range sinfo.FieldsList {
372 | if _, found := fieldsMap[finfo.Key]; found {
373 | msg := "Duplicated key '" + finfo.Key + "' in struct " + st.String()
374 | return nil, errors.New(msg)
375 | }
376 | if finfo.Inline == nil {
377 | finfo.Inline = []int{i, finfo.Num}
378 | } else {
379 | finfo.Inline = append([]int{i}, finfo.Inline...)
380 | }
381 | finfo.Id = len(fieldsList)
382 | fieldsMap[finfo.Key] = finfo
383 | fieldsList = append(fieldsList, finfo)
384 | }
385 | default:
386 | //return nil, errors.New("Option ,inline needs a struct value or map field")
387 | return nil, errors.New("Option ,inline needs a struct value field")
388 | }
389 | continue
390 | }
391 |
392 | if tag != "" {
393 | info.Key = tag
394 | } else {
395 | info.Key = strings.ToLower(field.Name)
396 | }
397 |
398 | if _, found = fieldsMap[info.Key]; found {
399 | msg := "Duplicated key '" + info.Key + "' in struct " + st.String()
400 | return nil, errors.New(msg)
401 | }
402 |
403 | info.Id = len(fieldsList)
404 | fieldsList = append(fieldsList, info)
405 | fieldsMap[info.Key] = info
406 | }
407 |
408 | sinfo = &structInfo{
409 | FieldsMap: fieldsMap,
410 | FieldsList: fieldsList,
411 | InlineMap: inlineMap,
412 | }
413 |
414 | fieldMapMutex.Lock()
415 | structMap[st] = sinfo
416 | fieldMapMutex.Unlock()
417 | return sinfo, nil
418 | }
419 |
420 | // IsZeroer is used to check whether an object is zero to
421 | // determine whether it should be omitted when marshaling
422 | // with the omitempty flag. One notable implementation
423 | // is time.Time.
424 | type IsZeroer interface {
425 | IsZero() bool
426 | }
427 |
428 | func isZero(v reflect.Value) bool {
429 | kind := v.Kind()
430 | if z, ok := v.Interface().(IsZeroer); ok {
431 | if (kind == reflect.Ptr || kind == reflect.Interface) && v.IsNil() {
432 | return true
433 | }
434 | return z.IsZero()
435 | }
436 | switch kind {
437 | case reflect.String:
438 | return len(v.String()) == 0
439 | case reflect.Interface, reflect.Ptr:
440 | return v.IsNil()
441 | case reflect.Slice:
442 | return v.Len() == 0
443 | case reflect.Map:
444 | return v.Len() == 0
445 | case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
446 | return v.Int() == 0
447 | case reflect.Float32, reflect.Float64:
448 | return v.Float() == 0
449 | case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
450 | return v.Uint() == 0
451 | case reflect.Bool:
452 | return !v.Bool()
453 | case reflect.Struct:
454 | vt := v.Type()
455 | for i := v.NumField() - 1; i >= 0; i-- {
456 | if vt.Field(i).PkgPath != "" {
457 | continue // Private field
458 | }
459 | if !isZero(v.Field(i)) {
460 | return false
461 | }
462 | }
463 | return true
464 | }
465 | return false
466 | }
467 |
--------------------------------------------------------------------------------
/vendor/gopkg.in/yaml.v2/yamlh.go:
--------------------------------------------------------------------------------
1 | package yaml
2 |
3 | import (
4 | "fmt"
5 | "io"
6 | )
7 |
8 | // The version directive data.
9 | type yaml_version_directive_t struct {
10 | major int8 // The major version number.
11 | minor int8 // The minor version number.
12 | }
13 |
14 | // The tag directive data.
15 | type yaml_tag_directive_t struct {
16 | handle []byte // The tag handle.
17 | prefix []byte // The tag prefix.
18 | }
19 |
20 | type yaml_encoding_t int
21 |
22 | // The stream encoding.
23 | const (
24 | // Let the parser choose the encoding.
25 | yaml_ANY_ENCODING yaml_encoding_t = iota
26 |
27 | yaml_UTF8_ENCODING // The default UTF-8 encoding.
28 | yaml_UTF16LE_ENCODING // The UTF-16-LE encoding with BOM.
29 | yaml_UTF16BE_ENCODING // The UTF-16-BE encoding with BOM.
30 | )
31 |
32 | type yaml_break_t int
33 |
34 | // Line break types.
35 | const (
36 | // Let the parser choose the break type.
37 | yaml_ANY_BREAK yaml_break_t = iota
38 |
39 | yaml_CR_BREAK // Use CR for line breaks (Mac style).
40 | yaml_LN_BREAK // Use LN for line breaks (Unix style).
41 | yaml_CRLN_BREAK // Use CR LN for line breaks (DOS style).
42 | )
43 |
// yaml_error_type_t classifies errors raised by the parser and emitter.
type yaml_error_type_t int

// Many bad things could happen with the parser and emitter.
const (
	// No error is produced.
	yaml_NO_ERROR yaml_error_type_t = iota

	yaml_MEMORY_ERROR   // Cannot allocate or reallocate a block of memory.
	yaml_READER_ERROR   // Cannot read or decode the input stream.
	yaml_SCANNER_ERROR  // Cannot scan the input stream.
	yaml_PARSER_ERROR   // Cannot parse the input stream.
	yaml_COMPOSER_ERROR // Cannot compose a YAML document.
	yaml_WRITER_ERROR   // Cannot write to the output stream.
	yaml_EMITTER_ERROR  // Cannot emit a YAML stream.
)
59 |
// The pointer position: a location within the input or output stream,
// used for error reporting and token/event boundaries.
type yaml_mark_t struct {
	index  int // The position index.
	line   int // The position line.
	column int // The position column.
}
66 |
// Node Styles

// yaml_style_t is the common underlying type of the per-node style enums.
type yaml_style_t int8

type yaml_scalar_style_t yaml_style_t

// Scalar styles.
const (
	// Let the emitter choose the style.
	yaml_ANY_SCALAR_STYLE yaml_scalar_style_t = iota

	yaml_PLAIN_SCALAR_STYLE         // The plain scalar style.
	yaml_SINGLE_QUOTED_SCALAR_STYLE // The single-quoted scalar style.
	yaml_DOUBLE_QUOTED_SCALAR_STYLE // The double-quoted scalar style.
	yaml_LITERAL_SCALAR_STYLE       // The literal scalar style.
	yaml_FOLDED_SCALAR_STYLE        // The folded scalar style.
)
84 |
type yaml_sequence_style_t yaml_style_t

// Sequence styles.
const (
	// Let the emitter choose the style.
	yaml_ANY_SEQUENCE_STYLE yaml_sequence_style_t = iota

	yaml_BLOCK_SEQUENCE_STYLE // The block sequence style ("- item" lines).
	yaml_FLOW_SEQUENCE_STYLE  // The flow sequence style ("[a, b]").
)
95 |
type yaml_mapping_style_t yaml_style_t

// Mapping styles.
const (
	// Let the emitter choose the style.
	yaml_ANY_MAPPING_STYLE yaml_mapping_style_t = iota

	yaml_BLOCK_MAPPING_STYLE // The block mapping style ("key: value" lines).
	yaml_FLOW_MAPPING_STYLE  // The flow mapping style ("{k: v}").
)
106 |
// Tokens

type yaml_token_type_t int

// Token types.
const (
	// An empty token.
	yaml_NO_TOKEN yaml_token_type_t = iota

	yaml_STREAM_START_TOKEN // A STREAM-START token.
	yaml_STREAM_END_TOKEN   // A STREAM-END token.

	yaml_VERSION_DIRECTIVE_TOKEN // A VERSION-DIRECTIVE token.
	yaml_TAG_DIRECTIVE_TOKEN     // A TAG-DIRECTIVE token.
	yaml_DOCUMENT_START_TOKEN    // A DOCUMENT-START token.
	yaml_DOCUMENT_END_TOKEN      // A DOCUMENT-END token.

	yaml_BLOCK_SEQUENCE_START_TOKEN // A BLOCK-SEQUENCE-START token.
	yaml_BLOCK_MAPPING_START_TOKEN  // A BLOCK-MAPPING-START token.
	yaml_BLOCK_END_TOKEN            // A BLOCK-END token.

	yaml_FLOW_SEQUENCE_START_TOKEN // A FLOW-SEQUENCE-START token.
	yaml_FLOW_SEQUENCE_END_TOKEN   // A FLOW-SEQUENCE-END token.
	yaml_FLOW_MAPPING_START_TOKEN  // A FLOW-MAPPING-START token.
	yaml_FLOW_MAPPING_END_TOKEN    // A FLOW-MAPPING-END token.

	yaml_BLOCK_ENTRY_TOKEN // A BLOCK-ENTRY token.
	yaml_FLOW_ENTRY_TOKEN  // A FLOW-ENTRY token.
	yaml_KEY_TOKEN         // A KEY token.
	yaml_VALUE_TOKEN       // A VALUE token.

	yaml_ALIAS_TOKEN  // An ALIAS token.
	yaml_ANCHOR_TOKEN // An ANCHOR token.
	yaml_TAG_TOKEN    // A TAG token.
	yaml_SCALAR_TOKEN // A SCALAR token.
)
143 |
144 | func (tt yaml_token_type_t) String() string {
145 | switch tt {
146 | case yaml_NO_TOKEN:
147 | return "yaml_NO_TOKEN"
148 | case yaml_STREAM_START_TOKEN:
149 | return "yaml_STREAM_START_TOKEN"
150 | case yaml_STREAM_END_TOKEN:
151 | return "yaml_STREAM_END_TOKEN"
152 | case yaml_VERSION_DIRECTIVE_TOKEN:
153 | return "yaml_VERSION_DIRECTIVE_TOKEN"
154 | case yaml_TAG_DIRECTIVE_TOKEN:
155 | return "yaml_TAG_DIRECTIVE_TOKEN"
156 | case yaml_DOCUMENT_START_TOKEN:
157 | return "yaml_DOCUMENT_START_TOKEN"
158 | case yaml_DOCUMENT_END_TOKEN:
159 | return "yaml_DOCUMENT_END_TOKEN"
160 | case yaml_BLOCK_SEQUENCE_START_TOKEN:
161 | return "yaml_BLOCK_SEQUENCE_START_TOKEN"
162 | case yaml_BLOCK_MAPPING_START_TOKEN:
163 | return "yaml_BLOCK_MAPPING_START_TOKEN"
164 | case yaml_BLOCK_END_TOKEN:
165 | return "yaml_BLOCK_END_TOKEN"
166 | case yaml_FLOW_SEQUENCE_START_TOKEN:
167 | return "yaml_FLOW_SEQUENCE_START_TOKEN"
168 | case yaml_FLOW_SEQUENCE_END_TOKEN:
169 | return "yaml_FLOW_SEQUENCE_END_TOKEN"
170 | case yaml_FLOW_MAPPING_START_TOKEN:
171 | return "yaml_FLOW_MAPPING_START_TOKEN"
172 | case yaml_FLOW_MAPPING_END_TOKEN:
173 | return "yaml_FLOW_MAPPING_END_TOKEN"
174 | case yaml_BLOCK_ENTRY_TOKEN:
175 | return "yaml_BLOCK_ENTRY_TOKEN"
176 | case yaml_FLOW_ENTRY_TOKEN:
177 | return "yaml_FLOW_ENTRY_TOKEN"
178 | case yaml_KEY_TOKEN:
179 | return "yaml_KEY_TOKEN"
180 | case yaml_VALUE_TOKEN:
181 | return "yaml_VALUE_TOKEN"
182 | case yaml_ALIAS_TOKEN:
183 | return "yaml_ALIAS_TOKEN"
184 | case yaml_ANCHOR_TOKEN:
185 | return "yaml_ANCHOR_TOKEN"
186 | case yaml_TAG_TOKEN:
187 | return "yaml_TAG_TOKEN"
188 | case yaml_SCALAR_TOKEN:
189 | return "yaml_SCALAR_TOKEN"
190 | }
191 | return ""
192 | }
193 |
// The token structure: one lexical token produced by the scanner.
// Which fields are meaningful depends on typ, as noted per field.
type yaml_token_t struct {
	// The token type.
	typ yaml_token_type_t

	// The start/end of the token.
	start_mark, end_mark yaml_mark_t

	// The stream encoding (for yaml_STREAM_START_TOKEN).
	encoding yaml_encoding_t

	// The alias/anchor/scalar value or tag/tag directive handle
	// (for yaml_ALIAS_TOKEN, yaml_ANCHOR_TOKEN, yaml_SCALAR_TOKEN, yaml_TAG_TOKEN, yaml_TAG_DIRECTIVE_TOKEN).
	value []byte

	// The tag suffix (for yaml_TAG_TOKEN).
	suffix []byte

	// The tag directive prefix (for yaml_TAG_DIRECTIVE_TOKEN).
	prefix []byte

	// The scalar style (for yaml_SCALAR_TOKEN).
	style yaml_scalar_style_t

	// The version directive major/minor (for yaml_VERSION_DIRECTIVE_TOKEN).
	major, minor int8
}
221 |
// Events

type yaml_event_type_t int8

// Event types.
const (
	// An empty event.
	yaml_NO_EVENT yaml_event_type_t = iota

	yaml_STREAM_START_EVENT   // A STREAM-START event.
	yaml_STREAM_END_EVENT     // A STREAM-END event.
	yaml_DOCUMENT_START_EVENT // A DOCUMENT-START event.
	yaml_DOCUMENT_END_EVENT   // A DOCUMENT-END event.
	yaml_ALIAS_EVENT          // An ALIAS event.
	yaml_SCALAR_EVENT         // A SCALAR event.
	yaml_SEQUENCE_START_EVENT // A SEQUENCE-START event.
	yaml_SEQUENCE_END_EVENT   // A SEQUENCE-END event.
	yaml_MAPPING_START_EVENT  // A MAPPING-START event.
	yaml_MAPPING_END_EVENT    // A MAPPING-END event.
)
242 |
// eventStrings maps each yaml_event_type_t value to a human-readable
// name, indexed by the constant's iota value (used by String below).
var eventStrings = []string{
	yaml_NO_EVENT:             "none",
	yaml_STREAM_START_EVENT:   "stream start",
	yaml_STREAM_END_EVENT:     "stream end",
	yaml_DOCUMENT_START_EVENT: "document start",
	yaml_DOCUMENT_END_EVENT:   "document end",
	yaml_ALIAS_EVENT:          "alias",
	yaml_SCALAR_EVENT:         "scalar",
	yaml_SEQUENCE_START_EVENT: "sequence start",
	yaml_SEQUENCE_END_EVENT:   "sequence end",
	yaml_MAPPING_START_EVENT:  "mapping start",
	yaml_MAPPING_END_EVENT:    "mapping end",
}
256 |
257 | func (e yaml_event_type_t) String() string {
258 | if e < 0 || int(e) >= len(eventStrings) {
259 | return fmt.Sprintf("unknown event %d", e)
260 | }
261 | return eventStrings[e]
262 | }
263 |
// The event structure: one parsing or emitting event. Which fields are
// meaningful depends on typ, as noted per field.
type yaml_event_t struct {

	// The event type.
	typ yaml_event_type_t

	// The start and end of the event.
	start_mark, end_mark yaml_mark_t

	// The document encoding (for yaml_STREAM_START_EVENT).
	encoding yaml_encoding_t

	// The version directive (for yaml_DOCUMENT_START_EVENT).
	version_directive *yaml_version_directive_t

	// The list of tag directives (for yaml_DOCUMENT_START_EVENT).
	tag_directives []yaml_tag_directive_t

	// The anchor (for yaml_SCALAR_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT, yaml_ALIAS_EVENT).
	anchor []byte

	// The tag (for yaml_SCALAR_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT).
	tag []byte

	// The scalar value (for yaml_SCALAR_EVENT).
	value []byte

	// Is the document start/end indicator implicit, or the tag optional?
	// (for yaml_DOCUMENT_START_EVENT, yaml_DOCUMENT_END_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT, yaml_SCALAR_EVENT).
	implicit bool

	// Is the tag optional for any non-plain style? (for yaml_SCALAR_EVENT).
	quoted_implicit bool

	// The style (for yaml_SCALAR_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT).
	// Interpret via the scalar_style/sequence_style/mapping_style accessors.
	style yaml_style_t
}
301 |
// Typed views of the event's generic style field; which one is valid
// depends on the event type.
func (e *yaml_event_t) scalar_style() yaml_scalar_style_t     { return yaml_scalar_style_t(e.style) }
func (e *yaml_event_t) sequence_style() yaml_sequence_style_t { return yaml_sequence_style_t(e.style) }
func (e *yaml_event_t) mapping_style() yaml_mapping_style_t   { return yaml_mapping_style_t(e.style) }
305 |
// Nodes

// Well-known YAML 1.1 tag URIs used to type scalars and collections.
const (
	yaml_NULL_TAG      = "tag:yaml.org,2002:null"      // The tag !!null with the only possible value: null.
	yaml_BOOL_TAG      = "tag:yaml.org,2002:bool"      // The tag !!bool with the values: true and false.
	yaml_STR_TAG       = "tag:yaml.org,2002:str"       // The tag !!str for string values.
	yaml_INT_TAG       = "tag:yaml.org,2002:int"       // The tag !!int for integer values.
	yaml_FLOAT_TAG     = "tag:yaml.org,2002:float"     // The tag !!float for float values.
	yaml_TIMESTAMP_TAG = "tag:yaml.org,2002:timestamp" // The tag !!timestamp for date and time values.

	yaml_SEQ_TAG = "tag:yaml.org,2002:seq" // The tag !!seq is used to denote sequences.
	yaml_MAP_TAG = "tag:yaml.org,2002:map" // The tag !!map is used to denote mapping.

	// Not in original libyaml.
	yaml_BINARY_TAG = "tag:yaml.org,2002:binary"
	yaml_MERGE_TAG  = "tag:yaml.org,2002:merge"

	yaml_DEFAULT_SCALAR_TAG   = yaml_STR_TAG // The default scalar tag is !!str.
	yaml_DEFAULT_SEQUENCE_TAG = yaml_SEQ_TAG // The default sequence tag is !!seq.
	yaml_DEFAULT_MAPPING_TAG  = yaml_MAP_TAG // The default mapping tag is !!map.
)
327 |
type yaml_node_type_t int

// Node types.
const (
	// An empty node.
	yaml_NO_NODE yaml_node_type_t = iota

	yaml_SCALAR_NODE   // A scalar node.
	yaml_SEQUENCE_NODE // A sequence node.
	yaml_MAPPING_NODE  // A mapping node.
)
339 |
// An element of a sequence node: an index into the document's nodes slice.
type yaml_node_item_t int

// An element of a mapping node: key/value are indexes into the
// document's nodes slice.
type yaml_node_pair_t struct {
	key   int // The key of the element.
	value int // The value of the element.
}
348 |
// The node structure. Only the sub-struct matching typ is populated.
type yaml_node_t struct {
	typ yaml_node_type_t // The node type.
	tag []byte           // The node tag.

	// The node data.

	// The scalar parameters (for yaml_SCALAR_NODE).
	scalar struct {
		value  []byte              // The scalar value.
		length int                 // The length of the scalar value.
		style  yaml_scalar_style_t // The scalar style.
	}

	// The sequence parameters (for YAML_SEQUENCE_NODE).
	sequence struct {
		items_data []yaml_node_item_t // The stack of sequence items.
		style      yaml_sequence_style_t // The sequence style.
	}

	// The mapping parameters (for yaml_MAPPING_NODE).
	mapping struct {
		pairs_data  []yaml_node_pair_t // The stack of mapping pairs (key, value).
		pairs_start *yaml_node_pair_t  // The beginning of the stack.
		pairs_end   *yaml_node_pair_t  // The end of the stack.
		pairs_top   *yaml_node_pair_t  // The top of the stack.
		style       yaml_mapping_style_t // The mapping style.
	}

	start_mark yaml_mark_t // The beginning of the node.
	end_mark   yaml_mark_t // The end of the node.

}
382 |
// The document structure: a fully composed YAML document.
type yaml_document_t struct {

	// The document nodes.
	nodes []yaml_node_t

	// The version directive.
	version_directive *yaml_version_directive_t

	// The list of tag directives.
	tag_directives_data  []yaml_tag_directive_t
	tag_directives_start int // The beginning of the tag directives list.
	tag_directives_end   int // The end of the tag directives list.

	start_implicit int // Is the document start indicator implicit?
	end_implicit   int // Is the document end indicator implicit?

	// The start/end of the document.
	start_mark, end_mark yaml_mark_t
}
403 |
// The prototype of a read handler.
//
// The read handler is called when the parser needs to read more bytes from the
// source. The handler should write not more than size bytes to the buffer.
// The number of written bytes should be set to the size_read variable.
//
// [in,out] data A pointer to an application data specified by
//     yaml_parser_set_input().
// [out]   buffer The buffer to write the data from the source.
// [in]    size   The size of the buffer.
// [out]   size_read The actual number of bytes read from the source.
//
// On success, the handler should return 1. If the handler failed,
// the returned value should be 0. On EOF, the handler should set the
// size_read to 0 and return 1.
//
// NOTE(review): the paragraphs above describe the original libyaml C
// contract; the Go signature instead returns the byte count n and an
// error — presumably with io.Reader/io.EOF semantics, but confirm
// against the reader code before relying on that.
type yaml_read_handler_t func(parser *yaml_parser_t, buffer []byte) (n int, err error)
420 |
// This structure holds information about a potential simple key
// (a key with no explicit '?' indicator, tracked by the scanner).
type yaml_simple_key_t struct {
	possible     bool        // Is a simple key possible?
	required     bool        // Is a simple key required?
	token_number int         // The number of the token.
	mark         yaml_mark_t // The position mark.
}
428 |
// The states of the parser.
type yaml_parser_state_t int

const (
	yaml_PARSE_STREAM_START_STATE yaml_parser_state_t = iota

	yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE           // Expect the beginning of an implicit document.
	yaml_PARSE_DOCUMENT_START_STATE                    // Expect DOCUMENT-START.
	yaml_PARSE_DOCUMENT_CONTENT_STATE                  // Expect the content of a document.
	yaml_PARSE_DOCUMENT_END_STATE                      // Expect DOCUMENT-END.
	yaml_PARSE_BLOCK_NODE_STATE                        // Expect a block node.
	yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE // Expect a block node or indentless sequence.
	yaml_PARSE_FLOW_NODE_STATE                         // Expect a flow node.
	yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE        // Expect the first entry of a block sequence.
	yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE              // Expect an entry of a block sequence.
	yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE         // Expect an entry of an indentless sequence.
	yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE           // Expect the first key of a block mapping.
	yaml_PARSE_BLOCK_MAPPING_KEY_STATE                 // Expect a block mapping key.
	yaml_PARSE_BLOCK_MAPPING_VALUE_STATE               // Expect a block mapping value.
	yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE         // Expect the first entry of a flow sequence.
	yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE               // Expect an entry of a flow sequence.
	yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE   // Expect a key of an ordered mapping.
	yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE // Expect a value of an ordered mapping.
	yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE   // Expect the end of an ordered mapping entry.
	yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE            // Expect the first key of a flow mapping.
	yaml_PARSE_FLOW_MAPPING_KEY_STATE                  // Expect a key of a flow mapping.
	yaml_PARSE_FLOW_MAPPING_VALUE_STATE                // Expect a value of a flow mapping.
	yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE          // Expect an empty value of a flow mapping.
	yaml_PARSE_END_STATE                               // Expect nothing.
)
459 |
460 | func (ps yaml_parser_state_t) String() string {
461 | switch ps {
462 | case yaml_PARSE_STREAM_START_STATE:
463 | return "yaml_PARSE_STREAM_START_STATE"
464 | case yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE:
465 | return "yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE"
466 | case yaml_PARSE_DOCUMENT_START_STATE:
467 | return "yaml_PARSE_DOCUMENT_START_STATE"
468 | case yaml_PARSE_DOCUMENT_CONTENT_STATE:
469 | return "yaml_PARSE_DOCUMENT_CONTENT_STATE"
470 | case yaml_PARSE_DOCUMENT_END_STATE:
471 | return "yaml_PARSE_DOCUMENT_END_STATE"
472 | case yaml_PARSE_BLOCK_NODE_STATE:
473 | return "yaml_PARSE_BLOCK_NODE_STATE"
474 | case yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE:
475 | return "yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE"
476 | case yaml_PARSE_FLOW_NODE_STATE:
477 | return "yaml_PARSE_FLOW_NODE_STATE"
478 | case yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE:
479 | return "yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE"
480 | case yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE:
481 | return "yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE"
482 | case yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE:
483 | return "yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE"
484 | case yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE:
485 | return "yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE"
486 | case yaml_PARSE_BLOCK_MAPPING_KEY_STATE:
487 | return "yaml_PARSE_BLOCK_MAPPING_KEY_STATE"
488 | case yaml_PARSE_BLOCK_MAPPING_VALUE_STATE:
489 | return "yaml_PARSE_BLOCK_MAPPING_VALUE_STATE"
490 | case yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE:
491 | return "yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE"
492 | case yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE:
493 | return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE"
494 | case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE:
495 | return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE"
496 | case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE:
497 | return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE"
498 | case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE:
499 | return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE"
500 | case yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE:
501 | return "yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE"
502 | case yaml_PARSE_FLOW_MAPPING_KEY_STATE:
503 | return "yaml_PARSE_FLOW_MAPPING_KEY_STATE"
504 | case yaml_PARSE_FLOW_MAPPING_VALUE_STATE:
505 | return "yaml_PARSE_FLOW_MAPPING_VALUE_STATE"
506 | case yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE:
507 | return "yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE"
508 | case yaml_PARSE_END_STATE:
509 | return "yaml_PARSE_END_STATE"
510 | }
511 | return ""
512 | }
513 |
// This structure holds aliases data: one resolved anchor, recorded so
// later *alias references can be mapped back to a node id.
type yaml_alias_data_t struct {
	anchor []byte      // The anchor.
	index  int         // The node id.
	mark   yaml_mark_t // The anchor mark.
}
520 |
// The parser structure.
//
// All members are internal. Manage the structure using the
// yaml_parser_ family of functions.
type yaml_parser_t struct {

	// Error handling

	error yaml_error_type_t // Error type.

	problem string // Error description.

	// The byte about which the problem occurred.
	problem_offset int
	problem_value  int
	problem_mark   yaml_mark_t

	// The error context.
	context      string
	context_mark yaml_mark_t

	// Reader stuff

	read_handler yaml_read_handler_t // Read handler.

	input_reader io.Reader // File input data.
	input        []byte    // String input data.
	input_pos    int       // Current read position within input.

	eof bool // EOF flag

	buffer     []byte // The working buffer.
	buffer_pos int    // The current position of the buffer.

	unread int // The number of unread characters in the buffer.

	raw_buffer     []byte // The raw buffer.
	raw_buffer_pos int    // The current position of the buffer.

	encoding yaml_encoding_t // The input encoding.

	offset int         // The offset of the current position (in bytes).
	mark   yaml_mark_t // The mark of the current position.

	// Scanner stuff

	stream_start_produced bool // Have we started to scan the input stream?
	stream_end_produced   bool // Have we reached the end of the input stream?

	flow_level int // The number of unclosed '[' and '{' indicators.

	tokens          []yaml_token_t // The tokens queue.
	tokens_head     int            // The head of the tokens queue.
	tokens_parsed   int            // The number of tokens fetched from the queue.
	token_available bool           // Does the tokens queue contain a token ready for dequeueing.

	indent  int   // The current indentation level.
	indents []int // The indentation levels stack.

	simple_key_allowed bool                // May a simple key occur at the current position?
	simple_keys        []yaml_simple_key_t // The stack of simple keys.

	// Parser stuff

	state          yaml_parser_state_t    // The current parser state.
	states         []yaml_parser_state_t  // The parser states stack.
	marks          []yaml_mark_t          // The stack of marks.
	tag_directives []yaml_tag_directive_t // The list of TAG directives.

	// Dumper stuff

	aliases []yaml_alias_data_t // The alias data.

	document *yaml_document_t // The currently parsed document.
}
596 |
// Emitter Definitions

// The prototype of a write handler.
//
// The write handler is called when the emitter needs to flush the accumulated
// characters to the output. The handler should write @a size bytes of the
// @a buffer to the output.
//
// @param[in,out] data A pointer to an application data specified by
//     yaml_emitter_set_output().
// @param[in] buffer The buffer with bytes to be written.
// @param[in] size   The size of the buffer.
//
// @returns On success, the handler should return @c 1. If the handler failed,
// the returned value should be @c 0.
//
// NOTE(review): the @-style paragraphs above describe the original
// libyaml C contract; the Go handler instead reports failure via the
// returned error.
type yaml_write_handler_t func(emitter *yaml_emitter_t, buffer []byte) error
614 |
type yaml_emitter_state_t int

// The emitter states.
const (
	// Expect STREAM-START.
	yaml_EMIT_STREAM_START_STATE yaml_emitter_state_t = iota

	yaml_EMIT_FIRST_DOCUMENT_START_STATE       // Expect the first DOCUMENT-START or STREAM-END.
	yaml_EMIT_DOCUMENT_START_STATE             // Expect DOCUMENT-START or STREAM-END.
	yaml_EMIT_DOCUMENT_CONTENT_STATE           // Expect the content of a document.
	yaml_EMIT_DOCUMENT_END_STATE               // Expect DOCUMENT-END.
	yaml_EMIT_FLOW_SEQUENCE_FIRST_ITEM_STATE   // Expect the first item of a flow sequence.
	yaml_EMIT_FLOW_SEQUENCE_ITEM_STATE         // Expect an item of a flow sequence.
	yaml_EMIT_FLOW_MAPPING_FIRST_KEY_STATE     // Expect the first key of a flow mapping.
	yaml_EMIT_FLOW_MAPPING_KEY_STATE           // Expect a key of a flow mapping.
	yaml_EMIT_FLOW_MAPPING_SIMPLE_VALUE_STATE  // Expect a value for a simple key of a flow mapping.
	yaml_EMIT_FLOW_MAPPING_VALUE_STATE         // Expect a value of a flow mapping.
	yaml_EMIT_BLOCK_SEQUENCE_FIRST_ITEM_STATE  // Expect the first item of a block sequence.
	yaml_EMIT_BLOCK_SEQUENCE_ITEM_STATE        // Expect an item of a block sequence.
	yaml_EMIT_BLOCK_MAPPING_FIRST_KEY_STATE    // Expect the first key of a block mapping.
	yaml_EMIT_BLOCK_MAPPING_KEY_STATE          // Expect the key of a block mapping.
	yaml_EMIT_BLOCK_MAPPING_SIMPLE_VALUE_STATE // Expect a value for a simple key of a block mapping.
	yaml_EMIT_BLOCK_MAPPING_VALUE_STATE        // Expect a value of a block mapping.
	yaml_EMIT_END_STATE                        // Expect nothing.
)
640 |
// The emitter structure.
//
// All members are internal. Manage the structure using the @c yaml_emitter_
// family of functions.
type yaml_emitter_t struct {

	// Error handling

	error   yaml_error_type_t // Error type.
	problem string            // Error description.

	// Writer stuff

	write_handler yaml_write_handler_t // Write handler.

	output_buffer *[]byte   // String output data.
	output_writer io.Writer // File output data.

	buffer     []byte // The working buffer.
	buffer_pos int    // The current position of the buffer.

	raw_buffer     []byte // The raw buffer.
	raw_buffer_pos int    // The current position of the buffer.

	encoding yaml_encoding_t // The stream encoding.

	// Emitter stuff

	canonical   bool         // If the output is in the canonical style?
	best_indent int          // The number of indentation spaces.
	best_width  int          // The preferred width of the output lines.
	unicode     bool         // Allow unescaped non-ASCII characters?
	line_break  yaml_break_t // The preferred line break.

	state  yaml_emitter_state_t   // The current emitter state.
	states []yaml_emitter_state_t // The stack of states.

	events      []yaml_event_t // The event queue.
	events_head int            // The head of the event queue.

	indents []int // The stack of indentation levels.

	tag_directives []yaml_tag_directive_t // The list of tag directives.

	indent int // The current indentation level.

	flow_level int // The current flow level.

	root_context       bool // Is it the document root context?
	sequence_context   bool // Is it a sequence context?
	mapping_context    bool // Is it a mapping context?
	simple_key_context bool // Is it a simple mapping key context?

	line       int  // The current line.
	column     int  // The current column.
	whitespace bool // If the last character was a whitespace?
	indention  bool // If the last character was an indentation character (' ', '-', '?', ':')?
	open_ended bool // If an explicit document end is required?

	// Anchor analysis.
	anchor_data struct {
		anchor []byte // The anchor value.
		alias  bool   // Is it an alias?
	}

	// Tag analysis.
	tag_data struct {
		handle []byte // The tag handle.
		suffix []byte // The tag suffix.
	}

	// Scalar analysis.
	scalar_data struct {
		value                 []byte              // The scalar value.
		multiline             bool                // Does the scalar contain line breaks?
		flow_plain_allowed    bool                // Can the scalar be expressed in the flow plain style?
		block_plain_allowed   bool                // Can the scalar be expressed in the block plain style?
		single_quoted_allowed bool                // Can the scalar be expressed in the single quoted style?
		block_allowed         bool                // Can the scalar be expressed in the literal or folded styles?
		style                 yaml_scalar_style_t // The output style.
	}

	// Dumper stuff

	opened bool // If the stream was already opened?
	closed bool // If the stream was already closed?

	// The information associated with the document nodes.
	anchors *struct {
		references int  // The number of references.
		anchor     int  // The anchor id.
		serialized bool // If the node has been emitted?
	}

	last_anchor_id int // The last assigned anchor id.

	document *yaml_document_t // The currently emitted document.
}
739 |
--------------------------------------------------------------------------------
/vendor/gopkg.in/yaml.v2/yamlprivateh.go:
--------------------------------------------------------------------------------
1 | package yaml
2 |
// Buffer and container sizing constants shared by the reader, writer,
// scanner, and emitter.
const (
	// The size of the input raw buffer.
	input_raw_buffer_size = 512

	// The size of the input buffer.
	// It should be possible to decode the whole raw buffer
	// (a raw byte can decode to at most 3 working-buffer bytes here).
	input_buffer_size = input_raw_buffer_size * 3

	// The size of the output buffer.
	output_buffer_size = 128

	// The size of the output raw buffer.
	// It should be possible to encode the whole output buffer.
	output_raw_buffer_size = (output_buffer_size*2 + 2)

	// The size of other stacks and queues.
	initial_stack_size  = 16
	initial_queue_size  = 16
	initial_string_size = 16
)
23 |
// Check if the character at the specified position is an alphabetical
// character, a digit, '_', or '-'.
func is_alpha(b []byte, i int) bool {
	c := b[i]
	switch {
	case c >= '0' && c <= '9', c >= 'A' && c <= 'Z', c >= 'a' && c <= 'z':
		return true
	}
	return c == '_' || c == '-'
}
29 |
// Check if the character at the specified position is a decimal digit.
func is_digit(b []byte, i int) bool {
	c := b[i]
	return '0' <= c && c <= '9'
}
34 |
// Get the numeric value of the decimal digit at the specified position.
// (The int conversion happens before the subtraction, matching the
// original — callers only pass positions known to hold digits.)
func as_digit(b []byte, i int) int {
	d := int(b[i])
	return d - '0'
}
39 |
// Check if the character at the specified position is a hex-digit
// (0-9, A-F, or a-f).
func is_hex(b []byte, i int) bool {
	c := b[i]
	switch {
	case c >= '0' && c <= '9':
		return true
	case c >= 'A' && c <= 'F':
		return true
	case c >= 'a' && c <= 'f':
		return true
	}
	return false
}
44 |
// Get the numeric value of the hex-digit at the specified position.
// Callers only pass positions known to hold valid hex digits.
func as_hex(b []byte, i int) int {
	c := b[i]
	switch {
	case c >= 'A' && c <= 'F':
		return int(c-'A') + 10
	case c >= 'a' && c <= 'f':
		return int(c-'a') + 10
	}
	// Digit case: convert before subtracting, matching the original.
	return int(c) - '0'
}
56 |
// Check if the byte at the specified position is ASCII (high bit clear).
func is_ascii(b []byte, i int) bool {
	return b[i]&0x80 == 0
}
61 |
// Check if the character starting at position i can be printed unescaped.
// The check inspects the UTF-8 lead byte and, only where needed, one or
// two continuation bytes — the guard structure preserves the original's
// short-circuit reads of b[i+1]/b[i+2].
func is_printable(b []byte, i int) bool {
	c := b[i]
	switch {
	case c == 0x0A: // LF is printable.
		return true
	case c >= 0x20 && c <= 0x7E: // Printable ASCII range.
		return true
	case c == 0xC2: // Two-byte sequences starting at U+00A0.
		return b[i+1] >= 0xA0
	case c > 0xC2 && c < 0xED: // Lead bytes up to (but excluding) the surrogate block.
		return true
	case c == 0xED: // Exclude UTF-16 surrogate encodings (U+D800..U+DFFF).
		return b[i+1] < 0xA0
	case c == 0xEE:
		return true
	case c == 0xEF: // U+E000..U+FFFD, excluding U+FEFF, U+FFFE, U+FFFF.
		return !(b[i+1] == 0xBB && b[i+2] == 0xBF) &&
			!(b[i+1] == 0xBF && (b[i+2] == 0xBE || b[i+2] == 0xBF))
	}
	return false
}
74 |
// Check if the byte at the specified position is NUL.
func is_z(b []byte, i int) bool {
	c := b[i]
	return c == 0
}
79 |
// Check if the beginning of the buffer is a UTF-8 BOM (EF BB BF).
// NOTE(review): the index argument i is ignored — the comparison is
// always against b[0..2]. Presumably every caller passes the buffer
// start; confirm before relying on i here.
func is_bom(b []byte, i int) bool {
	return b[0] == 0xEF && b[1] == 0xBB && b[2] == 0xBF
}
84 |
// Check if the byte at the specified position is an ASCII space.
func is_space(b []byte, i int) bool {
	return b[i] == 0x20
}
89 |
// Check if the byte at the specified position is a horizontal tab.
func is_tab(b []byte, i int) bool {
	return b[i] == 0x09
}
94 |
// Check if the byte at the specified position is blank (space or tab).
// Kept as a single inline check (no is_space/is_tab calls) like the
// original, for inlining.
func is_blank(b []byte, i int) bool {
	switch b[i] {
	case ' ', '\t':
		return true
	}
	return false
}
100 |
// Check if the character at the specified position is a line break:
// CR, LF, NEL (U+0085), LS (U+2028), or PS (U+2029).
// b[i+1]/b[i+2] are read only behind the matching lead byte, preserving
// the original's short-circuit behavior.
func is_break(b []byte, i int) bool {
	switch b[i] {
	case '\r', '\n': // CR (#xD), LF (#xA)
		return true
	case 0xC2: // NEL (#x85)
		return b[i+1] == 0x85
	case 0xE2: // LS (#x2028) or PS (#x2029)
		return b[i+1] == 0x80 && (b[i+2] == 0xA8 || b[i+2] == 0xA9)
	}
	return false
}
109 |
// is_crlf reports whether a CR LF pair starts at position i.
// b[i+1] is read only when b[i] is CR, preserving the original's
// short-circuit behavior.
func is_crlf(b []byte, i int) bool {
	if b[i] != '\r' {
		return false
	}
	return b[i+1] == '\n'
}
113 |
// Check if the character is a line break (CR, LF, NEL, LS, PS) or NUL.
// Manually merged (no is_break/is_z calls) like the original, for
// inlining; continuation bytes are read only behind the lead byte.
func is_breakz(b []byte, i int) bool {
	switch b[i] {
	case '\r', '\n', 0x00: // CR (#xD), LF (#xA), NUL
		return true
	case 0xC2: // NEL (#x85)
		return b[i+1] == 0x85
	case 0xE2: // LS (#x2028) or PS (#x2029)
		return b[i+1] == 0x80 && (b[i+2] == 0xA8 || b[i+2] == 0xA9)
	}
	return false
}
126 |
// Check if the character is a space, a line break (CR, LF, NEL, LS, PS),
// or NUL. Manually merged like the original, for inlining.
func is_spacez(b []byte, i int) bool {
	switch b[i] {
	case ' ', '\r', '\n', 0x00:
		return true
	case 0xC2: // NEL (#x85)
		return b[i+1] == 0x85
	case 0xE2: // LS (#x2028) or PS (#x2029)
		return b[i+1] == 0x80 && (b[i+2] == 0xA8 || b[i+2] == 0xA9)
	}
	return false
}
140 |
// Check if the character is a space, tab, line break (CR, LF, NEL, LS,
// PS), or NUL. Manually merged like the original, for inlining.
func is_blankz(b []byte, i int) bool {
	switch b[i] {
	case ' ', '\t', '\r', '\n', 0x00:
		return true
	case 0xC2: // NEL (#x85)
		return b[i+1] == 0x85
	case 0xE2: // LS (#x2028) or PS (#x2029)
		return b[i+1] == 0x80 && (b[i+2] == 0xA8 || b[i+2] == 0xA9)
	}
	return false
}
154 |
// Determine the width of the UTF-8 sequence whose leading byte is b:
// 1-4 for a valid lead byte, 0 for a continuation byte (10xxxxxx) or
// an invalid lead byte.
func width(b byte) int {
	// Don't replace these by a switch without first
	// confirming that it is being inlined.
	if b&0x80 == 0x00 { // 0xxxxxxx: single-byte (ASCII).
		return 1
	}
	if b&0xE0 == 0xC0 { // 110xxxxx: two-byte sequence.
		return 2
	}
	if b&0xF0 == 0xE0 { // 1110xxxx: three-byte sequence.
		return 3
	}
	if b&0xF8 == 0xF0 { // 11110xxx: four-byte sequence.
		return 4
	}
	return 0

}
174 |
--------------------------------------------------------------------------------