├── README.md
├── mattertee
├── vendor
│ ├── gopkg.in
│ │ └── yaml.v2
│ │ │ ├── LICENSE
│ │ │ ├── writerc.go
│ │ │ ├── sorter.go
│ │ │ ├── yamlprivateh.go
│ │ │ ├── resolve.go
│ │ │ ├── encode.go
│ │ │ ├── yaml.go
│ │ │ ├── readerc.go
│ │ │ ├── decode.go
│ │ │ ├── apic.go
│ │ │ └── yamlh.go
│ ├── manifest
│ └── github.com
│ │ ├── gorilla
│ │ └── schema
│ │ │ ├── LICENSE
│ │ │ ├── converter.go
│ │ │ ├── doc.go
│ │ │ ├── cache.go
│ │ │ └── decoder.go
│ │ └── 42wim
│ │ └── matterbridge
│ │ └── matterhook
│ │ ├── matterhook.go
│ │ └── LICENSE
├── README.md
└── main.go
├── git-mattermost-hook
├── README.md
└── git-mattermost-hook
└── LICENSE
/README.md:
--------------------------------------------------------------------------------
1 | # mattertee #
2 |
3 | [mattertee](https://github.com/42wim/matterstuff/tree/master/mattertee) works like the [tee](http://en.wikipedia.org/wiki/Tee_(command)) command (and is inspired by [slacktee](https://github.com/course-hero/slacktee)).
4 | Instead of writing the standard input to files, *mattertee* posts it to your Mattermost installation.
5 |
--------------------------------------------------------------------------------
/mattertee/vendor/gopkg.in/yaml.v2/LICENSE:
--------------------------------------------------------------------------------
1 | Copyright 2011-2016 Canonical Ltd.
2 |
3 | Licensed under the Apache License, Version 2.0 (the "License");
4 | you may not use this file except in compliance with the License.
5 | You may obtain a copy of the License at
6 |
7 | http://www.apache.org/licenses/LICENSE-2.0
8 |
9 | Unless required by applicable law or agreed to in writing, software
10 | distributed under the License is distributed on an "AS IS" BASIS,
11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | See the License for the specific language governing permissions and
13 | limitations under the License.
14 |
--------------------------------------------------------------------------------
/mattertee/vendor/manifest:
--------------------------------------------------------------------------------
1 | {
2 | "version": 0,
3 | "dependencies": [
4 | {
5 | "importpath": "github.com/42wim/matterbridge/matterhook",
6 | "repository": "https://github.com/42wim/matterbridge",
7 | "vcs": "git",
8 | "revision": "44144587a0314b7e2c719d279116ac86b657657e",
9 | "branch": "master",
10 | "path": "/matterhook",
11 | "notests": true
12 | },
13 | {
14 | "importpath": "github.com/gorilla/schema",
15 | "repository": "https://github.com/gorilla/schema",
16 | "vcs": "git",
17 | "revision": "0164a00ab4cd01d814d8cd5bf63fd9fcea30e23b",
18 | "branch": "master",
19 | "notests": true
20 | },
21 | {
22 | "importpath": "gopkg.in/yaml.v2",
23 | "repository": "https://gopkg.in/yaml.v2",
24 | "vcs": "git",
25 | "revision": "e4d366fc3c7938e2958e662b4258c7a89e1f0e3e",
26 | "branch": "master",
27 | "notests": true
28 | }
29 | ]
30 | }
--------------------------------------------------------------------------------
/mattertee/vendor/github.com/gorilla/schema/LICENSE:
--------------------------------------------------------------------------------
1 | Copyright (c) 2012 Rodrigo Moraes. All rights reserved.
2 |
3 | Redistribution and use in source and binary forms, with or without
4 | modification, are permitted provided that the following conditions are
5 | met:
6 |
7 | * Redistributions of source code must retain the above copyright
8 | notice, this list of conditions and the following disclaimer.
9 | * Redistributions in binary form must reproduce the above
10 | copyright notice, this list of conditions and the following disclaimer
11 | in the documentation and/or other materials provided with the
12 | distribution.
13 | * Neither the name of Google Inc. nor the names of its
14 | contributors may be used to endorse or promote products derived from
15 | this software without specific prior written permission.
16 |
17 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
18 | "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
19 | LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
20 | A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
21 | OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
22 | SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
23 | LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
24 | DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
25 | THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
26 | (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
27 | OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
28 |
--------------------------------------------------------------------------------
/mattertee/vendor/gopkg.in/yaml.v2/writerc.go:
--------------------------------------------------------------------------------
1 | package yaml
2 |
3 | // Set the writer error and return false.
4 | func yaml_emitter_set_writer_error(emitter *yaml_emitter_t, problem string) bool {
5 | emitter.error = yaml_WRITER_ERROR
6 | emitter.problem = problem
7 | return false
8 | }
9 |
10 | // Flush the output buffer.
11 | func yaml_emitter_flush(emitter *yaml_emitter_t) bool {
12 | if emitter.write_handler == nil {
13 | panic("write handler not set")
14 | }
15 |
16 | // Check if the buffer is empty.
17 | if emitter.buffer_pos == 0 {
18 | return true
19 | }
20 |
21 | // If the output encoding is UTF-8, we don't need to recode the buffer.
22 | if emitter.encoding == yaml_UTF8_ENCODING {
23 | if err := emitter.write_handler(emitter, emitter.buffer[:emitter.buffer_pos]); err != nil {
24 | return yaml_emitter_set_writer_error(emitter, "write error: "+err.Error())
25 | }
26 | emitter.buffer_pos = 0
27 | return true
28 | }
29 |
30 | // Recode the buffer into the raw buffer.
31 | var low, high int
32 | if emitter.encoding == yaml_UTF16LE_ENCODING {
33 | low, high = 0, 1
34 | } else {
35 | high, low = 1, 0
36 | }
37 |
38 | pos := 0
39 | for pos < emitter.buffer_pos {
40 | // See the "reader.c" code for more details on UTF-8 encoding. Note
41 | // that we assume that the buffer contains a valid UTF-8 sequence.
42 |
43 | // Read the next UTF-8 character.
44 | octet := emitter.buffer[pos]
45 |
46 | var w int
47 | var value rune
48 | switch {
49 | case octet&0x80 == 0x00:
50 | w, value = 1, rune(octet&0x7F)
51 | case octet&0xE0 == 0xC0:
52 | w, value = 2, rune(octet&0x1F)
53 | case octet&0xF0 == 0xE0:
54 | w, value = 3, rune(octet&0x0F)
55 | case octet&0xF8 == 0xF0:
56 | w, value = 4, rune(octet&0x07)
57 | }
58 | for k := 1; k < w; k++ {
59 | octet = emitter.buffer[pos+k]
60 | value = (value << 6) + (rune(octet) & 0x3F)
61 | }
62 | pos += w
63 |
64 | // Write the character.
65 | if value < 0x10000 {
66 | var b [2]byte
67 | b[high] = byte(value >> 8)
68 | b[low] = byte(value & 0xFF)
69 | emitter.raw_buffer = append(emitter.raw_buffer, b[0], b[1])
70 | } else {
71 | // Write the character using a surrogate pair (check "reader.c").
72 | var b [4]byte
73 | value -= 0x10000
74 | b[high] = byte(0xD8 + (value >> 18))
75 | b[low] = byte((value >> 10) & 0xFF)
76 | b[high+2] = byte(0xDC + ((value >> 8) & 0xFF))
77 | b[low+2] = byte(value & 0xFF)
78 | emitter.raw_buffer = append(emitter.raw_buffer, b[0], b[1], b[2], b[3])
79 | }
80 | }
81 |
82 | // Write the raw buffer.
83 | if err := emitter.write_handler(emitter, emitter.raw_buffer); err != nil {
84 | return yaml_emitter_set_writer_error(emitter, "write error: "+err.Error())
85 | }
86 | emitter.buffer_pos = 0
87 | emitter.raw_buffer = emitter.raw_buffer[:0]
88 | return true
89 | }
90 |
--------------------------------------------------------------------------------
/mattertee/vendor/gopkg.in/yaml.v2/sorter.go:
--------------------------------------------------------------------------------
1 | package yaml
2 |
3 | import (
4 | "reflect"
5 | "unicode"
6 | )
7 |
8 | type keyList []reflect.Value
9 |
10 | func (l keyList) Len() int { return len(l) }
11 | func (l keyList) Swap(i, j int) { l[i], l[j] = l[j], l[i] }
12 | func (l keyList) Less(i, j int) bool {
13 | a := l[i]
14 | b := l[j]
15 | ak := a.Kind()
16 | bk := b.Kind()
17 | for (ak == reflect.Interface || ak == reflect.Ptr) && !a.IsNil() {
18 | a = a.Elem()
19 | ak = a.Kind()
20 | }
21 | for (bk == reflect.Interface || bk == reflect.Ptr) && !b.IsNil() {
22 | b = b.Elem()
23 | bk = b.Kind()
24 | }
25 | af, aok := keyFloat(a)
26 | bf, bok := keyFloat(b)
27 | if aok && bok {
28 | if af != bf {
29 | return af < bf
30 | }
31 | if ak != bk {
32 | return ak < bk
33 | }
34 | return numLess(a, b)
35 | }
36 | if ak != reflect.String || bk != reflect.String {
37 | return ak < bk
38 | }
39 | ar, br := []rune(a.String()), []rune(b.String())
40 | for i := 0; i < len(ar) && i < len(br); i++ {
41 | if ar[i] == br[i] {
42 | continue
43 | }
44 | al := unicode.IsLetter(ar[i])
45 | bl := unicode.IsLetter(br[i])
46 | if al && bl {
47 | return ar[i] < br[i]
48 | }
49 | if al || bl {
50 | return bl
51 | }
52 | var ai, bi int
53 | var an, bn int64
54 | for ai = i; ai < len(ar) && unicode.IsDigit(ar[ai]); ai++ {
55 | an = an*10 + int64(ar[ai]-'0')
56 | }
57 | for bi = i; bi < len(br) && unicode.IsDigit(br[bi]); bi++ {
58 | bn = bn*10 + int64(br[bi]-'0')
59 | }
60 | if an != bn {
61 | return an < bn
62 | }
63 | if ai != bi {
64 | return ai < bi
65 | }
66 | return ar[i] < br[i]
67 | }
68 | return len(ar) < len(br)
69 | }
70 |
71 | // keyFloat returns a float value for v if it is a number/bool
72 | // and whether it is a number/bool or not.
73 | func keyFloat(v reflect.Value) (f float64, ok bool) {
74 | switch v.Kind() {
75 | case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
76 | return float64(v.Int()), true
77 | case reflect.Float32, reflect.Float64:
78 | return v.Float(), true
79 | case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
80 | return float64(v.Uint()), true
81 | case reflect.Bool:
82 | if v.Bool() {
83 | return 1, true
84 | }
85 | return 0, true
86 | }
87 | return 0, false
88 | }
89 |
90 | // numLess returns whether a < b.
91 | // a and b must necessarily have the same kind.
92 | func numLess(a, b reflect.Value) bool {
93 | switch a.Kind() {
94 | case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
95 | return a.Int() < b.Int()
96 | case reflect.Float32, reflect.Float64:
97 | return a.Float() < b.Float()
98 | case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
99 | return a.Uint() < b.Uint()
100 | case reflect.Bool:
101 | return !a.Bool() && b.Bool()
102 | }
103 | panic("not a number")
104 | }
105 |
--------------------------------------------------------------------------------
/git-mattermost-hook/README.md:
--------------------------------------------------------------------------------
1 | # Git post-receive hook for mattermost
2 | (Based on https://github.com/chriseldredge/git-slack-hook)
3 |
4 | This is a bash script that posts a message into your [Mattermost](https://mattermost.org) channel when changes are pushed.
5 |
6 | Hook this script into `post-receive` for your git repositories.
7 |
8 | ## How to Install
9 | Requires [mattertee](https://github.com/42wim/matterstuff/tree/master/mattertee).
10 |
11 | Note: some git repositories may be "bare". You'll know if your repo is bare or not by checking for a `.git` folder where your repo lives.
12 |
13 | Download [git-mattermost-hook](https://raw.githubusercontent.com/42wim/matterstuff/master/git-mattermost-hook/git-mattermost-hook) onto the server which hosts your git repo.
14 |
15 | For bare repos, copy/rename it as `/path/to/your/repo/hooks/post-receive`.
16 |
17 | For normal/non-bare repos, copy/rename it as `/path/to/your/repo/.git/hooks/post-receive`.
18 |
19 | Finally, `chmod +x post-receive` to allow the script to be executed.
20 |
21 | ## Configuration
22 |
23 | Add an Incoming Webhook integration in your Mattermost installation.
24 | Make note of the webhook URL.
25 |
26 | git config hooks.slack.webhook-url 'https://yourmattermost/hooks/key'
27 |
28 | Specify the path of the [mattertee](https://github.com/42wim/matterstuff/tree/master/mattertee) binary:
29 | git config hooks.slack.mattertee '/bin/mattertee'
30 |
31 | ## Optional
32 |
33 | git config hooks.slack.channel '#general'
34 |
35 | '#channelname' - post to channel
36 | 'groupname' - post to group
37 |
38 | Specifies a channel to post to instead of the webhook's default channel.
39 |
40 | git config hooks.slack.username 'git'
41 |
42 | Specifies a username to post as. If not specified, the default name `incoming-webhook` will be used.
43 |
44 | git config hooks.slack.icon-url 'https://example.com/icon.png'
45 |
46 | Specifies an icon URL to display instead of the default icon.
47 |
48 | git config hooks.slack.repo-nice-name 'My Awesome Repository'
49 |
50 | Specifies a repository nice name that will be shown in messages.
51 |
52 | git config hooks.slack.show-only-last-commit true
53 |
54 | Specifies whether you want to show only the last commit (or all) when pushing multiple commits.
55 |
56 | git config hooks.slack.branch-regexp regexp
57 |
58 | Specifies a regular expression; notifications are sent only for matching branches.
59 |
60 | ## Linking to Changesets
61 |
62 | When the following parameters are set, revision hashes will be turned into links to a web view of your repository.
63 |
64 | git config hooks.slack.repos-root '/path/to/repos'
65 | git config hooks.slack.changeset-url-pattern 'http://yourserver/%repo_path%/changeset/%rev_hash%'
66 |
67 | For example, if your repository is in `/usr/local/repos/myrepo`, set `repos-root` to `/usr/local/repos/` and set `changeset-url-pattern` to whatever matches your repository browser, e.g. `http://yourserver/%repo_path%/changeset/%rev_hash%`.
68 |
69 | Links can also be created that summarize a list of commits:
70 |
71 | git config hooks.slack.compare-url-pattern 'http://yourserver/%repo_path%/compare/%old_rev_hash%..%new_rev_hash%'
72 |
--------------------------------------------------------------------------------
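The placeholder substitution described above can be illustrated with a small sketch. The hook itself is a bash script, so this is only a hypothetical Go illustration using the example pattern and paths from the README:

```go
package main

import (
	"fmt"
	"strings"
)

// expandChangesetURL fills the %repo_path% and %rev_hash% placeholders,
// mirroring the substitution the hook performs with its configured pattern.
func expandChangesetURL(pattern, reposRoot, repoDir, revHash string) string {
	// The repo path is the repository directory relative to repos-root.
	repoPath := strings.TrimPrefix(repoDir, reposRoot)
	url := strings.Replace(pattern, "%repo_path%", repoPath, -1)
	return strings.Replace(url, "%rev_hash%", revHash, -1)
}

func main() {
	fmt.Println(expandChangesetURL(
		"http://yourserver/%repo_path%/changeset/%rev_hash%",
		"/usr/local/repos/",
		"/usr/local/repos/myrepo",
		"abc123",
	))
	// Output: http://yourserver/myrepo/changeset/abc123
}
```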
/mattertee/README.md:
--------------------------------------------------------------------------------
1 | # mattertee #
2 |
3 | *mattertee* works like the [tee](http://en.wikipedia.org/wiki/Tee_(command)) command (and is inspired by [slacktee](https://github.com/course-hero/slacktee)).
4 | Instead of writing the standard input to files, *mattertee* posts it to your Mattermost installation.
5 |
6 |
7 | ## binaries
8 | Binaries can be found [here](https://github.com/42wim/matterstuff/releases/).
9 |
10 | ## building
11 | Make sure you have [Go](https://golang.org/doc/install) properly installed, including setting up your [GOPATH](https://golang.org/doc/code.html#GOPATH).
12 |
13 | ```
14 | cd $GOPATH
15 | go get github.com/42wim/matterstuff/mattertee
16 | ```
17 |
18 | You should now have the mattertee binary in the bin directory:
19 |
20 | ```
21 | $ ls bin/
22 | mattertee
23 | ```
24 |
25 | ## usage
26 |
27 | ### On the command line
28 |
29 | ```
30 | Usage of ./mattertee:
31 | -c string
32 | Post input values to specified channel or user.
33 | -i string
34 | This url is used as icon for posting.
35 | -l string
36 | Specify the language used for syntax highlighting (ruby/python/...).
37 | -m string
38 | Mattermost incoming webhooks URL.
39 | -n Post input values without buffering.
40 | -p Don't surround the post with triple backticks.
41 | -u string
42 | This username is used for posting. (default "mattertee")
43 | -x Add extra info (user/hostname/timestamp).
44 | ```
45 |
46 | ### Configuration files
47 |
48 | Mattertee also reads configuration files, in order. Later files can (partially) override earlier ones.
49 |
50 | On Linux: `/etc/mattertee.conf`, `$HOME/.mattertee.conf`
51 |
52 | On Windows: `$HOMEDIR/.mattertee.conf`
53 |
54 | On all OSes, a file `.mattertee.conf` in the current directory will be read too.
55 |
56 | The files (if present) should be YAML-formatted, e.g.:
57 |
58 | ```yaml
59 | username: thisisme
60 | channel: mychannel
61 | icon_url: http://..../image.png
62 | matter_url: http://mattermost.at.my.domain/hooks/hookid
63 | title: Some title
64 | language: ruby
65 | plain_text: true
66 | no_buffer: false
67 | extra: true
68 | ```
69 |
70 | Command line options will still override anything set in the configuration files.
71 |
72 | ### Using environment variables
73 |
74 | You can also set the MM_HOOK environment variable to your Mattermost incoming webhook URL.
75 |
76 | ```
77 | export MM_HOOK=https://yourmattermost/hooks/webhookkey
78 | ```
79 |
80 | ### example
81 | ```
82 | uptime | mattertee -c off-topic -x -m https://yourmattermost/hooks/webhookkey
83 | ```
84 |
85 | or, if you've set the MM_HOOK environment variable:
86 |
87 | ```
88 | uptime | mattertee -c off-topic -x
89 | ```
90 |
91 | 
92 |
93 | ```
94 | cat test.rb |mattertee -c off-topic -l ruby
95 | ```
96 |
97 | 
98 |
99 | ## examples (taken from slacktee)
100 | If you'd like to post the output of the `ls` command, you can do it like this.
101 |
102 | ```
103 | ls | mattertee
104 | ```
105 |
106 | To post the output of a `tail -f` command line by line, use the `-n` option.
107 |
108 | ```
109 | tail -f foobar.log | mattertee -n
110 | ```
111 |
112 | You can specify the channel, username, and icon URL too.
113 |
114 | ```
115 | ls | mattertee -c "general" -u "mattertee" -i "http://myimage.png"
116 | ```
117 |
118 | Of course, you can pipe the output to another command.
119 |
120 | ```
121 | ls | mattertee | email "ls" foo@example.com
122 | ```
123 |
--------------------------------------------------------------------------------
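The README above describes the flow; for readers browsing this dump, here is a minimal sketch of the same idea in Go, using the vendored matterhook package included below. The webhook URL (via MM_HOOK), channel, and username are placeholders taken from the README examples, not part of mattertee itself (the real implementation is in main.go later in this dump):

```go
package main

import (
	"bytes"
	"fmt"
	"io"
	"os"

	"github.com/42wim/matterbridge/matterhook"
)

func main() {
	// Read the webhook URL from MM_HOOK, as described in the README above.
	url := os.Getenv("MM_HOOK")

	// DisableServer: we only send to the incoming webhook and do not
	// listen for outgoing webhooks.
	m := matterhook.New(url, matterhook.Config{DisableServer: true})

	// Buffer all of stdin, echo it (tee behaviour), then post it.
	buf := new(bytes.Buffer)
	io.Copy(buf, os.Stdin)
	text := buf.String()
	fmt.Print(text)

	msg := matterhook.OMessage{
		UserName: "mattertee",
		Channel:  "off-topic",
		Text:     "```\n" + text + "```",
	}
	if err := m.Send(msg); err != nil {
		fmt.Fprintln(os.Stderr, "post failed:", err)
	}
}
```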
/mattertee/vendor/github.com/gorilla/schema/converter.go:
--------------------------------------------------------------------------------
1 | // Copyright 2012 The Gorilla Authors. All rights reserved.
2 | // Use of this source code is governed by a BSD-style
3 | // license that can be found in the LICENSE file.
4 |
5 | package schema
6 |
7 | import (
8 | "reflect"
9 | "strconv"
10 | )
11 |
12 | type Converter func(string) reflect.Value
13 |
14 | var (
15 | invalidValue = reflect.Value{}
16 | boolType = reflect.Bool
17 | float32Type = reflect.Float32
18 | float64Type = reflect.Float64
19 | intType = reflect.Int
20 | int8Type = reflect.Int8
21 | int16Type = reflect.Int16
22 | int32Type = reflect.Int32
23 | int64Type = reflect.Int64
24 | stringType = reflect.String
25 | uintType = reflect.Uint
26 | uint8Type = reflect.Uint8
27 | uint16Type = reflect.Uint16
28 | uint32Type = reflect.Uint32
29 | uint64Type = reflect.Uint64
30 | )
31 |
32 | // Default converters for basic types.
33 | var converters = map[reflect.Kind]Converter{
34 | boolType: convertBool,
35 | float32Type: convertFloat32,
36 | float64Type: convertFloat64,
37 | intType: convertInt,
38 | int8Type: convertInt8,
39 | int16Type: convertInt16,
40 | int32Type: convertInt32,
41 | int64Type: convertInt64,
42 | stringType: convertString,
43 | uintType: convertUint,
44 | uint8Type: convertUint8,
45 | uint16Type: convertUint16,
46 | uint32Type: convertUint32,
47 | uint64Type: convertUint64,
48 | }
49 |
50 | func convertBool(value string) reflect.Value {
51 | if value == "on" {
52 | return reflect.ValueOf(true)
53 | } else if v, err := strconv.ParseBool(value); err == nil {
54 | return reflect.ValueOf(v)
55 | }
56 | return invalidValue
57 | }
58 |
59 | func convertFloat32(value string) reflect.Value {
60 | if v, err := strconv.ParseFloat(value, 32); err == nil {
61 | return reflect.ValueOf(float32(v))
62 | }
63 | return invalidValue
64 | }
65 |
66 | func convertFloat64(value string) reflect.Value {
67 | if v, err := strconv.ParseFloat(value, 64); err == nil {
68 | return reflect.ValueOf(v)
69 | }
70 | return invalidValue
71 | }
72 |
73 | func convertInt(value string) reflect.Value {
74 | if v, err := strconv.ParseInt(value, 10, 0); err == nil {
75 | return reflect.ValueOf(int(v))
76 | }
77 | return invalidValue
78 | }
79 |
80 | func convertInt8(value string) reflect.Value {
81 | if v, err := strconv.ParseInt(value, 10, 8); err == nil {
82 | return reflect.ValueOf(int8(v))
83 | }
84 | return invalidValue
85 | }
86 |
87 | func convertInt16(value string) reflect.Value {
88 | if v, err := strconv.ParseInt(value, 10, 16); err == nil {
89 | return reflect.ValueOf(int16(v))
90 | }
91 | return invalidValue
92 | }
93 |
94 | func convertInt32(value string) reflect.Value {
95 | if v, err := strconv.ParseInt(value, 10, 32); err == nil {
96 | return reflect.ValueOf(int32(v))
97 | }
98 | return invalidValue
99 | }
100 |
101 | func convertInt64(value string) reflect.Value {
102 | if v, err := strconv.ParseInt(value, 10, 64); err == nil {
103 | return reflect.ValueOf(v)
104 | }
105 | return invalidValue
106 | }
107 |
108 | func convertString(value string) reflect.Value {
109 | return reflect.ValueOf(value)
110 | }
111 |
112 | func convertUint(value string) reflect.Value {
113 | if v, err := strconv.ParseUint(value, 10, 0); err == nil {
114 | return reflect.ValueOf(uint(v))
115 | }
116 | return invalidValue
117 | }
118 |
119 | func convertUint8(value string) reflect.Value {
120 | if v, err := strconv.ParseUint(value, 10, 8); err == nil {
121 | return reflect.ValueOf(uint8(v))
122 | }
123 | return invalidValue
124 | }
125 |
126 | func convertUint16(value string) reflect.Value {
127 | if v, err := strconv.ParseUint(value, 10, 16); err == nil {
128 | return reflect.ValueOf(uint16(v))
129 | }
130 | return invalidValue
131 | }
132 |
133 | func convertUint32(value string) reflect.Value {
134 | if v, err := strconv.ParseUint(value, 10, 32); err == nil {
135 | return reflect.ValueOf(uint32(v))
136 | }
137 | return invalidValue
138 | }
139 |
140 | func convertUint64(value string) reflect.Value {
141 | if v, err := strconv.ParseUint(value, 10, 64); err == nil {
142 | return reflect.ValueOf(v)
143 | }
144 | return invalidValue
145 | }
146 |
--------------------------------------------------------------------------------
/mattertee/vendor/github.com/gorilla/schema/doc.go:
--------------------------------------------------------------------------------
1 | // Copyright 2012 The Gorilla Authors. All rights reserved.
2 | // Use of this source code is governed by a BSD-style
3 | // license that can be found in the LICENSE file.
4 |
5 | /*
6 | Package gorilla/schema fills a struct with form values.
7 |
8 | The basic usage is really simple. Given this struct:
9 |
10 | type Person struct {
11 | Name string
12 | Phone string
13 | }
14 |
15 | ...we can fill it passing a map to the Decode() function:
16 |
17 | values := map[string][]string{
18 | "Name": {"John"},
19 | "Phone": {"999-999-999"},
20 | }
21 | person := new(Person)
22 | decoder := schema.NewDecoder()
23 | decoder.Decode(person, values)
24 |
25 | This is just a simple example and it doesn't make a lot of sense to create
26 | the map manually. Typically it will come from a http.Request object and
27 | will be of type url.Values: http.Request.Form or http.Request.MultipartForm:
28 |
29 | func MyHandler(w http.ResponseWriter, r *http.Request) {
30 | err := r.ParseForm()
31 |
32 | if err != nil {
33 | // Handle error
34 | }
35 |
36 | decoder := schema.NewDecoder()
37 | // r.PostForm is a map of our POST form values
38 | err := decoder.Decode(person, r.PostForm)
39 |
40 | if err != nil {
41 | // Handle error
42 | }
43 |
44 | // Do something with person.Name or person.Phone
45 | }
46 |
47 | Note: it is a good idea to set a Decoder instance as a package global,
48 | because it caches meta-data about structs, and an instance can be shared safely:
49 |
50 | var decoder = schema.NewDecoder()
51 |
52 | To define custom names for fields, use a struct tag "schema". To not populate
53 | certain fields, use a dash for the name and it will be ignored:
54 |
55 | type Person struct {
56 | Name string `schema:"name"` // custom name
57 | Phone string `schema:"phone"` // custom name
58 | Admin bool `schema:"-"` // this field is never set
59 | }
60 |
61 | The supported field types in the destination struct are:
62 |
63 | * bool
64 | * float variants (float32, float64)
65 | * int variants (int, int8, int16, int32, int64)
66 | * string
67 | * uint variants (uint, uint8, uint16, uint32, uint64)
68 | * struct
69 | * a pointer to one of the above types
70 | * a slice or a pointer to a slice of one of the above types
71 |
72 | Non-supported types are simply ignored, however custom types can be registered
73 | to be converted.
74 |
75 | To fill nested structs, keys must use a dotted notation as the "path" for the
76 | field. So for example, to fill the struct Person below:
77 |
78 | type Phone struct {
79 | Label string
80 | Number string
81 | }
82 |
83 | type Person struct {
84 | Name string
85 | Phone Phone
86 | }
87 |
88 | ...the source map must have the keys "Name", "Phone.Label" and "Phone.Number".
89 | This means that an HTML form to fill a Person struct must look like this:
90 |
91 |
96 |
97 | Single values are filled using the first value for a key from the source map.
98 | Slices are filled using all values for a key from the source map. So to fill
99 | a Person with multiple Phone values, like:
100 |
101 | type Person struct {
102 | Name string
103 | Phones []Phone
104 | }
105 |
106 | ...an HTML form that accepts three Phone values would look like this:
107 |
108 |
117 |
118 | Notice that only for slices of structs the slice index is required.
119 | This is needed for disambiguation: if the nested struct also had a slice
120 | field, we could not translate multiple values to it if we did not use an
121 | index for the parent struct.
122 |
123 | There's also the possibility to create a custom type that implements the
124 | TextUnmarshaler interface, and in this case there's no need to register
125 | a converter, like:
126 |
127 | type Person struct {
128 | Emails []Email
129 | }
130 |
131 | type Email struct {
132 | *mail.Address
133 | }
134 |
135 | func (e *Email) UnmarshalText(text []byte) (err error) {
136 | e.Address, err = mail.ParseAddress(string(text))
137 | return
138 | }
139 |
140 | ...an HTML form that accepts three Email values would look like this:
141 |
142 |
147 | */
148 | package schema
149 |
--------------------------------------------------------------------------------
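The doc comment above already sketches basic usage; the following is a self-contained, runnable version of that same example. The hard-coded form values stand in for http.Request.PostForm:

```go
package main

import (
	"fmt"

	"github.com/gorilla/schema"
)

type Person struct {
	Name  string
	Phone string
}

// decoder caches struct meta-data, so the doc comment above recommends
// sharing a single instance.
var decoder = schema.NewDecoder()

func main() {
	// Normally these values come from http.Request.PostForm.
	values := map[string][]string{
		"Name":  {"John"},
		"Phone": {"999-999-999"},
	}

	person := new(Person)
	if err := decoder.Decode(person, values); err != nil {
		fmt.Println("decode error:", err)
		return
	}
	fmt.Printf("%s / %s\n", person.Name, person.Phone)
}
```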
/mattertee/vendor/github.com/42wim/matterbridge/matterhook/matterhook.go:
--------------------------------------------------------------------------------
1 | // Package matterhook provides interaction with Mattermost incoming/outgoing webhooks.
2 | package matterhook
3 |
4 | import (
5 | "bytes"
6 | "crypto/tls"
7 | "encoding/json"
8 | "fmt"
9 | "github.com/gorilla/schema"
10 | "io"
11 | "io/ioutil"
12 | "log"
13 | "net"
14 | "net/http"
15 | )
16 |
17 | // OMessage for mattermost incoming webhook. (send to mattermost)
18 | type OMessage struct {
19 | Channel string `json:"channel,omitempty"`
20 | IconURL string `json:"icon_url,omitempty"`
21 | IconEmoji string `json:"icon_emoji,omitempty"`
22 | UserName string `json:"username,omitempty"`
23 | Text string `json:"text"`
24 | Attachments interface{} `json:"attachments,omitempty"`
25 | Type string `json:"type,omitempty"`
26 | }
27 |
28 | // IMessage for mattermost outgoing webhook. (received from mattermost)
29 | type IMessage struct {
30 | Token string `schema:"token"`
31 | TeamID string `schema:"team_id"`
32 | TeamDomain string `schema:"team_domain"`
33 | ChannelID string `schema:"channel_id"`
34 | ChannelName string `schema:"channel_name"`
35 | Timestamp string `schema:"timestamp"`
36 | UserID string `schema:"user_id"`
37 | UserName string `schema:"user_name"`
38 | PostId string `schema:"post_id"`
39 | Text string `schema:"text"`
40 | TriggerWord string `schema:"trigger_word"`
41 | }
42 |
43 | // Client for Mattermost.
44 | type Client struct {
45 | Url string // URL for incoming webhooks on mattermost.
46 | In chan IMessage
47 | Out chan OMessage
48 | httpclient *http.Client
49 | Config
50 | }
51 |
52 | // Config for client.
53 | type Config struct {
54 | BindAddress string // Address to listen on
55 | Token string // Only allow this token from Mattermost. (Allow everything when empty)
56 | InsecureSkipVerify bool // disable certificate checking
57 | DisableServer bool // Do not start server for outgoing webhooks from Mattermost.
58 | }
59 |
60 | // New Mattermost client.
61 | func New(url string, config Config) *Client {
62 | c := &Client{Url: url, In: make(chan IMessage), Out: make(chan OMessage), Config: config}
63 | tr := &http.Transport{
64 | TLSClientConfig: &tls.Config{InsecureSkipVerify: config.InsecureSkipVerify},
65 | }
66 | c.httpclient = &http.Client{Transport: tr}
67 | if !c.DisableServer {
68 | _, _, err := net.SplitHostPort(c.BindAddress)
69 | if err != nil {
70 | log.Fatalf("incorrect bindaddress %s", c.BindAddress)
71 | }
72 | go c.StartServer()
73 | }
74 | return c
75 | }
76 |
77 | // StartServer starts a webserver listening for incoming mattermost POSTS.
78 | func (c *Client) StartServer() {
79 | mux := http.NewServeMux()
80 | mux.Handle("/", c)
81 | log.Printf("Listening on http://%v...\n", c.BindAddress)
82 | if err := http.ListenAndServe(c.BindAddress, mux); err != nil {
83 | log.Fatal(err)
84 | }
85 | }
86 |
87 | // ServeHTTP implementation.
88 | func (c *Client) ServeHTTP(w http.ResponseWriter, r *http.Request) {
89 | if r.Method != "POST" {
90 | log.Println("invalid " + r.Method + " connection from " + r.RemoteAddr)
91 | http.NotFound(w, r)
92 | return
93 | }
94 | msg := IMessage{}
95 | err := r.ParseForm()
96 | if err != nil {
97 | log.Println(err)
98 | http.NotFound(w, r)
99 | return
100 | }
101 | defer r.Body.Close()
102 | decoder := schema.NewDecoder()
103 | err = decoder.Decode(&msg, r.PostForm)
104 | if err != nil {
105 | log.Println(err)
106 | http.NotFound(w, r)
107 | return
108 | }
109 | if msg.Token == "" {
110 | log.Println("no token from " + r.RemoteAddr)
111 | http.NotFound(w, r)
112 | return
113 | }
114 | if c.Token != "" {
115 | if msg.Token != c.Token {
116 | log.Println("invalid token " + msg.Token + " from " + r.RemoteAddr)
117 | http.NotFound(w, r)
118 | return
119 | }
120 | }
121 | c.In <- msg
122 | }
123 |
124 | // Receive returns an incoming message from mattermost outgoing webhooks URL.
125 | func (c *Client) Receive() IMessage {
126 | for {
127 | select {
128 | case msg := <-c.In:
129 | return msg
130 | }
131 | }
132 | }
133 |
134 | // Send sends a msg to mattermost incoming webhooks URL.
135 | func (c *Client) Send(msg OMessage) error {
136 | buf, err := json.Marshal(msg)
137 | if err != nil {
138 | return err
139 | }
140 | resp, err := c.httpclient.Post(c.Url, "application/json", bytes.NewReader(buf))
141 | if err != nil {
142 | return err
143 | }
144 | defer resp.Body.Close()
145 |
146 | // Read entire body to completion to re-use keep-alive connections.
147 | io.Copy(ioutil.Discard, resp.Body)
148 |
149 | if resp.StatusCode != 200 {
150 | return fmt.Errorf("unexpected status code: %d", resp.StatusCode)
151 | }
152 | return nil
153 | }
154 |
--------------------------------------------------------------------------------
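Beyond the Send path that mattertee uses, the Client above can also receive Mattermost outgoing webhooks via BindAddress and Receive. A minimal sketch; the webhook URL, bind address, and echo behaviour are placeholders for illustration:

```go
package main

import (
	"log"

	"github.com/42wim/matterbridge/matterhook"
)

func main() {
	// The URL is the incoming webhook we post replies to; BindAddress is
	// where this process listens for Mattermost outgoing webhook POSTs.
	m := matterhook.New("https://yourmattermost/hooks/hookid",
		matterhook.Config{
			BindAddress: "0.0.0.0:9999",
			Token:       "", // empty: accept any token
		})

	for {
		msg := m.Receive() // blocks until an outgoing webhook POST arrives
		log.Printf("%s said %q in %s", msg.UserName, msg.Text, msg.ChannelName)

		reply := matterhook.OMessage{
			Channel: msg.ChannelName,
			Text:    "echo: " + msg.Text,
		}
		if err := m.Send(reply); err != nil {
			log.Println("send failed:", err)
		}
	}
}
```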
/mattertee/main.go:
--------------------------------------------------------------------------------
1 | package main
2 |
3 | import (
4 | "bufio"
5 | "bytes"
6 | "flag"
7 | "fmt"
8 | "github.com/42wim/matterbridge/matterhook"
9 | "gopkg.in/yaml.v2"
10 | "io"
11 | "io/ioutil"
12 | "os"
13 | "os/user"
14 | "path/filepath"
15 | "runtime"
16 | "time"
17 | )
18 |
19 | type config struct {
20 | Username, Channel, Title, Language string
21 | IconURL string `yaml:"icon_url"`
22 | MatterURL string `yaml:"matter_url"`
23 | PlainText bool `yaml:"plain_text"`
24 | NoBuffer bool `yaml:"no_buffer"`
25 | Extra bool
26 | }
27 |
28 | var cfg = config{
29 | Channel: "",
30 | IconURL: "",
31 | Language: "",
32 | MatterURL: "",
33 | Title: "",
34 | Username: "mattertee",
35 | Extra: false,
36 | NoBuffer: false,
37 | PlainText: false,
38 | }
39 |
40 | var flagVersion bool
41 | var version = "0.1.0"
42 |
43 | func init() {
44 | // Read configuration from files
45 | read_configurations()
46 |
47 | // Now override configuration with command line parameters
48 | flag.Usage = func() {
49 | fmt.Fprintf(os.Stderr, "Usage of %s:\n", os.Args[0])
50 | flag.PrintDefaults()
51 | fmt.Fprintf(os.Stderr, "\n")
52 | fmt.Fprintf(os.Stderr, "The following configuration files will also be read:")
53 | fmt.Fprintf(os.Stderr, " (+ if file is present)\n")
54 | for _, file := range config_files() {
55 | if _, err := os.Stat(file); os.IsNotExist(err) {
56 | fmt.Fprintf(os.Stderr, " - %s\n", file)
57 | } else {
58 | fmt.Fprintf(os.Stderr, " + %s\n", file)
59 | }
60 | }
61 | }
62 | flag.StringVar(&cfg.Channel, "c", cfg.Channel, "Post input values to specified channel or user.")
63 | flag.StringVar(&cfg.IconURL, "i", cfg.IconURL, "This url is used as icon for posting.")
64 | flag.StringVar(&cfg.Language, "l", cfg.Language, "Specify the language used for syntax highlighting (ruby/python/...)")
65 | flag.StringVar(&cfg.MatterURL, "m", cfg.MatterURL, "Mattermost incoming webhooks URL.")
66 | flag.StringVar(&cfg.Title, "t", cfg.Title, "This title is added to posts. (not with -n)")
67 | flag.StringVar(&cfg.Username, "u", cfg.Username, "This username is used for posting.")
68 | flag.BoolVar(&cfg.Extra, "x", cfg.Extra, "Add extra info (user/hostname/timestamp).")
69 | flag.BoolVar(&cfg.NoBuffer, "n", cfg.NoBuffer, "Post input values without buffering.")
70 | flag.BoolVar(&cfg.PlainText, "p", cfg.PlainText, "Don't surround the post with triple backticks.")
71 | flag.BoolVar(&flagVersion, "version", false, "show version.")
72 |
73 | flag.Parse()
74 | }
75 |
76 | func config_files() []string {
77 | // config_files will list configuration files which will be read in order and can override
78 | // previous files
79 | config_files := []string{}
80 |
81 | if runtime.GOOS == "linux" {
82 | config_files = append(config_files, "/etc/mattertee.conf")
83 | }
84 |
85 | usr, err := user.Current()
86 | if err == nil {
87 | config_files = append(config_files, filepath.Join(usr.HomeDir, ".mattertee.conf"))
88 | }
89 |
90 | config_files = append(config_files, ".mattertee.conf")
91 |
92 | return config_files
93 | }
94 |
95 | func read_configurations() {
96 | config_files := config_files()
97 |
98 | for _, file := range config_files {
99 | err := read_configuration(file)
100 | if err != nil {
101 | // something went wrong - report (but don't fail)
102 | fmt.Fprintf(os.Stderr, "An error has occurred while reading configuration file '%s': %s\n", file, err)
103 | }
104 | }
105 | }
106 |
107 | func read_configuration(filename string) error {
108 | data, err := ioutil.ReadFile(filename)
109 | if err != nil {
110 | // File doesn't exist, so skip it
111 | return nil
112 | }
113 |
114 | 	err = yaml.Unmarshal(data, &cfg)
115 | if err != nil {
116 | return err
117 | }
118 |
119 | return nil
120 | }
121 |
122 | func md(text string) string {
123 | return "```" + cfg.Language + "\n" + text + "```"
124 | }
125 |
126 | func extraInfo() string {
127 | u, _ := user.Current()
128 | hname, _ := os.Hostname()
129 | return "\n" + u.Username + "@" + hname + " (_" + time.Now().Format(time.RFC3339) + "_)\n"
130 | }
131 |
132 | func main() {
133 | if flagVersion {
134 | fmt.Println("version:", version)
135 | return
136 | }
137 | url := os.Getenv("MM_HOOK")
138 | if cfg.MatterURL != "" {
139 | url = cfg.MatterURL
140 | }
141 | m := matterhook.New(url, matterhook.Config{DisableServer: true})
142 | msg := matterhook.OMessage{}
143 | msg.UserName = cfg.Username
144 | msg.Channel = cfg.Channel
145 | msg.IconURL = cfg.IconURL
146 | if cfg.NoBuffer {
147 | scanner := bufio.NewScanner(os.Stdin)
148 | for scanner.Scan() {
149 | line := scanner.Text()
150 | fmt.Println(line)
151 | msg.Text = md(line)
152 | if cfg.PlainText {
153 | msg.Text = line
154 | }
155 | m.Send(msg)
156 | }
157 | } else {
158 | buf := new(bytes.Buffer)
159 | io.Copy(buf, os.Stdin)
160 | text := buf.String()
161 | fmt.Print(text)
162 | msg.Text = md(text)
163 | if cfg.PlainText {
164 | msg.Text = text
165 | }
166 | if cfg.Extra {
167 | msg.Text += extraInfo()
168 | }
169 | if cfg.Title != "" {
170 | msg.Text = cfg.Title + "\n" + msg.Text
171 | }
172 | m.Send(msg)
173 | }
174 | }
175 |
--------------------------------------------------------------------------------
/mattertee/vendor/gopkg.in/yaml.v2/yamlprivateh.go:
--------------------------------------------------------------------------------
1 | package yaml
2 |
3 | const (
4 | // The size of the input raw buffer.
5 | input_raw_buffer_size = 512
6 |
7 | // The size of the input buffer.
8 | // It should be possible to decode the whole raw buffer.
9 | input_buffer_size = input_raw_buffer_size * 3
10 |
11 | // The size of the output buffer.
12 | output_buffer_size = 128
13 |
14 | // The size of the output raw buffer.
15 | // It should be possible to encode the whole output buffer.
16 | output_raw_buffer_size = (output_buffer_size*2 + 2)
17 |
18 | // The size of other stacks and queues.
19 | initial_stack_size = 16
20 | initial_queue_size = 16
21 | initial_string_size = 16
22 | )
23 |
24 | // Check if the character at the specified position is an alphabetical
25 | // character, a digit, '_', or '-'.
26 | func is_alpha(b []byte, i int) bool {
27 | return b[i] >= '0' && b[i] <= '9' || b[i] >= 'A' && b[i] <= 'Z' || b[i] >= 'a' && b[i] <= 'z' || b[i] == '_' || b[i] == '-'
28 | }
29 |
30 | // Check if the character at the specified position is a digit.
31 | func is_digit(b []byte, i int) bool {
32 | return b[i] >= '0' && b[i] <= '9'
33 | }
34 |
35 | // Get the value of a digit.
36 | func as_digit(b []byte, i int) int {
37 | return int(b[i]) - '0'
38 | }
39 |
40 | // Check if the character at the specified position is a hex-digit.
41 | func is_hex(b []byte, i int) bool {
42 | return b[i] >= '0' && b[i] <= '9' || b[i] >= 'A' && b[i] <= 'F' || b[i] >= 'a' && b[i] <= 'f'
43 | }
44 |
45 | // Get the value of a hex-digit.
46 | func as_hex(b []byte, i int) int {
47 | bi := b[i]
48 | if bi >= 'A' && bi <= 'F' {
49 | return int(bi) - 'A' + 10
50 | }
51 | if bi >= 'a' && bi <= 'f' {
52 | return int(bi) - 'a' + 10
53 | }
54 | return int(bi) - '0'
55 | }
56 |
57 | // Check if the character is ASCII.
58 | func is_ascii(b []byte, i int) bool {
59 | return b[i] <= 0x7F
60 | }
61 |
62 | // Check if the character at the start of the buffer can be printed unescaped.
63 | func is_printable(b []byte, i int) bool {
64 | return ((b[i] == 0x0A) || // . == #x0A
65 | (b[i] >= 0x20 && b[i] <= 0x7E) || // #x20 <= . <= #x7E
66 | (b[i] == 0xC2 && b[i+1] >= 0xA0) || // #0xA0 <= . <= #xD7FF
67 | (b[i] > 0xC2 && b[i] < 0xED) ||
68 | (b[i] == 0xED && b[i+1] < 0xA0) ||
69 | (b[i] == 0xEE) ||
70 | (b[i] == 0xEF && // #xE000 <= . <= #xFFFD
71 | !(b[i+1] == 0xBB && b[i+2] == 0xBF) && // && . != #xFEFF
72 | !(b[i+1] == 0xBF && (b[i+2] == 0xBE || b[i+2] == 0xBF))))
73 | }
74 |
75 | // Check if the character at the specified position is NUL.
76 | func is_z(b []byte, i int) bool {
77 | return b[i] == 0x00
78 | }
79 |
80 | // Check if the beginning of the buffer is a BOM.
81 | func is_bom(b []byte, i int) bool {
82 | return b[0] == 0xEF && b[1] == 0xBB && b[2] == 0xBF
83 | }
84 |
85 | // Check if the character at the specified position is space.
86 | func is_space(b []byte, i int) bool {
87 | return b[i] == ' '
88 | }
89 |
90 | // Check if the character at the specified position is tab.
91 | func is_tab(b []byte, i int) bool {
92 | return b[i] == '\t'
93 | }
94 |
95 | // Check if the character at the specified position is blank (space or tab).
96 | func is_blank(b []byte, i int) bool {
97 | //return is_space(b, i) || is_tab(b, i)
98 | return b[i] == ' ' || b[i] == '\t'
99 | }
100 |
101 | // Check if the character at the specified position is a line break.
102 | func is_break(b []byte, i int) bool {
103 | return (b[i] == '\r' || // CR (#xD)
104 | b[i] == '\n' || // LF (#xA)
105 | b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85)
106 | b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028)
107 | b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9) // PS (#x2029)
108 | }
109 |
110 | func is_crlf(b []byte, i int) bool {
111 | return b[i] == '\r' && b[i+1] == '\n'
112 | }
113 |
114 | // Check if the character is a line break or NUL.
115 | func is_breakz(b []byte, i int) bool {
116 | //return is_break(b, i) || is_z(b, i)
117 | return ( // is_break:
118 | b[i] == '\r' || // CR (#xD)
119 | b[i] == '\n' || // LF (#xA)
120 | b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85)
121 | b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028)
122 | b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9 || // PS (#x2029)
123 | // is_z:
124 | b[i] == 0)
125 | }
126 |
127 | // Check if the character is a line break, space, or NUL.
128 | func is_spacez(b []byte, i int) bool {
129 | //return is_space(b, i) || is_breakz(b, i)
130 | return ( // is_space:
131 | b[i] == ' ' ||
132 | // is_breakz:
133 | b[i] == '\r' || // CR (#xD)
134 | b[i] == '\n' || // LF (#xA)
135 | b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85)
136 | b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028)
137 | b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9 || // PS (#x2029)
138 | b[i] == 0)
139 | }
140 |
141 | // Check if the character is a line break, space, tab, or NUL.
142 | func is_blankz(b []byte, i int) bool {
143 | //return is_blank(b, i) || is_breakz(b, i)
144 | return ( // is_blank:
145 | b[i] == ' ' || b[i] == '\t' ||
146 | // is_breakz:
147 | b[i] == '\r' || // CR (#xD)
148 | b[i] == '\n' || // LF (#xA)
149 | b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85)
150 | b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028)
151 | b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9 || // PS (#x2029)
152 | b[i] == 0)
153 | }
154 |
155 | // Determine the width of the character.
156 | func width(b byte) int {
157 | // Don't replace these by a switch without first
158 | // confirming that it is being inlined.
159 | if b&0x80 == 0x00 {
160 | return 1
161 | }
162 | if b&0xE0 == 0xC0 {
163 | return 2
164 | }
165 | if b&0xF0 == 0xE0 {
166 | return 3
167 | }
168 | if b&0xF8 == 0xF0 {
169 | return 4
170 | }
171 | return 0
172 |
173 | }
174 |
--------------------------------------------------------------------------------
/mattertee/vendor/gopkg.in/yaml.v2/resolve.go:
--------------------------------------------------------------------------------
1 | package yaml
2 |
3 | import (
4 | "encoding/base64"
5 | "math"
6 | "strconv"
7 | "strings"
8 | "unicode/utf8"
9 | )
10 |
11 | type resolveMapItem struct {
12 | value interface{}
13 | tag string
14 | }
15 |
16 | var resolveTable = make([]byte, 256)
17 | var resolveMap = make(map[string]resolveMapItem)
18 |
19 | func init() {
20 | t := resolveTable
21 | t[int('+')] = 'S' // Sign
22 | t[int('-')] = 'S'
23 | for _, c := range "0123456789" {
24 | t[int(c)] = 'D' // Digit
25 | }
26 | for _, c := range "yYnNtTfFoO~" {
27 | t[int(c)] = 'M' // In map
28 | }
29 | t[int('.')] = '.' // Float (potentially in map)
30 |
31 | var resolveMapList = []struct {
32 | v interface{}
33 | tag string
34 | l []string
35 | }{
36 | {true, yaml_BOOL_TAG, []string{"y", "Y", "yes", "Yes", "YES"}},
37 | {true, yaml_BOOL_TAG, []string{"true", "True", "TRUE"}},
38 | {true, yaml_BOOL_TAG, []string{"on", "On", "ON"}},
39 | {false, yaml_BOOL_TAG, []string{"n", "N", "no", "No", "NO"}},
40 | {false, yaml_BOOL_TAG, []string{"false", "False", "FALSE"}},
41 | {false, yaml_BOOL_TAG, []string{"off", "Off", "OFF"}},
42 | {nil, yaml_NULL_TAG, []string{"", "~", "null", "Null", "NULL"}},
43 | {math.NaN(), yaml_FLOAT_TAG, []string{".nan", ".NaN", ".NAN"}},
44 | {math.Inf(+1), yaml_FLOAT_TAG, []string{".inf", ".Inf", ".INF"}},
45 | {math.Inf(+1), yaml_FLOAT_TAG, []string{"+.inf", "+.Inf", "+.INF"}},
46 | {math.Inf(-1), yaml_FLOAT_TAG, []string{"-.inf", "-.Inf", "-.INF"}},
47 | {"<<", yaml_MERGE_TAG, []string{"<<"}},
48 | }
49 |
50 | m := resolveMap
51 | for _, item := range resolveMapList {
52 | for _, s := range item.l {
53 | m[s] = resolveMapItem{item.v, item.tag}
54 | }
55 | }
56 | }
57 |
58 | const longTagPrefix = "tag:yaml.org,2002:"
59 |
60 | func shortTag(tag string) string {
61 | // TODO This can easily be made faster and produce less garbage.
62 | if strings.HasPrefix(tag, longTagPrefix) {
63 | return "!!" + tag[len(longTagPrefix):]
64 | }
65 | return tag
66 | }
67 |
68 | func longTag(tag string) string {
69 | if strings.HasPrefix(tag, "!!") {
70 | return longTagPrefix + tag[2:]
71 | }
72 | return tag
73 | }
74 |
75 | func resolvableTag(tag string) bool {
76 | switch tag {
77 | case "", yaml_STR_TAG, yaml_BOOL_TAG, yaml_INT_TAG, yaml_FLOAT_TAG, yaml_NULL_TAG:
78 | return true
79 | }
80 | return false
81 | }
82 |
83 | func resolve(tag string, in string) (rtag string, out interface{}) {
84 | if !resolvableTag(tag) {
85 | return tag, in
86 | }
87 |
88 | defer func() {
89 | switch tag {
90 | case "", rtag, yaml_STR_TAG, yaml_BINARY_TAG:
91 | return
92 | }
93 | failf("cannot decode %s `%s` as a %s", shortTag(rtag), in, shortTag(tag))
94 | }()
95 |
96 | // Any data is accepted as a !!str or !!binary.
97 | // Otherwise, the prefix is enough of a hint about what it might be.
98 | hint := byte('N')
99 | if in != "" {
100 | hint = resolveTable[in[0]]
101 | }
102 | if hint != 0 && tag != yaml_STR_TAG && tag != yaml_BINARY_TAG {
103 | // Handle things we can lookup in a map.
104 | if item, ok := resolveMap[in]; ok {
105 | return item.tag, item.value
106 | }
107 |
108 | // Base 60 floats are a bad idea, were dropped in YAML 1.2, and
109 | // are purposefully unsupported here. They're still quoted on
110 | // the way out for compatibility with other parser, though.
111 |
112 | switch hint {
113 | case 'M':
114 | // We've already checked the map above.
115 |
116 | case '.':
117 | // Not in the map, so maybe a normal float.
118 | floatv, err := strconv.ParseFloat(in, 64)
119 | if err == nil {
120 | return yaml_FLOAT_TAG, floatv
121 | }
122 |
123 | case 'D', 'S':
124 | // Int, float, or timestamp.
125 | plain := strings.Replace(in, "_", "", -1)
126 | intv, err := strconv.ParseInt(plain, 0, 64)
127 | if err == nil {
128 | if intv == int64(int(intv)) {
129 | return yaml_INT_TAG, int(intv)
130 | } else {
131 | return yaml_INT_TAG, intv
132 | }
133 | }
134 | uintv, err := strconv.ParseUint(plain, 0, 64)
135 | if err == nil {
136 | return yaml_INT_TAG, uintv
137 | }
138 | floatv, err := strconv.ParseFloat(plain, 64)
139 | if err == nil {
140 | return yaml_FLOAT_TAG, floatv
141 | }
142 | if strings.HasPrefix(plain, "0b") {
143 | intv, err := strconv.ParseInt(plain[2:], 2, 64)
144 | if err == nil {
145 | if intv == int64(int(intv)) {
146 | return yaml_INT_TAG, int(intv)
147 | } else {
148 | return yaml_INT_TAG, intv
149 | }
150 | }
151 | uintv, err := strconv.ParseUint(plain[2:], 2, 64)
152 | if err == nil {
153 | return yaml_INT_TAG, uintv
154 | }
155 | } else if strings.HasPrefix(plain, "-0b") {
156 | intv, err := strconv.ParseInt(plain[3:], 2, 64)
157 | if err == nil {
158 | if intv == int64(int(intv)) {
159 | return yaml_INT_TAG, -int(intv)
160 | } else {
161 | return yaml_INT_TAG, -intv
162 | }
163 | }
164 | }
165 | // XXX Handle timestamps here.
166 |
167 | default:
168 | panic("resolveTable item not yet handled: " + string(rune(hint)) + " (with " + in + ")")
169 | }
170 | }
171 | if tag == yaml_BINARY_TAG {
172 | return yaml_BINARY_TAG, in
173 | }
174 | if utf8.ValidString(in) {
175 | return yaml_STR_TAG, in
176 | }
177 | return yaml_BINARY_TAG, encodeBase64(in)
178 | }
179 |
180 | // encodeBase64 encodes s as base64 that is broken up into multiple lines
181 | // as appropriate for the resulting length.
182 | func encodeBase64(s string) string {
183 | const lineLen = 70
184 | encLen := base64.StdEncoding.EncodedLen(len(s))
185 | lines := encLen/lineLen + 1
186 | buf := make([]byte, encLen*2+lines)
187 | in := buf[0:encLen]
188 | out := buf[encLen:]
189 | base64.StdEncoding.Encode(in, []byte(s))
190 | k := 0
191 | for i := 0; i < len(in); i += lineLen {
192 | j := i + lineLen
193 | if j > len(in) {
194 | j = len(in)
195 | }
196 | k += copy(out[k:], in[i:j])
197 | if lines > 1 {
198 | out[k] = '\n'
199 | k++
200 | }
201 | }
202 | return string(out[:k])
203 | }
204 |
--------------------------------------------------------------------------------
/mattertee/vendor/github.com/gorilla/schema/cache.go:
--------------------------------------------------------------------------------
1 | // Copyright 2012 The Gorilla Authors. All rights reserved.
2 | // Use of this source code is governed by a BSD-style
3 | // license that can be found in the LICENSE file.
4 |
5 | package schema
6 |
7 | import (
8 | "errors"
9 | "reflect"
10 | "strconv"
11 | "strings"
12 | "sync"
13 | )
14 |
15 | var invalidPath = errors.New("schema: invalid path")
16 |
17 | // newCache returns a new cache.
18 | func newCache() *cache {
19 | c := cache{
20 | m: make(map[reflect.Type]*structInfo),
21 | conv: make(map[reflect.Kind]Converter),
22 | regconv: make(map[reflect.Type]Converter),
23 | tag: "schema",
24 | }
25 | for k, v := range converters {
26 | c.conv[k] = v
27 | }
28 | return &c
29 | }
30 |
31 | // cache caches meta-data about a struct.
32 | type cache struct {
33 | l sync.RWMutex
34 | m map[reflect.Type]*structInfo
35 | conv map[reflect.Kind]Converter
36 | regconv map[reflect.Type]Converter
37 | tag string
38 | }
39 |
40 | // parsePath parses a path in dotted notation verifying that it is a valid
41 | // path to a struct field.
42 | //
43 | // It returns "path parts" which contain indices to fields to be used by
44 | // reflect.Value.FieldByString(). Multiple parts are required for slices of
45 | // structs.
46 | func (c *cache) parsePath(p string, t reflect.Type) ([]pathPart, error) {
47 | var struc *structInfo
48 | var field *fieldInfo
49 | var index64 int64
50 | var err error
51 | parts := make([]pathPart, 0)
52 | path := make([]string, 0)
53 | keys := strings.Split(p, ".")
54 | for i := 0; i < len(keys); i++ {
55 | if t.Kind() != reflect.Struct {
56 | return nil, invalidPath
57 | }
58 | if struc = c.get(t); struc == nil {
59 | return nil, invalidPath
60 | }
61 | if field = struc.get(keys[i]); field == nil {
62 | return nil, invalidPath
63 | }
64 | // Valid field. Append index.
65 | path = append(path, field.name)
66 | if field.ss {
67 | // Parse a special case: slices of structs.
68 | // i+1 must be the slice index.
69 | //
70 | // Now that struct can implements TextUnmarshaler interface,
71 | // we don't need to force the struct's fields to appear in the path.
72 | // So checking i+2 is not necessary anymore.
73 | i++
74 | if i+1 > len(keys) {
75 | return nil, invalidPath
76 | }
77 | if index64, err = strconv.ParseInt(keys[i], 10, 0); err != nil {
78 | return nil, invalidPath
79 | }
80 | parts = append(parts, pathPart{
81 | path: path,
82 | field: field,
83 | index: int(index64),
84 | })
85 | path = make([]string, 0)
86 |
87 | // Get the next struct type, dropping ptrs.
88 | if field.typ.Kind() == reflect.Ptr {
89 | t = field.typ.Elem()
90 | } else {
91 | t = field.typ
92 | }
93 | if t.Kind() == reflect.Slice {
94 | t = t.Elem()
95 | if t.Kind() == reflect.Ptr {
96 | t = t.Elem()
97 | }
98 | }
99 | } else if field.typ.Kind() == reflect.Ptr {
100 | t = field.typ.Elem()
101 | } else {
102 | t = field.typ
103 | }
104 | }
105 | // Add the remaining.
106 | parts = append(parts, pathPart{
107 | path: path,
108 | field: field,
109 | index: -1,
110 | })
111 | return parts, nil
112 | }
113 |
114 | // get returns a cached structInfo, creating it if necessary.
115 | func (c *cache) get(t reflect.Type) *structInfo {
116 | c.l.RLock()
117 | info := c.m[t]
118 | c.l.RUnlock()
119 | if info == nil {
120 | info = c.create(t, nil)
121 | c.l.Lock()
122 | c.m[t] = info
123 | c.l.Unlock()
124 | }
125 | return info
126 | }
127 |
128 | // create creates a structInfo with meta-data about a struct.
129 | func (c *cache) create(t reflect.Type, info *structInfo) *structInfo {
130 | if info == nil {
131 | info = &structInfo{fields: []*fieldInfo{}}
132 | }
133 | for i := 0; i < t.NumField(); i++ {
134 | field := t.Field(i)
135 | if field.Anonymous {
136 | ft := field.Type
137 | if ft.Kind() == reflect.Ptr {
138 | ft = ft.Elem()
139 | }
140 | if ft.Kind() == reflect.Struct {
141 | c.create(ft, info)
142 | }
143 | }
144 | c.createField(field, info)
145 | }
146 | return info
147 | }
148 |
149 | // createField creates a fieldInfo for the given field.
150 | func (c *cache) createField(field reflect.StructField, info *structInfo) {
151 | alias, options := fieldAlias(field, c.tag)
152 | if alias == "-" {
153 | // Ignore this field.
154 | return
155 | }
156 | // Check if the type is supported and don't cache it if not.
157 | // First let's get the basic type.
158 | isSlice, isStruct := false, false
159 | ft := field.Type
160 | if ft.Kind() == reflect.Ptr {
161 | ft = ft.Elem()
162 | }
163 | if isSlice = ft.Kind() == reflect.Slice; isSlice {
164 | ft = ft.Elem()
165 | if ft.Kind() == reflect.Ptr {
166 | ft = ft.Elem()
167 | }
168 | }
169 | if ft.Kind() == reflect.Array {
170 | ft = ft.Elem()
171 | if ft.Kind() == reflect.Ptr {
172 | ft = ft.Elem()
173 | }
174 | }
175 | if isStruct = ft.Kind() == reflect.Struct; !isStruct {
176 | if conv := c.conv[ft.Kind()]; conv == nil {
177 | // Type is not supported.
178 | return
179 | }
180 | }
181 |
182 | info.fields = append(info.fields, &fieldInfo{
183 | typ: field.Type,
184 | name: field.Name,
185 | ss: isSlice && isStruct,
186 | alias: alias,
187 | required: options.Contains("required"),
188 | })
189 | }
190 |
191 | // converter returns the converter for a type.
192 | func (c *cache) converter(t reflect.Type) Converter {
193 | conv := c.regconv[t]
194 | if conv == nil {
195 | conv = c.conv[t.Kind()]
196 | }
197 | return conv
198 | }
199 |
200 | // ----------------------------------------------------------------------------
201 |
202 | type structInfo struct {
203 | fields []*fieldInfo
204 | }
205 |
206 | func (i *structInfo) get(alias string) *fieldInfo {
207 | for _, field := range i.fields {
208 | if strings.EqualFold(field.alias, alias) {
209 | return field
210 | }
211 | }
212 | return nil
213 | }
214 |
215 | type fieldInfo struct {
216 | typ reflect.Type
217 | name string // field name in the struct.
218 | ss bool // true if this is a slice of structs.
219 | alias string
220 | required bool // tag option
221 | }
222 |
223 | type pathPart struct {
224 | field *fieldInfo
225 | path []string // path to the field: walks structs using field names.
226 | index int // struct index in slices of structs.
227 | }
228 |
229 | // ----------------------------------------------------------------------------
230 |
231 | // fieldAlias parses a field tag to get a field alias.
232 | func fieldAlias(field reflect.StructField, tagName string) (alias string, options tagOptions) {
233 | if tag := field.Tag.Get(tagName); tag != "" {
234 | alias, options = parseTag(tag)
235 | }
236 | if alias == "" {
237 | alias = field.Name
238 | }
239 | return alias, options
240 | }
241 |
242 | // tagOptions is the string following a comma in a struct field's tag, or
243 | // the empty string. It does not include the leading comma.
244 | type tagOptions []string
245 |
246 | // parseTag splits a struct field's url tag into its name and comma-separated
247 | // options.
248 | func parseTag(tag string) (string, tagOptions) {
249 | s := strings.Split(tag, ",")
250 | return s[0], s[1:]
251 | }
252 |
253 | // Contains checks whether the tagOptions contains the specified option.
254 | func (o tagOptions) Contains(option string) bool {
255 | for _, s := range o {
256 | if s == option {
257 | return true
258 | }
259 | }
260 | return false
261 | }
262 |
--------------------------------------------------------------------------------
/mattertee/vendor/gopkg.in/yaml.v2/encode.go:
--------------------------------------------------------------------------------
1 | package yaml
2 |
3 | import (
4 | "encoding"
5 | "fmt"
6 | "reflect"
7 | "regexp"
8 | "sort"
9 | "strconv"
10 | "strings"
11 | "time"
12 | )
13 |
14 | type encoder struct {
15 | emitter yaml_emitter_t
16 | event yaml_event_t
17 | out []byte
18 | flow bool
19 | }
20 |
21 | func newEncoder() (e *encoder) {
22 | e = &encoder{}
23 | e.must(yaml_emitter_initialize(&e.emitter))
24 | yaml_emitter_set_output_string(&e.emitter, &e.out)
25 | yaml_emitter_set_unicode(&e.emitter, true)
26 | e.must(yaml_stream_start_event_initialize(&e.event, yaml_UTF8_ENCODING))
27 | e.emit()
28 | e.must(yaml_document_start_event_initialize(&e.event, nil, nil, true))
29 | e.emit()
30 | return e
31 | }
32 |
33 | func (e *encoder) finish() {
34 | e.must(yaml_document_end_event_initialize(&e.event, true))
35 | e.emit()
36 | e.emitter.open_ended = false
37 | e.must(yaml_stream_end_event_initialize(&e.event))
38 | e.emit()
39 | }
40 |
41 | func (e *encoder) destroy() {
42 | yaml_emitter_delete(&e.emitter)
43 | }
44 |
45 | func (e *encoder) emit() {
46 | // This will internally delete the e.event value.
47 | if !yaml_emitter_emit(&e.emitter, &e.event) && e.event.typ != yaml_DOCUMENT_END_EVENT && e.event.typ != yaml_STREAM_END_EVENT {
48 | e.must(false)
49 | }
50 | }
51 |
52 | func (e *encoder) must(ok bool) {
53 | if !ok {
54 | msg := e.emitter.problem
55 | if msg == "" {
56 | msg = "unknown problem generating YAML content"
57 | }
58 | failf("%s", msg)
59 | }
60 | }
61 |
62 | func (e *encoder) marshal(tag string, in reflect.Value) {
63 | if !in.IsValid() {
64 | e.nilv()
65 | return
66 | }
67 | iface := in.Interface()
68 | if m, ok := iface.(Marshaler); ok {
69 | v, err := m.MarshalYAML()
70 | if err != nil {
71 | fail(err)
72 | }
73 | if v == nil {
74 | e.nilv()
75 | return
76 | }
77 | in = reflect.ValueOf(v)
78 | } else if m, ok := iface.(encoding.TextMarshaler); ok {
79 | text, err := m.MarshalText()
80 | if err != nil {
81 | fail(err)
82 | }
83 | in = reflect.ValueOf(string(text))
84 | }
85 | switch in.Kind() {
86 | case reflect.Interface:
87 | if in.IsNil() {
88 | e.nilv()
89 | } else {
90 | e.marshal(tag, in.Elem())
91 | }
92 | case reflect.Map:
93 | e.mapv(tag, in)
94 | case reflect.Ptr:
95 | if in.IsNil() {
96 | e.nilv()
97 | } else {
98 | e.marshal(tag, in.Elem())
99 | }
100 | case reflect.Struct:
101 | e.structv(tag, in)
102 | case reflect.Slice:
103 | if in.Type().Elem() == mapItemType {
104 | e.itemsv(tag, in)
105 | } else {
106 | e.slicev(tag, in)
107 | }
108 | case reflect.String:
109 | e.stringv(tag, in)
110 | case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
111 | if in.Type() == durationType {
112 | e.stringv(tag, reflect.ValueOf(iface.(time.Duration).String()))
113 | } else {
114 | e.intv(tag, in)
115 | }
116 | case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
117 | e.uintv(tag, in)
118 | case reflect.Float32, reflect.Float64:
119 | e.floatv(tag, in)
120 | case reflect.Bool:
121 | e.boolv(tag, in)
122 | default:
123 | panic("cannot marshal type: " + in.Type().String())
124 | }
125 | }
126 |
127 | func (e *encoder) mapv(tag string, in reflect.Value) {
128 | e.mappingv(tag, func() {
129 | keys := keyList(in.MapKeys())
130 | sort.Sort(keys)
131 | for _, k := range keys {
132 | e.marshal("", k)
133 | e.marshal("", in.MapIndex(k))
134 | }
135 | })
136 | }
137 |
138 | func (e *encoder) itemsv(tag string, in reflect.Value) {
139 | e.mappingv(tag, func() {
140 | slice := in.Convert(reflect.TypeOf([]MapItem{})).Interface().([]MapItem)
141 | for _, item := range slice {
142 | e.marshal("", reflect.ValueOf(item.Key))
143 | e.marshal("", reflect.ValueOf(item.Value))
144 | }
145 | })
146 | }
147 |
148 | func (e *encoder) structv(tag string, in reflect.Value) {
149 | sinfo, err := getStructInfo(in.Type())
150 | if err != nil {
151 | panic(err)
152 | }
153 | e.mappingv(tag, func() {
154 | for _, info := range sinfo.FieldsList {
155 | var value reflect.Value
156 | if info.Inline == nil {
157 | value = in.Field(info.Num)
158 | } else {
159 | value = in.FieldByIndex(info.Inline)
160 | }
161 | if info.OmitEmpty && isZero(value) {
162 | continue
163 | }
164 | e.marshal("", reflect.ValueOf(info.Key))
165 | e.flow = info.Flow
166 | e.marshal("", value)
167 | }
168 | if sinfo.InlineMap >= 0 {
169 | m := in.Field(sinfo.InlineMap)
170 | if m.Len() > 0 {
171 | e.flow = false
172 | keys := keyList(m.MapKeys())
173 | sort.Sort(keys)
174 | for _, k := range keys {
175 | if _, found := sinfo.FieldsMap[k.String()]; found {
176 | panic(fmt.Sprintf("Can't have key %q in inlined map; conflicts with struct field", k.String()))
177 | }
178 | e.marshal("", k)
179 | e.flow = false
180 | e.marshal("", m.MapIndex(k))
181 | }
182 | }
183 | }
184 | })
185 | }
186 |
187 | func (e *encoder) mappingv(tag string, f func()) {
188 | implicit := tag == ""
189 | style := yaml_BLOCK_MAPPING_STYLE
190 | if e.flow {
191 | e.flow = false
192 | style = yaml_FLOW_MAPPING_STYLE
193 | }
194 | e.must(yaml_mapping_start_event_initialize(&e.event, nil, []byte(tag), implicit, style))
195 | e.emit()
196 | f()
197 | e.must(yaml_mapping_end_event_initialize(&e.event))
198 | e.emit()
199 | }
200 |
201 | func (e *encoder) slicev(tag string, in reflect.Value) {
202 | implicit := tag == ""
203 | style := yaml_BLOCK_SEQUENCE_STYLE
204 | if e.flow {
205 | e.flow = false
206 | style = yaml_FLOW_SEQUENCE_STYLE
207 | }
208 | e.must(yaml_sequence_start_event_initialize(&e.event, nil, []byte(tag), implicit, style))
209 | e.emit()
210 | n := in.Len()
211 | for i := 0; i < n; i++ {
212 | e.marshal("", in.Index(i))
213 | }
214 | e.must(yaml_sequence_end_event_initialize(&e.event))
215 | e.emit()
216 | }
217 |
218 | // isBase60Float returns whether s is in base 60 notation as defined in YAML 1.1.
219 | //
220 | // The base 60 float notation in YAML 1.1 is a terrible idea and is unsupported
221 | // in YAML 1.2 and by this package, but such strings should still be marshalled
222 | // quoted for the time being, for compatibility with other parsers.
223 | func isBase60Float(s string) (result bool) {
224 | // Fast path.
225 | if s == "" {
226 | return false
227 | }
228 | c := s[0]
229 | if !(c == '+' || c == '-' || c >= '0' && c <= '9') || strings.IndexByte(s, ':') < 0 {
230 | return false
231 | }
232 | // Do the full match.
233 | return base60float.MatchString(s)
234 | }
235 |
236 | // From http://yaml.org/type/float.html, except the regular expression there
237 | // is bogus. In practice parsers do not enforce the "\.[0-9_]*" suffix.
238 | var base60float = regexp.MustCompile(`^[-+]?[0-9][0-9_]*(?::[0-5]?[0-9])+(?:\.[0-9_]*)?$`)
239 |
240 | func (e *encoder) stringv(tag string, in reflect.Value) {
241 | var style yaml_scalar_style_t
242 | s := in.String()
243 | rtag, rs := resolve("", s)
244 | if rtag == yaml_BINARY_TAG {
245 | if tag == "" || tag == yaml_STR_TAG {
246 | tag = rtag
247 | s = rs.(string)
248 | } else if tag == yaml_BINARY_TAG {
249 | failf("explicitly tagged !!binary data must be base64-encoded")
250 | } else {
251 | failf("cannot marshal invalid UTF-8 data as %s", shortTag(tag))
252 | }
253 | }
254 | if tag == "" && (rtag != yaml_STR_TAG || isBase60Float(s)) {
255 | style = yaml_DOUBLE_QUOTED_SCALAR_STYLE
256 | } else if strings.Contains(s, "\n") {
257 | style = yaml_LITERAL_SCALAR_STYLE
258 | } else {
259 | style = yaml_PLAIN_SCALAR_STYLE
260 | }
261 | e.emitScalar(s, "", tag, style)
262 | }
263 |
264 | func (e *encoder) boolv(tag string, in reflect.Value) {
265 | var s string
266 | if in.Bool() {
267 | s = "true"
268 | } else {
269 | s = "false"
270 | }
271 | e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE)
272 | }
273 |
274 | func (e *encoder) intv(tag string, in reflect.Value) {
275 | s := strconv.FormatInt(in.Int(), 10)
276 | e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE)
277 | }
278 |
279 | func (e *encoder) uintv(tag string, in reflect.Value) {
280 | s := strconv.FormatUint(in.Uint(), 10)
281 | e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE)
282 | }
283 |
284 | func (e *encoder) floatv(tag string, in reflect.Value) {
285 | // FIXME: Handle 64 bits here.
286 | s := strconv.FormatFloat(float64(in.Float()), 'g', -1, 32)
287 | switch s {
288 | case "+Inf":
289 | s = ".inf"
290 | case "-Inf":
291 | s = "-.inf"
292 | case "NaN":
293 | s = ".nan"
294 | }
295 | e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE)
296 | }
297 |
298 | func (e *encoder) nilv() {
299 | e.emitScalar("null", "", "", yaml_PLAIN_SCALAR_STYLE)
300 | }
301 |
302 | func (e *encoder) emitScalar(value, anchor, tag string, style yaml_scalar_style_t) {
303 | implicit := tag == ""
304 | e.must(yaml_scalar_event_initialize(&e.event, []byte(anchor), []byte(tag), []byte(value), implicit, implicit, style))
305 | e.emit()
306 | }
307 |
--------------------------------------------------------------------------------
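A short, hedged illustration of a few of the encoder branches above: time.Duration values are written through Duration.String, strings containing newlines are emitted as literal block scalars, and the ",flow" tag option switches a mapping to flow style. The Config struct and its values are invented for the example, and the exact output depends on the emitter.

package main

import (
	"fmt"
	"time"

	"gopkg.in/yaml.v2"
)

// Config is a hypothetical struct that exercises the duration, multi-line
// string, and flow-mapping cases handled by encoder.marshal above.
type Config struct {
	Timeout time.Duration     `yaml:"timeout"`
	Notes   string            `yaml:"notes"`
	Labels  map[string]string `yaml:"labels,flow"`
}

func main() {
	c := Config{
		Timeout: 90 * time.Second,
		Notes:   "first line\nsecond line\n",
		Labels:  map[string]string{"env": "dev", "team": "ops"},
	}
	out, err := yaml.Marshal(&c)
	if err != nil {
		panic(err)
	}
	fmt.Print(string(out))
	// Expected shape of the output:
	//   timeout: 1m30s
	//   notes: |
	//     first line
	//     second line
	//   labels: {env: dev, team: ops}
}
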
/git-mattermost-hook/git-mattermost-hook:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | #
3 | # Mattermost notification post-receive hook (adapted from the Slack version).
4 | #
5 | # Based on: https://github.com/joemiller/git-hooks Campfire notification post-receive hook. Author: Joe Miller
6 | # (http://joemiller.me)
7 | #
8 | # Based on post-receive.irc by Mikael Fridh https://gist.github.com/1821358
9 | #
10 | # Settings needed:
11 | # git config hooks.slack.webhook-url "https://hooks.slack.com/services/..."
12 | # git config hooks.slack.channel "general"
13 | #
14 | # - The Slack webhook URL can be found in:
15 | # https://my.slack.com/services/new/incoming-webhook
16 | # Based on https://github.com/chriseldredge/git-slack-hook
17 | #
18 | function help() {
19 | echo "Required config settings:"
20 | echo " git config hooks.slack.webhook-url 'https://hooks.slack.com/services/...'"
21 | echo " git config hooks.slack.channel 'general'"
22 | echo " git config hooks.slack.show-only-last-commit true #optional"
23 | echo " git config hooks.slack.username 'git' #optional"
24 | echo " git config hooks.slack.icon-url 'http://imgur/icon.png' #optional"
25 | echo " git config hooks.slack.icon-emoji ':twisted_rightwards_arrows:' #optional"
26 | echo " git config hooks.slack.repo-nice-name 'MyRepo' #optional"
27 | echo " git config hooks.slack.repos-root '/path/to/repos' #optional"
28 | echo " git config hooks.slack.changeset-url-pattern 'http://yourserver/%repo_path%/changeset/%rev_hash%' #optional"
29 | echo " git config hooks.slack.compare-url-pattern 'http://yourserver/%repo_path%/changeset/%old_rev_hash%..%new_rev_hash%' #optional"
30 | echo " git config hooks.slack.branch-regexp 'regexp' #optional"
31 | echo " git config hooks.slack.mattertee '/bin/mattertee'"
32 | }
33 |
34 | function replace_variables() {
35 | sed "s|%repo_path%|$repopath|g;s|%old_rev_hash%|$oldrev|g;s|%new_rev_hash%|$newrev|g;s|%rev_hash%|$newrev|g;s|%repo_prefix%|$repoprefix|g;s|%3B|;|g"
36 | }
37 |
38 | function notify() {
39 | oldrev=$(git rev-parse $1)
40 | newrev=$(git rev-parse $2)
41 | refname="$3"
42 |
43 | # --- Interpret
44 | # 0000->1234 (create)
45 | # 1234->2345 (update)
46 | # 2345->0000 (delete)
47 | if expr "$oldrev" : '0*$' >/dev/null
48 | then
49 | change_type="create"
50 | else
51 | if expr "$newrev" : '0*$' >/dev/null
52 | then
53 | change_type="delete"
54 | else
55 | change_type="update"
56 | fi
57 | fi
58 |
59 | # --- Get the revision types
60 | newrev_type=$(git cat-file -t $newrev 2> /dev/null)
61 | oldrev_type=$(git cat-file -t "$oldrev" 2> /dev/null)
62 | case "$change_type" in
63 | create|update)
64 | rev="$newrev"
65 | rev_type="$newrev_type"
66 | ;;
67 | delete)
68 | rev="$oldrev"
69 | rev_type="$oldrev_type"
70 | ;;
71 | esac
72 |
73 | 	# The revision type tells us what type of object the commit is; combined
74 | 	# with the location of the ref we can decide between:
75 | 	# - working branch
76 | 	# - tracking branch
77 | 	# - unannotated tag
78 | # - annotated tag
79 | case "$refname","$rev_type" in
80 | refs/tags/*,commit)
81 | # un-annotated tag
82 | refname_type="tag"
83 | short_refname=${refname##refs/tags/}
84 | ;;
85 | refs/tags/*,tag)
86 | # annotated tag
87 | refname_type="annotated tag"
88 | short_refname=${refname##refs/tags/}
89 | # change recipients
90 | if [ -n "$announcerecipients" ]; then
91 | recipients="$announcerecipients"
92 | fi
93 | ;;
94 | refs/heads/*,commit)
95 | # branch
96 | refname_type="branch"
97 | short_refname=${refname##refs/heads/}
98 | ;;
99 | refs/remotes/*,commit)
100 | # tracking branch
101 | refname_type="tracking branch"
102 | short_refname=${refname##refs/remotes/}
103 | echo >&2 "*** Push-update of tracking branch, $refname"
104 | echo >&2 "*** - no notification generated."
105 | return 0
106 | ;;
107 | *)
108 | # Anything else (is there anything else?)
109 | echo >&2 "*** Unknown type of update to $refname ($rev_type)"
110 | echo >&2 "*** - no notification generated"
111 | return 0
112 | ;;
113 | esac
114 |
115 | branchregexp=$(git config --get hooks.slack.branch-regexp)
116 | if [ -n "$branchregexp" ]; then
117 | if [[ ! $short_refname =~ $branchregexp ]]; then
118 | exit 0;
119 | fi
120 | fi
121 |
122 | #channels=$(git config hooks.irc.channel)
123 |
124 | # plural suffix, default "", changed to "s" if commits > 1
125 | s=""
126 |
127 | # Repo name, either Gitolite or normal repo.
128 | if [ -n "$GL_REPO" ]; then
129 | # it's a gitolite repo
130 | repodir=$(basename $(pwd))
131 | repo=$GL_REPO
132 | else
133 | repodir=$(basename $(pwd))
134 | if [ "$repodir" == ".git" ]; then
135 | repodir=$(dirname $PWD)
136 | repodir=$(basename $repodir)
137 | fi
138 | repo=${repodir%.git}
139 | fi
140 |
141 | repoprefix=$(git config hooks.slack.repo-nice-name || git config hooks.irc.prefix || git config hooks.emailprefix || echo "$repo")
142 | onlylast=$(git config --get hooks.slack.show-only-last-commit)
143 | onlylast=$onlylast && [ -n "$onlylast" ]
144 | fullcommit=$(git config --get hooks.slack.show-full-commit)
145 |
146 | # Get the user information
147 | # If $GL_USER is set we're running under gitolite.
148 | if [ -n "$GL_USER" ]; then
149 | user=$GL_USER
150 | else
151 | user=$USER
152 | fi
153 |
154 | case ${change_type} in
155 | "create")
156 | header="New ${refname_type} *${short_refname}* has been created in ${repoprefix}"
157 | single_commit_suffix="commit"
158 | ;;
159 | "delete")
160 | header="$(tr '[:lower:]' '[:upper:]' <<< ${refname_type:0:1})${refname_type:1} *$short_refname* has been deleted from ${repoprefix}"
161 | single_commit_suffix="commit"
162 | ;;
163 | "update")
164 | num=$(git log --pretty=oneline ${1}..${2}|wc -l|tr -d ' ')
165 | branch=${3/refs\/heads\//}
166 |
167 | if [ ${num} -gt 1 ]; then
168 | header="${num} new commits *pushed* to *${short_refname}* in ${repoprefix}"
169 | single_commit_suffix="one"
170 | s="s"
171 | else
172 | header="A new commit has been *pushed* to *${short_refname}* in ${repoprefix}"
173 | single_commit_suffix="one"
174 | fi
175 |
176 | ;;
177 | *)
178 | # most weird ... this should never happen
179 | echo >&2 "*** Unknown type of update to $refname ($rev_type)"
180 | echo >&2 "*** - notifications will probably screw up."
181 | ;;
182 | esac
183 |
184 | if $onlylast && [[ "${change_type}" != "delete" ]]; then
185 | header="$header, showing last $single_commit_suffix:"
186 | fi
187 |
188 |
189 | if [[ "${change_type}" != "delete" && "${refname_type}" == "branch" ]]; then
190 | changeseturlpattern=$(git config --get hooks.slack.changeset-url-pattern)
191 | compareurlpattern=$(git config --get hooks.slack.compare-url-pattern)
192 | reporoot=$(git config --get hooks.slack.repos-root)
193 |
194 | urlformat=
195 | if [ -n "$changeseturlpattern" -a -n "$reporoot" ]; then
196 | if [[ $PWD == ${reporoot}* ]]; then
197 | repopath=$PWD
198 | base=$(basename $PWD)
199 | if [ "$base" == ".git" ]; then
200 | repopath=$(dirname $repopath)
201 | fi
202 | idx=$(echo $reporoot | wc -c | tr -d ' ')
203 | repopath=$(echo $repopath | cut -c$idx- | sed s#^/##g)
204 | urlformat=$(echo $changeseturlpattern | replace_variables)
205 |
206 | if [ -n "$compareurlpattern" ]; then
207 | comparelink=$(echo $compareurlpattern | replace_variables)
208 | header=$(echo $header | sed -e "s|\([a-zA-Z0-9]\{1,\} new commit[s]\{0,1\}\)|\<$comparelink\|\\1\>|")
209 | fi
210 | else
211 | echo >&2 "$PWD is not in $reporoot. Not creating hyperlinks."
212 | fi
213 | fi
214 |
215 | formattedurl=""
216 | if [ -n "$urlformat" ]; then
217 | formattedurl="<${urlformat}|%h> "
218 | fi
219 |
220 |
221 | nl="\\\\n"
222 |
223 | if [[ "${change_type}" == "update" ]]; then
224 | start="${1}"
225 | else
226 | start="HEAD"
227 | fi
228 |
229 | end="${2}"
230 |
231 |
232 | # merge `git log` output with $header
233 | if $onlylast; then
234 | countarg="-n 1"
235 | else
236 | countarg=""
237 | fi
238 |
239 | # Process the log and escape double quotes; assuming for now that committer names don't have five semicolons in them
240 | log_out=$( git log --pretty=format:"%cN;;;;;${formattedurl}" $countarg ${start}..${end} \
241 | | sed ':a;N;$!ba;s/\n//g' \
242 | | sed -e 's/\\/\\\\/g' \
243 | | sed -e 's/"/\\"/g' \
244 | | sed -e 's/\(.*\);;;;;\(.*\)/\2,/' )
245 |
246 | fields=${log_out%?}
247 | logstats=$( git log --stat ${start}..${end})
248 | fi
249 |
250 | # slack API uses \n substitution for newlines
251 | # msg=$(echo "${msg}" | perl -p -e 's/\+/+/mg')
252 |
253 | webhook_url=$(git config --get hooks.slack.webhook-url)
254 | channel=$(git config --get hooks.slack.channel)
255 | username=$(git config --get hooks.slack.username)
256 | iconurl=$(git config --get hooks.slack.icon-url)
257 | iconemoji=$(git config --get hooks.slack.icon-emoji)
258 | mattertee=$(git config --get hooks.slack.mattertee)
259 |
260 | if [ -z "$webhook_url" ]; then
261 | echo "ERROR: config settings not found"
262 | help
263 | exit 1
264 | fi
265 |
266 | echo -e "${header} ${fields} \n \`\`\`\n${logstats}\`\`\`" | $mattertee -p -m "$webhook_url" -c "$channel" -u "$username" -i "$iconurl" > /dev/null 2>&1
267 | }
268 |
269 | # MAIN PROGRAM
270 | # Read all refs from stdin, notify Mattermost for each
271 | while read line; do
272 | set -- $line
273 | notify $*
274 | RET=$?
275 | done
276 |
277 | exit $RET
278 |
--------------------------------------------------------------------------------
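The core of the hook above is the old/new revision interpretation: an all-zero old revision means the ref was just created, an all-zero new revision means it was deleted, and anything else is an update. Re-expressed in Go purely as an illustrative sketch (the other examples in this dump are Go); this function is not part of the hook itself.

package main

import (
	"fmt"
	"strings"
)

// changeType mirrors the classification done by the hook's notify function:
// git passes an all-zero hash for a ref that does not exist on one side.
func changeType(oldrev, newrev string) string {
	isZero := func(rev string) bool { return strings.Trim(rev, "0") == "" }
	switch {
	case isZero(oldrev):
		return "create"
	case isZero(newrev):
		return "delete"
	default:
		return "update"
	}
}

func main() {
	zero := strings.Repeat("0", 40)
	fmt.Println(changeType(zero, "1234abcd"))    // create
	fmt.Println(changeType("1234abcd", "2345b")) // update
	fmt.Println(changeType("2345b", zero))       // delete
}
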
/mattertee/vendor/github.com/gorilla/schema/decoder.go:
--------------------------------------------------------------------------------
1 | // Copyright 2012 The Gorilla Authors. All rights reserved.
2 | // Use of this source code is governed by a BSD-style
3 | // license that can be found in the LICENSE file.
4 |
5 | package schema
6 |
7 | import (
8 | "encoding"
9 | "errors"
10 | "fmt"
11 | "reflect"
12 | "strings"
13 | )
14 |
15 | // NewDecoder returns a new Decoder.
16 | func NewDecoder() *Decoder {
17 | return &Decoder{cache: newCache()}
18 | }
19 |
20 | // Decoder decodes values from a map[string][]string to a struct.
21 | type Decoder struct {
22 | cache *cache
23 | zeroEmpty bool
24 | ignoreUnknownKeys bool
25 | }
26 |
27 | // SetAliasTag changes the tag used to locate custom field aliases.
28 | // The default tag is "schema".
29 | func (d *Decoder) SetAliasTag(tag string) {
30 | d.cache.tag = tag
31 | }
32 |
33 | // ZeroEmpty controls the behaviour when the decoder encounters empty values
34 | // in a map.
35 | // If z is true and a key in the map has the empty string as a value
36 | // then the corresponding struct field is set to the zero value.
37 | // If z is false then empty strings are ignored.
38 | //
39 | // The default value is false; that is, empty values do not change
40 | // the value of the struct field.
41 | func (d *Decoder) ZeroEmpty(z bool) {
42 | d.zeroEmpty = z
43 | }
44 |
45 | // IgnoreUnknownKeys controls the behaviour when the decoder encounters unknown
46 | // keys in the map.
47 | // If i is true and an unknown field is encountered, it is ignored. This is
48 | // similar to how unknown keys are handled by encoding/json.
49 | // If i is false then Decode will return an error. Note that any valid keys
50 | // will still be decoded into the target struct.
51 | //
52 | // To preserve backwards compatibility, the default value is false.
53 | func (d *Decoder) IgnoreUnknownKeys(i bool) {
54 | d.ignoreUnknownKeys = i
55 | }
56 |
57 | // RegisterConverter registers a converter function for a custom type.
58 | func (d *Decoder) RegisterConverter(value interface{}, converterFunc Converter) {
59 | d.cache.regconv[reflect.TypeOf(value)] = converterFunc
60 | }
61 |
62 | // Decode decodes a map[string][]string to a struct.
63 | //
64 | // The first parameter must be a pointer to a struct.
65 | //
66 | // The second parameter is a map, typically url.Values from an HTTP request.
67 | // Keys are "paths" in dotted notation to the struct fields and nested structs.
68 | //
69 | // See the package documentation for a full explanation of the mechanics.
70 | func (d *Decoder) Decode(dst interface{}, src map[string][]string) error {
71 | v := reflect.ValueOf(dst)
72 | if v.Kind() != reflect.Ptr || v.Elem().Kind() != reflect.Struct {
73 | return errors.New("schema: interface must be a pointer to struct")
74 | }
75 | v = v.Elem()
76 | t := v.Type()
77 | errors := MultiError{}
78 | for path, values := range src {
79 | if parts, err := d.cache.parsePath(path, t); err == nil {
80 | if err = d.decode(v, path, parts, values); err != nil {
81 | errors[path] = err
82 | }
83 | } else if !d.ignoreUnknownKeys {
84 | errors[path] = fmt.Errorf("schema: invalid path %q", path)
85 | }
86 | }
87 | if len(errors) > 0 {
88 | return errors
89 | }
90 | return d.checkRequired(t, src, "")
91 | }
92 |
93 | // checkRequired checks whether the required fields are present and non-empty.
94 | //
95 | // It checks type t recursively if t has struct fields; prefix uses the same dotted notation as parsePath.
96 | //
97 | // src is the source map for decoding; it is used here to see whether the required fields are included in src.
98 | func (d *Decoder) checkRequired(t reflect.Type, src map[string][]string, prefix string) error {
99 | struc := d.cache.get(t)
100 | if struc == nil {
101 | 		// unexpected: cache.get should never return nil
102 | return errors.New("cache fail")
103 | }
104 |
105 | for _, f := range struc.fields {
106 | if f.typ.Kind() == reflect.Struct {
107 | err := d.checkRequired(f.typ, src, prefix+f.alias+".")
108 | if err != nil {
109 | return err
110 | }
111 | }
112 | if f.required {
113 | key := f.alias
114 | if prefix != "" {
115 | key = prefix + key
116 | }
117 | if isEmpty(f.typ, src[key]) {
118 | return fmt.Errorf("%v is empty", key)
119 | }
120 | }
121 | }
122 | return nil
123 | }
124 |
125 | // isEmpty returns true if value is empty for the given type.
126 | func isEmpty(t reflect.Type, value []string) bool {
127 | if len(value) == 0 {
128 | return true
129 | }
130 | switch t.Kind() {
131 | case boolType, float32Type, float64Type, intType, int8Type, int32Type, int64Type, stringType, uint8Type, uint16Type, uint32Type, uint64Type:
132 | return len(value[0]) == 0
133 | }
134 | return false
135 | }
136 |
137 | // decode fills a struct field using a parsed path.
138 | func (d *Decoder) decode(v reflect.Value, path string, parts []pathPart, values []string) error {
139 | // Get the field walking the struct fields by index.
140 | for _, name := range parts[0].path {
141 | if v.Type().Kind() == reflect.Ptr {
142 | if v.IsNil() {
143 | v.Set(reflect.New(v.Type().Elem()))
144 | }
145 | v = v.Elem()
146 | }
147 | v = v.FieldByName(name)
148 | }
149 |
150 | // Don't even bother for unexported fields.
151 | if !v.CanSet() {
152 | return nil
153 | }
154 |
155 | // Dereference if needed.
156 | t := v.Type()
157 | if t.Kind() == reflect.Ptr {
158 | t = t.Elem()
159 | if v.IsNil() {
160 | v.Set(reflect.New(t))
161 | }
162 | v = v.Elem()
163 | }
164 |
165 | // Slice of structs. Let's go recursive.
166 | if len(parts) > 1 {
167 | idx := parts[0].index
168 | if v.IsNil() || v.Len() < idx+1 {
169 | value := reflect.MakeSlice(t, idx+1, idx+1)
170 | if v.Len() < idx+1 {
171 | // Resize it.
172 | reflect.Copy(value, v)
173 | }
174 | v.Set(value)
175 | }
176 | return d.decode(v.Index(idx), path, parts[1:], values)
177 | }
178 |
179 | // Get the converter early in case there is one for a slice type.
180 | conv := d.cache.converter(t)
181 | if conv == nil && t.Kind() == reflect.Slice {
182 | var items []reflect.Value
183 | elemT := t.Elem()
184 | isPtrElem := elemT.Kind() == reflect.Ptr
185 | if isPtrElem {
186 | elemT = elemT.Elem()
187 | }
188 |
189 | // Try to get a converter for the element type.
190 | conv := d.cache.converter(elemT)
191 | if conv == nil {
192 | 			// As we are not dealing with a slice of structs here, we don't need to check whether the type
193 | 			// implements the TextUnmarshaler interface.
194 | return fmt.Errorf("schema: converter not found for %v", elemT)
195 | }
196 |
197 | for key, value := range values {
198 | if value == "" {
199 | if d.zeroEmpty {
200 | items = append(items, reflect.Zero(elemT))
201 | }
202 | } else if item := conv(value); item.IsValid() {
203 | if isPtrElem {
204 | ptr := reflect.New(elemT)
205 | ptr.Elem().Set(item)
206 | item = ptr
207 | }
208 | if item.Type() != elemT && !isPtrElem {
209 | item = item.Convert(elemT)
210 | }
211 | items = append(items, item)
212 | } else {
213 | if strings.Contains(value, ",") {
214 | values := strings.Split(value, ",")
215 | for _, value := range values {
216 | if value == "" {
217 | if d.zeroEmpty {
218 | items = append(items, reflect.Zero(elemT))
219 | }
220 | } else if item := conv(value); item.IsValid() {
221 | if isPtrElem {
222 | ptr := reflect.New(elemT)
223 | ptr.Elem().Set(item)
224 | item = ptr
225 | }
226 | if item.Type() != elemT && !isPtrElem {
227 | item = item.Convert(elemT)
228 | }
229 | items = append(items, item)
230 | } else {
231 | return ConversionError{
232 | Key: path,
233 | Type: elemT,
234 | Index: key,
235 | }
236 | }
237 | }
238 | } else {
239 | return ConversionError{
240 | Key: path,
241 | Type: elemT,
242 | Index: key,
243 | }
244 | }
245 | }
246 | }
247 | value := reflect.Append(reflect.MakeSlice(t, 0, 0), items...)
248 | v.Set(value)
249 | } else {
250 | val := ""
251 | // Use the last value provided if any values were provided
252 | if len(values) > 0 {
253 | val = values[len(values)-1]
254 | }
255 |
256 | if val == "" {
257 | if d.zeroEmpty {
258 | v.Set(reflect.Zero(t))
259 | }
260 | } else if conv != nil {
261 | if value := conv(val); value.IsValid() {
262 | v.Set(value.Convert(t))
263 | } else {
264 | return ConversionError{
265 | Key: path,
266 | Type: t,
267 | Index: -1,
268 | }
269 | }
270 | } else {
271 | 			// When there's no registered conversion for the custom type, we check whether the type
272 | 			// implements the TextUnmarshaler interface. As UnmarshalText must be called on a
273 | 			// pointer to the value, we take its address first.
274 | if v.CanAddr() {
275 | v = v.Addr()
276 | }
277 |
278 | if u, ok := v.Interface().(encoding.TextUnmarshaler); ok {
279 | if err := u.UnmarshalText([]byte(val)); err != nil {
280 | return ConversionError{
281 | Key: path,
282 | Type: t,
283 | Index: -1,
284 | Err: err,
285 | }
286 | }
287 |
288 | } else {
289 | return fmt.Errorf("schema: converter not found for %v", t)
290 | }
291 | }
292 | }
293 | return nil
294 | }
295 |
296 | // Errors ---------------------------------------------------------------------
297 |
298 | // ConversionError stores information about a failed conversion.
299 | type ConversionError struct {
300 | Key string // key from the source map.
301 | Type reflect.Type // expected type of elem
302 | Index int // index for multi-value fields; -1 for single-value fields.
303 | Err error // low-level error (when it exists)
304 | }
305 |
306 | func (e ConversionError) Error() string {
307 | var output string
308 |
309 | if e.Index < 0 {
310 | output = fmt.Sprintf("schema: error converting value for %q", e.Key)
311 | } else {
312 | output = fmt.Sprintf("schema: error converting value for index %d of %q",
313 | e.Index, e.Key)
314 | }
315 |
316 | if e.Err != nil {
317 | output = fmt.Sprintf("%s. Details: %s", output, e.Err)
318 | }
319 |
320 | return output
321 | }
322 |
323 | // MultiError stores multiple decoding errors.
324 | //
325 | // Borrowed from the App Engine SDK.
326 | type MultiError map[string]error
327 |
328 | func (e MultiError) Error() string {
329 | s := ""
330 | for _, err := range e {
331 | s = err.Error()
332 | break
333 | }
334 | switch len(e) {
335 | case 0:
336 | return "(0 errors)"
337 | case 1:
338 | return s
339 | case 2:
340 | return s + " (and 1 other error)"
341 | }
342 | return fmt.Sprintf("%s (and %d other errors)", s, len(e)-1)
343 | }
344 |
--------------------------------------------------------------------------------
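A minimal sketch of the decoder behaviour documented above: url.Values is already a map[string][]string, dotted keys walk into nested structs, repeated keys fill slices, and IgnoreUnknownKeys(true) skips keys that match no field. The SignupForm/Address structs and the form values are hypothetical.

package main

import (
	"fmt"
	"net/url"

	"github.com/gorilla/schema"
)

// Address and SignupForm are made-up types used only to show the dotted path
// notation ("address.city") and multi-value slice keys that Decode handles.
type Address struct {
	City string `schema:"city"`
}

type SignupForm struct {
	Name    string   `schema:"name"`
	Tags    []string `schema:"tags"`
	Address Address  `schema:"address"`
}

func main() {
	d := schema.NewDecoder()
	d.IgnoreUnknownKeys(true) // unknown form keys are skipped instead of failing

	form := url.Values{
		"name":         {"Ada"},
		"tags":         {"go", "yaml"},
		"address.city": {"Ghent"},
		"csrf_token":   {"ignored thanks to IgnoreUnknownKeys"},
	}

	var f SignupForm
	if err := d.Decode(&f, form); err != nil {
		fmt.Println("decode error:", err)
		return
	}
	fmt.Printf("%+v\n", f)
}
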
/mattertee/vendor/gopkg.in/yaml.v2/yaml.go:
--------------------------------------------------------------------------------
1 | // Package yaml implements YAML support for the Go language.
2 | //
3 | // Source code and other details for the project are available at GitHub:
4 | //
5 | // https://github.com/go-yaml/yaml
6 | //
7 | package yaml
8 |
9 | import (
10 | "errors"
11 | "fmt"
12 | "reflect"
13 | "strings"
14 | "sync"
15 | )
16 |
17 | // MapSlice encodes and decodes as a YAML map.
18 | // The order of keys is preserved when encoding and decoding.
19 | type MapSlice []MapItem
20 |
21 | // MapItem is an item in a MapSlice.
22 | type MapItem struct {
23 | Key, Value interface{}
24 | }
25 |
26 | // The Unmarshaler interface may be implemented by types to customize their
27 | // behavior when being unmarshaled from a YAML document. The UnmarshalYAML
28 | // method receives a function that may be called to unmarshal the original
29 | // YAML value into a field or variable. It is safe to call the unmarshal
30 | // function parameter more than once if necessary.
31 | type Unmarshaler interface {
32 | UnmarshalYAML(unmarshal func(interface{}) error) error
33 | }
34 |
35 | // The Marshaler interface may be implemented by types to customize their
36 | // behavior when being marshaled into a YAML document. The returned value
37 | // is marshaled in place of the original value implementing Marshaler.
38 | //
39 | // If an error is returned by MarshalYAML, the marshaling procedure stops
40 | // and returns with the provided error.
41 | type Marshaler interface {
42 | MarshalYAML() (interface{}, error)
43 | }
44 |
45 | // Unmarshal decodes the first document found within the in byte slice
46 | // and assigns decoded values into the out value.
47 | //
48 | // Maps and pointers (to a struct, string, int, etc) are accepted as out
49 | // values. If an internal pointer within a struct is not initialized,
50 | // the yaml package will initialize it if necessary for unmarshalling
51 | // the provided data. The out parameter must not be nil.
52 | //
53 | // The type of the decoded values should be compatible with the respective
54 | // values in out. If one or more values cannot be decoded due to type
55 | // mismatches, decoding continues partially until the end of the YAML
56 | // content, and a *yaml.TypeError is returned with details for all
57 | // missed values.
58 | //
59 | // Struct fields are only unmarshalled if they are exported (have an
60 | // upper case first letter), and are unmarshalled using the field name
61 | // lowercased as the default key. Custom keys may be defined via the
62 | // "yaml" name in the field tag: the content preceding the first comma
63 | // is used as the key, and the following comma-separated options are
64 | // used to tweak the marshalling process (see Marshal).
65 | // Conflicting names result in a runtime error.
66 | //
67 | // For example:
68 | //
69 | // type T struct {
70 | // F int `yaml:"a,omitempty"`
71 | // B int
72 | // }
73 | // var t T
74 | // yaml.Unmarshal([]byte("a: 1\nb: 2"), &t)
75 | //
76 | // See the documentation of Marshal for the format of tags and a list of
77 | // supported tag options.
78 | //
79 | func Unmarshal(in []byte, out interface{}) (err error) {
80 | defer handleErr(&err)
81 | d := newDecoder()
82 | p := newParser(in)
83 | defer p.destroy()
84 | node := p.parse()
85 | if node != nil {
86 | v := reflect.ValueOf(out)
87 | if v.Kind() == reflect.Ptr && !v.IsNil() {
88 | v = v.Elem()
89 | }
90 | d.unmarshal(node, v)
91 | }
92 | if len(d.terrors) > 0 {
93 | return &TypeError{d.terrors}
94 | }
95 | return nil
96 | }
97 |
98 | // Marshal serializes the value provided into a YAML document. The structure
99 | // of the generated document will reflect the structure of the value itself.
100 | // Maps and pointers (to struct, string, int, etc) are accepted as the in value.
101 | //
102 | // Struct fields are only marshalled if they are exported (have an upper case
103 | // first letter), and are marshalled using the field name lowercased as the
104 | // default key. Custom keys may be defined via the "yaml" name in the field
105 | // tag: the content preceding the first comma is used as the key, and the
106 | // following comma-separated options are used to tweak the marshalling process.
107 | // Conflicting names result in a runtime error.
108 | //
109 | // The field tag format accepted is:
110 | //
111 | //     `(...) yaml:"[<key>][,<flag1>[,<flag2>]]" (...)`
112 | //
113 | // The following flags are currently supported:
114 | //
115 | // omitempty Only include the field if it's not set to the zero
116 | // value for the type or to empty slices or maps.
117 | // Does not apply to zero valued structs.
118 | //
119 | // flow Marshal using a flow style (useful for structs,
120 | // sequences and maps).
121 | //
122 | // inline Inline the field, which must be a struct or a map,
123 | // causing all of its fields or keys to be processed as if
124 | // they were part of the outer struct. For maps, keys must
125 | // not conflict with the yaml keys of other struct fields.
126 | //
127 | // In addition, if the key is "-", the field is ignored.
128 | //
129 | // For example:
130 | //
131 | // type T struct {
132 | // F int "a,omitempty"
133 | // B int
134 | // }
135 | // yaml.Marshal(&T{B: 2}) // Returns "b: 2\n"
136 | //     yaml.Marshal(&T{F: 1}) // Returns "a: 1\nb: 0\n"
137 | //
138 | func Marshal(in interface{}) (out []byte, err error) {
139 | defer handleErr(&err)
140 | e := newEncoder()
141 | defer e.destroy()
142 | e.marshal("", reflect.ValueOf(in))
143 | e.finish()
144 | out = e.out
145 | return
146 | }
147 |
148 | func handleErr(err *error) {
149 | if v := recover(); v != nil {
150 | if e, ok := v.(yamlError); ok {
151 | *err = e.err
152 | } else {
153 | panic(v)
154 | }
155 | }
156 | }
157 |
158 | type yamlError struct {
159 | err error
160 | }
161 |
162 | func fail(err error) {
163 | panic(yamlError{err})
164 | }
165 |
166 | func failf(format string, args ...interface{}) {
167 | panic(yamlError{fmt.Errorf("yaml: "+format, args...)})
168 | }
169 |
170 | // A TypeError is returned by Unmarshal when one or more fields in
171 | // the YAML document cannot be properly decoded into the requested
172 | // types. When this error is returned, the value is still
173 | // unmarshaled partially.
174 | type TypeError struct {
175 | Errors []string
176 | }
177 |
178 | func (e *TypeError) Error() string {
179 | return fmt.Sprintf("yaml: unmarshal errors:\n %s", strings.Join(e.Errors, "\n "))
180 | }
181 |
182 | // --------------------------------------------------------------------------
183 | // Maintain a mapping of keys to structure field indexes
184 |
185 | // The code in this section was copied from mgo/bson.
186 |
187 | // structInfo holds details for the serialization of fields of
188 | // a given struct.
189 | type structInfo struct {
190 | FieldsMap map[string]fieldInfo
191 | FieldsList []fieldInfo
192 |
193 | // InlineMap is the number of the field in the struct that
194 | // contains an ,inline map, or -1 if there's none.
195 | InlineMap int
196 | }
197 |
198 | type fieldInfo struct {
199 | Key string
200 | Num int
201 | OmitEmpty bool
202 | Flow bool
203 |
204 | // Inline holds the field index if the field is part of an inlined struct.
205 | Inline []int
206 | }
207 |
208 | var structMap = make(map[reflect.Type]*structInfo)
209 | var fieldMapMutex sync.RWMutex
210 |
211 | func getStructInfo(st reflect.Type) (*structInfo, error) {
212 | fieldMapMutex.RLock()
213 | sinfo, found := structMap[st]
214 | fieldMapMutex.RUnlock()
215 | if found {
216 | return sinfo, nil
217 | }
218 |
219 | n := st.NumField()
220 | fieldsMap := make(map[string]fieldInfo)
221 | fieldsList := make([]fieldInfo, 0, n)
222 | inlineMap := -1
223 | for i := 0; i != n; i++ {
224 | field := st.Field(i)
225 | if field.PkgPath != "" && !field.Anonymous {
226 | continue // Private field
227 | }
228 |
229 | info := fieldInfo{Num: i}
230 |
231 | tag := field.Tag.Get("yaml")
232 | if tag == "" && strings.Index(string(field.Tag), ":") < 0 {
233 | tag = string(field.Tag)
234 | }
235 | if tag == "-" {
236 | continue
237 | }
238 |
239 | inline := false
240 | fields := strings.Split(tag, ",")
241 | if len(fields) > 1 {
242 | for _, flag := range fields[1:] {
243 | switch flag {
244 | case "omitempty":
245 | info.OmitEmpty = true
246 | case "flow":
247 | info.Flow = true
248 | case "inline":
249 | inline = true
250 | default:
251 | return nil, errors.New(fmt.Sprintf("Unsupported flag %q in tag %q of type %s", flag, tag, st))
252 | }
253 | }
254 | tag = fields[0]
255 | }
256 |
257 | if inline {
258 | switch field.Type.Kind() {
259 | case reflect.Map:
260 | if inlineMap >= 0 {
261 | return nil, errors.New("Multiple ,inline maps in struct " + st.String())
262 | }
263 | if field.Type.Key() != reflect.TypeOf("") {
264 | return nil, errors.New("Option ,inline needs a map with string keys in struct " + st.String())
265 | }
266 | inlineMap = info.Num
267 | case reflect.Struct:
268 | sinfo, err := getStructInfo(field.Type)
269 | if err != nil {
270 | return nil, err
271 | }
272 | for _, finfo := range sinfo.FieldsList {
273 | if _, found := fieldsMap[finfo.Key]; found {
274 | msg := "Duplicated key '" + finfo.Key + "' in struct " + st.String()
275 | return nil, errors.New(msg)
276 | }
277 | if finfo.Inline == nil {
278 | finfo.Inline = []int{i, finfo.Num}
279 | } else {
280 | finfo.Inline = append([]int{i}, finfo.Inline...)
281 | }
282 | fieldsMap[finfo.Key] = finfo
283 | fieldsList = append(fieldsList, finfo)
284 | }
285 | default:
286 | //return nil, errors.New("Option ,inline needs a struct value or map field")
287 | return nil, errors.New("Option ,inline needs a struct value field")
288 | }
289 | continue
290 | }
291 |
292 | if tag != "" {
293 | info.Key = tag
294 | } else {
295 | info.Key = strings.ToLower(field.Name)
296 | }
297 |
298 | if _, found = fieldsMap[info.Key]; found {
299 | msg := "Duplicated key '" + info.Key + "' in struct " + st.String()
300 | return nil, errors.New(msg)
301 | }
302 |
303 | fieldsList = append(fieldsList, info)
304 | fieldsMap[info.Key] = info
305 | }
306 |
307 | sinfo = &structInfo{fieldsMap, fieldsList, inlineMap}
308 |
309 | fieldMapMutex.Lock()
310 | structMap[st] = sinfo
311 | fieldMapMutex.Unlock()
312 | return sinfo, nil
313 | }
314 |
315 | func isZero(v reflect.Value) bool {
316 | switch v.Kind() {
317 | case reflect.String:
318 | return len(v.String()) == 0
319 | case reflect.Interface, reflect.Ptr:
320 | return v.IsNil()
321 | case reflect.Slice:
322 | return v.Len() == 0
323 | case reflect.Map:
324 | return v.Len() == 0
325 | case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
326 | return v.Int() == 0
327 | case reflect.Float32, reflect.Float64:
328 | return v.Float() == 0
329 | case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
330 | return v.Uint() == 0
331 | case reflect.Bool:
332 | return !v.Bool()
333 | case reflect.Struct:
334 | vt := v.Type()
335 | for i := v.NumField() - 1; i >= 0; i-- {
336 | if vt.Field(i).PkgPath != "" {
337 | continue // Private field
338 | }
339 | if !isZero(v.Field(i)) {
340 | return false
341 | }
342 | }
343 | return true
344 | }
345 | return false
346 | }
347 |
--------------------------------------------------------------------------------
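A runnable version of the Marshal/Unmarshal examples from the doc comments above, assuming only behaviour those comments describe (custom key via the tag name, omitempty, and "-" to skip a field). The struct is hypothetical.

package main

import (
	"fmt"

	"gopkg.in/yaml.v2"
)

// T follows the tag conventions documented above: "a" renames the key,
// omitempty drops zero values, and "-" excludes a field entirely.
type T struct {
	F int    `yaml:"a,omitempty"`
	B int    `yaml:"b"`
	S string `yaml:"-"`
}

func main() {
	out, err := yaml.Marshal(&T{B: 2, S: "never emitted"})
	if err != nil {
		panic(err)
	}
	fmt.Print(string(out)) // expected: "b: 2\n" (F is zero and omitted, S is skipped)

	var t T
	if err := yaml.Unmarshal([]byte("a: 1\nb: 2"), &t); err != nil {
		panic(err)
	}
	fmt.Printf("%+v\n", t) // expected: {F:1 B:2 S:}
}
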
/LICENSE:
--------------------------------------------------------------------------------
1 | Apache License
2 | Version 2.0, January 2004
3 | http://www.apache.org/licenses/
4 |
5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
6 |
7 | 1. Definitions.
8 |
9 | "License" shall mean the terms and conditions for use, reproduction,
10 | and distribution as defined by Sections 1 through 9 of this document.
11 |
12 | "Licensor" shall mean the copyright owner or entity authorized by
13 | the copyright owner that is granting the License.
14 |
15 | "Legal Entity" shall mean the union of the acting entity and all
16 | other entities that control, are controlled by, or are under common
17 | control with that entity. For the purposes of this definition,
18 | "control" means (i) the power, direct or indirect, to cause the
19 | direction or management of such entity, whether by contract or
20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the
21 | outstanding shares, or (iii) beneficial ownership of such entity.
22 |
23 | "You" (or "Your") shall mean an individual or Legal Entity
24 | exercising permissions granted by this License.
25 |
26 | "Source" form shall mean the preferred form for making modifications,
27 | including but not limited to software source code, documentation
28 | source, and configuration files.
29 |
30 | "Object" form shall mean any form resulting from mechanical
31 | transformation or translation of a Source form, including but
32 | not limited to compiled object code, generated documentation,
33 | and conversions to other media types.
34 |
35 | "Work" shall mean the work of authorship, whether in Source or
36 | Object form, made available under the License, as indicated by a
37 | copyright notice that is included in or attached to the work
38 | (an example is provided in the Appendix below).
39 |
40 | "Derivative Works" shall mean any work, whether in Source or Object
41 | form, that is based on (or derived from) the Work and for which the
42 | editorial revisions, annotations, elaborations, or other modifications
43 | represent, as a whole, an original work of authorship. For the purposes
44 | of this License, Derivative Works shall not include works that remain
45 | separable from, or merely link (or bind by name) to the interfaces of,
46 | the Work and Derivative Works thereof.
47 |
48 | "Contribution" shall mean any work of authorship, including
49 | the original version of the Work and any modifications or additions
50 | to that Work or Derivative Works thereof, that is intentionally
51 | submitted to Licensor for inclusion in the Work by the copyright owner
52 | or by an individual or Legal Entity authorized to submit on behalf of
53 | the copyright owner. For the purposes of this definition, "submitted"
54 | means any form of electronic, verbal, or written communication sent
55 | to the Licensor or its representatives, including but not limited to
56 | communication on electronic mailing lists, source code control systems,
57 | and issue tracking systems that are managed by, or on behalf of, the
58 | Licensor for the purpose of discussing and improving the Work, but
59 | excluding communication that is conspicuously marked or otherwise
60 | designated in writing by the copyright owner as "Not a Contribution."
61 |
62 | "Contributor" shall mean Licensor and any individual or Legal Entity
63 | on behalf of whom a Contribution has been received by Licensor and
64 | subsequently incorporated within the Work.
65 |
66 | 2. Grant of Copyright License. Subject to the terms and conditions of
67 | this License, each Contributor hereby grants to You a perpetual,
68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable
69 | copyright license to reproduce, prepare Derivative Works of,
70 | publicly display, publicly perform, sublicense, and distribute the
71 | Work and such Derivative Works in Source or Object form.
72 |
73 | 3. Grant of Patent License. Subject to the terms and conditions of
74 | this License, each Contributor hereby grants to You a perpetual,
75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable
76 | (except as stated in this section) patent license to make, have made,
77 | use, offer to sell, sell, import, and otherwise transfer the Work,
78 | where such license applies only to those patent claims licensable
79 | by such Contributor that are necessarily infringed by their
80 | Contribution(s) alone or by combination of their Contribution(s)
81 | with the Work to which such Contribution(s) was submitted. If You
82 | institute patent litigation against any entity (including a
83 | cross-claim or counterclaim in a lawsuit) alleging that the Work
84 | or a Contribution incorporated within the Work constitutes direct
85 | or contributory patent infringement, then any patent licenses
86 | granted to You under this License for that Work shall terminate
87 | as of the date such litigation is filed.
88 |
89 | 4. Redistribution. You may reproduce and distribute copies of the
90 | Work or Derivative Works thereof in any medium, with or without
91 | modifications, and in Source or Object form, provided that You
92 | meet the following conditions:
93 |
94 | (a) You must give any other recipients of the Work or
95 | Derivative Works a copy of this License; and
96 |
97 | (b) You must cause any modified files to carry prominent notices
98 | stating that You changed the files; and
99 |
100 | (c) You must retain, in the Source form of any Derivative Works
101 | that You distribute, all copyright, patent, trademark, and
102 | attribution notices from the Source form of the Work,
103 | excluding those notices that do not pertain to any part of
104 | the Derivative Works; and
105 |
106 | (d) If the Work includes a "NOTICE" text file as part of its
107 | distribution, then any Derivative Works that You distribute must
108 | include a readable copy of the attribution notices contained
109 | within such NOTICE file, excluding those notices that do not
110 | pertain to any part of the Derivative Works, in at least one
111 | of the following places: within a NOTICE text file distributed
112 | as part of the Derivative Works; within the Source form or
113 | documentation, if provided along with the Derivative Works; or,
114 | within a display generated by the Derivative Works, if and
115 | wherever such third-party notices normally appear. The contents
116 | of the NOTICE file are for informational purposes only and
117 | do not modify the License. You may add Your own attribution
118 | notices within Derivative Works that You distribute, alongside
119 | or as an addendum to the NOTICE text from the Work, provided
120 | that such additional attribution notices cannot be construed
121 | as modifying the License.
122 |
123 | You may add Your own copyright statement to Your modifications and
124 | may provide additional or different license terms and conditions
125 | for use, reproduction, or distribution of Your modifications, or
126 | for any such Derivative Works as a whole, provided Your use,
127 | reproduction, and distribution of the Work otherwise complies with
128 | the conditions stated in this License.
129 |
130 | 5. Submission of Contributions. Unless You explicitly state otherwise,
131 | any Contribution intentionally submitted for inclusion in the Work
132 | by You to the Licensor shall be under the terms and conditions of
133 | this License, without any additional terms or conditions.
134 | Notwithstanding the above, nothing herein shall supersede or modify
135 | the terms of any separate license agreement you may have executed
136 | with Licensor regarding such Contributions.
137 |
138 | 6. Trademarks. This License does not grant permission to use the trade
139 | names, trademarks, service marks, or product names of the Licensor,
140 | except as required for reasonable and customary use in describing the
141 | origin of the Work and reproducing the content of the NOTICE file.
142 |
143 | 7. Disclaimer of Warranty. Unless required by applicable law or
144 | agreed to in writing, Licensor provides the Work (and each
145 | Contributor provides its Contributions) on an "AS IS" BASIS,
146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
147 | implied, including, without limitation, any warranties or conditions
148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
149 | PARTICULAR PURPOSE. You are solely responsible for determining the
150 | appropriateness of using or redistributing the Work and assume any
151 | risks associated with Your exercise of permissions under this License.
152 |
153 | 8. Limitation of Liability. In no event and under no legal theory,
154 | whether in tort (including negligence), contract, or otherwise,
155 | unless required by applicable law (such as deliberate and grossly
156 | negligent acts) or agreed to in writing, shall any Contributor be
157 | liable to You for damages, including any direct, indirect, special,
158 | incidental, or consequential damages of any character arising as a
159 | result of this License or out of the use or inability to use the
160 | Work (including but not limited to damages for loss of goodwill,
161 | work stoppage, computer failure or malfunction, or any and all
162 | other commercial damages or losses), even if such Contributor
163 | has been advised of the possibility of such damages.
164 |
165 | 9. Accepting Warranty or Additional Liability. While redistributing
166 | the Work or Derivative Works thereof, You may choose to offer,
167 | and charge a fee for, acceptance of support, warranty, indemnity,
168 | or other liability obligations and/or rights consistent with this
169 | License. However, in accepting such obligations, You may act only
170 | on Your own behalf and on Your sole responsibility, not on behalf
171 | of any other Contributor, and only if You agree to indemnify,
172 | defend, and hold each Contributor harmless for any liability
173 | incurred by, or claims asserted against, such Contributor by reason
174 | of your accepting any such warranty or additional liability.
175 |
176 | END OF TERMS AND CONDITIONS
177 |
178 | APPENDIX: How to apply the Apache License to your work.
179 |
180 | To apply the Apache License to your work, attach the following
181 | boilerplate notice, with the fields enclosed by brackets "{}"
182 | replaced with your own identifying information. (Don't include
183 | the brackets!) The text should be enclosed in the appropriate
184 | comment syntax for the file format. We also recommend that a
185 | file or class name and description of purpose be included on the
186 | same "printed page" as the copyright notice for easier
187 | identification within third-party archives.
188 |
189 | Copyright {yyyy} {name of copyright owner}
190 |
191 | Licensed under the Apache License, Version 2.0 (the "License");
192 | you may not use this file except in compliance with the License.
193 | You may obtain a copy of the License at
194 |
195 | http://www.apache.org/licenses/LICENSE-2.0
196 |
197 | Unless required by applicable law or agreed to in writing, software
198 | distributed under the License is distributed on an "AS IS" BASIS,
199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
200 | See the License for the specific language governing permissions and
201 | limitations under the License.
202 |
203 |
--------------------------------------------------------------------------------
/mattertee/vendor/github.com/42wim/matterbridge/matterhook/LICENSE:
--------------------------------------------------------------------------------
1 | Apache License
2 | Version 2.0, January 2004
3 | http://www.apache.org/licenses/
4 |
5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
6 |
7 | 1. Definitions.
8 |
9 | "License" shall mean the terms and conditions for use, reproduction,
10 | and distribution as defined by Sections 1 through 9 of this document.
11 |
12 | "Licensor" shall mean the copyright owner or entity authorized by
13 | the copyright owner that is granting the License.
14 |
15 | "Legal Entity" shall mean the union of the acting entity and all
16 | other entities that control, are controlled by, or are under common
17 | control with that entity. For the purposes of this definition,
18 | "control" means (i) the power, direct or indirect, to cause the
19 | direction or management of such entity, whether by contract or
20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the
21 | outstanding shares, or (iii) beneficial ownership of such entity.
22 |
23 | "You" (or "Your") shall mean an individual or Legal Entity
24 | exercising permissions granted by this License.
25 |
26 | "Source" form shall mean the preferred form for making modifications,
27 | including but not limited to software source code, documentation
28 | source, and configuration files.
29 |
30 | "Object" form shall mean any form resulting from mechanical
31 | transformation or translation of a Source form, including but
32 | not limited to compiled object code, generated documentation,
33 | and conversions to other media types.
34 |
35 | "Work" shall mean the work of authorship, whether in Source or
36 | Object form, made available under the License, as indicated by a
37 | copyright notice that is included in or attached to the work
38 | (an example is provided in the Appendix below).
39 |
40 | "Derivative Works" shall mean any work, whether in Source or Object
41 | form, that is based on (or derived from) the Work and for which the
42 | editorial revisions, annotations, elaborations, or other modifications
43 | represent, as a whole, an original work of authorship. For the purposes
44 | of this License, Derivative Works shall not include works that remain
45 | separable from, or merely link (or bind by name) to the interfaces of,
46 | the Work and Derivative Works thereof.
47 |
48 | "Contribution" shall mean any work of authorship, including
49 | the original version of the Work and any modifications or additions
50 | to that Work or Derivative Works thereof, that is intentionally
51 | submitted to Licensor for inclusion in the Work by the copyright owner
52 | or by an individual or Legal Entity authorized to submit on behalf of
53 | the copyright owner. For the purposes of this definition, "submitted"
54 | means any form of electronic, verbal, or written communication sent
55 | to the Licensor or its representatives, including but not limited to
56 | communication on electronic mailing lists, source code control systems,
57 | and issue tracking systems that are managed by, or on behalf of, the
58 | Licensor for the purpose of discussing and improving the Work, but
59 | excluding communication that is conspicuously marked or otherwise
60 | designated in writing by the copyright owner as "Not a Contribution."
61 |
62 | "Contributor" shall mean Licensor and any individual or Legal Entity
63 | on behalf of whom a Contribution has been received by Licensor and
64 | subsequently incorporated within the Work.
65 |
66 | 2. Grant of Copyright License. Subject to the terms and conditions of
67 | this License, each Contributor hereby grants to You a perpetual,
68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable
69 | copyright license to reproduce, prepare Derivative Works of,
70 | publicly display, publicly perform, sublicense, and distribute the
71 | Work and such Derivative Works in Source or Object form.
72 |
73 | 3. Grant of Patent License. Subject to the terms and conditions of
74 | this License, each Contributor hereby grants to You a perpetual,
75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable
76 | (except as stated in this section) patent license to make, have made,
77 | use, offer to sell, sell, import, and otherwise transfer the Work,
78 | where such license applies only to those patent claims licensable
79 | by such Contributor that are necessarily infringed by their
80 | Contribution(s) alone or by combination of their Contribution(s)
81 | with the Work to which such Contribution(s) was submitted. If You
82 | institute patent litigation against any entity (including a
83 | cross-claim or counterclaim in a lawsuit) alleging that the Work
84 | or a Contribution incorporated within the Work constitutes direct
85 | or contributory patent infringement, then any patent licenses
86 | granted to You under this License for that Work shall terminate
87 | as of the date such litigation is filed.
88 |
89 | 4. Redistribution. You may reproduce and distribute copies of the
90 | Work or Derivative Works thereof in any medium, with or without
91 | modifications, and in Source or Object form, provided that You
92 | meet the following conditions:
93 |
94 | (a) You must give any other recipients of the Work or
95 | Derivative Works a copy of this License; and
96 |
97 | (b) You must cause any modified files to carry prominent notices
98 | stating that You changed the files; and
99 |
100 | (c) You must retain, in the Source form of any Derivative Works
101 | that You distribute, all copyright, patent, trademark, and
102 | attribution notices from the Source form of the Work,
103 | excluding those notices that do not pertain to any part of
104 | the Derivative Works; and
105 |
106 | (d) If the Work includes a "NOTICE" text file as part of its
107 | distribution, then any Derivative Works that You distribute must
108 | include a readable copy of the attribution notices contained
109 | within such NOTICE file, excluding those notices that do not
110 | pertain to any part of the Derivative Works, in at least one
111 | of the following places: within a NOTICE text file distributed
112 | as part of the Derivative Works; within the Source form or
113 | documentation, if provided along with the Derivative Works; or,
114 | within a display generated by the Derivative Works, if and
115 | wherever such third-party notices normally appear. The contents
116 | of the NOTICE file are for informational purposes only and
117 | do not modify the License. You may add Your own attribution
118 | notices within Derivative Works that You distribute, alongside
119 | or as an addendum to the NOTICE text from the Work, provided
120 | that such additional attribution notices cannot be construed
121 | as modifying the License.
122 |
123 | You may add Your own copyright statement to Your modifications and
124 | may provide additional or different license terms and conditions
125 | for use, reproduction, or distribution of Your modifications, or
126 | for any such Derivative Works as a whole, provided Your use,
127 | reproduction, and distribution of the Work otherwise complies with
128 | the conditions stated in this License.
129 |
130 | 5. Submission of Contributions. Unless You explicitly state otherwise,
131 | any Contribution intentionally submitted for inclusion in the Work
132 | by You to the Licensor shall be under the terms and conditions of
133 | this License, without any additional terms or conditions.
134 | Notwithstanding the above, nothing herein shall supersede or modify
135 | the terms of any separate license agreement you may have executed
136 | with Licensor regarding such Contributions.
137 |
138 | 6. Trademarks. This License does not grant permission to use the trade
139 | names, trademarks, service marks, or product names of the Licensor,
140 | except as required for reasonable and customary use in describing the
141 | origin of the Work and reproducing the content of the NOTICE file.
142 |
143 | 7. Disclaimer of Warranty. Unless required by applicable law or
144 | agreed to in writing, Licensor provides the Work (and each
145 | Contributor provides its Contributions) on an "AS IS" BASIS,
146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
147 | implied, including, without limitation, any warranties or conditions
148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
149 | PARTICULAR PURPOSE. You are solely responsible for determining the
150 | appropriateness of using or redistributing the Work and assume any
151 | risks associated with Your exercise of permissions under this License.
152 |
153 | 8. Limitation of Liability. In no event and under no legal theory,
154 | whether in tort (including negligence), contract, or otherwise,
155 | unless required by applicable law (such as deliberate and grossly
156 | negligent acts) or agreed to in writing, shall any Contributor be
157 | liable to You for damages, including any direct, indirect, special,
158 | incidental, or consequential damages of any character arising as a
159 | result of this License or out of the use or inability to use the
160 | Work (including but not limited to damages for loss of goodwill,
161 | work stoppage, computer failure or malfunction, or any and all
162 | other commercial damages or losses), even if such Contributor
163 | has been advised of the possibility of such damages.
164 |
165 | 9. Accepting Warranty or Additional Liability. While redistributing
166 | the Work or Derivative Works thereof, You may choose to offer,
167 | and charge a fee for, acceptance of support, warranty, indemnity,
168 | or other liability obligations and/or rights consistent with this
169 | License. However, in accepting such obligations, You may act only
170 | on Your own behalf and on Your sole responsibility, not on behalf
171 | of any other Contributor, and only if You agree to indemnify,
172 | defend, and hold each Contributor harmless for any liability
173 | incurred by, or claims asserted against, such Contributor by reason
174 | of your accepting any such warranty or additional liability.
175 |
176 | END OF TERMS AND CONDITIONS
177 |
178 | APPENDIX: How to apply the Apache License to your work.
179 |
180 | To apply the Apache License to your work, attach the following
181 | boilerplate notice, with the fields enclosed by brackets "{}"
182 | replaced with your own identifying information. (Don't include
183 | the brackets!) The text should be enclosed in the appropriate
184 | comment syntax for the file format. We also recommend that a
185 | file or class name and description of purpose be included on the
186 | same "printed page" as the copyright notice for easier
187 | identification within third-party archives.
188 |
189 | Copyright {yyyy} {name of copyright owner}
190 |
191 | Licensed under the Apache License, Version 2.0 (the "License");
192 | you may not use this file except in compliance with the License.
193 | You may obtain a copy of the License at
194 |
195 | http://www.apache.org/licenses/LICENSE-2.0
196 |
197 | Unless required by applicable law or agreed to in writing, software
198 | distributed under the License is distributed on an "AS IS" BASIS,
199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
200 | See the License for the specific language governing permissions and
201 | limitations under the License.
202 |
203 |
--------------------------------------------------------------------------------
/mattertee/vendor/gopkg.in/yaml.v2/readerc.go:
--------------------------------------------------------------------------------
1 | package yaml
2 |
3 | import (
4 | "io"
5 | )
6 |
7 | // Set the reader error and return false.
8 | func yaml_parser_set_reader_error(parser *yaml_parser_t, problem string, offset int, value int) bool {
9 | parser.error = yaml_READER_ERROR
10 | parser.problem = problem
11 | parser.problem_offset = offset
12 | parser.problem_value = value
13 | return false
14 | }
15 |
16 | // Byte order marks.
17 | const (
18 | bom_UTF8 = "\xef\xbb\xbf"
19 | bom_UTF16LE = "\xff\xfe"
20 | bom_UTF16BE = "\xfe\xff"
21 | )
22 |
23 | // Determine the input stream encoding by checking the BOM symbol. If no BOM is
24 | // found, the UTF-8 encoding is assumed. Return true on success, false on failure.
25 | func yaml_parser_determine_encoding(parser *yaml_parser_t) bool {
26 | // Ensure that we had enough bytes in the raw buffer.
27 | for !parser.eof && len(parser.raw_buffer)-parser.raw_buffer_pos < 3 {
28 | if !yaml_parser_update_raw_buffer(parser) {
29 | return false
30 | }
31 | }
32 |
33 | // Determine the encoding.
34 | buf := parser.raw_buffer
35 | pos := parser.raw_buffer_pos
36 | avail := len(buf) - pos
37 | if avail >= 2 && buf[pos] == bom_UTF16LE[0] && buf[pos+1] == bom_UTF16LE[1] {
38 | parser.encoding = yaml_UTF16LE_ENCODING
39 | parser.raw_buffer_pos += 2
40 | parser.offset += 2
41 | } else if avail >= 2 && buf[pos] == bom_UTF16BE[0] && buf[pos+1] == bom_UTF16BE[1] {
42 | parser.encoding = yaml_UTF16BE_ENCODING
43 | parser.raw_buffer_pos += 2
44 | parser.offset += 2
45 | } else if avail >= 3 && buf[pos] == bom_UTF8[0] && buf[pos+1] == bom_UTF8[1] && buf[pos+2] == bom_UTF8[2] {
46 | parser.encoding = yaml_UTF8_ENCODING
47 | parser.raw_buffer_pos += 3
48 | parser.offset += 3
49 | } else {
50 | parser.encoding = yaml_UTF8_ENCODING
51 | }
52 | return true
53 | }
54 |
55 | // Update the raw buffer.
56 | func yaml_parser_update_raw_buffer(parser *yaml_parser_t) bool {
57 | size_read := 0
58 |
59 | // Return if the raw buffer is full.
60 | if parser.raw_buffer_pos == 0 && len(parser.raw_buffer) == cap(parser.raw_buffer) {
61 | return true
62 | }
63 |
64 | // Return on EOF.
65 | if parser.eof {
66 | return true
67 | }
68 |
69 | // Move the remaining bytes in the raw buffer to the beginning.
70 | if parser.raw_buffer_pos > 0 && parser.raw_buffer_pos < len(parser.raw_buffer) {
71 | copy(parser.raw_buffer, parser.raw_buffer[parser.raw_buffer_pos:])
72 | }
73 | parser.raw_buffer = parser.raw_buffer[:len(parser.raw_buffer)-parser.raw_buffer_pos]
74 | parser.raw_buffer_pos = 0
75 |
76 | // Call the read handler to fill the buffer.
77 | size_read, err := parser.read_handler(parser, parser.raw_buffer[len(parser.raw_buffer):cap(parser.raw_buffer)])
78 | parser.raw_buffer = parser.raw_buffer[:len(parser.raw_buffer)+size_read]
79 | if err == io.EOF {
80 | parser.eof = true
81 | } else if err != nil {
82 | return yaml_parser_set_reader_error(parser, "input error: "+err.Error(), parser.offset, -1)
83 | }
84 | return true
85 | }
86 |
87 | // Ensure that the buffer contains at least `length` characters.
88 | // Return true on success, false on failure.
89 | //
90 | // The length is supposed to be significantly less than the buffer size.
91 | func yaml_parser_update_buffer(parser *yaml_parser_t, length int) bool {
92 | if parser.read_handler == nil {
93 | panic("read handler must be set")
94 | }
95 |
96 | // If the EOF flag is set and the raw buffer is empty, do nothing.
97 | if parser.eof && parser.raw_buffer_pos == len(parser.raw_buffer) {
98 | return true
99 | }
100 |
101 | // Return if the buffer contains enough characters.
102 | if parser.unread >= length {
103 | return true
104 | }
105 |
106 | // Determine the input encoding if it is not known yet.
107 | if parser.encoding == yaml_ANY_ENCODING {
108 | if !yaml_parser_determine_encoding(parser) {
109 | return false
110 | }
111 | }
112 |
113 | // Move the unread characters to the beginning of the buffer.
114 | buffer_len := len(parser.buffer)
115 | if parser.buffer_pos > 0 && parser.buffer_pos < buffer_len {
116 | copy(parser.buffer, parser.buffer[parser.buffer_pos:])
117 | buffer_len -= parser.buffer_pos
118 | parser.buffer_pos = 0
119 | } else if parser.buffer_pos == buffer_len {
120 | buffer_len = 0
121 | parser.buffer_pos = 0
122 | }
123 |
124 | // Open the whole buffer for writing, and cut it before returning.
125 | parser.buffer = parser.buffer[:cap(parser.buffer)]
126 |
127 | // Fill the buffer until it has enough characters.
128 | first := true
129 | for parser.unread < length {
130 |
131 | // Fill the raw buffer if necessary.
132 | if !first || parser.raw_buffer_pos == len(parser.raw_buffer) {
133 | if !yaml_parser_update_raw_buffer(parser) {
134 | parser.buffer = parser.buffer[:buffer_len]
135 | return false
136 | }
137 | }
138 | first = false
139 |
140 | // Decode the raw buffer.
141 | inner:
142 | for parser.raw_buffer_pos != len(parser.raw_buffer) {
143 | var value rune
144 | var width int
145 |
146 | raw_unread := len(parser.raw_buffer) - parser.raw_buffer_pos
147 |
148 | // Decode the next character.
149 | switch parser.encoding {
150 | case yaml_UTF8_ENCODING:
151 | // Decode a UTF-8 character. Check RFC 3629
152 | // (http://www.ietf.org/rfc/rfc3629.txt) for more details.
153 | //
154 | // The following table (taken from the RFC) is used for
155 | // decoding.
156 | //
157 | // Char. number range | UTF-8 octet sequence
158 | // (hexadecimal) | (binary)
159 | // --------------------+------------------------------------
160 | // 0000 0000-0000 007F | 0xxxxxxx
161 | // 0000 0080-0000 07FF | 110xxxxx 10xxxxxx
162 | // 0000 0800-0000 FFFF | 1110xxxx 10xxxxxx 10xxxxxx
163 | // 0001 0000-0010 FFFF | 11110xxx 10xxxxxx 10xxxxxx 10xxxxxx
164 | //
165 | // Additionally, the characters in the range 0xD800-0xDFFF
166 | // are prohibited as they are reserved for use with UTF-16
167 | // surrogate pairs.
168 |
169 | // Determine the length of the UTF-8 sequence.
170 | octet := parser.raw_buffer[parser.raw_buffer_pos]
171 | switch {
172 | case octet&0x80 == 0x00:
173 | width = 1
174 | case octet&0xE0 == 0xC0:
175 | width = 2
176 | case octet&0xF0 == 0xE0:
177 | width = 3
178 | case octet&0xF8 == 0xF0:
179 | width = 4
180 | default:
181 | // The leading octet is invalid.
182 | return yaml_parser_set_reader_error(parser,
183 | "invalid leading UTF-8 octet",
184 | parser.offset, int(octet))
185 | }
186 |
187 | // Check if the raw buffer contains an incomplete character.
188 | if width > raw_unread {
189 | if parser.eof {
190 | return yaml_parser_set_reader_error(parser,
191 | "incomplete UTF-8 octet sequence",
192 | parser.offset, -1)
193 | }
194 | break inner
195 | }
196 |
197 | // Decode the leading octet.
198 | switch {
199 | case octet&0x80 == 0x00:
200 | value = rune(octet & 0x7F)
201 | case octet&0xE0 == 0xC0:
202 | value = rune(octet & 0x1F)
203 | case octet&0xF0 == 0xE0:
204 | value = rune(octet & 0x0F)
205 | case octet&0xF8 == 0xF0:
206 | value = rune(octet & 0x07)
207 | default:
208 | value = 0
209 | }
210 |
211 | // Check and decode the trailing octets.
212 | for k := 1; k < width; k++ {
213 | octet = parser.raw_buffer[parser.raw_buffer_pos+k]
214 |
215 | // Check if the octet is valid.
216 | if (octet & 0xC0) != 0x80 {
217 | return yaml_parser_set_reader_error(parser,
218 | "invalid trailing UTF-8 octet",
219 | parser.offset+k, int(octet))
220 | }
221 |
222 | // Decode the octet.
223 | value = (value << 6) + rune(octet&0x3F)
224 | }
225 |
226 | // Check the length of the sequence against the value.
227 | switch {
228 | case width == 1:
229 | case width == 2 && value >= 0x80:
230 | case width == 3 && value >= 0x800:
231 | case width == 4 && value >= 0x10000:
232 | default:
233 | return yaml_parser_set_reader_error(parser,
234 | "invalid length of a UTF-8 sequence",
235 | parser.offset, -1)
236 | }
237 |
238 | // Check the range of the value.
239 | if value >= 0xD800 && value <= 0xDFFF || value > 0x10FFFF {
240 | return yaml_parser_set_reader_error(parser,
241 | "invalid Unicode character",
242 | parser.offset, int(value))
243 | }
244 |
245 | case yaml_UTF16LE_ENCODING, yaml_UTF16BE_ENCODING:
246 | var low, high int
247 | if parser.encoding == yaml_UTF16LE_ENCODING {
248 | low, high = 0, 1
249 | } else {
250 | low, high = 1, 0
251 | }
252 |
253 | // The UTF-16 encoding is not as simple as one might
254 | // naively think. Check RFC 2781
255 | // (http://www.ietf.org/rfc/rfc2781.txt).
256 | //
257 | // Normally, two subsequent bytes describe a Unicode
258 | // character. However a special technique (called a
259 | // surrogate pair) is used for specifying character
260 | // values larger than 0xFFFF.
261 | //
262 | // A surrogate pair consists of two pseudo-characters:
263 | // high surrogate area (0xD800-0xDBFF)
264 | // low surrogate area (0xDC00-0xDFFF)
265 | //
266 | // The following formulas are used for decoding
267 | // and encoding characters using surrogate pairs:
268 | //
269 | // U = U' + 0x10000 (0x01 00 00 <= U <= 0x10 FF FF)
270 | // U' = yyyyyyyyyyxxxxxxxxxx (0 <= U' <= 0x0F FF FF)
271 | // W1 = 110110yyyyyyyyyy
272 | // W2 = 110111xxxxxxxxxx
273 | //
274 | // where U is the character value, W1 is the high surrogate
275 | // area, W2 is the low surrogate area.
276 |
277 | // Check for incomplete UTF-16 character.
278 | if raw_unread < 2 {
279 | if parser.eof {
280 | return yaml_parser_set_reader_error(parser,
281 | "incomplete UTF-16 character",
282 | parser.offset, -1)
283 | }
284 | break inner
285 | }
286 |
287 | // Get the character.
288 | value = rune(parser.raw_buffer[parser.raw_buffer_pos+low]) +
289 | (rune(parser.raw_buffer[parser.raw_buffer_pos+high]) << 8)
290 |
291 | // Check for unexpected low surrogate area.
292 | if value&0xFC00 == 0xDC00 {
293 | return yaml_parser_set_reader_error(parser,
294 | "unexpected low surrogate area",
295 | parser.offset, int(value))
296 | }
297 |
298 | // Check for a high surrogate area.
299 | if value&0xFC00 == 0xD800 {
300 | width = 4
301 |
302 | // Check for incomplete surrogate pair.
303 | if raw_unread < 4 {
304 | if parser.eof {
305 | return yaml_parser_set_reader_error(parser,
306 | "incomplete UTF-16 surrogate pair",
307 | parser.offset, -1)
308 | }
309 | break inner
310 | }
311 |
312 | // Get the next character.
313 | value2 := rune(parser.raw_buffer[parser.raw_buffer_pos+low+2]) +
314 | (rune(parser.raw_buffer[parser.raw_buffer_pos+high+2]) << 8)
315 |
316 | // Check for a low surrogate area.
317 | if value2&0xFC00 != 0xDC00 {
318 | return yaml_parser_set_reader_error(parser,
319 | "expected low surrogate area",
320 | parser.offset+2, int(value2))
321 | }
322 |
323 | // Generate the value of the surrogate pair.
324 | value = 0x10000 + ((value & 0x3FF) << 10) + (value2 & 0x3FF)
325 | } else {
326 | width = 2
327 | }
328 |
329 | default:
330 | panic("impossible")
331 | }
332 |
333 | // Check if the character is in the allowed range:
334 | // #x9 | #xA | #xD | [#x20-#x7E] (8 bit)
335 | // | #x85 | [#xA0-#xD7FF] | [#xE000-#xFFFD] (16 bit)
336 | // | [#x10000-#x10FFFF] (32 bit)
337 | switch {
338 | case value == 0x09:
339 | case value == 0x0A:
340 | case value == 0x0D:
341 | case value >= 0x20 && value <= 0x7E:
342 | case value == 0x85:
343 | case value >= 0xA0 && value <= 0xD7FF:
344 | case value >= 0xE000 && value <= 0xFFFD:
345 | case value >= 0x10000 && value <= 0x10FFFF:
346 | default:
347 | return yaml_parser_set_reader_error(parser,
348 | "control characters are not allowed",
349 | parser.offset, int(value))
350 | }
351 |
352 | // Move the raw pointers.
353 | parser.raw_buffer_pos += width
354 | parser.offset += width
355 |
356 | // Finally put the character into the buffer.
357 | if value <= 0x7F {
358 | // 0000 0000-0000 007F . 0xxxxxxx
359 | parser.buffer[buffer_len+0] = byte(value)
360 | buffer_len += 1
361 | } else if value <= 0x7FF {
362 | // 0000 0080-0000 07FF . 110xxxxx 10xxxxxx
363 | parser.buffer[buffer_len+0] = byte(0xC0 + (value >> 6))
364 | parser.buffer[buffer_len+1] = byte(0x80 + (value & 0x3F))
365 | buffer_len += 2
366 | } else if value <= 0xFFFF {
367 | // 0000 0800-0000 FFFF . 1110xxxx 10xxxxxx 10xxxxxx
368 | parser.buffer[buffer_len+0] = byte(0xE0 + (value >> 12))
369 | parser.buffer[buffer_len+1] = byte(0x80 + ((value >> 6) & 0x3F))
370 | parser.buffer[buffer_len+2] = byte(0x80 + (value & 0x3F))
371 | buffer_len += 3
372 | } else {
373 | // 0001 0000-0010 FFFF . 11110xxx 10xxxxxx 10xxxxxx 10xxxxxx
374 | parser.buffer[buffer_len+0] = byte(0xF0 + (value >> 18))
375 | parser.buffer[buffer_len+1] = byte(0x80 + ((value >> 12) & 0x3F))
376 | parser.buffer[buffer_len+2] = byte(0x80 + ((value >> 6) & 0x3F))
377 | parser.buffer[buffer_len+3] = byte(0x80 + (value & 0x3F))
378 | buffer_len += 4
379 | }
380 |
381 | parser.unread++
382 | }
383 |
384 | // On EOF, put NUL into the buffer and return.
385 | if parser.eof {
386 | parser.buffer[buffer_len] = 0
387 | buffer_len++
388 | parser.unread++
389 | break
390 | }
391 | }
392 | parser.buffer = parser.buffer[:buffer_len]
393 | return true
394 | }
395 |
--------------------------------------------------------------------------------
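
The comments in readerc.go above reproduce the RFC 3629 leading-octet table and the RFC 2781 surrogate-pair formula that the reader uses to validate its input. The standalone sketch below (not part of the vendored package; utf8Width and combineSurrogates are illustrative names of my own) shows the same leading-octet classification and surrogate combination in isolation.

package main

import "fmt"

// utf8Width mirrors the leading-octet switch in yaml_parser_update_buffer:
// 0xxxxxxx -> 1 byte, 110xxxxx -> 2, 1110xxxx -> 3, 11110xxx -> 4, and
// anything else is an invalid leading octet (reported here as 0).
func utf8Width(octet byte) int {
	switch {
	case octet&0x80 == 0x00:
		return 1
	case octet&0xE0 == 0xC0:
		return 2
	case octet&0xF0 == 0xE0:
		return 3
	case octet&0xF8 == 0xF0:
		return 4
	default:
		return 0
	}
}

// combineSurrogates applies the RFC 2781 formula quoted in the comments:
// U = 0x10000 + ((W1 & 0x3FF) << 10) + (W2 & 0x3FF).
func combineSurrogates(w1, w2 rune) rune {
	return 0x10000 + ((w1 & 0x3FF) << 10) + (w2 & 0x3FF)
}

func main() {
	for _, b := range []byte{'A', 0xC3, 0xE2, 0xF0, 0xFF} {
		fmt.Printf("%#x -> width %d\n", b, utf8Width(b))
	}
	// 0xD83D/0xDE00 is the surrogate pair for U+1F600.
	fmt.Printf("%#x\n", combineSurrogates(0xD83D, 0xDE00))
}
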
/mattertee/vendor/gopkg.in/yaml.v2/decode.go:
--------------------------------------------------------------------------------
1 | package yaml
2 |
3 | import (
4 | "encoding"
5 | "encoding/base64"
6 | "fmt"
7 | "math"
8 | "reflect"
9 | "strconv"
10 | "time"
11 | )
12 |
13 | const (
14 | documentNode = 1 << iota
15 | mappingNode
16 | sequenceNode
17 | scalarNode
18 | aliasNode
19 | )
20 |
21 | type node struct {
22 | kind int
23 | line, column int
24 | tag string
25 | value string
26 | implicit bool
27 | children []*node
28 | anchors map[string]*node
29 | }
30 |
31 | // ----------------------------------------------------------------------------
32 | // Parser, produces a node tree out of a libyaml event stream.
33 |
34 | type parser struct {
35 | parser yaml_parser_t
36 | event yaml_event_t
37 | doc *node
38 | }
39 |
40 | func newParser(b []byte) *parser {
41 | p := parser{}
42 | if !yaml_parser_initialize(&p.parser) {
43 | 		panic("failed to initialize YAML parser")
44 | }
45 |
46 | if len(b) == 0 {
47 | b = []byte{'\n'}
48 | }
49 |
50 | yaml_parser_set_input_string(&p.parser, b)
51 |
52 | p.skip()
53 | if p.event.typ != yaml_STREAM_START_EVENT {
54 | panic("expected stream start event, got " + strconv.Itoa(int(p.event.typ)))
55 | }
56 | p.skip()
57 | return &p
58 | }
59 |
60 | func (p *parser) destroy() {
61 | if p.event.typ != yaml_NO_EVENT {
62 | yaml_event_delete(&p.event)
63 | }
64 | yaml_parser_delete(&p.parser)
65 | }
66 |
67 | func (p *parser) skip() {
68 | if p.event.typ != yaml_NO_EVENT {
69 | if p.event.typ == yaml_STREAM_END_EVENT {
70 | failf("attempted to go past the end of stream; corrupted value?")
71 | }
72 | yaml_event_delete(&p.event)
73 | }
74 | if !yaml_parser_parse(&p.parser, &p.event) {
75 | p.fail()
76 | }
77 | }
78 |
79 | func (p *parser) fail() {
80 | var where string
81 | var line int
82 | if p.parser.problem_mark.line != 0 {
83 | line = p.parser.problem_mark.line
84 | } else if p.parser.context_mark.line != 0 {
85 | line = p.parser.context_mark.line
86 | }
87 | if line != 0 {
88 | where = "line " + strconv.Itoa(line) + ": "
89 | }
90 | var msg string
91 | if len(p.parser.problem) > 0 {
92 | msg = p.parser.problem
93 | } else {
94 | msg = "unknown problem parsing YAML content"
95 | }
96 | failf("%s%s", where, msg)
97 | }
98 |
99 | func (p *parser) anchor(n *node, anchor []byte) {
100 | if anchor != nil {
101 | p.doc.anchors[string(anchor)] = n
102 | }
103 | }
104 |
105 | func (p *parser) parse() *node {
106 | switch p.event.typ {
107 | case yaml_SCALAR_EVENT:
108 | return p.scalar()
109 | case yaml_ALIAS_EVENT:
110 | return p.alias()
111 | case yaml_MAPPING_START_EVENT:
112 | return p.mapping()
113 | case yaml_SEQUENCE_START_EVENT:
114 | return p.sequence()
115 | case yaml_DOCUMENT_START_EVENT:
116 | return p.document()
117 | case yaml_STREAM_END_EVENT:
118 | // Happens when attempting to decode an empty buffer.
119 | return nil
120 | default:
121 | panic("attempted to parse unknown event: " + strconv.Itoa(int(p.event.typ)))
122 | }
123 | panic("unreachable")
124 | }
125 |
126 | func (p *parser) node(kind int) *node {
127 | return &node{
128 | kind: kind,
129 | line: p.event.start_mark.line,
130 | column: p.event.start_mark.column,
131 | }
132 | }
133 |
134 | func (p *parser) document() *node {
135 | n := p.node(documentNode)
136 | n.anchors = make(map[string]*node)
137 | p.doc = n
138 | p.skip()
139 | n.children = append(n.children, p.parse())
140 | if p.event.typ != yaml_DOCUMENT_END_EVENT {
141 | panic("expected end of document event but got " + strconv.Itoa(int(p.event.typ)))
142 | }
143 | p.skip()
144 | return n
145 | }
146 |
147 | func (p *parser) alias() *node {
148 | n := p.node(aliasNode)
149 | n.value = string(p.event.anchor)
150 | p.skip()
151 | return n
152 | }
153 |
154 | func (p *parser) scalar() *node {
155 | n := p.node(scalarNode)
156 | n.value = string(p.event.value)
157 | n.tag = string(p.event.tag)
158 | n.implicit = p.event.implicit
159 | p.anchor(n, p.event.anchor)
160 | p.skip()
161 | return n
162 | }
163 |
164 | func (p *parser) sequence() *node {
165 | n := p.node(sequenceNode)
166 | p.anchor(n, p.event.anchor)
167 | p.skip()
168 | for p.event.typ != yaml_SEQUENCE_END_EVENT {
169 | n.children = append(n.children, p.parse())
170 | }
171 | p.skip()
172 | return n
173 | }
174 |
175 | func (p *parser) mapping() *node {
176 | n := p.node(mappingNode)
177 | p.anchor(n, p.event.anchor)
178 | p.skip()
179 | for p.event.typ != yaml_MAPPING_END_EVENT {
180 | n.children = append(n.children, p.parse(), p.parse())
181 | }
182 | p.skip()
183 | return n
184 | }
185 |
186 | // ----------------------------------------------------------------------------
187 | // Decoder, unmarshals a node into a provided value.
188 |
189 | type decoder struct {
190 | doc *node
191 | aliases map[string]bool
192 | mapType reflect.Type
193 | terrors []string
194 | }
195 |
196 | var (
197 | mapItemType = reflect.TypeOf(MapItem{})
198 | durationType = reflect.TypeOf(time.Duration(0))
199 | defaultMapType = reflect.TypeOf(map[interface{}]interface{}{})
200 | ifaceType = defaultMapType.Elem()
201 | )
202 |
203 | func newDecoder() *decoder {
204 | d := &decoder{mapType: defaultMapType}
205 | d.aliases = make(map[string]bool)
206 | return d
207 | }
208 |
209 | func (d *decoder) terror(n *node, tag string, out reflect.Value) {
210 | if n.tag != "" {
211 | tag = n.tag
212 | }
213 | value := n.value
214 | if tag != yaml_SEQ_TAG && tag != yaml_MAP_TAG {
215 | if len(value) > 10 {
216 | value = " `" + value[:7] + "...`"
217 | } else {
218 | value = " `" + value + "`"
219 | }
220 | }
221 | d.terrors = append(d.terrors, fmt.Sprintf("line %d: cannot unmarshal %s%s into %s", n.line+1, shortTag(tag), value, out.Type()))
222 | }
223 |
224 | func (d *decoder) callUnmarshaler(n *node, u Unmarshaler) (good bool) {
225 | terrlen := len(d.terrors)
226 | err := u.UnmarshalYAML(func(v interface{}) (err error) {
227 | defer handleErr(&err)
228 | d.unmarshal(n, reflect.ValueOf(v))
229 | if len(d.terrors) > terrlen {
230 | issues := d.terrors[terrlen:]
231 | d.terrors = d.terrors[:terrlen]
232 | return &TypeError{issues}
233 | }
234 | return nil
235 | })
236 | if e, ok := err.(*TypeError); ok {
237 | d.terrors = append(d.terrors, e.Errors...)
238 | return false
239 | }
240 | if err != nil {
241 | fail(err)
242 | }
243 | return true
244 | }
245 |
246 | // d.prepare initializes and dereferences pointers and calls UnmarshalYAML
247 | // if a value is found to implement it.
248 | // It returns the initialized and dereferenced out value, whether
249 | // unmarshalling was already done by UnmarshalYAML, and if so whether
250 | // its types unmarshalled appropriately.
251 | //
252 | // If n holds a null value, prepare returns before doing anything.
253 | func (d *decoder) prepare(n *node, out reflect.Value) (newout reflect.Value, unmarshaled, good bool) {
254 | if n.tag == yaml_NULL_TAG || n.kind == scalarNode && n.tag == "" && (n.value == "null" || n.value == "") {
255 | return out, false, false
256 | }
257 | again := true
258 | for again {
259 | again = false
260 | if out.Kind() == reflect.Ptr {
261 | if out.IsNil() {
262 | out.Set(reflect.New(out.Type().Elem()))
263 | }
264 | out = out.Elem()
265 | again = true
266 | }
267 | if out.CanAddr() {
268 | if u, ok := out.Addr().Interface().(Unmarshaler); ok {
269 | good = d.callUnmarshaler(n, u)
270 | return out, true, good
271 | }
272 | }
273 | }
274 | return out, false, false
275 | }
276 |
277 | func (d *decoder) unmarshal(n *node, out reflect.Value) (good bool) {
278 | switch n.kind {
279 | case documentNode:
280 | return d.document(n, out)
281 | case aliasNode:
282 | return d.alias(n, out)
283 | }
284 | out, unmarshaled, good := d.prepare(n, out)
285 | if unmarshaled {
286 | return good
287 | }
288 | switch n.kind {
289 | case scalarNode:
290 | good = d.scalar(n, out)
291 | case mappingNode:
292 | good = d.mapping(n, out)
293 | case sequenceNode:
294 | good = d.sequence(n, out)
295 | default:
296 | panic("internal error: unknown node kind: " + strconv.Itoa(n.kind))
297 | }
298 | return good
299 | }
300 |
301 | func (d *decoder) document(n *node, out reflect.Value) (good bool) {
302 | if len(n.children) == 1 {
303 | d.doc = n
304 | d.unmarshal(n.children[0], out)
305 | return true
306 | }
307 | return false
308 | }
309 |
310 | func (d *decoder) alias(n *node, out reflect.Value) (good bool) {
311 | an, ok := d.doc.anchors[n.value]
312 | if !ok {
313 | failf("unknown anchor '%s' referenced", n.value)
314 | }
315 | if d.aliases[n.value] {
316 | failf("anchor '%s' value contains itself", n.value)
317 | }
318 | d.aliases[n.value] = true
319 | good = d.unmarshal(an, out)
320 | delete(d.aliases, n.value)
321 | return good
322 | }
323 |
324 | var zeroValue reflect.Value
325 |
326 | func resetMap(out reflect.Value) {
327 | for _, k := range out.MapKeys() {
328 | out.SetMapIndex(k, zeroValue)
329 | }
330 | }
331 |
332 | func (d *decoder) scalar(n *node, out reflect.Value) (good bool) {
333 | var tag string
334 | var resolved interface{}
335 | if n.tag == "" && !n.implicit {
336 | tag = yaml_STR_TAG
337 | resolved = n.value
338 | } else {
339 | tag, resolved = resolve(n.tag, n.value)
340 | if tag == yaml_BINARY_TAG {
341 | data, err := base64.StdEncoding.DecodeString(resolved.(string))
342 | if err != nil {
343 | failf("!!binary value contains invalid base64 data")
344 | }
345 | resolved = string(data)
346 | }
347 | }
348 | if resolved == nil {
349 | if out.Kind() == reflect.Map && !out.CanAddr() {
350 | resetMap(out)
351 | } else {
352 | out.Set(reflect.Zero(out.Type()))
353 | }
354 | return true
355 | }
356 | if s, ok := resolved.(string); ok && out.CanAddr() {
357 | if u, ok := out.Addr().Interface().(encoding.TextUnmarshaler); ok {
358 | err := u.UnmarshalText([]byte(s))
359 | if err != nil {
360 | fail(err)
361 | }
362 | return true
363 | }
364 | }
365 | switch out.Kind() {
366 | case reflect.String:
367 | if tag == yaml_BINARY_TAG {
368 | out.SetString(resolved.(string))
369 | good = true
370 | } else if resolved != nil {
371 | out.SetString(n.value)
372 | good = true
373 | }
374 | case reflect.Interface:
375 | if resolved == nil {
376 | out.Set(reflect.Zero(out.Type()))
377 | } else {
378 | out.Set(reflect.ValueOf(resolved))
379 | }
380 | good = true
381 | case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
382 | switch resolved := resolved.(type) {
383 | case int:
384 | if !out.OverflowInt(int64(resolved)) {
385 | out.SetInt(int64(resolved))
386 | good = true
387 | }
388 | case int64:
389 | if !out.OverflowInt(resolved) {
390 | out.SetInt(resolved)
391 | good = true
392 | }
393 | case uint64:
394 | if resolved <= math.MaxInt64 && !out.OverflowInt(int64(resolved)) {
395 | out.SetInt(int64(resolved))
396 | good = true
397 | }
398 | case float64:
399 | if resolved <= math.MaxInt64 && !out.OverflowInt(int64(resolved)) {
400 | out.SetInt(int64(resolved))
401 | good = true
402 | }
403 | case string:
404 | if out.Type() == durationType {
405 | d, err := time.ParseDuration(resolved)
406 | if err == nil {
407 | out.SetInt(int64(d))
408 | good = true
409 | }
410 | }
411 | }
412 | case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
413 | switch resolved := resolved.(type) {
414 | case int:
415 | if resolved >= 0 && !out.OverflowUint(uint64(resolved)) {
416 | out.SetUint(uint64(resolved))
417 | good = true
418 | }
419 | case int64:
420 | if resolved >= 0 && !out.OverflowUint(uint64(resolved)) {
421 | out.SetUint(uint64(resolved))
422 | good = true
423 | }
424 | case uint64:
425 | if !out.OverflowUint(uint64(resolved)) {
426 | out.SetUint(uint64(resolved))
427 | good = true
428 | }
429 | case float64:
430 | if resolved <= math.MaxUint64 && !out.OverflowUint(uint64(resolved)) {
431 | out.SetUint(uint64(resolved))
432 | good = true
433 | }
434 | }
435 | case reflect.Bool:
436 | switch resolved := resolved.(type) {
437 | case bool:
438 | out.SetBool(resolved)
439 | good = true
440 | }
441 | case reflect.Float32, reflect.Float64:
442 | switch resolved := resolved.(type) {
443 | case int:
444 | out.SetFloat(float64(resolved))
445 | good = true
446 | case int64:
447 | out.SetFloat(float64(resolved))
448 | good = true
449 | case uint64:
450 | out.SetFloat(float64(resolved))
451 | good = true
452 | case float64:
453 | out.SetFloat(resolved)
454 | good = true
455 | }
456 | case reflect.Ptr:
457 | if out.Type().Elem() == reflect.TypeOf(resolved) {
458 | 			// TODO Does this make sense? When is out a Ptr except when decoding a nil value?
459 | elem := reflect.New(out.Type().Elem())
460 | elem.Elem().Set(reflect.ValueOf(resolved))
461 | out.Set(elem)
462 | good = true
463 | }
464 | }
465 | if !good {
466 | d.terror(n, tag, out)
467 | }
468 | return good
469 | }
470 |
471 | func settableValueOf(i interface{}) reflect.Value {
472 | v := reflect.ValueOf(i)
473 | sv := reflect.New(v.Type()).Elem()
474 | sv.Set(v)
475 | return sv
476 | }
477 |
478 | func (d *decoder) sequence(n *node, out reflect.Value) (good bool) {
479 | l := len(n.children)
480 |
481 | var iface reflect.Value
482 | switch out.Kind() {
483 | case reflect.Slice:
484 | out.Set(reflect.MakeSlice(out.Type(), l, l))
485 | case reflect.Interface:
486 | // No type hints. Will have to use a generic sequence.
487 | iface = out
488 | out = settableValueOf(make([]interface{}, l))
489 | default:
490 | d.terror(n, yaml_SEQ_TAG, out)
491 | return false
492 | }
493 | et := out.Type().Elem()
494 |
495 | j := 0
496 | for i := 0; i < l; i++ {
497 | e := reflect.New(et).Elem()
498 | if ok := d.unmarshal(n.children[i], e); ok {
499 | out.Index(j).Set(e)
500 | j++
501 | }
502 | }
503 | out.Set(out.Slice(0, j))
504 | if iface.IsValid() {
505 | iface.Set(out)
506 | }
507 | return true
508 | }
509 |
510 | func (d *decoder) mapping(n *node, out reflect.Value) (good bool) {
511 | switch out.Kind() {
512 | case reflect.Struct:
513 | return d.mappingStruct(n, out)
514 | case reflect.Slice:
515 | return d.mappingSlice(n, out)
516 | case reflect.Map:
517 | // okay
518 | case reflect.Interface:
519 | if d.mapType.Kind() == reflect.Map {
520 | iface := out
521 | out = reflect.MakeMap(d.mapType)
522 | iface.Set(out)
523 | } else {
524 | slicev := reflect.New(d.mapType).Elem()
525 | if !d.mappingSlice(n, slicev) {
526 | return false
527 | }
528 | out.Set(slicev)
529 | return true
530 | }
531 | default:
532 | d.terror(n, yaml_MAP_TAG, out)
533 | return false
534 | }
535 | outt := out.Type()
536 | kt := outt.Key()
537 | et := outt.Elem()
538 |
539 | mapType := d.mapType
540 | if outt.Key() == ifaceType && outt.Elem() == ifaceType {
541 | d.mapType = outt
542 | }
543 |
544 | if out.IsNil() {
545 | out.Set(reflect.MakeMap(outt))
546 | }
547 | l := len(n.children)
548 | for i := 0; i < l; i += 2 {
549 | if isMerge(n.children[i]) {
550 | d.merge(n.children[i+1], out)
551 | continue
552 | }
553 | k := reflect.New(kt).Elem()
554 | if d.unmarshal(n.children[i], k) {
555 | kkind := k.Kind()
556 | if kkind == reflect.Interface {
557 | kkind = k.Elem().Kind()
558 | }
559 | if kkind == reflect.Map || kkind == reflect.Slice {
560 | failf("invalid map key: %#v", k.Interface())
561 | }
562 | e := reflect.New(et).Elem()
563 | if d.unmarshal(n.children[i+1], e) {
564 | out.SetMapIndex(k, e)
565 | }
566 | }
567 | }
568 | d.mapType = mapType
569 | return true
570 | }
571 |
572 | func (d *decoder) mappingSlice(n *node, out reflect.Value) (good bool) {
573 | outt := out.Type()
574 | if outt.Elem() != mapItemType {
575 | d.terror(n, yaml_MAP_TAG, out)
576 | return false
577 | }
578 |
579 | mapType := d.mapType
580 | d.mapType = outt
581 |
582 | var slice []MapItem
583 | var l = len(n.children)
584 | for i := 0; i < l; i += 2 {
585 | if isMerge(n.children[i]) {
586 | d.merge(n.children[i+1], out)
587 | continue
588 | }
589 | item := MapItem{}
590 | k := reflect.ValueOf(&item.Key).Elem()
591 | if d.unmarshal(n.children[i], k) {
592 | v := reflect.ValueOf(&item.Value).Elem()
593 | if d.unmarshal(n.children[i+1], v) {
594 | slice = append(slice, item)
595 | }
596 | }
597 | }
598 | out.Set(reflect.ValueOf(slice))
599 | d.mapType = mapType
600 | return true
601 | }
602 |
603 | func (d *decoder) mappingStruct(n *node, out reflect.Value) (good bool) {
604 | sinfo, err := getStructInfo(out.Type())
605 | if err != nil {
606 | panic(err)
607 | }
608 | name := settableValueOf("")
609 | l := len(n.children)
610 |
611 | var inlineMap reflect.Value
612 | var elemType reflect.Type
613 | if sinfo.InlineMap != -1 {
614 | inlineMap = out.Field(sinfo.InlineMap)
615 | inlineMap.Set(reflect.New(inlineMap.Type()).Elem())
616 | elemType = inlineMap.Type().Elem()
617 | }
618 |
619 | for i := 0; i < l; i += 2 {
620 | ni := n.children[i]
621 | if isMerge(ni) {
622 | d.merge(n.children[i+1], out)
623 | continue
624 | }
625 | if !d.unmarshal(ni, name) {
626 | continue
627 | }
628 | if info, ok := sinfo.FieldsMap[name.String()]; ok {
629 | var field reflect.Value
630 | if info.Inline == nil {
631 | field = out.Field(info.Num)
632 | } else {
633 | field = out.FieldByIndex(info.Inline)
634 | }
635 | d.unmarshal(n.children[i+1], field)
636 | } else if sinfo.InlineMap != -1 {
637 | if inlineMap.IsNil() {
638 | inlineMap.Set(reflect.MakeMap(inlineMap.Type()))
639 | }
640 | value := reflect.New(elemType).Elem()
641 | d.unmarshal(n.children[i+1], value)
642 | inlineMap.SetMapIndex(name, value)
643 | }
644 | }
645 | return true
646 | }
647 |
648 | func failWantMap() {
649 | failf("map merge requires map or sequence of maps as the value")
650 | }
651 |
652 | func (d *decoder) merge(n *node, out reflect.Value) {
653 | switch n.kind {
654 | case mappingNode:
655 | d.unmarshal(n, out)
656 | case aliasNode:
657 | an, ok := d.doc.anchors[n.value]
658 | if ok && an.kind != mappingNode {
659 | failWantMap()
660 | }
661 | d.unmarshal(n, out)
662 | case sequenceNode:
663 | // Step backwards as earlier nodes take precedence.
664 | for i := len(n.children) - 1; i >= 0; i-- {
665 | ni := n.children[i]
666 | if ni.kind == aliasNode {
667 | an, ok := d.doc.anchors[ni.value]
668 | if ok && an.kind != mappingNode {
669 | failWantMap()
670 | }
671 | } else if ni.kind != mappingNode {
672 | failWantMap()
673 | }
674 | d.unmarshal(ni, out)
675 | }
676 | default:
677 | failWantMap()
678 | }
679 | }
680 |
681 | func isMerge(n *node) bool {
682 | 	return n.kind == scalarNode && n.value == "<<" && (n.implicit || n.tag == yaml_MERGE_TAG)
683 | }
684 |
--------------------------------------------------------------------------------
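
The alias() and merge() methods in decode.go above implement YAML anchors, aliases, and the "<<" merge key detected by isMerge(). The sketch below shows how that behaviour surfaces through the package's public Unmarshal entry point; it is a minimal example assuming the package is imported by its canonical gopkg.in/yaml.v2 path, and the document and variable names are illustrative only.

package main

import (
	"fmt"
	"log"

	yaml "gopkg.in/yaml.v2"
)

const doc = `
defaults: &defaults
  adapter: postgres
  host: localhost
development:
  <<: *defaults
  database: dev_db
`

func main() {
	var cfg map[string]map[string]string
	if err := yaml.Unmarshal([]byte(doc), &cfg); err != nil {
		log.Fatal(err)
	}
	// The "<<" merge key copies adapter and host from the &defaults anchor.
	fmt.Println(cfg["development"]["adapter"], cfg["development"]["database"])
}
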
/mattertee/vendor/gopkg.in/yaml.v2/apic.go:
--------------------------------------------------------------------------------
1 | package yaml
2 |
3 | import (
4 | "io"
5 | "os"
6 | )
7 |
8 | func yaml_insert_token(parser *yaml_parser_t, pos int, token *yaml_token_t) {
9 | //fmt.Println("yaml_insert_token", "pos:", pos, "typ:", token.typ, "head:", parser.tokens_head, "len:", len(parser.tokens))
10 |
11 | // Check if we can move the queue at the beginning of the buffer.
12 | if parser.tokens_head > 0 && len(parser.tokens) == cap(parser.tokens) {
13 | if parser.tokens_head != len(parser.tokens) {
14 | copy(parser.tokens, parser.tokens[parser.tokens_head:])
15 | }
16 | parser.tokens = parser.tokens[:len(parser.tokens)-parser.tokens_head]
17 | parser.tokens_head = 0
18 | }
19 | parser.tokens = append(parser.tokens, *token)
20 | if pos < 0 {
21 | return
22 | }
23 | copy(parser.tokens[parser.tokens_head+pos+1:], parser.tokens[parser.tokens_head+pos:])
24 | parser.tokens[parser.tokens_head+pos] = *token
25 | }
26 |
27 | // Create a new parser object.
28 | func yaml_parser_initialize(parser *yaml_parser_t) bool {
29 | *parser = yaml_parser_t{
30 | raw_buffer: make([]byte, 0, input_raw_buffer_size),
31 | buffer: make([]byte, 0, input_buffer_size),
32 | }
33 | return true
34 | }
35 |
36 | // Destroy a parser object.
37 | func yaml_parser_delete(parser *yaml_parser_t) {
38 | *parser = yaml_parser_t{}
39 | }
40 |
41 | // String read handler.
42 | func yaml_string_read_handler(parser *yaml_parser_t, buffer []byte) (n int, err error) {
43 | if parser.input_pos == len(parser.input) {
44 | return 0, io.EOF
45 | }
46 | n = copy(buffer, parser.input[parser.input_pos:])
47 | parser.input_pos += n
48 | return n, nil
49 | }
50 |
51 | // File read handler.
52 | func yaml_file_read_handler(parser *yaml_parser_t, buffer []byte) (n int, err error) {
53 | return parser.input_file.Read(buffer)
54 | }
55 |
56 | // Set a string input.
57 | func yaml_parser_set_input_string(parser *yaml_parser_t, input []byte) {
58 | if parser.read_handler != nil {
59 | panic("must set the input source only once")
60 | }
61 | parser.read_handler = yaml_string_read_handler
62 | parser.input = input
63 | parser.input_pos = 0
64 | }
65 |
66 | // Set a file input.
67 | func yaml_parser_set_input_file(parser *yaml_parser_t, file *os.File) {
68 | if parser.read_handler != nil {
69 | panic("must set the input source only once")
70 | }
71 | parser.read_handler = yaml_file_read_handler
72 | parser.input_file = file
73 | }
74 |
75 | // Set the source encoding.
76 | func yaml_parser_set_encoding(parser *yaml_parser_t, encoding yaml_encoding_t) {
77 | if parser.encoding != yaml_ANY_ENCODING {
78 | panic("must set the encoding only once")
79 | }
80 | parser.encoding = encoding
81 | }
82 |
83 | // Create a new emitter object.
84 | func yaml_emitter_initialize(emitter *yaml_emitter_t) bool {
85 | *emitter = yaml_emitter_t{
86 | buffer: make([]byte, output_buffer_size),
87 | raw_buffer: make([]byte, 0, output_raw_buffer_size),
88 | states: make([]yaml_emitter_state_t, 0, initial_stack_size),
89 | events: make([]yaml_event_t, 0, initial_queue_size),
90 | }
91 | return true
92 | }
93 |
94 | // Destroy an emitter object.
95 | func yaml_emitter_delete(emitter *yaml_emitter_t) {
96 | *emitter = yaml_emitter_t{}
97 | }
98 |
99 | // String write handler.
100 | func yaml_string_write_handler(emitter *yaml_emitter_t, buffer []byte) error {
101 | *emitter.output_buffer = append(*emitter.output_buffer, buffer...)
102 | return nil
103 | }
104 |
105 | // File write handler.
106 | func yaml_file_write_handler(emitter *yaml_emitter_t, buffer []byte) error {
107 | _, err := emitter.output_file.Write(buffer)
108 | return err
109 | }
110 |
111 | // Set a string output.
112 | func yaml_emitter_set_output_string(emitter *yaml_emitter_t, output_buffer *[]byte) {
113 | if emitter.write_handler != nil {
114 | panic("must set the output target only once")
115 | }
116 | emitter.write_handler = yaml_string_write_handler
117 | emitter.output_buffer = output_buffer
118 | }
119 |
120 | // Set a file output.
121 | func yaml_emitter_set_output_file(emitter *yaml_emitter_t, file io.Writer) {
122 | if emitter.write_handler != nil {
123 | panic("must set the output target only once")
124 | }
125 | emitter.write_handler = yaml_file_write_handler
126 | emitter.output_file = file
127 | }
128 |
129 | // Set the output encoding.
130 | func yaml_emitter_set_encoding(emitter *yaml_emitter_t, encoding yaml_encoding_t) {
131 | if emitter.encoding != yaml_ANY_ENCODING {
132 | panic("must set the output encoding only once")
133 | }
134 | emitter.encoding = encoding
135 | }
136 |
137 | // Set the canonical output style.
138 | func yaml_emitter_set_canonical(emitter *yaml_emitter_t, canonical bool) {
139 | emitter.canonical = canonical
140 | }
141 |
142 | // Set the indentation increment.
143 | func yaml_emitter_set_indent(emitter *yaml_emitter_t, indent int) {
144 | if indent < 2 || indent > 9 {
145 | indent = 2
146 | }
147 | emitter.best_indent = indent
148 | }
149 |
150 | // Set the preferred line width.
151 | func yaml_emitter_set_width(emitter *yaml_emitter_t, width int) {
152 | if width < 0 {
153 | width = -1
154 | }
155 | emitter.best_width = width
156 | }
157 |
158 | // Set if unescaped non-ASCII characters are allowed.
159 | func yaml_emitter_set_unicode(emitter *yaml_emitter_t, unicode bool) {
160 | emitter.unicode = unicode
161 | }
162 |
163 | // Set the preferred line break character.
164 | func yaml_emitter_set_break(emitter *yaml_emitter_t, line_break yaml_break_t) {
165 | emitter.line_break = line_break
166 | }
167 |
168 | ///*
169 | // * Destroy a token object.
170 | // */
171 | //
172 | //YAML_DECLARE(void)
173 | //yaml_token_delete(yaml_token_t *token)
174 | //{
175 | // assert(token); // Non-NULL token object expected.
176 | //
177 | // switch (token.type)
178 | // {
179 | // case YAML_TAG_DIRECTIVE_TOKEN:
180 | // yaml_free(token.data.tag_directive.handle);
181 | // yaml_free(token.data.tag_directive.prefix);
182 | // break;
183 | //
184 | // case YAML_ALIAS_TOKEN:
185 | // yaml_free(token.data.alias.value);
186 | // break;
187 | //
188 | // case YAML_ANCHOR_TOKEN:
189 | // yaml_free(token.data.anchor.value);
190 | // break;
191 | //
192 | // case YAML_TAG_TOKEN:
193 | // yaml_free(token.data.tag.handle);
194 | // yaml_free(token.data.tag.suffix);
195 | // break;
196 | //
197 | // case YAML_SCALAR_TOKEN:
198 | // yaml_free(token.data.scalar.value);
199 | // break;
200 | //
201 | // default:
202 | // break;
203 | // }
204 | //
205 | // memset(token, 0, sizeof(yaml_token_t));
206 | //}
207 | //
208 | ///*
209 | // * Check if a string is a valid UTF-8 sequence.
210 | // *
211 | // * Check 'reader.c' for more details on UTF-8 encoding.
212 | // */
213 | //
214 | //static int
215 | //yaml_check_utf8(yaml_char_t *start, size_t length)
216 | //{
217 | // yaml_char_t *end = start+length;
218 | // yaml_char_t *pointer = start;
219 | //
220 | // while (pointer < end) {
221 | // unsigned char octet;
222 | // unsigned int width;
223 | // unsigned int value;
224 | // size_t k;
225 | //
226 | // octet = pointer[0];
227 | // width = (octet & 0x80) == 0x00 ? 1 :
228 | // (octet & 0xE0) == 0xC0 ? 2 :
229 | // (octet & 0xF0) == 0xE0 ? 3 :
230 | // (octet & 0xF8) == 0xF0 ? 4 : 0;
231 | // value = (octet & 0x80) == 0x00 ? octet & 0x7F :
232 | // (octet & 0xE0) == 0xC0 ? octet & 0x1F :
233 | // (octet & 0xF0) == 0xE0 ? octet & 0x0F :
234 | // (octet & 0xF8) == 0xF0 ? octet & 0x07 : 0;
235 | // if (!width) return 0;
236 | // if (pointer+width > end) return 0;
237 | // for (k = 1; k < width; k ++) {
238 | // octet = pointer[k];
239 | // if ((octet & 0xC0) != 0x80) return 0;
240 | // value = (value << 6) + (octet & 0x3F);
241 | // }
242 | // if (!((width == 1) ||
243 | // (width == 2 && value >= 0x80) ||
244 | // (width == 3 && value >= 0x800) ||
245 | // (width == 4 && value >= 0x10000))) return 0;
246 | //
247 | // pointer += width;
248 | // }
249 | //
250 | // return 1;
251 | //}
252 | //
253 |
254 | // Create STREAM-START.
255 | func yaml_stream_start_event_initialize(event *yaml_event_t, encoding yaml_encoding_t) bool {
256 | *event = yaml_event_t{
257 | typ: yaml_STREAM_START_EVENT,
258 | encoding: encoding,
259 | }
260 | return true
261 | }
262 |
263 | // Create STREAM-END.
264 | func yaml_stream_end_event_initialize(event *yaml_event_t) bool {
265 | *event = yaml_event_t{
266 | typ: yaml_STREAM_END_EVENT,
267 | }
268 | return true
269 | }
270 |
271 | // Create DOCUMENT-START.
272 | func yaml_document_start_event_initialize(event *yaml_event_t, version_directive *yaml_version_directive_t,
273 | tag_directives []yaml_tag_directive_t, implicit bool) bool {
274 | *event = yaml_event_t{
275 | typ: yaml_DOCUMENT_START_EVENT,
276 | version_directive: version_directive,
277 | tag_directives: tag_directives,
278 | implicit: implicit,
279 | }
280 | return true
281 | }
282 |
283 | // Create DOCUMENT-END.
284 | func yaml_document_end_event_initialize(event *yaml_event_t, implicit bool) bool {
285 | *event = yaml_event_t{
286 | typ: yaml_DOCUMENT_END_EVENT,
287 | implicit: implicit,
288 | }
289 | return true
290 | }
291 |
292 | ///*
293 | // * Create ALIAS.
294 | // */
295 | //
296 | //YAML_DECLARE(int)
297 | //yaml_alias_event_initialize(event *yaml_event_t, anchor *yaml_char_t)
298 | //{
299 | // mark yaml_mark_t = { 0, 0, 0 }
300 | // anchor_copy *yaml_char_t = NULL
301 | //
302 | // assert(event) // Non-NULL event object is expected.
303 | // assert(anchor) // Non-NULL anchor is expected.
304 | //
305 | // if (!yaml_check_utf8(anchor, strlen((char *)anchor))) return 0
306 | //
307 | // anchor_copy = yaml_strdup(anchor)
308 | // if (!anchor_copy)
309 | // return 0
310 | //
311 | // ALIAS_EVENT_INIT(*event, anchor_copy, mark, mark)
312 | //
313 | // return 1
314 | //}
315 |
316 | // Create SCALAR.
317 | func yaml_scalar_event_initialize(event *yaml_event_t, anchor, tag, value []byte, plain_implicit, quoted_implicit bool, style yaml_scalar_style_t) bool {
318 | *event = yaml_event_t{
319 | typ: yaml_SCALAR_EVENT,
320 | anchor: anchor,
321 | tag: tag,
322 | value: value,
323 | implicit: plain_implicit,
324 | quoted_implicit: quoted_implicit,
325 | style: yaml_style_t(style),
326 | }
327 | return true
328 | }
329 |
330 | // Create SEQUENCE-START.
331 | func yaml_sequence_start_event_initialize(event *yaml_event_t, anchor, tag []byte, implicit bool, style yaml_sequence_style_t) bool {
332 | *event = yaml_event_t{
333 | typ: yaml_SEQUENCE_START_EVENT,
334 | anchor: anchor,
335 | tag: tag,
336 | implicit: implicit,
337 | style: yaml_style_t(style),
338 | }
339 | return true
340 | }
341 |
342 | // Create SEQUENCE-END.
343 | func yaml_sequence_end_event_initialize(event *yaml_event_t) bool {
344 | *event = yaml_event_t{
345 | typ: yaml_SEQUENCE_END_EVENT,
346 | }
347 | return true
348 | }
349 |
350 | // Create MAPPING-START.
351 | func yaml_mapping_start_event_initialize(event *yaml_event_t, anchor, tag []byte, implicit bool, style yaml_mapping_style_t) bool {
352 | *event = yaml_event_t{
353 | typ: yaml_MAPPING_START_EVENT,
354 | anchor: anchor,
355 | tag: tag,
356 | implicit: implicit,
357 | style: yaml_style_t(style),
358 | }
359 | return true
360 | }
361 |
362 | // Create MAPPING-END.
363 | func yaml_mapping_end_event_initialize(event *yaml_event_t) bool {
364 | *event = yaml_event_t{
365 | typ: yaml_MAPPING_END_EVENT,
366 | }
367 | return true
368 | }
369 |
370 | // Destroy an event object.
371 | func yaml_event_delete(event *yaml_event_t) {
372 | *event = yaml_event_t{}
373 | }
374 |
375 | ///*
376 | // * Create a document object.
377 | // */
378 | //
379 | //YAML_DECLARE(int)
380 | //yaml_document_initialize(document *yaml_document_t,
381 | // version_directive *yaml_version_directive_t,
382 | // tag_directives_start *yaml_tag_directive_t,
383 | // tag_directives_end *yaml_tag_directive_t,
384 | // start_implicit int, end_implicit int)
385 | //{
386 | // struct {
387 | // error yaml_error_type_t
388 | // } context
389 | // struct {
390 | // start *yaml_node_t
391 | // end *yaml_node_t
392 | // top *yaml_node_t
393 | // } nodes = { NULL, NULL, NULL }
394 | // version_directive_copy *yaml_version_directive_t = NULL
395 | // struct {
396 | // start *yaml_tag_directive_t
397 | // end *yaml_tag_directive_t
398 | // top *yaml_tag_directive_t
399 | // } tag_directives_copy = { NULL, NULL, NULL }
400 | // value yaml_tag_directive_t = { NULL, NULL }
401 | // mark yaml_mark_t = { 0, 0, 0 }
402 | //
403 | // assert(document) // Non-NULL document object is expected.
404 | // assert((tag_directives_start && tag_directives_end) ||
405 | // (tag_directives_start == tag_directives_end))
406 | // // Valid tag directives are expected.
407 | //
408 | // if (!STACK_INIT(&context, nodes, INITIAL_STACK_SIZE)) goto error
409 | //
410 | // if (version_directive) {
411 | // version_directive_copy = yaml_malloc(sizeof(yaml_version_directive_t))
412 | // if (!version_directive_copy) goto error
413 | // version_directive_copy.major = version_directive.major
414 | // version_directive_copy.minor = version_directive.minor
415 | // }
416 | //
417 | // if (tag_directives_start != tag_directives_end) {
418 | // tag_directive *yaml_tag_directive_t
419 | // if (!STACK_INIT(&context, tag_directives_copy, INITIAL_STACK_SIZE))
420 | // goto error
421 | // for (tag_directive = tag_directives_start
422 | // tag_directive != tag_directives_end; tag_directive ++) {
423 | // assert(tag_directive.handle)
424 | // assert(tag_directive.prefix)
425 | // if (!yaml_check_utf8(tag_directive.handle,
426 | // strlen((char *)tag_directive.handle)))
427 | // goto error
428 | // if (!yaml_check_utf8(tag_directive.prefix,
429 | // strlen((char *)tag_directive.prefix)))
430 | // goto error
431 | // value.handle = yaml_strdup(tag_directive.handle)
432 | // value.prefix = yaml_strdup(tag_directive.prefix)
433 | // if (!value.handle || !value.prefix) goto error
434 | // if (!PUSH(&context, tag_directives_copy, value))
435 | // goto error
436 | // value.handle = NULL
437 | // value.prefix = NULL
438 | // }
439 | // }
440 | //
441 | // DOCUMENT_INIT(*document, nodes.start, nodes.end, version_directive_copy,
442 | // tag_directives_copy.start, tag_directives_copy.top,
443 | // start_implicit, end_implicit, mark, mark)
444 | //
445 | // return 1
446 | //
447 | //error:
448 | // STACK_DEL(&context, nodes)
449 | // yaml_free(version_directive_copy)
450 | // while (!STACK_EMPTY(&context, tag_directives_copy)) {
451 | // value yaml_tag_directive_t = POP(&context, tag_directives_copy)
452 | // yaml_free(value.handle)
453 | // yaml_free(value.prefix)
454 | // }
455 | // STACK_DEL(&context, tag_directives_copy)
456 | // yaml_free(value.handle)
457 | // yaml_free(value.prefix)
458 | //
459 | // return 0
460 | //}
461 | //
462 | ///*
463 | // * Destroy a document object.
464 | // */
465 | //
466 | //YAML_DECLARE(void)
467 | //yaml_document_delete(document *yaml_document_t)
468 | //{
469 | // struct {
470 | // error yaml_error_type_t
471 | // } context
472 | // tag_directive *yaml_tag_directive_t
473 | //
474 | //	context.error = YAML_NO_ERROR // Eliminate a compiler warning.
475 | //
476 | // assert(document) // Non-NULL document object is expected.
477 | //
478 | // while (!STACK_EMPTY(&context, document.nodes)) {
479 | // node yaml_node_t = POP(&context, document.nodes)
480 | // yaml_free(node.tag)
481 | // switch (node.type) {
482 | // case YAML_SCALAR_NODE:
483 | // yaml_free(node.data.scalar.value)
484 | // break
485 | // case YAML_SEQUENCE_NODE:
486 | // STACK_DEL(&context, node.data.sequence.items)
487 | // break
488 | // case YAML_MAPPING_NODE:
489 | // STACK_DEL(&context, node.data.mapping.pairs)
490 | // break
491 | // default:
492 | // assert(0) // Should not happen.
493 | // }
494 | // }
495 | // STACK_DEL(&context, document.nodes)
496 | //
497 | // yaml_free(document.version_directive)
498 | // for (tag_directive = document.tag_directives.start
499 | // tag_directive != document.tag_directives.end
500 | // tag_directive++) {
501 | // yaml_free(tag_directive.handle)
502 | // yaml_free(tag_directive.prefix)
503 | // }
504 | // yaml_free(document.tag_directives.start)
505 | //
506 | // memset(document, 0, sizeof(yaml_document_t))
507 | //}
508 | //
509 | ///**
510 | // * Get a document node.
511 | // */
512 | //
513 | //YAML_DECLARE(yaml_node_t *)
514 | //yaml_document_get_node(document *yaml_document_t, index int)
515 | //{
516 | // assert(document) // Non-NULL document object is expected.
517 | //
518 | // if (index > 0 && document.nodes.start + index <= document.nodes.top) {
519 | // return document.nodes.start + index - 1
520 | // }
521 | // return NULL
522 | //}
523 | //
524 | ///**
525 | // * Get the root object.
526 | // */
527 | //
528 | //YAML_DECLARE(yaml_node_t *)
529 | //yaml_document_get_root_node(document *yaml_document_t)
530 | //{
531 | // assert(document) // Non-NULL document object is expected.
532 | //
533 | // if (document.nodes.top != document.nodes.start) {
534 | // return document.nodes.start
535 | // }
536 | // return NULL
537 | //}
538 | //
539 | ///*
540 | // * Add a scalar node to a document.
541 | // */
542 | //
543 | //YAML_DECLARE(int)
544 | //yaml_document_add_scalar(document *yaml_document_t,
545 | // tag *yaml_char_t, value *yaml_char_t, length int,
546 | // style yaml_scalar_style_t)
547 | //{
548 | // struct {
549 | // error yaml_error_type_t
550 | // } context
551 | // mark yaml_mark_t = { 0, 0, 0 }
552 | // tag_copy *yaml_char_t = NULL
553 | // value_copy *yaml_char_t = NULL
554 | // node yaml_node_t
555 | //
556 | // assert(document) // Non-NULL document object is expected.
557 | // assert(value) // Non-NULL value is expected.
558 | //
559 | // if (!tag) {
560 | // tag = (yaml_char_t *)YAML_DEFAULT_SCALAR_TAG
561 | // }
562 | //
563 | // if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error
564 | // tag_copy = yaml_strdup(tag)
565 | // if (!tag_copy) goto error
566 | //
567 | // if (length < 0) {
568 | // length = strlen((char *)value)
569 | // }
570 | //
571 | // if (!yaml_check_utf8(value, length)) goto error
572 | // value_copy = yaml_malloc(length+1)
573 | // if (!value_copy) goto error
574 | // memcpy(value_copy, value, length)
575 | // value_copy[length] = '\0'
576 | //
577 | // SCALAR_NODE_INIT(node, tag_copy, value_copy, length, style, mark, mark)
578 | // if (!PUSH(&context, document.nodes, node)) goto error
579 | //
580 | // return document.nodes.top - document.nodes.start
581 | //
582 | //error:
583 | // yaml_free(tag_copy)
584 | // yaml_free(value_copy)
585 | //
586 | // return 0
587 | //}
588 | //
589 | ///*
590 | // * Add a sequence node to a document.
591 | // */
592 | //
593 | //YAML_DECLARE(int)
594 | //yaml_document_add_sequence(document *yaml_document_t,
595 | // tag *yaml_char_t, style yaml_sequence_style_t)
596 | //{
597 | // struct {
598 | // error yaml_error_type_t
599 | // } context
600 | // mark yaml_mark_t = { 0, 0, 0 }
601 | // tag_copy *yaml_char_t = NULL
602 | // struct {
603 | // start *yaml_node_item_t
604 | // end *yaml_node_item_t
605 | // top *yaml_node_item_t
606 | // } items = { NULL, NULL, NULL }
607 | // node yaml_node_t
608 | //
609 | // assert(document) // Non-NULL document object is expected.
610 | //
611 | // if (!tag) {
612 | // tag = (yaml_char_t *)YAML_DEFAULT_SEQUENCE_TAG
613 | // }
614 | //
615 | // if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error
616 | // tag_copy = yaml_strdup(tag)
617 | // if (!tag_copy) goto error
618 | //
619 | // if (!STACK_INIT(&context, items, INITIAL_STACK_SIZE)) goto error
620 | //
621 | // SEQUENCE_NODE_INIT(node, tag_copy, items.start, items.end,
622 | // style, mark, mark)
623 | // if (!PUSH(&context, document.nodes, node)) goto error
624 | //
625 | // return document.nodes.top - document.nodes.start
626 | //
627 | //error:
628 | // STACK_DEL(&context, items)
629 | // yaml_free(tag_copy)
630 | //
631 | // return 0
632 | //}
633 | //
634 | ///*
635 | // * Add a mapping node to a document.
636 | // */
637 | //
638 | //YAML_DECLARE(int)
639 | //yaml_document_add_mapping(document *yaml_document_t,
640 | // tag *yaml_char_t, style yaml_mapping_style_t)
641 | //{
642 | // struct {
643 | // error yaml_error_type_t
644 | // } context
645 | // mark yaml_mark_t = { 0, 0, 0 }
646 | // tag_copy *yaml_char_t = NULL
647 | // struct {
648 | // start *yaml_node_pair_t
649 | // end *yaml_node_pair_t
650 | // top *yaml_node_pair_t
651 | // } pairs = { NULL, NULL, NULL }
652 | // node yaml_node_t
653 | //
654 | // assert(document) // Non-NULL document object is expected.
655 | //
656 | // if (!tag) {
657 | // tag = (yaml_char_t *)YAML_DEFAULT_MAPPING_TAG
658 | // }
659 | //
660 | // if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error
661 | // tag_copy = yaml_strdup(tag)
662 | // if (!tag_copy) goto error
663 | //
664 | // if (!STACK_INIT(&context, pairs, INITIAL_STACK_SIZE)) goto error
665 | //
666 | // MAPPING_NODE_INIT(node, tag_copy, pairs.start, pairs.end,
667 | // style, mark, mark)
668 | // if (!PUSH(&context, document.nodes, node)) goto error
669 | //
670 | // return document.nodes.top - document.nodes.start
671 | //
672 | //error:
673 | // STACK_DEL(&context, pairs)
674 | // yaml_free(tag_copy)
675 | //
676 | // return 0
677 | //}
678 | //
679 | ///*
680 | // * Append an item to a sequence node.
681 | // */
682 | //
683 | //YAML_DECLARE(int)
684 | //yaml_document_append_sequence_item(document *yaml_document_t,
685 | // sequence int, item int)
686 | //{
687 | // struct {
688 | // error yaml_error_type_t
689 | // } context
690 | //
691 | // assert(document) // Non-NULL document is required.
692 | // assert(sequence > 0
693 | // && document.nodes.start + sequence <= document.nodes.top)
694 | // // Valid sequence id is required.
695 | // assert(document.nodes.start[sequence-1].type == YAML_SEQUENCE_NODE)
696 | // // A sequence node is required.
697 | // assert(item > 0 && document.nodes.start + item <= document.nodes.top)
698 | // // Valid item id is required.
699 | //
700 | // if (!PUSH(&context,
701 | // document.nodes.start[sequence-1].data.sequence.items, item))
702 | // return 0
703 | //
704 | // return 1
705 | //}
706 | //
707 | ///*
708 | // * Append a pair of a key and a value to a mapping node.
709 | // */
710 | //
711 | //YAML_DECLARE(int)
712 | //yaml_document_append_mapping_pair(document *yaml_document_t,
713 | // mapping int, key int, value int)
714 | //{
715 | // struct {
716 | // error yaml_error_type_t
717 | // } context
718 | //
719 | // pair yaml_node_pair_t
720 | //
721 | // assert(document) // Non-NULL document is required.
722 | // assert(mapping > 0
723 | // && document.nodes.start + mapping <= document.nodes.top)
724 | // // Valid mapping id is required.
725 | // assert(document.nodes.start[mapping-1].type == YAML_MAPPING_NODE)
726 | // // A mapping node is required.
727 | // assert(key > 0 && document.nodes.start + key <= document.nodes.top)
728 | // // Valid key id is required.
729 | // assert(value > 0 && document.nodes.start + value <= document.nodes.top)
730 | // // Valid value id is required.
731 | //
732 | // pair.key = key
733 | // pair.value = value
734 | //
735 | // if (!PUSH(&context,
736 | // document.nodes.start[mapping-1].data.mapping.pairs, pair))
737 | // return 0
738 | //
739 | // return 1
740 | //}
741 | //
742 | //
743 |
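// For orientation, a minimal sketch (not part of this package; the helper name
// and shape are illustrative) of what a Go counterpart of the commented-out
// yaml_document_add_scalar above could look like: append a node to
// document.nodes (see yamlh.go) and return its 1-based id, mirroring the
// "top - start" return value of the C code.
//
//	func add_scalar_node(d *yaml_document_t, tag, value []byte, style yaml_scalar_style_t) int {
//		n := yaml_node_t{typ: yaml_SCALAR_NODE, tag: tag}
//		n.scalar.value = value       // scalar payload
//		n.scalar.length = len(value) // length tracked separately, as in the C struct
//		n.scalar.style = style       // requested scalar style
//		d.nodes = append(d.nodes, n) // push onto the document's node stack
//		return len(d.nodes)          // node ids are 1-based
//	}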
--------------------------------------------------------------------------------
/mattertee/vendor/gopkg.in/yaml.v2/yamlh.go:
--------------------------------------------------------------------------------
1 | package yaml
2 |
3 | import (
4 | "io"
5 | )
6 |
7 | // The version directive data.
8 | type yaml_version_directive_t struct {
9 | major int8 // The major version number.
10 | minor int8 // The minor version number.
11 | }
12 |
13 | // The tag directive data.
14 | type yaml_tag_directive_t struct {
15 | handle []byte // The tag handle.
16 | prefix []byte // The tag prefix.
17 | }
18 |
19 | type yaml_encoding_t int
20 |
21 | // The stream encoding.
22 | const (
23 | // Let the parser choose the encoding.
24 | yaml_ANY_ENCODING yaml_encoding_t = iota
25 |
26 | yaml_UTF8_ENCODING // The default UTF-8 encoding.
27 | yaml_UTF16LE_ENCODING // The UTF-16-LE encoding with BOM.
28 | yaml_UTF16BE_ENCODING // The UTF-16-BE encoding with BOM.
29 | )
30 |
31 | type yaml_break_t int
32 |
33 | // Line break types.
34 | const (
35 | // Let the parser choose the break type.
36 | yaml_ANY_BREAK yaml_break_t = iota
37 |
38 | yaml_CR_BREAK // Use CR for line breaks (Mac style).
39 | yaml_LN_BREAK // Use LN for line breaks (Unix style).
40 | yaml_CRLN_BREAK // Use CR LN for line breaks (DOS style).
41 | )
42 |
43 | type yaml_error_type_t int
44 |
45 | // Many bad things could happen with the parser and emitter.
46 | const (
47 | // No error is produced.
48 | yaml_NO_ERROR yaml_error_type_t = iota
49 |
50 | yaml_MEMORY_ERROR // Cannot allocate or reallocate a block of memory.
51 | yaml_READER_ERROR // Cannot read or decode the input stream.
52 | yaml_SCANNER_ERROR // Cannot scan the input stream.
53 | yaml_PARSER_ERROR // Cannot parse the input stream.
54 | yaml_COMPOSER_ERROR // Cannot compose a YAML document.
55 | yaml_WRITER_ERROR // Cannot write to the output stream.
56 | yaml_EMITTER_ERROR // Cannot emit a YAML stream.
57 | )
58 |
59 | // The pointer position.
60 | type yaml_mark_t struct {
61 | index int // The position index.
62 | line int // The position line.
63 | column int // The position column.
64 | }
65 |
66 | // Node Styles
67 |
68 | type yaml_style_t int8
69 |
70 | type yaml_scalar_style_t yaml_style_t
71 |
72 | // Scalar styles.
73 | const (
74 | // Let the emitter choose the style.
75 | yaml_ANY_SCALAR_STYLE yaml_scalar_style_t = iota
76 |
77 | yaml_PLAIN_SCALAR_STYLE // The plain scalar style.
78 | yaml_SINGLE_QUOTED_SCALAR_STYLE // The single-quoted scalar style.
79 | yaml_DOUBLE_QUOTED_SCALAR_STYLE // The double-quoted scalar style.
80 | yaml_LITERAL_SCALAR_STYLE // The literal scalar style.
81 | yaml_FOLDED_SCALAR_STYLE // The folded scalar style.
82 | )
83 |
84 | type yaml_sequence_style_t yaml_style_t
85 |
86 | // Sequence styles.
87 | const (
88 | // Let the emitter choose the style.
89 | yaml_ANY_SEQUENCE_STYLE yaml_sequence_style_t = iota
90 |
91 | yaml_BLOCK_SEQUENCE_STYLE // The block sequence style.
92 | yaml_FLOW_SEQUENCE_STYLE // The flow sequence style.
93 | )
94 |
95 | type yaml_mapping_style_t yaml_style_t
96 |
97 | // Mapping styles.
98 | const (
99 | // Let the emitter choose the style.
100 | yaml_ANY_MAPPING_STYLE yaml_mapping_style_t = iota
101 |
102 | yaml_BLOCK_MAPPING_STYLE // The block mapping style.
103 | yaml_FLOW_MAPPING_STYLE // The flow mapping style.
104 | )
105 |
106 | // Tokens
107 |
108 | type yaml_token_type_t int
109 |
110 | // Token types.
111 | const (
112 | // An empty token.
113 | yaml_NO_TOKEN yaml_token_type_t = iota
114 |
115 | yaml_STREAM_START_TOKEN // A STREAM-START token.
116 | yaml_STREAM_END_TOKEN // A STREAM-END token.
117 |
118 | yaml_VERSION_DIRECTIVE_TOKEN // A VERSION-DIRECTIVE token.
119 | yaml_TAG_DIRECTIVE_TOKEN // A TAG-DIRECTIVE token.
120 | yaml_DOCUMENT_START_TOKEN // A DOCUMENT-START token.
121 | yaml_DOCUMENT_END_TOKEN // A DOCUMENT-END token.
122 |
123 | yaml_BLOCK_SEQUENCE_START_TOKEN // A BLOCK-SEQUENCE-START token.
124 | yaml_BLOCK_MAPPING_START_TOKEN // A BLOCK-MAPPING-START token.
125 | yaml_BLOCK_END_TOKEN // A BLOCK-END token.
126 |
127 | yaml_FLOW_SEQUENCE_START_TOKEN // A FLOW-SEQUENCE-START token.
128 | yaml_FLOW_SEQUENCE_END_TOKEN // A FLOW-SEQUENCE-END token.
129 | yaml_FLOW_MAPPING_START_TOKEN // A FLOW-MAPPING-START token.
130 | yaml_FLOW_MAPPING_END_TOKEN // A FLOW-MAPPING-END token.
131 |
132 | yaml_BLOCK_ENTRY_TOKEN // A BLOCK-ENTRY token.
133 | yaml_FLOW_ENTRY_TOKEN // A FLOW-ENTRY token.
134 | yaml_KEY_TOKEN // A KEY token.
135 | yaml_VALUE_TOKEN // A VALUE token.
136 |
137 | yaml_ALIAS_TOKEN // An ALIAS token.
138 | yaml_ANCHOR_TOKEN // An ANCHOR token.
139 | yaml_TAG_TOKEN // A TAG token.
140 | yaml_SCALAR_TOKEN // A SCALAR token.
141 | )
142 |
143 | func (tt yaml_token_type_t) String() string {
144 | switch tt {
145 | case yaml_NO_TOKEN:
146 | return "yaml_NO_TOKEN"
147 | case yaml_STREAM_START_TOKEN:
148 | return "yaml_STREAM_START_TOKEN"
149 | case yaml_STREAM_END_TOKEN:
150 | return "yaml_STREAM_END_TOKEN"
151 | case yaml_VERSION_DIRECTIVE_TOKEN:
152 | return "yaml_VERSION_DIRECTIVE_TOKEN"
153 | case yaml_TAG_DIRECTIVE_TOKEN:
154 | return "yaml_TAG_DIRECTIVE_TOKEN"
155 | case yaml_DOCUMENT_START_TOKEN:
156 | return "yaml_DOCUMENT_START_TOKEN"
157 | case yaml_DOCUMENT_END_TOKEN:
158 | return "yaml_DOCUMENT_END_TOKEN"
159 | case yaml_BLOCK_SEQUENCE_START_TOKEN:
160 | return "yaml_BLOCK_SEQUENCE_START_TOKEN"
161 | case yaml_BLOCK_MAPPING_START_TOKEN:
162 | return "yaml_BLOCK_MAPPING_START_TOKEN"
163 | case yaml_BLOCK_END_TOKEN:
164 | return "yaml_BLOCK_END_TOKEN"
165 | case yaml_FLOW_SEQUENCE_START_TOKEN:
166 | return "yaml_FLOW_SEQUENCE_START_TOKEN"
167 | case yaml_FLOW_SEQUENCE_END_TOKEN:
168 | return "yaml_FLOW_SEQUENCE_END_TOKEN"
169 | case yaml_FLOW_MAPPING_START_TOKEN:
170 | return "yaml_FLOW_MAPPING_START_TOKEN"
171 | case yaml_FLOW_MAPPING_END_TOKEN:
172 | return "yaml_FLOW_MAPPING_END_TOKEN"
173 | case yaml_BLOCK_ENTRY_TOKEN:
174 | return "yaml_BLOCK_ENTRY_TOKEN"
175 | case yaml_FLOW_ENTRY_TOKEN:
176 | return "yaml_FLOW_ENTRY_TOKEN"
177 | case yaml_KEY_TOKEN:
178 | return "yaml_KEY_TOKEN"
179 | case yaml_VALUE_TOKEN:
180 | return "yaml_VALUE_TOKEN"
181 | case yaml_ALIAS_TOKEN:
182 | return "yaml_ALIAS_TOKEN"
183 | case yaml_ANCHOR_TOKEN:
184 | return "yaml_ANCHOR_TOKEN"
185 | case yaml_TAG_TOKEN:
186 | return "yaml_TAG_TOKEN"
187 | case yaml_SCALAR_TOKEN:
188 | return "yaml_SCALAR_TOKEN"
189 | }
190 | return ""
191 | }
192 |
193 | // The token structure.
194 | type yaml_token_t struct {
195 | // The token type.
196 | typ yaml_token_type_t
197 |
198 | // The start/end of the token.
199 | start_mark, end_mark yaml_mark_t
200 |
201 | // The stream encoding (for yaml_STREAM_START_TOKEN).
202 | encoding yaml_encoding_t
203 |
204 | // The alias/anchor/scalar value or tag/tag directive handle
205 | // (for yaml_ALIAS_TOKEN, yaml_ANCHOR_TOKEN, yaml_SCALAR_TOKEN, yaml_TAG_TOKEN, yaml_TAG_DIRECTIVE_TOKEN).
206 | value []byte
207 |
208 | // The tag suffix (for yaml_TAG_TOKEN).
209 | suffix []byte
210 |
211 | // The tag directive prefix (for yaml_TAG_DIRECTIVE_TOKEN).
212 | prefix []byte
213 |
214 | // The scalar style (for yaml_SCALAR_TOKEN).
215 | style yaml_scalar_style_t
216 |
217 | // The version directive major/minor (for yaml_VERSION_DIRECTIVE_TOKEN).
218 | major, minor int8
219 | }
220 |
221 | // Events
222 |
223 | type yaml_event_type_t int8
224 |
225 | // Event types.
226 | const (
227 | // An empty event.
228 | yaml_NO_EVENT yaml_event_type_t = iota
229 |
230 | yaml_STREAM_START_EVENT // A STREAM-START event.
231 | yaml_STREAM_END_EVENT // A STREAM-END event.
232 | yaml_DOCUMENT_START_EVENT // A DOCUMENT-START event.
233 | yaml_DOCUMENT_END_EVENT // A DOCUMENT-END event.
234 | yaml_ALIAS_EVENT // An ALIAS event.
235 | yaml_SCALAR_EVENT // A SCALAR event.
236 | yaml_SEQUENCE_START_EVENT // A SEQUENCE-START event.
237 | yaml_SEQUENCE_END_EVENT // A SEQUENCE-END event.
238 | yaml_MAPPING_START_EVENT // A MAPPING-START event.
239 | yaml_MAPPING_END_EVENT // A MAPPING-END event.
240 | )
241 |
242 | // The event structure.
243 | type yaml_event_t struct {
244 |
245 | // The event type.
246 | typ yaml_event_type_t
247 |
248 | // The start and end of the event.
249 | start_mark, end_mark yaml_mark_t
250 |
251 | // The document encoding (for yaml_STREAM_START_EVENT).
252 | encoding yaml_encoding_t
253 |
254 | // The version directive (for yaml_DOCUMENT_START_EVENT).
255 | version_directive *yaml_version_directive_t
256 |
257 | // The list of tag directives (for yaml_DOCUMENT_START_EVENT).
258 | tag_directives []yaml_tag_directive_t
259 |
260 | // The anchor (for yaml_SCALAR_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT, yaml_ALIAS_EVENT).
261 | anchor []byte
262 |
263 | // The tag (for yaml_SCALAR_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT).
264 | tag []byte
265 |
266 | // The scalar value (for yaml_SCALAR_EVENT).
267 | value []byte
268 |
269 | // Is the document start/end indicator implicit, or the tag optional?
270 | // (for yaml_DOCUMENT_START_EVENT, yaml_DOCUMENT_END_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT, yaml_SCALAR_EVENT).
271 | implicit bool
272 |
273 | // Is the tag optional for any non-plain style? (for yaml_SCALAR_EVENT).
274 | quoted_implicit bool
275 |
276 | // The style (for yaml_SCALAR_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT).
277 | style yaml_style_t
278 | }
279 |
280 | func (e *yaml_event_t) scalar_style() yaml_scalar_style_t { return yaml_scalar_style_t(e.style) }
281 | func (e *yaml_event_t) sequence_style() yaml_sequence_style_t { return yaml_sequence_style_t(e.style) }
282 | func (e *yaml_event_t) mapping_style() yaml_mapping_style_t { return yaml_mapping_style_t(e.style) }
283 |
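// The event carries a single generic style field; the three accessors above
// reinterpret it according to the event type. A brief sketch (ev is a
// hypothetical *yaml_event_t) of how a consumer might branch on it:
//
//	switch ev.typ {
//	case yaml_SCALAR_EVENT:
//		plain := ev.scalar_style() == yaml_PLAIN_SCALAR_STYLE // plain vs. quoted scalar
//		_ = plain
//	case yaml_SEQUENCE_START_EVENT:
//		flow := ev.sequence_style() == yaml_FLOW_SEQUENCE_STYLE // flow vs. block form
//		_ = flow
//	case yaml_MAPPING_START_EVENT:
//		flow := ev.mapping_style() == yaml_FLOW_MAPPING_STYLE // flow vs. block form
//		_ = flow
//	}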
284 | // Nodes
285 |
286 | const (
287 | yaml_NULL_TAG = "tag:yaml.org,2002:null" // The tag !!null with the only possible value: null.
288 | yaml_BOOL_TAG = "tag:yaml.org,2002:bool" // The tag !!bool with the values: true and false.
289 | yaml_STR_TAG = "tag:yaml.org,2002:str" // The tag !!str for string values.
290 | yaml_INT_TAG = "tag:yaml.org,2002:int" // The tag !!int for integer values.
291 | yaml_FLOAT_TAG = "tag:yaml.org,2002:float" // The tag !!float for float values.
292 | yaml_TIMESTAMP_TAG = "tag:yaml.org,2002:timestamp" // The tag !!timestamp for date and time values.
293 |
294 | yaml_SEQ_TAG = "tag:yaml.org,2002:seq" // The tag !!seq is used to denote sequences.
295 | yaml_MAP_TAG = "tag:yaml.org,2002:map" // The tag !!map is used to denote mapping.
296 |
297 | // Not in original libyaml.
298 | yaml_BINARY_TAG = "tag:yaml.org,2002:binary"
299 | yaml_MERGE_TAG = "tag:yaml.org,2002:merge"
300 |
301 | yaml_DEFAULT_SCALAR_TAG = yaml_STR_TAG // The default scalar tag is !!str.
302 | yaml_DEFAULT_SEQUENCE_TAG = yaml_SEQ_TAG // The default sequence tag is !!seq.
303 | yaml_DEFAULT_MAPPING_TAG = yaml_MAP_TAG // The default mapping tag is !!map.
304 | )
305 |
306 | type yaml_node_type_t int
307 |
308 | // Node types.
309 | const (
310 | // An empty node.
311 | yaml_NO_NODE yaml_node_type_t = iota
312 |
313 | yaml_SCALAR_NODE // A scalar node.
314 | yaml_SEQUENCE_NODE // A sequence node.
315 | yaml_MAPPING_NODE // A mapping node.
316 | )
317 |
318 | // An element of a sequence node.
319 | type yaml_node_item_t int
320 |
321 | // An element of a mapping node.
322 | type yaml_node_pair_t struct {
323 | key int // The key of the element.
324 | value int // The value of the element.
325 | }
326 |
327 | // The node structure.
328 | type yaml_node_t struct {
329 | typ yaml_node_type_t // The node type.
330 | tag []byte // The node tag.
331 |
332 | // The node data.
333 |
334 | // The scalar parameters (for yaml_SCALAR_NODE).
335 | scalar struct {
336 | value []byte // The scalar value.
337 | length int // The length of the scalar value.
338 | style yaml_scalar_style_t // The scalar style.
339 | }
340 |
341 | // The sequence parameters (for YAML_SEQUENCE_NODE).
342 | sequence struct {
343 | items_data []yaml_node_item_t // The stack of sequence items.
344 | style yaml_sequence_style_t // The sequence style.
345 | }
346 |
347 | // The mapping parameters (for yaml_MAPPING_NODE).
348 | mapping struct {
349 | pairs_data []yaml_node_pair_t // The stack of mapping pairs (key, value).
350 | pairs_start *yaml_node_pair_t // The beginning of the stack.
351 | pairs_end *yaml_node_pair_t // The end of the stack.
352 | pairs_top *yaml_node_pair_t // The top of the stack.
353 | style yaml_mapping_style_t // The mapping style.
354 | }
355 |
356 | start_mark yaml_mark_t // The beginning of the node.
357 | end_mark yaml_mark_t // The end of the node.
358 |
359 | }
360 |
361 | // The document structure.
362 | type yaml_document_t struct {
363 |
364 | // The document nodes.
365 | nodes []yaml_node_t
366 |
367 | // The version directive.
368 | version_directive *yaml_version_directive_t
369 |
370 | // The list of tag directives.
371 | tag_directives_data []yaml_tag_directive_t
372 | tag_directives_start int // The beginning of the tag directives list.
373 | tag_directives_end int // The end of the tag directives list.
374 |
375 | start_implicit int // Is the document start indicator implicit?
376 | end_implicit int // Is the document end indicator implicit?
377 |
378 | // The start/end of the document.
379 | start_mark, end_mark yaml_mark_t
380 | }
381 |
382 | // The prototype of a read handler.
383 | //
384 | // The read handler is called when the parser needs to read more bytes from the
385 | // source. The handler should write no more than size bytes to the buffer.
386 | // The number of written bytes should be set to the size_read variable.
387 | //
388 | // [in,out] data A pointer to an application data specified by
389 | // yaml_parser_set_input().
390 | // [out] buffer The buffer to write the data from the source.
391 | // [in] size The size of the buffer.
392 | // [out] size_read The actual number of bytes read from the source.
393 | //
394 | // On success, the handler should return 1. If the handler failed,
395 | // the returned value should be 0. On EOF, the handler should set the
396 | // size_read to 0 and return 1.
397 | type yaml_read_handler_t func(parser *yaml_parser_t, buffer []byte) (n int, err error)
398 |
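// In this Go port the C-style size_read out-parameter is replaced by the
// (n, err) return values of the handler. A minimal sketch of a conforming
// handler that pulls bytes from the parser's io.Reader input (the function
// name is illustrative, not part of the package API):
//
//	func reader_read_handler(parser *yaml_parser_t, buffer []byte) (n int, err error) {
//		// Read at most len(buffer) bytes; an io.EOF error signals end of input.
//		return parser.input_file.Read(buffer)
//	}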
399 | // This structure holds information about a potential simple key.
400 | type yaml_simple_key_t struct {
401 | possible bool // Is a simple key possible?
402 | required bool // Is a simple key required?
403 | token_number int // The number of the token.
404 | mark yaml_mark_t // The position mark.
405 | }
406 |
407 | // The states of the parser.
408 | type yaml_parser_state_t int
409 |
410 | const (
411 | yaml_PARSE_STREAM_START_STATE yaml_parser_state_t = iota
412 |
413 | yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE // Expect the beginning of an implicit document.
414 | yaml_PARSE_DOCUMENT_START_STATE // Expect DOCUMENT-START.
415 | yaml_PARSE_DOCUMENT_CONTENT_STATE // Expect the content of a document.
416 | yaml_PARSE_DOCUMENT_END_STATE // Expect DOCUMENT-END.
417 | yaml_PARSE_BLOCK_NODE_STATE // Expect a block node.
418 | yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE // Expect a block node or indentless sequence.
419 | yaml_PARSE_FLOW_NODE_STATE // Expect a flow node.
420 | yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE // Expect the first entry of a block sequence.
421 | yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE // Expect an entry of a block sequence.
422 | yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE // Expect an entry of an indentless sequence.
423 | yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE // Expect the first key of a block mapping.
424 | yaml_PARSE_BLOCK_MAPPING_KEY_STATE // Expect a block mapping key.
425 | yaml_PARSE_BLOCK_MAPPING_VALUE_STATE // Expect a block mapping value.
426 | yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE // Expect the first entry of a flow sequence.
427 | yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE // Expect an entry of a flow sequence.
428 | yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE // Expect a key of an ordered mapping.
429 | yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE // Expect a value of an ordered mapping.
430 | yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE // Expect the end of an ordered mapping entry.
431 | yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE // Expect the first key of a flow mapping.
432 | yaml_PARSE_FLOW_MAPPING_KEY_STATE // Expect a key of a flow mapping.
433 | yaml_PARSE_FLOW_MAPPING_VALUE_STATE // Expect a value of a flow mapping.
434 | yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE // Expect an empty value of a flow mapping.
435 | yaml_PARSE_END_STATE // Expect nothing.
436 | )
437 |
438 | func (ps yaml_parser_state_t) String() string {
439 | switch ps {
440 | case yaml_PARSE_STREAM_START_STATE:
441 | return "yaml_PARSE_STREAM_START_STATE"
442 | case yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE:
443 | return "yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE"
444 | case yaml_PARSE_DOCUMENT_START_STATE:
445 | return "yaml_PARSE_DOCUMENT_START_STATE"
446 | case yaml_PARSE_DOCUMENT_CONTENT_STATE:
447 | return "yaml_PARSE_DOCUMENT_CONTENT_STATE"
448 | case yaml_PARSE_DOCUMENT_END_STATE:
449 | return "yaml_PARSE_DOCUMENT_END_STATE"
450 | case yaml_PARSE_BLOCK_NODE_STATE:
451 | return "yaml_PARSE_BLOCK_NODE_STATE"
452 | case yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE:
453 | return "yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE"
454 | case yaml_PARSE_FLOW_NODE_STATE:
455 | return "yaml_PARSE_FLOW_NODE_STATE"
456 | case yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE:
457 | return "yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE"
458 | case yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE:
459 | return "yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE"
460 | case yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE:
461 | return "yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE"
462 | case yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE:
463 | return "yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE"
464 | case yaml_PARSE_BLOCK_MAPPING_KEY_STATE:
465 | return "yaml_PARSE_BLOCK_MAPPING_KEY_STATE"
466 | case yaml_PARSE_BLOCK_MAPPING_VALUE_STATE:
467 | return "yaml_PARSE_BLOCK_MAPPING_VALUE_STATE"
468 | case yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE:
469 | return "yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE"
470 | case yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE:
471 | return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE"
472 | case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE:
473 | return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE"
474 | case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE:
475 | return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE"
476 | case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE:
477 | return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE"
478 | case yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE:
479 | return "yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE"
480 | case yaml_PARSE_FLOW_MAPPING_KEY_STATE:
481 | return "yaml_PARSE_FLOW_MAPPING_KEY_STATE"
482 | case yaml_PARSE_FLOW_MAPPING_VALUE_STATE:
483 | return "yaml_PARSE_FLOW_MAPPING_VALUE_STATE"
484 | case yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE:
485 | return "yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE"
486 | case yaml_PARSE_END_STATE:
487 | return "yaml_PARSE_END_STATE"
488 | }
489 | return ""
490 | }
491 |
492 | // This structure holds aliases data.
493 | type yaml_alias_data_t struct {
494 | anchor []byte // The anchor.
495 | index int // The node id.
496 | mark yaml_mark_t // The anchor mark.
497 | }
498 |
499 | // The parser structure.
500 | //
501 | // All members are internal. Manage the structure using the
502 | // yaml_parser_ family of functions.
503 | type yaml_parser_t struct {
504 |
505 | // Error handling
506 |
507 | error yaml_error_type_t // Error type.
508 |
509 | problem string // Error description.
510 |
511 | // The byte about which the problem occurred.
512 | problem_offset int
513 | problem_value int
514 | problem_mark yaml_mark_t
515 |
516 | // The error context.
517 | context string
518 | context_mark yaml_mark_t
519 |
520 | // Reader stuff
521 |
522 | read_handler yaml_read_handler_t // Read handler.
523 |
524 | input_file io.Reader // File input data.
525 | input []byte // String input data.
526 | input_pos int
527 |
528 | eof bool // EOF flag
529 |
530 | buffer []byte // The working buffer.
531 | buffer_pos int // The current position of the buffer.
532 |
533 | unread int // The number of unread characters in the buffer.
534 |
535 | raw_buffer []byte // The raw buffer.
536 | raw_buffer_pos int // The current position of the buffer.
537 |
538 | encoding yaml_encoding_t // The input encoding.
539 |
540 | offset int // The offset of the current position (in bytes).
541 | mark yaml_mark_t // The mark of the current position.
542 |
543 | // Scanner stuff
544 |
545 | stream_start_produced bool // Have we started to scan the input stream?
546 | stream_end_produced bool // Have we reached the end of the input stream?
547 |
548 | flow_level int // The number of unclosed '[' and '{' indicators.
549 |
550 | tokens []yaml_token_t // The tokens queue.
551 | tokens_head int // The head of the tokens queue.
552 | tokens_parsed int // The number of tokens fetched from the queue.
553 | token_available bool // Does the tokens queue contain a token ready for dequeueing?
554 |
555 | indent int // The current indentation level.
556 | indents []int // The indentation levels stack.
557 |
558 | simple_key_allowed bool // May a simple key occur at the current position?
559 | simple_keys []yaml_simple_key_t // The stack of simple keys.
560 |
561 | // Parser stuff
562 |
563 | state yaml_parser_state_t // The current parser state.
564 | states []yaml_parser_state_t // The parser states stack.
565 | marks []yaml_mark_t // The stack of marks.
566 | tag_directives []yaml_tag_directive_t // The list of TAG directives.
567 |
568 | // Dumper stuff
569 |
570 | aliases []yaml_alias_data_t // The alias data.
571 |
572 | document *yaml_document_t // The currently parsed document.
573 | }
574 |
575 | // Emitter Definitions
576 |
577 | // The prototype of a write handler.
578 | //
579 | // The write handler is called when the emitter needs to flush the accumulated
580 | // characters to the output. The handler should write @a size bytes of the
581 | // @a buffer to the output.
582 | //
583 | // @param[in,out] data A pointer to an application data specified by
584 | // yaml_emitter_set_output().
585 | // @param[in] buffer The buffer with bytes to be written.
586 | // @param[in] size The size of the buffer.
587 | //
588 | // @returns On success, the handler should return @c 1. If the handler failed,
589 | // the returned value should be @c 0.
590 | //
591 | type yaml_write_handler_t func(emitter *yaml_emitter_t, buffer []byte) error
592 |
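// The write handler is the mirror image: it receives the emitter and a byte
// slice instead of the C-style (data, buffer, size) triple, and reports
// failure through the returned error. A minimal sketch of a handler that
// forwards to the emitter's io.Writer output (the function name is
// illustrative):
//
//	func writer_write_handler(emitter *yaml_emitter_t, buffer []byte) error {
//		_, err := emitter.output_file.Write(buffer) // flush the accumulated bytes
//		return err
//	}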
593 | type yaml_emitter_state_t int
594 |
595 | // The emitter states.
596 | const (
597 | // Expect STREAM-START.
598 | yaml_EMIT_STREAM_START_STATE yaml_emitter_state_t = iota
599 |
600 | yaml_EMIT_FIRST_DOCUMENT_START_STATE // Expect the first DOCUMENT-START or STREAM-END.
601 | yaml_EMIT_DOCUMENT_START_STATE // Expect DOCUMENT-START or STREAM-END.
602 | yaml_EMIT_DOCUMENT_CONTENT_STATE // Expect the content of a document.
603 | yaml_EMIT_DOCUMENT_END_STATE // Expect DOCUMENT-END.
604 | yaml_EMIT_FLOW_SEQUENCE_FIRST_ITEM_STATE // Expect the first item of a flow sequence.
605 | yaml_EMIT_FLOW_SEQUENCE_ITEM_STATE // Expect an item of a flow sequence.
606 | yaml_EMIT_FLOW_MAPPING_FIRST_KEY_STATE // Expect the first key of a flow mapping.
607 | yaml_EMIT_FLOW_MAPPING_KEY_STATE // Expect a key of a flow mapping.
608 | yaml_EMIT_FLOW_MAPPING_SIMPLE_VALUE_STATE // Expect a value for a simple key of a flow mapping.
609 | yaml_EMIT_FLOW_MAPPING_VALUE_STATE // Expect a value of a flow mapping.
610 | yaml_EMIT_BLOCK_SEQUENCE_FIRST_ITEM_STATE // Expect the first item of a block sequence.
611 | yaml_EMIT_BLOCK_SEQUENCE_ITEM_STATE // Expect an item of a block sequence.
612 | yaml_EMIT_BLOCK_MAPPING_FIRST_KEY_STATE // Expect the first key of a block mapping.
613 | yaml_EMIT_BLOCK_MAPPING_KEY_STATE // Expect the key of a block mapping.
614 | yaml_EMIT_BLOCK_MAPPING_SIMPLE_VALUE_STATE // Expect a value for a simple key of a block mapping.
615 | yaml_EMIT_BLOCK_MAPPING_VALUE_STATE // Expect a value of a block mapping.
616 | yaml_EMIT_END_STATE // Expect nothing.
617 | )
618 |
619 | // The emitter structure.
620 | //
621 | // All members are internal. Manage the structure using the @c yaml_emitter_
622 | // family of functions.
623 | type yaml_emitter_t struct {
624 |
625 | // Error handling
626 |
627 | error yaml_error_type_t // Error type.
628 | problem string // Error description.
629 |
630 | // Writer stuff
631 |
632 | write_handler yaml_write_handler_t // Write handler.
633 |
634 | output_buffer *[]byte // String output data.
635 | output_file io.Writer // File output data.
636 |
637 | buffer []byte // The working buffer.
638 | buffer_pos int // The current position of the buffer.
639 |
640 | raw_buffer []byte // The raw buffer.
641 | raw_buffer_pos int // The current position of the buffer.
642 |
643 | encoding yaml_encoding_t // The stream encoding.
644 |
645 | // Emitter stuff
646 |
647 | canonical bool // Is the output in the canonical style?
648 | best_indent int // The number of indentation spaces.
649 | best_width int // The preferred width of the output lines.
650 | unicode bool // Allow unescaped non-ASCII characters?
651 | line_break yaml_break_t // The preferred line break.
652 |
653 | state yaml_emitter_state_t // The current emitter state.
654 | states []yaml_emitter_state_t // The stack of states.
655 |
656 | events []yaml_event_t // The event queue.
657 | events_head int // The head of the event queue.
658 |
659 | indents []int // The stack of indentation levels.
660 |
661 | tag_directives []yaml_tag_directive_t // The list of tag directives.
662 |
663 | indent int // The current indentation level.
664 |
665 | flow_level int // The current flow level.
666 |
667 | root_context bool // Is it the document root context?
668 | sequence_context bool // Is it a sequence context?
669 | mapping_context bool // Is it a mapping context?
670 | simple_key_context bool // Is it a simple mapping key context?
671 |
672 | line int // The current line.
673 | column int // The current column.
674 | whitespace bool // Was the last character a whitespace?
675 | indention bool // Was the last character an indentation character (' ', '-', '?', ':')?
676 | open_ended bool // Is an explicit document end required?
677 |
678 | // Anchor analysis.
679 | anchor_data struct {
680 | anchor []byte // The anchor value.
681 | alias bool // Is it an alias?
682 | }
683 |
684 | // Tag analysis.
685 | tag_data struct {
686 | handle []byte // The tag handle.
687 | suffix []byte // The tag suffix.
688 | }
689 |
690 | // Scalar analysis.
691 | scalar_data struct {
692 | value []byte // The scalar value.
693 | multiline bool // Does the scalar contain line breaks?
694 | flow_plain_allowed bool // Can the scalar be expressed in the flow plain style?
695 | block_plain_allowed bool // Can the scalar be expressed in the block plain style?
696 | single_quoted_allowed bool // Can the scalar be expressed in the single quoted style?
697 | block_allowed bool // Can the scalar be expressed in the literal or folded styles?
698 | style yaml_scalar_style_t // The output style.
699 | }
700 |
701 | // Dumper stuff
702 |
703 | opened bool // Was the stream already opened?
704 | closed bool // Was the stream already closed?
705 |
706 | // The information associated with the document nodes.
707 | anchors *struct {
708 | references int // The number of references.
709 | anchor int // The anchor id.
710 | serialized bool // Has the node been emitted?
711 | }
712 |
713 | last_anchor_id int // The last assigned anchor id.
714 |
715 | document *yaml_document_t // The currently emitted document.
716 | }
717 |
--------------------------------------------------------------------------------