├── README.md ├── config.yml ├── config └── config.go ├── event └── event.go ├── main.go ├── seccomp └── profile.go └── vendor ├── github.com └── deckarep │ └── golang-set │ ├── LICENSE │ ├── README.md │ ├── set.go │ ├── threadsafe.go │ └── threadunsafe.go ├── gopkg.in └── yaml.v2 │ ├── LICENSE │ ├── LICENSE.libyaml │ ├── README.md │ ├── apic.go │ ├── decode.go │ ├── emitterc.go │ ├── encode.go │ ├── parserc.go │ ├── readerc.go │ ├── resolve.go │ ├── scannerc.go │ ├── sorter.go │ ├── writerc.go │ ├── yaml.go │ ├── yamlh.go │ └── yamlprivateh.go └── vendor.json /README.md: -------------------------------------------------------------------------------- 1 | # falco2seccomp 2 | 3 | This tool converts [Falco](http://www.sysdig.org/falco/) JSON logs into [Docker seccomp profiles](https://github.com/docker/docker/blob/master/docs/security/seccomp.md). 4 | 5 | The Falco rule this tool is designed to work with looks like: 6 | 7 | ```yaml 8 | - rule: container_syscall 9 | desc: Capture syscalls for any docker container 10 | priority: WARNING 11 | condition: container.id != host and syscall.type exists 12 | output: "%container.id:%syscall.type" 13 | ``` 14 | 15 | This tool was first introduced in [Using-Falco-to-secure-Docker-containers](https://http206.com/2016/07/01/Using-Falco-to-secure-Docker-containers/). 16 | -------------------------------------------------------------------------------- /config.yml: -------------------------------------------------------------------------------- 1 | defaultAction: SCMP_ACT_ERRNO 2 | architectures: 3 | - SCMP_ARCH_X86_64 4 | - SCMP_ARCH_X86 5 | # System calls from the default Docker ruleset; they should be blocked anyway 6 | defaultDeny: 7 | - acct 8 | - add_key 9 | - adjtimex 10 | - bpf 11 | - clock_adjtime 12 | - clock_settime 13 | - clone 14 | - create_module 15 | - delete_module 16 | - finit_module 17 | - get_kernel_syms 18 | - get_mempolicy 19 | - init_module 20 | - ioperm 21 | - iopl 22 | - kcmp 23 | - kexec_file_load 24 | - kexec_load 25 | - keyctl 26 | - lookup_dcookie 27 | - mbind 28 | - mount 29 | - move_pages 30 | - name_to_handle_at 31 | - nfsservctl 32 | - open_by_handle_at 33 | - perf_event_open 34 | - personality 35 | - pivot_root 36 | - process_vm_readv 37 | - process_vm_writev 38 | - ptrace 39 | - query_module 40 | - quotactl 41 | - reboot 42 | - request_key 43 | - set_mempolicy 44 | - setns 45 | - settimeofday 46 | - stime 47 | - swapon 48 | - swapoff 49 | - sysfs 50 | - _sysctl 51 | - umount 52 | - umount2 53 | - unshare 54 | - uselib 55 | - userfaultfd 56 | - ustat 57 | - vm86 58 | - vm86old 59 | --------------------------------------------------------------------------------
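The `config` and `seccomp` packages below can also be driven directly, without a Falco log. The following is a rough sketch (not part of the tool itself; the syscall names "read" and "write" are placeholders for illustration) of how a profile using the defaults above might be built programmatically:

```go
package main

import (
	"fmt"
	"log"

	"github.com/nevins-b/falco2seccomp/config"
	"github.com/nevins-b/falco2seccomp/seccomp"
)

func main() {
	// Mirror the defaults from config.yml by hand instead of calling config.LoadConfig.
	c := &config.Config{
		DefaultAction: "SCMP_ACT_ERRNO",
		Architectures: []string{"SCMP_ARCH_X86_64", "SCMP_ARCH_X86"},
	}

	p := seccomp.NewProfile(c)
	p.AllowSyscall("read")  // placeholder syscalls; normally they come from Falco events
	p.AllowSyscall("write")

	js, err := p.JSON()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(string(js))
	// Emits a Docker seccomp profile of the form:
	//   {"defaultAction": "SCMP_ACT_ERRNO",
	//    "architectures": ["SCMP_ARCH_X86_64", "SCMP_ARCH_X86"],
	//    "syscalls": [{"name": "read", "action": "SCMP_ACT_ALLOW", "args": []}, ...]}
	// (indentation abbreviated; Profile.JSON uses json.MarshalIndent)
}
```

The normal entry point is main.go below, which builds the same structure from a Falco event log via config.LoadConfig and event.EventParser.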
/config/config.go: -------------------------------------------------------------------------------- 1 | package config 2 | 3 | import ( 4 | "io/ioutil" 5 | "log" 6 | 7 | "github.com/deckarep/golang-set" 8 | 9 | "gopkg.in/yaml.v2" 10 | ) 11 | 12 | type Config struct { 13 | DefaultAction string `yaml:"defaultAction"` 14 | Architectures []string `yaml:"architectures"` 15 | DefaultDeny []string `yaml:"defaultDeny"` 16 | DenySet mapset.Set 17 | ContainerID string 18 | RuleName string 19 | } 20 | 21 | func LoadConfig(path, containerID, ruleName *string) *Config { 22 | 23 | c := Config{ 24 | ContainerID: *containerID, 25 | RuleName: *ruleName, 26 | DenySet: mapset.NewSet(), 27 | } 28 | data, err := ioutil.ReadFile(*path) 29 | if err != nil { 30 | log.Fatal(err) 31 | } 32 | 33 | err = yaml.Unmarshal(data, &c) 34 | if err != nil { 35 | log.Fatal(err) 36 | } 37 | for _, syscall := range c.DefaultDeny { 38 | c.DenySet.Add(syscall) 39 | } 40 | return &c 41 | } 42 | -------------------------------------------------------------------------------- /event/event.go: -------------------------------------------------------------------------------- 1 | package event 2 | 3 | import ( 4 | "bufio" 5 | "encoding/json" 6 | "log" 7 | "os" 8 | "regexp" 9 | 10 | "github.com/nevins-b/falco2seccomp/config" 11 | "github.com/nevins-b/falco2seccomp/seccomp" 12 | ) 13 | 14 | type EventParser struct { 15 | re *regexp.Regexp 16 | config *config.Config 17 | } 18 | 19 | type Event struct { 20 | Output string 21 | Priority string 22 | Rule string 23 | ContainerID string 24 | Syscall string 25 | } 26 | 27 | func NewEventParser(config *config.Config) *EventParser { 28 | return &EventParser{ 29 | // Regex is ugly, I'd love a better solution for this. 30 | re: regexp.MustCompile(".*?((?:[a-z0-9]*)):((?:[a-z\\_]+))"), 31 | config: config, 32 | } 33 | } 34 | 35 | func (parser *EventParser) newEvent(data []byte) *Event { 36 | e := &Event{} 37 | err := json.Unmarshal(data, &e) 38 | if err != nil { 39 | return nil 40 | } 41 | if e.Rule != parser.config.RuleName { 42 | return nil 43 | } 44 | matches := parser.re.FindStringSubmatch(e.Output) 45 | if len(matches) == 0 { 46 | return nil 47 | } 48 | e.ContainerID = matches[1] 49 | e.Syscall = matches[2] 50 | 51 | if parser.config.DenySet.Contains(e.Syscall) { 52 | return nil 53 | } 54 | if e.ContainerID != parser.config.ContainerID { 55 | return nil 56 | } 57 | return e 58 | } 59 | 60 | func (parse *EventParser) ParseLog(eventLog *string) []byte { 61 | p := seccomp.NewProfile(parse.config) 62 | 63 | file, err := os.Open(*eventLog) 64 | if err != nil { 65 | log.Fatal(err) 66 | } 67 | defer file.Close() 68 | 69 | scanner := bufio.NewScanner(file) 70 | for scanner.Scan() { 71 | e := parse.newEvent(scanner.Bytes()) 72 | if e != nil { 73 | p.AllowSyscall(e.Syscall) 74 | } 75 | } 76 | profile, err := p.JSON() 77 | if err != nil { 78 | return nil 79 | } 80 | return profile 81 | } 82 | -------------------------------------------------------------------------------- /main.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "flag" 5 | "fmt" 6 | "io/ioutil" 7 | 8 | "github.com/nevins-b/falco2seccomp/config" 9 | "github.com/nevins-b/falco2seccomp/event" 10 | ) 11 | 12 | func main() { 13 | 14 | eventLog := flag.String("log", "", "Falco event log file path; events must be in JSON format") 15 | configPath := flag.String("config", "config.yml", "Path to configuration") 16 | containerID := flag.String("container-id", "", "ID of container") 17 | ruleName := flag.String("rule-name", "container_syscall", "The name of the Falco rule") 18 | outFile := flag.String("out", "", "File to write the profile to; stdout if not specified") 19 | 20 | flag.Parse() 21 | 22 | c := config.LoadConfig(configPath, containerID, ruleName) 23 | eP := event.NewEventParser(c) 24 | js := eP.ParseLog(eventLog) 25 | if *outFile != "" { 26 | ioutil.WriteFile(*outFile, js, 0600) 27 | } else { 28 | fmt.Println(string(js)) 29 | } 30 | } 31 | -------------------------------------------------------------------------------- /seccomp/profile.go: -------------------------------------------------------------------------------- 1 | package seccomp 2 | 3 | import ( 4 | "encoding/json" 5 | "log" 6 | 7 | "github.com/deckarep/golang-set" 8 | "github.com/nevins-b/falco2seccomp/config" 9 | ) 10 | 11 | type Profile struct { 12 |
DefaultAction string `json:"defaultAction"` 13 | Architectures []string `json:"architectures"` 14 | Syscalls []Syscall `json:"syscalls"` 15 | syscallSet mapset.Set 16 | } 17 | 18 | type Syscall struct { 19 | Name string `json:"name"` 20 | Action string `json:"action"` 21 | Args []string `json:"args"` 22 | } 23 | 24 | func NewProfile(config *config.Config) *Profile { 25 | p := &Profile{ 26 | DefaultAction: config.DefaultAction, 27 | Architectures: config.Architectures, 28 | syscallSet: mapset.NewSet(), 29 | } 30 | 31 | return p 32 | } 33 | 34 | func (p *Profile) AllowSyscall(syscall string) { 35 | if p.syscallSet.Add(syscall) { 36 | s := Syscall{ 37 | Name: syscall, 38 | Action: "SCMP_ACT_ALLOW", 39 | Args: []string{}, 40 | } 41 | 42 | p.Syscalls = append(p.Syscalls, s) 43 | } 44 | } 45 | 46 | func (p *Profile) JSON() ([]byte, error) { 47 | if p.syscallSet.Cardinality() == 0 { 48 | p.Syscalls = []Syscall{} 49 | } 50 | log.Printf("Creating profile with %d syscalls allowed", p.syscallSet.Cardinality()) 51 | b, err := json.MarshalIndent(p, "", " ") 52 | if err != nil { 53 | return nil, err 54 | } 55 | return b, nil 56 | } 57 | -------------------------------------------------------------------------------- /vendor/github.com/deckarep/golang-set/LICENSE: -------------------------------------------------------------------------------- 1 | Open Source Initiative OSI - The MIT License (MIT):Licensing 2 | 3 | The MIT License (MIT) 4 | Copyright (c) 2013 Ralph Caraveo (deckarep@gmail.com) 5 | 6 | Permission is hereby granted, free of charge, to any person obtaining a copy of 7 | this software and associated documentation files (the "Software"), to deal in 8 | the Software without restriction, including without limitation the rights to 9 | use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies 10 | of the Software, and to permit persons to whom the Software is furnished to do 11 | so, subject to the following conditions: 12 | 13 | The above copyright notice and this permission notice shall be included in all 14 | copies or substantial portions of the Software. 15 | 16 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 17 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 18 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 19 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 20 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 21 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 22 | SOFTWARE. -------------------------------------------------------------------------------- /vendor/github.com/deckarep/golang-set/README.md: -------------------------------------------------------------------------------- 1 | [![Build Status](https://travis-ci.org/deckarep/golang-set.png?branch=master)](https://travis-ci.org/deckarep/golang-set) 2 | [![GoDoc](https://godoc.org/github.com/deckarep/golang-set?status.png)](http://godoc.org/github.com/deckarep/golang-set) 3 | 4 | ## golang-set 5 | 6 | 7 | The missing set collection for the Go language. Until Go has sets built-in...use this. 8 | 9 | Coming from Python one of the things I miss is the superbly wonderful set collection. This is my attempt to mimic the primary features of the set from Python. 10 | You can of course argue that there is no need for a set in Go, otherwise the creators would have added one to the standard library. 
To those I say simply ignore this repository 11 | and carry-on and to the rest that find this useful please contribute in helping me make it better by: 12 | 13 | * Helping to make more idiomatic improvements to the code. 14 | * Helping to increase the performance of it. ~~(So far, no attempt has been made, but since it uses a map internally, I expect it to be mostly performant.)~~ 15 | * Helping to make the unit-tests more robust and kick-ass. 16 | * Helping to fill in the [documentation.](http://godoc.org/github.com/deckarep/golang-set) 17 | * Simply offering feedback and suggestions. (Positive, constructive feedback is appreciated.) 18 | 19 | I have to give some credit for helping seed the idea with this post on [stackoverflow.](http://programmers.stackexchange.com/questions/177428/sets-data-structure-in-golang) 20 | 21 | *Update* - as of 3/9/2014, you can use a compile-time generic version of this package in the [gen](http://clipperhouse.github.io/gen/) framework. This framework allows you to use the golang-set in a completely generic and type-safe way by allowing you to generate a supporting .go file based on your custom types. 22 | 23 | ## Features (as of 9/22/2014) 24 | 25 | * a CartesianProduct() method has been added with unit-tests: [Read more about the cartesian product](http://en.wikipedia.org/wiki/Cartesian_product) 26 | 27 | ## Features (as of 9/15/2014) 28 | 29 | * a PowerSet() method has been added with unit-tests: [Read more about the Power set](http://en.wikipedia.org/wiki/Power_set) 30 | 31 | ## Features (as of 4/22/2014) 32 | 33 | * One common interface to both implementations 34 | * Two set implementations to choose from 35 | * a thread-safe implementation designed for concurrent use 36 | * a non-thread-safe implementation designed for performance 37 | * 75 benchmarks for both implementations 38 | * 35 unit tests for both implementations 39 | * 14 concurrent tests for the thread-safe implementation 40 | 41 | 42 | 43 | Please see the unit test file for additional usage examples. The Python set documentation will also do a better job than I can of explaining how a set typically [works.](http://docs.python.org/2/library/sets.html) Please keep in mind 44 | however that the Python set is a built-in type and supports additional features and syntax that make it awesome. 45 | 46 | ## Examples but not exhaustive: 47 | 48 | ```go 49 | requiredClasses := mapset.NewSet() 50 | requiredClasses.Add("Cooking") 51 | requiredClasses.Add("English") 52 | requiredClasses.Add("Math") 53 | requiredClasses.Add("Biology") 54 | 55 | scienceSlice := []interface{}{"Biology", "Chemistry"} 56 | scienceClasses := mapset.NewSetFromSlice(scienceSlice) 57 | 58 | electiveClasses := mapset.NewSet() 59 | electiveClasses.Add("Welding") 60 | electiveClasses.Add("Music") 61 | electiveClasses.Add("Automotive") 62 | 63 | bonusClasses := mapset.NewSet() 64 | bonusClasses.Add("Go Programming") 65 | bonusClasses.Add("Python Programming") 66 | 67 | //Show me all the available classes I can take 68 | allClasses := requiredClasses.Union(scienceClasses).Union(electiveClasses).Union(bonusClasses) 69 | fmt.Println(allClasses) //Set{Cooking, English, Math, Chemistry, Welding, Biology, Music, Automotive, Go Programming, Python Programming} 70 | 71 | 72 | //Is cooking considered a science class? 73 | fmt.Println(scienceClasses.Contains("Cooking")) //false 74 | 75 | //Show me all classes that are not science classes, since I hate science. 
76 | fmt.Println(allClasses.Difference(scienceClasses)) //Set{Music, Automotive, Go Programming, Python Programming, Cooking, English, Math, Welding} 77 | 78 | //Which science classes are also required classes? 79 | fmt.Println(scienceClasses.Intersect(requiredClasses)) //Set{Biology} 80 | 81 | //How many bonus classes do you offer? 82 | fmt.Println(bonusClasses.Cardinality()) //2 83 | 84 | //Do you have the following classes? Welding, Automotive and English? 85 | fmt.Println(allClasses.IsSuperset(mapset.NewSetFromSlice([]interface{}{"Welding", "Automotive", "English"}))) //true 86 | ``` 87 | 88 | Thanks! 89 | 90 | -Ralph 91 | 92 | [![Bitdeli Badge](https://d2weczhvl823v0.cloudfront.net/deckarep/golang-set/trend.png)](https://bitdeli.com/free "Bitdeli Badge") 93 | 94 | [![Analytics](https://ga-beacon.appspot.com/UA-42584447-2/deckarep/golang-set)](https://github.com/igrigorik/ga-beacon) 95 | -------------------------------------------------------------------------------- /vendor/github.com/deckarep/golang-set/set.go: -------------------------------------------------------------------------------- 1 | /* 2 | Open Source Initiative OSI - The MIT License (MIT):Licensing 3 | 4 | The MIT License (MIT) 5 | Copyright (c) 2013 Ralph Caraveo (deckarep@gmail.com) 6 | 7 | Permission is hereby granted, free of charge, to any person obtaining a copy of 8 | this software and associated documentation files (the "Software"), to deal in 9 | the Software without restriction, including without limitation the rights to 10 | use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies 11 | of the Software, and to permit persons to whom the Software is furnished to do 12 | so, subject to the following conditions: 13 | 14 | The above copyright notice and this permission notice shall be included in all 15 | copies or substantial portions of the Software. 16 | 17 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 18 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 19 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 20 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 21 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 22 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 23 | SOFTWARE. 24 | */ 25 | 26 | // Package mapset implements a simple and generic set collection. 27 | // Items stored within it are unordered and unique. It supports 28 | // typical set operations: membership testing, intersection, union, 29 | // difference, symmetric difference and cloning. 30 | // 31 | // Package mapset provides two implementations. The default 32 | // implementation is safe for concurrent access. There is a non-threadsafe 33 | // implementation which is slightly more performant. 34 | package mapset 35 | 36 | type Set interface { 37 | // Adds an element to the set. Returns whether 38 | // the item was added. 39 | Add(i interface{}) bool 40 | 41 | // Returns the number of elements in the set. 42 | Cardinality() int 43 | 44 | // Removes all elements from the set, leaving 45 | // the emtpy set. 46 | Clear() 47 | 48 | // Returns a clone of the set using the same 49 | // implementation, duplicating all keys. 50 | Clone() Set 51 | 52 | // Returns whether the given items 53 | // are all in the set. 54 | Contains(i ...interface{}) bool 55 | 56 | // Returns the difference between this set 57 | // and other. 
The returned set will contain 58 | // all elements of this set that are not also 59 | // elements of other. 60 | // 61 | // Note that the argument to Difference 62 | // must be of the same type as the receiver 63 | // of the method. Otherwise, Difference will 64 | // panic. 65 | Difference(other Set) Set 66 | 67 | // Determines if two sets are equal to each 68 | // other. If they have the same cardinality 69 | // and contain the same elements, they are 70 | // considered equal. The order in which 71 | // the elements were added is irrelevant. 72 | // 73 | // Note that the argument to Equal must be 74 | // of the same type as the receiver of the 75 | // method. Otherwise, Equal will panic. 76 | Equal(other Set) bool 77 | 78 | // Returns a new set containing only the elements 79 | // that exist only in both sets. 80 | // 81 | // Note that the argument to Intersect 82 | // must be of the same type as the receiver 83 | // of the method. Otherwise, Intersect will 84 | // panic. 85 | Intersect(other Set) Set 86 | 87 | // Determines if every element in the other set 88 | // is in this set. 89 | // 90 | // Note that the argument to IsSubset 91 | // must be of the same type as the receiver 92 | // of the method. Otherwise, IsSubset will 93 | // panic. 94 | IsSubset(other Set) bool 95 | 96 | // Determines if every element in this set is in 97 | // the other set. 98 | // 99 | // Note that the argument to IsSuperset 100 | // must be of the same type as the receiver 101 | // of the method. Otherwise, IsSuperset will 102 | // panic. 103 | IsSuperset(other Set) bool 104 | 105 | // Returns a channel of elements that you can 106 | // range over. 107 | Iter() <-chan interface{} 108 | 109 | // Remove a single element from the set. 110 | Remove(i interface{}) 111 | 112 | // Provides a convenient string representation 113 | // of the current state of the set. 114 | String() string 115 | 116 | // Returns a new set with all elements which are 117 | // in either this set or the other set but not in both. 118 | // 119 | // Note that the argument to SymmetricDifference 120 | // must be of the same type as the receiver 121 | // of the method. Otherwise, SymmetricDifference 122 | // will panic. 123 | SymmetricDifference(other Set) Set 124 | 125 | // Returns a new set with all elements in both sets. 126 | // 127 | // Note that the argument to Union must be of the 128 | 129 | // same type as the receiver of the method. 130 | // Otherwise, IsSuperset will panic. 131 | Union(other Set) Set 132 | 133 | // Returns all subsets of a given set (Power Set). 134 | PowerSet() Set 135 | 136 | // Returns the Cartesian Product of two sets. 137 | CartesianProduct(other Set) Set 138 | 139 | // Returns the members of the set as a slice. 140 | ToSlice() []interface{} 141 | } 142 | 143 | // Creates and returns a reference to an empty set. 144 | func NewSet(s ...interface{}) Set { 145 | set := newThreadSafeSet() 146 | for _, item := range s { 147 | set.Add(item) 148 | } 149 | return &set 150 | } 151 | 152 | // Creates and returns a new set with the given elements 153 | func NewSetWith(elts ...interface{}) Set { 154 | return NewSetFromSlice(elts) 155 | } 156 | 157 | // Creates and returns a reference to a set from an existing slice 158 | func NewSetFromSlice(s []interface{}) Set { 159 | a := NewSet(s...) 
160 | return a 161 | } 162 | 163 | func NewThreadUnsafeSet() Set { 164 | set := newThreadUnsafeSet() 165 | return &set 166 | } 167 | 168 | func NewThreadUnsafeSetFromSlice(s []interface{}) Set { 169 | a := NewThreadUnsafeSet() 170 | for _, item := range s { 171 | a.Add(item) 172 | } 173 | return a 174 | } 175 | -------------------------------------------------------------------------------- /vendor/github.com/deckarep/golang-set/threadsafe.go: -------------------------------------------------------------------------------- 1 | /* 2 | Open Source Initiative OSI - The MIT License (MIT):Licensing 3 | 4 | The MIT License (MIT) 5 | Copyright (c) 2013 Ralph Caraveo (deckarep@gmail.com) 6 | 7 | Permission is hereby granted, free of charge, to any person obtaining a copy of 8 | this software and associated documentation files (the "Software"), to deal in 9 | the Software without restriction, including without limitation the rights to 10 | use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies 11 | of the Software, and to permit persons to whom the Software is furnished to do 12 | so, subject to the following conditions: 13 | 14 | The above copyright notice and this permission notice shall be included in all 15 | copies or substantial portions of the Software. 16 | 17 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 18 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 19 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 20 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 21 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 22 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 23 | SOFTWARE. 24 | */ 25 | 26 | package mapset 27 | 28 | import "sync" 29 | 30 | type threadSafeSet struct { 31 | s threadUnsafeSet 32 | sync.RWMutex 33 | } 34 | 35 | func newThreadSafeSet() threadSafeSet { 36 | return threadSafeSet{s: newThreadUnsafeSet()} 37 | } 38 | 39 | func (set *threadSafeSet) Add(i interface{}) bool { 40 | set.Lock() 41 | ret := set.s.Add(i) 42 | set.Unlock() 43 | return ret 44 | } 45 | 46 | func (set *threadSafeSet) Contains(i ...interface{}) bool { 47 | set.RLock() 48 | ret := set.s.Contains(i...) 
49 | set.RUnlock() 50 | return ret 51 | } 52 | 53 | func (set *threadSafeSet) IsSubset(other Set) bool { 54 | o := other.(*threadSafeSet) 55 | 56 | set.RLock() 57 | o.RLock() 58 | 59 | ret := set.s.IsSubset(&o.s) 60 | set.RUnlock() 61 | o.RUnlock() 62 | return ret 63 | } 64 | 65 | func (set *threadSafeSet) IsSuperset(other Set) bool { 66 | return other.IsSubset(set) 67 | } 68 | 69 | func (set *threadSafeSet) Union(other Set) Set { 70 | o := other.(*threadSafeSet) 71 | 72 | set.RLock() 73 | o.RLock() 74 | 75 | unsafeUnion := set.s.Union(&o.s).(*threadUnsafeSet) 76 | ret := &threadSafeSet{s: *unsafeUnion} 77 | set.RUnlock() 78 | o.RUnlock() 79 | return ret 80 | } 81 | 82 | func (set *threadSafeSet) Intersect(other Set) Set { 83 | o := other.(*threadSafeSet) 84 | 85 | set.RLock() 86 | o.RLock() 87 | 88 | unsafeIntersection := set.s.Intersect(&o.s).(*threadUnsafeSet) 89 | ret := &threadSafeSet{s: *unsafeIntersection} 90 | set.RUnlock() 91 | o.RUnlock() 92 | return ret 93 | } 94 | 95 | func (set *threadSafeSet) Difference(other Set) Set { 96 | o := other.(*threadSafeSet) 97 | 98 | set.RLock() 99 | o.RLock() 100 | 101 | unsafeDifference := set.s.Difference(&o.s).(*threadUnsafeSet) 102 | ret := &threadSafeSet{s: *unsafeDifference} 103 | set.RUnlock() 104 | o.RUnlock() 105 | return ret 106 | } 107 | 108 | func (set *threadSafeSet) SymmetricDifference(other Set) Set { 109 | o := other.(*threadSafeSet) 110 | 111 | unsafeDifference := set.s.SymmetricDifference(&o.s).(*threadUnsafeSet) 112 | return &threadSafeSet{s: *unsafeDifference} 113 | } 114 | 115 | func (set *threadSafeSet) Clear() { 116 | set.Lock() 117 | set.s = newThreadUnsafeSet() 118 | set.Unlock() 119 | } 120 | 121 | func (set *threadSafeSet) Remove(i interface{}) { 122 | set.Lock() 123 | delete(set.s, i) 124 | set.Unlock() 125 | } 126 | 127 | func (set *threadSafeSet) Cardinality() int { 128 | set.RLock() 129 | defer set.RUnlock() 130 | return len(set.s) 131 | } 132 | 133 | func (set *threadSafeSet) Iter() <-chan interface{} { 134 | ch := make(chan interface{}) 135 | go func() { 136 | set.RLock() 137 | 138 | for elem := range set.s { 139 | ch <- elem 140 | } 141 | close(ch) 142 | set.RUnlock() 143 | }() 144 | 145 | return ch 146 | } 147 | 148 | func (set *threadSafeSet) Equal(other Set) bool { 149 | o := other.(*threadSafeSet) 150 | 151 | set.RLock() 152 | o.RLock() 153 | 154 | ret := set.s.Equal(&o.s) 155 | set.RUnlock() 156 | o.RUnlock() 157 | return ret 158 | } 159 | 160 | func (set *threadSafeSet) Clone() Set { 161 | set.RLock() 162 | 163 | unsafeClone := set.s.Clone().(*threadUnsafeSet) 164 | ret := &threadSafeSet{s: *unsafeClone} 165 | set.RUnlock() 166 | return ret 167 | } 168 | 169 | func (set *threadSafeSet) String() string { 170 | set.RLock() 171 | ret := set.s.String() 172 | set.RUnlock() 173 | return ret 174 | } 175 | 176 | func (set *threadSafeSet) PowerSet() Set { 177 | set.RLock() 178 | ret := set.s.PowerSet() 179 | set.RUnlock() 180 | return ret 181 | } 182 | 183 | func (set *threadSafeSet) CartesianProduct(other Set) Set { 184 | o := other.(*threadSafeSet) 185 | 186 | set.RLock() 187 | o.RLock() 188 | 189 | unsafeCartProduct := set.s.CartesianProduct(&o.s).(*threadUnsafeSet) 190 | ret := &threadSafeSet{s: *unsafeCartProduct} 191 | set.RUnlock() 192 | o.RUnlock() 193 | return ret 194 | } 195 | 196 | func (set *threadSafeSet) ToSlice() []interface{} { 197 | set.RLock() 198 | keys := make([]interface{}, 0, set.Cardinality()) 199 | for elem := range set.s { 200 | keys = append(keys, elem) 201 | } 202 | set.RUnlock() 203 | 
return keys 204 | } 205 | -------------------------------------------------------------------------------- /vendor/github.com/deckarep/golang-set/threadunsafe.go: -------------------------------------------------------------------------------- 1 | /* 2 | Open Source Initiative OSI - The MIT License (MIT):Licensing 3 | 4 | The MIT License (MIT) 5 | Copyright (c) 2013 Ralph Caraveo (deckarep@gmail.com) 6 | 7 | Permission is hereby granted, free of charge, to any person obtaining a copy of 8 | this software and associated documentation files (the "Software"), to deal in 9 | the Software without restriction, including without limitation the rights to 10 | use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies 11 | of the Software, and to permit persons to whom the Software is furnished to do 12 | so, subject to the following conditions: 13 | 14 | The above copyright notice and this permission notice shall be included in all 15 | copies or substantial portions of the Software. 16 | 17 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 18 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 19 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 20 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 21 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 22 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 23 | SOFTWARE. 24 | */ 25 | 26 | package mapset 27 | 28 | import ( 29 | "fmt" 30 | "reflect" 31 | "strings" 32 | ) 33 | 34 | type threadUnsafeSet map[interface{}]struct{} 35 | 36 | type orderedPair struct { 37 | first interface{} 38 | second interface{} 39 | } 40 | 41 | func newThreadUnsafeSet() threadUnsafeSet { 42 | return make(threadUnsafeSet) 43 | } 44 | 45 | func (pair *orderedPair) Equal(other orderedPair) bool { 46 | if pair.first == other.first && 47 | pair.second == other.second { 48 | return true 49 | } 50 | 51 | return false 52 | } 53 | 54 | func (set *threadUnsafeSet) Add(i interface{}) bool { 55 | _, found := (*set)[i] 56 | (*set)[i] = struct{}{} 57 | return !found //False if it existed already 58 | } 59 | 60 | func (set *threadUnsafeSet) Contains(i ...interface{}) bool { 61 | for _, val := range i { 62 | if _, ok := (*set)[val]; !ok { 63 | return false 64 | } 65 | } 66 | return true 67 | } 68 | 69 | func (set *threadUnsafeSet) IsSubset(other Set) bool { 70 | _ = other.(*threadUnsafeSet) 71 | for elem := range *set { 72 | if !other.Contains(elem) { 73 | return false 74 | } 75 | } 76 | return true 77 | } 78 | 79 | func (set *threadUnsafeSet) IsSuperset(other Set) bool { 80 | return other.IsSubset(set) 81 | } 82 | 83 | func (set *threadUnsafeSet) Union(other Set) Set { 84 | o := other.(*threadUnsafeSet) 85 | 86 | unionedSet := newThreadUnsafeSet() 87 | 88 | for elem := range *set { 89 | unionedSet.Add(elem) 90 | } 91 | for elem := range *o { 92 | unionedSet.Add(elem) 93 | } 94 | return &unionedSet 95 | } 96 | 97 | func (set *threadUnsafeSet) Intersect(other Set) Set { 98 | o := other.(*threadUnsafeSet) 99 | 100 | intersection := newThreadUnsafeSet() 101 | // loop over smaller set 102 | if set.Cardinality() < other.Cardinality() { 103 | for elem := range *set { 104 | if other.Contains(elem) { 105 | intersection.Add(elem) 106 | } 107 | } 108 | } else { 109 | for elem := range *o { 110 | if set.Contains(elem) { 111 | intersection.Add(elem) 112 | } 113 | } 114 | } 115 | return &intersection 116 | } 117 | 118 | func 
(set *threadUnsafeSet) Difference(other Set) Set { 119 | _ = other.(*threadUnsafeSet) 120 | 121 | difference := newThreadUnsafeSet() 122 | for elem := range *set { 123 | if !other.Contains(elem) { 124 | difference.Add(elem) 125 | } 126 | } 127 | return &difference 128 | } 129 | 130 | func (set *threadUnsafeSet) SymmetricDifference(other Set) Set { 131 | _ = other.(*threadUnsafeSet) 132 | 133 | aDiff := set.Difference(other) 134 | bDiff := other.Difference(set) 135 | return aDiff.Union(bDiff) 136 | } 137 | 138 | func (set *threadUnsafeSet) Clear() { 139 | *set = newThreadUnsafeSet() 140 | } 141 | 142 | func (set *threadUnsafeSet) Remove(i interface{}) { 143 | delete(*set, i) 144 | } 145 | 146 | func (set *threadUnsafeSet) Cardinality() int { 147 | return len(*set) 148 | } 149 | 150 | func (set *threadUnsafeSet) Iter() <-chan interface{} { 151 | ch := make(chan interface{}) 152 | go func() { 153 | for elem := range *set { 154 | ch <- elem 155 | } 156 | close(ch) 157 | }() 158 | 159 | return ch 160 | } 161 | 162 | func (set *threadUnsafeSet) Equal(other Set) bool { 163 | _ = other.(*threadUnsafeSet) 164 | 165 | if set.Cardinality() != other.Cardinality() { 166 | return false 167 | } 168 | for elem := range *set { 169 | if !other.Contains(elem) { 170 | return false 171 | } 172 | } 173 | return true 174 | } 175 | 176 | func (set *threadUnsafeSet) Clone() Set { 177 | clonedSet := newThreadUnsafeSet() 178 | for elem := range *set { 179 | clonedSet.Add(elem) 180 | } 181 | return &clonedSet 182 | } 183 | 184 | func (set *threadUnsafeSet) String() string { 185 | items := make([]string, 0, len(*set)) 186 | 187 | for elem := range *set { 188 | items = append(items, fmt.Sprintf("%v", elem)) 189 | } 190 | return fmt.Sprintf("Set{%s}", strings.Join(items, ", ")) 191 | } 192 | 193 | func (pair orderedPair) String() string { 194 | return fmt.Sprintf("(%v, %v)", pair.first, pair.second) 195 | } 196 | 197 | func (set *threadUnsafeSet) PowerSet() Set { 198 | powSet := NewThreadUnsafeSet() 199 | nullset := newThreadUnsafeSet() 200 | powSet.Add(&nullset) 201 | 202 | for es := range *set { 203 | u := newThreadUnsafeSet() 204 | j := powSet.Iter() 205 | for er := range j { 206 | p := newThreadUnsafeSet() 207 | if reflect.TypeOf(er).Name() == "" { 208 | k := er.(*threadUnsafeSet) 209 | for ek := range *(k) { 210 | p.Add(ek) 211 | } 212 | } else { 213 | p.Add(er) 214 | } 215 | p.Add(es) 216 | u.Add(&p) 217 | } 218 | 219 | powSet = powSet.Union(&u) 220 | } 221 | 222 | return powSet 223 | } 224 | 225 | func (set *threadUnsafeSet) CartesianProduct(other Set) Set { 226 | o := other.(*threadUnsafeSet) 227 | cartProduct := NewThreadUnsafeSet() 228 | 229 | for i := range *set { 230 | for j := range *o { 231 | elem := orderedPair{first: i, second: j} 232 | cartProduct.Add(elem) 233 | } 234 | } 235 | 236 | return cartProduct 237 | } 238 | 239 | func (set *threadUnsafeSet) ToSlice() []interface{} { 240 | keys := make([]interface{}, 0, set.Cardinality()) 241 | for elem := range *set { 242 | keys = append(keys, elem) 243 | } 244 | 245 | return keys 246 | } 247 | -------------------------------------------------------------------------------- /vendor/gopkg.in/yaml.v2/LICENSE: -------------------------------------------------------------------------------- 1 | 2 | Copyright (c) 2011-2014 - Canonical Inc. 3 | 4 | This software is licensed under the LGPLv3, included below. 
5 | 6 | As a special exception to the GNU Lesser General Public License version 3 7 | ("LGPL3"), the copyright holders of this Library give you permission to 8 | convey to a third party a Combined Work that links statically or dynamically 9 | to this Library without providing any Minimal Corresponding Source or 10 | Minimal Application Code as set out in 4d or providing the installation 11 | information set out in section 4e, provided that you comply with the other 12 | provisions of LGPL3 and provided that you meet, for the Application the 13 | terms and conditions of the license(s) which apply to the Application. 14 | 15 | Except as stated in this special exception, the provisions of LGPL3 will 16 | continue to comply in full to this Library. If you modify this Library, you 17 | may apply this exception to your version of this Library, but you are not 18 | obliged to do so. If you do not wish to do so, delete this exception 19 | statement from your version. This exception does not (and cannot) modify any 20 | license terms which apply to the Application, with which you must still 21 | comply. 22 | 23 | 24 | GNU LESSER GENERAL PUBLIC LICENSE 25 | Version 3, 29 June 2007 26 | 27 | Copyright (C) 2007 Free Software Foundation, Inc. 28 | Everyone is permitted to copy and distribute verbatim copies 29 | of this license document, but changing it is not allowed. 30 | 31 | 32 | This version of the GNU Lesser General Public License incorporates 33 | the terms and conditions of version 3 of the GNU General Public 34 | License, supplemented by the additional permissions listed below. 35 | 36 | 0. Additional Definitions. 37 | 38 | As used herein, "this License" refers to version 3 of the GNU Lesser 39 | General Public License, and the "GNU GPL" refers to version 3 of the GNU 40 | General Public License. 41 | 42 | "The Library" refers to a covered work governed by this License, 43 | other than an Application or a Combined Work as defined below. 44 | 45 | An "Application" is any work that makes use of an interface provided 46 | by the Library, but which is not otherwise based on the Library. 47 | Defining a subclass of a class defined by the Library is deemed a mode 48 | of using an interface provided by the Library. 49 | 50 | A "Combined Work" is a work produced by combining or linking an 51 | Application with the Library. The particular version of the Library 52 | with which the Combined Work was made is also called the "Linked 53 | Version". 54 | 55 | The "Minimal Corresponding Source" for a Combined Work means the 56 | Corresponding Source for the Combined Work, excluding any source code 57 | for portions of the Combined Work that, considered in isolation, are 58 | based on the Application, and not on the Linked Version. 59 | 60 | The "Corresponding Application Code" for a Combined Work means the 61 | object code and/or source code for the Application, including any data 62 | and utility programs needed for reproducing the Combined Work from the 63 | Application, but excluding the System Libraries of the Combined Work. 64 | 65 | 1. Exception to Section 3 of the GNU GPL. 66 | 67 | You may convey a covered work under sections 3 and 4 of this License 68 | without being bound by section 3 of the GNU GPL. 69 | 70 | 2. Conveying Modified Versions. 
71 | 72 | If you modify a copy of the Library, and, in your modifications, a 73 | facility refers to a function or data to be supplied by an Application 74 | that uses the facility (other than as an argument passed when the 75 | facility is invoked), then you may convey a copy of the modified 76 | version: 77 | 78 | a) under this License, provided that you make a good faith effort to 79 | ensure that, in the event an Application does not supply the 80 | function or data, the facility still operates, and performs 81 | whatever part of its purpose remains meaningful, or 82 | 83 | b) under the GNU GPL, with none of the additional permissions of 84 | this License applicable to that copy. 85 | 86 | 3. Object Code Incorporating Material from Library Header Files. 87 | 88 | The object code form of an Application may incorporate material from 89 | a header file that is part of the Library. You may convey such object 90 | code under terms of your choice, provided that, if the incorporated 91 | material is not limited to numerical parameters, data structure 92 | layouts and accessors, or small macros, inline functions and templates 93 | (ten or fewer lines in length), you do both of the following: 94 | 95 | a) Give prominent notice with each copy of the object code that the 96 | Library is used in it and that the Library and its use are 97 | covered by this License. 98 | 99 | b) Accompany the object code with a copy of the GNU GPL and this license 100 | document. 101 | 102 | 4. Combined Works. 103 | 104 | You may convey a Combined Work under terms of your choice that, 105 | taken together, effectively do not restrict modification of the 106 | portions of the Library contained in the Combined Work and reverse 107 | engineering for debugging such modifications, if you also do each of 108 | the following: 109 | 110 | a) Give prominent notice with each copy of the Combined Work that 111 | the Library is used in it and that the Library and its use are 112 | covered by this License. 113 | 114 | b) Accompany the Combined Work with a copy of the GNU GPL and this license 115 | document. 116 | 117 | c) For a Combined Work that displays copyright notices during 118 | execution, include the copyright notice for the Library among 119 | these notices, as well as a reference directing the user to the 120 | copies of the GNU GPL and this license document. 121 | 122 | d) Do one of the following: 123 | 124 | 0) Convey the Minimal Corresponding Source under the terms of this 125 | License, and the Corresponding Application Code in a form 126 | suitable for, and under terms that permit, the user to 127 | recombine or relink the Application with a modified version of 128 | the Linked Version to produce a modified Combined Work, in the 129 | manner specified by section 6 of the GNU GPL for conveying 130 | Corresponding Source. 131 | 132 | 1) Use a suitable shared library mechanism for linking with the 133 | Library. A suitable mechanism is one that (a) uses at run time 134 | a copy of the Library already present on the user's computer 135 | system, and (b) will operate properly with a modified version 136 | of the Library that is interface-compatible with the Linked 137 | Version. 
138 | 139 | e) Provide Installation Information, but only if you would otherwise 140 | be required to provide such information under section 6 of the 141 | GNU GPL, and only to the extent that such information is 142 | necessary to install and execute a modified version of the 143 | Combined Work produced by recombining or relinking the 144 | Application with a modified version of the Linked Version. (If 145 | you use option 4d0, the Installation Information must accompany 146 | the Minimal Corresponding Source and Corresponding Application 147 | Code. If you use option 4d1, you must provide the Installation 148 | Information in the manner specified by section 6 of the GNU GPL 149 | for conveying Corresponding Source.) 150 | 151 | 5. Combined Libraries. 152 | 153 | You may place library facilities that are a work based on the 154 | Library side by side in a single library together with other library 155 | facilities that are not Applications and are not covered by this 156 | License, and convey such a combined library under terms of your 157 | choice, if you do both of the following: 158 | 159 | a) Accompany the combined library with a copy of the same work based 160 | on the Library, uncombined with any other library facilities, 161 | conveyed under the terms of this License. 162 | 163 | b) Give prominent notice with the combined library that part of it 164 | is a work based on the Library, and explaining where to find the 165 | accompanying uncombined form of the same work. 166 | 167 | 6. Revised Versions of the GNU Lesser General Public License. 168 | 169 | The Free Software Foundation may publish revised and/or new versions 170 | of the GNU Lesser General Public License from time to time. Such new 171 | versions will be similar in spirit to the present version, but may 172 | differ in detail to address new problems or concerns. 173 | 174 | Each version is given a distinguishing version number. If the 175 | Library as you received it specifies that a certain numbered version 176 | of the GNU Lesser General Public License "or any later version" 177 | applies to it, you have the option of following the terms and 178 | conditions either of that published version or of any later version 179 | published by the Free Software Foundation. If the Library as you 180 | received it does not specify a version number of the GNU Lesser 181 | General Public License, you may choose any version of the GNU Lesser 182 | General Public License ever published by the Free Software Foundation. 183 | 184 | If the Library as you received it specifies that a proxy can decide 185 | whether future versions of the GNU Lesser General Public License shall 186 | apply, that proxy's public statement of acceptance of any version is 187 | permanent authorization for you to choose that version for the 188 | Library. 
189 | -------------------------------------------------------------------------------- /vendor/gopkg.in/yaml.v2/LICENSE.libyaml: -------------------------------------------------------------------------------- 1 | The following files were ported to Go from C files of libyaml, and thus 2 | are still covered by their original copyright and license: 3 | 4 | apic.go 5 | emitterc.go 6 | parserc.go 7 | readerc.go 8 | scannerc.go 9 | writerc.go 10 | yamlh.go 11 | yamlprivateh.go 12 | 13 | Copyright (c) 2006 Kirill Simonov 14 | 15 | Permission is hereby granted, free of charge, to any person obtaining a copy of 16 | this software and associated documentation files (the "Software"), to deal in 17 | the Software without restriction, including without limitation the rights to 18 | use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies 19 | of the Software, and to permit persons to whom the Software is furnished to do 20 | so, subject to the following conditions: 21 | 22 | The above copyright notice and this permission notice shall be included in all 23 | copies or substantial portions of the Software. 24 | 25 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 26 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 27 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 28 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 29 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 30 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 31 | SOFTWARE. 32 | -------------------------------------------------------------------------------- /vendor/gopkg.in/yaml.v2/README.md: -------------------------------------------------------------------------------- 1 | # YAML support for the Go language 2 | 3 | Introduction 4 | ------------ 5 | 6 | The yaml package enables Go programs to comfortably encode and decode YAML 7 | values. It was developed within [Canonical](https://www.canonical.com) as 8 | part of the [juju](https://juju.ubuntu.com) project, and is based on a 9 | pure Go port of the well-known [libyaml](http://pyyaml.org/wiki/LibYAML) 10 | C library to parse and generate YAML data quickly and reliably. 11 | 12 | Compatibility 13 | ------------- 14 | 15 | The yaml package supports most of YAML 1.1 and 1.2, including support for 16 | anchors, tags, map merging, etc. Multi-document unmarshalling is not yet 17 | implemented, and base-60 floats from YAML 1.1 are purposefully not 18 | supported since they're a poor design and are gone in YAML 1.2. 19 | 20 | Installation and usage 21 | ---------------------- 22 | 23 | The import path for the package is *gopkg.in/yaml.v2*. 24 | 25 | To install it, run: 26 | 27 | go get gopkg.in/yaml.v2 28 | 29 | API documentation 30 | ----------------- 31 | 32 | If opened in a browser, the import path itself leads to the API documentation: 33 | 34 | * [https://gopkg.in/yaml.v2](https://gopkg.in/yaml.v2) 35 | 36 | API stability 37 | ------------- 38 | 39 | The package API for yaml v2 will remain stable as described in [gopkg.in](https://gopkg.in). 40 | 41 | 42 | License 43 | ------- 44 | 45 | The yaml package is licensed under the LGPL with an exception that allows it to be linked statically. Please see the LICENSE file for details. 46 | 47 | 48 | Example 49 | ------- 50 | 51 | ```Go 52 | package main 53 | 54 | import ( 55 | "fmt" 56 | "log" 57 | 58 | "gopkg.in/yaml.v2" 59 | ) 60 | 61 | var data = ` 62 | a: Easy! 
63 | b: 64 | c: 2 65 | d: [3, 4] 66 | ` 67 | 68 | type T struct { 69 | A string 70 | B struct { 71 | RenamedC int `yaml:"c"` 72 | D []int `yaml:",flow"` 73 | } 74 | } 75 | 76 | func main() { 77 | t := T{} 78 | 79 | err := yaml.Unmarshal([]byte(data), &t) 80 | if err != nil { 81 | log.Fatalf("error: %v", err) 82 | } 83 | fmt.Printf("--- t:\n%v\n\n", t) 84 | 85 | d, err := yaml.Marshal(&t) 86 | if err != nil { 87 | log.Fatalf("error: %v", err) 88 | } 89 | fmt.Printf("--- t dump:\n%s\n\n", string(d)) 90 | 91 | m := make(map[interface{}]interface{}) 92 | 93 | err = yaml.Unmarshal([]byte(data), &m) 94 | if err != nil { 95 | log.Fatalf("error: %v", err) 96 | } 97 | fmt.Printf("--- m:\n%v\n\n", m) 98 | 99 | d, err = yaml.Marshal(&m) 100 | if err != nil { 101 | log.Fatalf("error: %v", err) 102 | } 103 | fmt.Printf("--- m dump:\n%s\n\n", string(d)) 104 | } 105 | ``` 106 | 107 | This example will generate the following output: 108 | 109 | ``` 110 | --- t: 111 | {Easy! {2 [3 4]}} 112 | 113 | --- t dump: 114 | a: Easy! 115 | b: 116 | c: 2 117 | d: [3, 4] 118 | 119 | 120 | --- m: 121 | map[a:Easy! b:map[c:2 d:[3 4]]] 122 | 123 | --- m dump: 124 | a: Easy! 125 | b: 126 | c: 2 127 | d: 128 | - 3 129 | - 4 130 | ``` 131 | 132 | -------------------------------------------------------------------------------- /vendor/gopkg.in/yaml.v2/apic.go: -------------------------------------------------------------------------------- 1 | package yaml 2 | 3 | import ( 4 | "io" 5 | "os" 6 | ) 7 | 8 | func yaml_insert_token(parser *yaml_parser_t, pos int, token *yaml_token_t) { 9 | //fmt.Println("yaml_insert_token", "pos:", pos, "typ:", token.typ, "head:", parser.tokens_head, "len:", len(parser.tokens)) 10 | 11 | // Check if we can move the queue at the beginning of the buffer. 12 | if parser.tokens_head > 0 && len(parser.tokens) == cap(parser.tokens) { 13 | if parser.tokens_head != len(parser.tokens) { 14 | copy(parser.tokens, parser.tokens[parser.tokens_head:]) 15 | } 16 | parser.tokens = parser.tokens[:len(parser.tokens)-parser.tokens_head] 17 | parser.tokens_head = 0 18 | } 19 | parser.tokens = append(parser.tokens, *token) 20 | if pos < 0 { 21 | return 22 | } 23 | copy(parser.tokens[parser.tokens_head+pos+1:], parser.tokens[parser.tokens_head+pos:]) 24 | parser.tokens[parser.tokens_head+pos] = *token 25 | } 26 | 27 | // Create a new parser object. 28 | func yaml_parser_initialize(parser *yaml_parser_t) bool { 29 | *parser = yaml_parser_t{ 30 | raw_buffer: make([]byte, 0, input_raw_buffer_size), 31 | buffer: make([]byte, 0, input_buffer_size), 32 | } 33 | return true 34 | } 35 | 36 | // Destroy a parser object. 37 | func yaml_parser_delete(parser *yaml_parser_t) { 38 | *parser = yaml_parser_t{} 39 | } 40 | 41 | // String read handler. 42 | func yaml_string_read_handler(parser *yaml_parser_t, buffer []byte) (n int, err error) { 43 | if parser.input_pos == len(parser.input) { 44 | return 0, io.EOF 45 | } 46 | n = copy(buffer, parser.input[parser.input_pos:]) 47 | parser.input_pos += n 48 | return n, nil 49 | } 50 | 51 | // File read handler. 52 | func yaml_file_read_handler(parser *yaml_parser_t, buffer []byte) (n int, err error) { 53 | return parser.input_file.Read(buffer) 54 | } 55 | 56 | // Set a string input. 
57 | func yaml_parser_set_input_string(parser *yaml_parser_t, input []byte) { 58 | if parser.read_handler != nil { 59 | panic("must set the input source only once") 60 | } 61 | parser.read_handler = yaml_string_read_handler 62 | parser.input = input 63 | parser.input_pos = 0 64 | } 65 | 66 | // Set a file input. 67 | func yaml_parser_set_input_file(parser *yaml_parser_t, file *os.File) { 68 | if parser.read_handler != nil { 69 | panic("must set the input source only once") 70 | } 71 | parser.read_handler = yaml_file_read_handler 72 | parser.input_file = file 73 | } 74 | 75 | // Set the source encoding. 76 | func yaml_parser_set_encoding(parser *yaml_parser_t, encoding yaml_encoding_t) { 77 | if parser.encoding != yaml_ANY_ENCODING { 78 | panic("must set the encoding only once") 79 | } 80 | parser.encoding = encoding 81 | } 82 | 83 | // Create a new emitter object. 84 | func yaml_emitter_initialize(emitter *yaml_emitter_t) bool { 85 | *emitter = yaml_emitter_t{ 86 | buffer: make([]byte, output_buffer_size), 87 | raw_buffer: make([]byte, 0, output_raw_buffer_size), 88 | states: make([]yaml_emitter_state_t, 0, initial_stack_size), 89 | events: make([]yaml_event_t, 0, initial_queue_size), 90 | } 91 | return true 92 | } 93 | 94 | // Destroy an emitter object. 95 | func yaml_emitter_delete(emitter *yaml_emitter_t) { 96 | *emitter = yaml_emitter_t{} 97 | } 98 | 99 | // String write handler. 100 | func yaml_string_write_handler(emitter *yaml_emitter_t, buffer []byte) error { 101 | *emitter.output_buffer = append(*emitter.output_buffer, buffer...) 102 | return nil 103 | } 104 | 105 | // File write handler. 106 | func yaml_file_write_handler(emitter *yaml_emitter_t, buffer []byte) error { 107 | _, err := emitter.output_file.Write(buffer) 108 | return err 109 | } 110 | 111 | // Set a string output. 112 | func yaml_emitter_set_output_string(emitter *yaml_emitter_t, output_buffer *[]byte) { 113 | if emitter.write_handler != nil { 114 | panic("must set the output target only once") 115 | } 116 | emitter.write_handler = yaml_string_write_handler 117 | emitter.output_buffer = output_buffer 118 | } 119 | 120 | // Set a file output. 121 | func yaml_emitter_set_output_file(emitter *yaml_emitter_t, file io.Writer) { 122 | if emitter.write_handler != nil { 123 | panic("must set the output target only once") 124 | } 125 | emitter.write_handler = yaml_file_write_handler 126 | emitter.output_file = file 127 | } 128 | 129 | // Set the output encoding. 130 | func yaml_emitter_set_encoding(emitter *yaml_emitter_t, encoding yaml_encoding_t) { 131 | if emitter.encoding != yaml_ANY_ENCODING { 132 | panic("must set the output encoding only once") 133 | } 134 | emitter.encoding = encoding 135 | } 136 | 137 | // Set the canonical output style. 138 | func yaml_emitter_set_canonical(emitter *yaml_emitter_t, canonical bool) { 139 | emitter.canonical = canonical 140 | } 141 | 142 | //// Set the indentation increment. 143 | func yaml_emitter_set_indent(emitter *yaml_emitter_t, indent int) { 144 | if indent < 2 || indent > 9 { 145 | indent = 2 146 | } 147 | emitter.best_indent = indent 148 | } 149 | 150 | // Set the preferred line width. 151 | func yaml_emitter_set_width(emitter *yaml_emitter_t, width int) { 152 | if width < 0 { 153 | width = -1 154 | } 155 | emitter.best_width = width 156 | } 157 | 158 | // Set if unescaped non-ASCII characters are allowed. 159 | func yaml_emitter_set_unicode(emitter *yaml_emitter_t, unicode bool) { 160 | emitter.unicode = unicode 161 | } 162 | 163 | // Set the preferred line break character. 
164 | func yaml_emitter_set_break(emitter *yaml_emitter_t, line_break yaml_break_t) { 165 | emitter.line_break = line_break 166 | } 167 | 168 | ///* 169 | // * Destroy a token object. 170 | // */ 171 | // 172 | //YAML_DECLARE(void) 173 | //yaml_token_delete(yaml_token_t *token) 174 | //{ 175 | // assert(token); // Non-NULL token object expected. 176 | // 177 | // switch (token.type) 178 | // { 179 | // case YAML_TAG_DIRECTIVE_TOKEN: 180 | // yaml_free(token.data.tag_directive.handle); 181 | // yaml_free(token.data.tag_directive.prefix); 182 | // break; 183 | // 184 | // case YAML_ALIAS_TOKEN: 185 | // yaml_free(token.data.alias.value); 186 | // break; 187 | // 188 | // case YAML_ANCHOR_TOKEN: 189 | // yaml_free(token.data.anchor.value); 190 | // break; 191 | // 192 | // case YAML_TAG_TOKEN: 193 | // yaml_free(token.data.tag.handle); 194 | // yaml_free(token.data.tag.suffix); 195 | // break; 196 | // 197 | // case YAML_SCALAR_TOKEN: 198 | // yaml_free(token.data.scalar.value); 199 | // break; 200 | // 201 | // default: 202 | // break; 203 | // } 204 | // 205 | // memset(token, 0, sizeof(yaml_token_t)); 206 | //} 207 | // 208 | ///* 209 | // * Check if a string is a valid UTF-8 sequence. 210 | // * 211 | // * Check 'reader.c' for more details on UTF-8 encoding. 212 | // */ 213 | // 214 | //static int 215 | //yaml_check_utf8(yaml_char_t *start, size_t length) 216 | //{ 217 | // yaml_char_t *end = start+length; 218 | // yaml_char_t *pointer = start; 219 | // 220 | // while (pointer < end) { 221 | // unsigned char octet; 222 | // unsigned int width; 223 | // unsigned int value; 224 | // size_t k; 225 | // 226 | // octet = pointer[0]; 227 | // width = (octet & 0x80) == 0x00 ? 1 : 228 | // (octet & 0xE0) == 0xC0 ? 2 : 229 | // (octet & 0xF0) == 0xE0 ? 3 : 230 | // (octet & 0xF8) == 0xF0 ? 4 : 0; 231 | // value = (octet & 0x80) == 0x00 ? octet & 0x7F : 232 | // (octet & 0xE0) == 0xC0 ? octet & 0x1F : 233 | // (octet & 0xF0) == 0xE0 ? octet & 0x0F : 234 | // (octet & 0xF8) == 0xF0 ? octet & 0x07 : 0; 235 | // if (!width) return 0; 236 | // if (pointer+width > end) return 0; 237 | // for (k = 1; k < width; k ++) { 238 | // octet = pointer[k]; 239 | // if ((octet & 0xC0) != 0x80) return 0; 240 | // value = (value << 6) + (octet & 0x3F); 241 | // } 242 | // if (!((width == 1) || 243 | // (width == 2 && value >= 0x80) || 244 | // (width == 3 && value >= 0x800) || 245 | // (width == 4 && value >= 0x10000))) return 0; 246 | // 247 | // pointer += width; 248 | // } 249 | // 250 | // return 1; 251 | //} 252 | // 253 | 254 | // Create STREAM-START. 255 | func yaml_stream_start_event_initialize(event *yaml_event_t, encoding yaml_encoding_t) bool { 256 | *event = yaml_event_t{ 257 | typ: yaml_STREAM_START_EVENT, 258 | encoding: encoding, 259 | } 260 | return true 261 | } 262 | 263 | // Create STREAM-END. 264 | func yaml_stream_end_event_initialize(event *yaml_event_t) bool { 265 | *event = yaml_event_t{ 266 | typ: yaml_STREAM_END_EVENT, 267 | } 268 | return true 269 | } 270 | 271 | // Create DOCUMENT-START. 272 | func yaml_document_start_event_initialize(event *yaml_event_t, version_directive *yaml_version_directive_t, 273 | tag_directives []yaml_tag_directive_t, implicit bool) bool { 274 | *event = yaml_event_t{ 275 | typ: yaml_DOCUMENT_START_EVENT, 276 | version_directive: version_directive, 277 | tag_directives: tag_directives, 278 | implicit: implicit, 279 | } 280 | return true 281 | } 282 | 283 | // Create DOCUMENT-END. 
284 | func yaml_document_end_event_initialize(event *yaml_event_t, implicit bool) bool { 285 | *event = yaml_event_t{ 286 | typ: yaml_DOCUMENT_END_EVENT, 287 | implicit: implicit, 288 | } 289 | return true 290 | } 291 | 292 | ///* 293 | // * Create ALIAS. 294 | // */ 295 | // 296 | //YAML_DECLARE(int) 297 | //yaml_alias_event_initialize(event *yaml_event_t, anchor *yaml_char_t) 298 | //{ 299 | // mark yaml_mark_t = { 0, 0, 0 } 300 | // anchor_copy *yaml_char_t = NULL 301 | // 302 | // assert(event) // Non-NULL event object is expected. 303 | // assert(anchor) // Non-NULL anchor is expected. 304 | // 305 | // if (!yaml_check_utf8(anchor, strlen((char *)anchor))) return 0 306 | // 307 | // anchor_copy = yaml_strdup(anchor) 308 | // if (!anchor_copy) 309 | // return 0 310 | // 311 | // ALIAS_EVENT_INIT(*event, anchor_copy, mark, mark) 312 | // 313 | // return 1 314 | //} 315 | 316 | // Create SCALAR. 317 | func yaml_scalar_event_initialize(event *yaml_event_t, anchor, tag, value []byte, plain_implicit, quoted_implicit bool, style yaml_scalar_style_t) bool { 318 | *event = yaml_event_t{ 319 | typ: yaml_SCALAR_EVENT, 320 | anchor: anchor, 321 | tag: tag, 322 | value: value, 323 | implicit: plain_implicit, 324 | quoted_implicit: quoted_implicit, 325 | style: yaml_style_t(style), 326 | } 327 | return true 328 | } 329 | 330 | // Create SEQUENCE-START. 331 | func yaml_sequence_start_event_initialize(event *yaml_event_t, anchor, tag []byte, implicit bool, style yaml_sequence_style_t) bool { 332 | *event = yaml_event_t{ 333 | typ: yaml_SEQUENCE_START_EVENT, 334 | anchor: anchor, 335 | tag: tag, 336 | implicit: implicit, 337 | style: yaml_style_t(style), 338 | } 339 | return true 340 | } 341 | 342 | // Create SEQUENCE-END. 343 | func yaml_sequence_end_event_initialize(event *yaml_event_t) bool { 344 | *event = yaml_event_t{ 345 | typ: yaml_SEQUENCE_END_EVENT, 346 | } 347 | return true 348 | } 349 | 350 | // Create MAPPING-START. 351 | func yaml_mapping_start_event_initialize(event *yaml_event_t, anchor, tag []byte, implicit bool, style yaml_mapping_style_t) bool { 352 | *event = yaml_event_t{ 353 | typ: yaml_MAPPING_START_EVENT, 354 | anchor: anchor, 355 | tag: tag, 356 | implicit: implicit, 357 | style: yaml_style_t(style), 358 | } 359 | return true 360 | } 361 | 362 | // Create MAPPING-END. 363 | func yaml_mapping_end_event_initialize(event *yaml_event_t) bool { 364 | *event = yaml_event_t{ 365 | typ: yaml_MAPPING_END_EVENT, 366 | } 367 | return true 368 | } 369 | 370 | // Destroy an event object. 371 | func yaml_event_delete(event *yaml_event_t) { 372 | *event = yaml_event_t{} 373 | } 374 | 375 | ///* 376 | // * Create a document object. 
377 | // */ 378 | // 379 | //YAML_DECLARE(int) 380 | //yaml_document_initialize(document *yaml_document_t, 381 | // version_directive *yaml_version_directive_t, 382 | // tag_directives_start *yaml_tag_directive_t, 383 | // tag_directives_end *yaml_tag_directive_t, 384 | // start_implicit int, end_implicit int) 385 | //{ 386 | // struct { 387 | // error yaml_error_type_t 388 | // } context 389 | // struct { 390 | // start *yaml_node_t 391 | // end *yaml_node_t 392 | // top *yaml_node_t 393 | // } nodes = { NULL, NULL, NULL } 394 | // version_directive_copy *yaml_version_directive_t = NULL 395 | // struct { 396 | // start *yaml_tag_directive_t 397 | // end *yaml_tag_directive_t 398 | // top *yaml_tag_directive_t 399 | // } tag_directives_copy = { NULL, NULL, NULL } 400 | // value yaml_tag_directive_t = { NULL, NULL } 401 | // mark yaml_mark_t = { 0, 0, 0 } 402 | // 403 | // assert(document) // Non-NULL document object is expected. 404 | // assert((tag_directives_start && tag_directives_end) || 405 | // (tag_directives_start == tag_directives_end)) 406 | // // Valid tag directives are expected. 407 | // 408 | // if (!STACK_INIT(&context, nodes, INITIAL_STACK_SIZE)) goto error 409 | // 410 | // if (version_directive) { 411 | // version_directive_copy = yaml_malloc(sizeof(yaml_version_directive_t)) 412 | // if (!version_directive_copy) goto error 413 | // version_directive_copy.major = version_directive.major 414 | // version_directive_copy.minor = version_directive.minor 415 | // } 416 | // 417 | // if (tag_directives_start != tag_directives_end) { 418 | // tag_directive *yaml_tag_directive_t 419 | // if (!STACK_INIT(&context, tag_directives_copy, INITIAL_STACK_SIZE)) 420 | // goto error 421 | // for (tag_directive = tag_directives_start 422 | // tag_directive != tag_directives_end; tag_directive ++) { 423 | // assert(tag_directive.handle) 424 | // assert(tag_directive.prefix) 425 | // if (!yaml_check_utf8(tag_directive.handle, 426 | // strlen((char *)tag_directive.handle))) 427 | // goto error 428 | // if (!yaml_check_utf8(tag_directive.prefix, 429 | // strlen((char *)tag_directive.prefix))) 430 | // goto error 431 | // value.handle = yaml_strdup(tag_directive.handle) 432 | // value.prefix = yaml_strdup(tag_directive.prefix) 433 | // if (!value.handle || !value.prefix) goto error 434 | // if (!PUSH(&context, tag_directives_copy, value)) 435 | // goto error 436 | // value.handle = NULL 437 | // value.prefix = NULL 438 | // } 439 | // } 440 | // 441 | // DOCUMENT_INIT(*document, nodes.start, nodes.end, version_directive_copy, 442 | // tag_directives_copy.start, tag_directives_copy.top, 443 | // start_implicit, end_implicit, mark, mark) 444 | // 445 | // return 1 446 | // 447 | //error: 448 | // STACK_DEL(&context, nodes) 449 | // yaml_free(version_directive_copy) 450 | // while (!STACK_EMPTY(&context, tag_directives_copy)) { 451 | // value yaml_tag_directive_t = POP(&context, tag_directives_copy) 452 | // yaml_free(value.handle) 453 | // yaml_free(value.prefix) 454 | // } 455 | // STACK_DEL(&context, tag_directives_copy) 456 | // yaml_free(value.handle) 457 | // yaml_free(value.prefix) 458 | // 459 | // return 0 460 | //} 461 | // 462 | ///* 463 | // * Destroy a document object. 464 | // */ 465 | // 466 | //YAML_DECLARE(void) 467 | //yaml_document_delete(document *yaml_document_t) 468 | //{ 469 | // struct { 470 | // error yaml_error_type_t 471 | // } context 472 | // tag_directive *yaml_tag_directive_t 473 | // 474 | // context.error = YAML_NO_ERROR // Eliminate a compliler warning. 
475 | // 476 | // assert(document) // Non-NULL document object is expected. 477 | // 478 | // while (!STACK_EMPTY(&context, document.nodes)) { 479 | // node yaml_node_t = POP(&context, document.nodes) 480 | // yaml_free(node.tag) 481 | // switch (node.type) { 482 | // case YAML_SCALAR_NODE: 483 | // yaml_free(node.data.scalar.value) 484 | // break 485 | // case YAML_SEQUENCE_NODE: 486 | // STACK_DEL(&context, node.data.sequence.items) 487 | // break 488 | // case YAML_MAPPING_NODE: 489 | // STACK_DEL(&context, node.data.mapping.pairs) 490 | // break 491 | // default: 492 | // assert(0) // Should not happen. 493 | // } 494 | // } 495 | // STACK_DEL(&context, document.nodes) 496 | // 497 | // yaml_free(document.version_directive) 498 | // for (tag_directive = document.tag_directives.start 499 | // tag_directive != document.tag_directives.end 500 | // tag_directive++) { 501 | // yaml_free(tag_directive.handle) 502 | // yaml_free(tag_directive.prefix) 503 | // } 504 | // yaml_free(document.tag_directives.start) 505 | // 506 | // memset(document, 0, sizeof(yaml_document_t)) 507 | //} 508 | // 509 | ///** 510 | // * Get a document node. 511 | // */ 512 | // 513 | //YAML_DECLARE(yaml_node_t *) 514 | //yaml_document_get_node(document *yaml_document_t, index int) 515 | //{ 516 | // assert(document) // Non-NULL document object is expected. 517 | // 518 | // if (index > 0 && document.nodes.start + index <= document.nodes.top) { 519 | // return document.nodes.start + index - 1 520 | // } 521 | // return NULL 522 | //} 523 | // 524 | ///** 525 | // * Get the root object. 526 | // */ 527 | // 528 | //YAML_DECLARE(yaml_node_t *) 529 | //yaml_document_get_root_node(document *yaml_document_t) 530 | //{ 531 | // assert(document) // Non-NULL document object is expected. 532 | // 533 | // if (document.nodes.top != document.nodes.start) { 534 | // return document.nodes.start 535 | // } 536 | // return NULL 537 | //} 538 | // 539 | ///* 540 | // * Add a scalar node to a document. 541 | // */ 542 | // 543 | //YAML_DECLARE(int) 544 | //yaml_document_add_scalar(document *yaml_document_t, 545 | // tag *yaml_char_t, value *yaml_char_t, length int, 546 | // style yaml_scalar_style_t) 547 | //{ 548 | // struct { 549 | // error yaml_error_type_t 550 | // } context 551 | // mark yaml_mark_t = { 0, 0, 0 } 552 | // tag_copy *yaml_char_t = NULL 553 | // value_copy *yaml_char_t = NULL 554 | // node yaml_node_t 555 | // 556 | // assert(document) // Non-NULL document object is expected. 557 | // assert(value) // Non-NULL value is expected. 558 | // 559 | // if (!tag) { 560 | // tag = (yaml_char_t *)YAML_DEFAULT_SCALAR_TAG 561 | // } 562 | // 563 | // if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error 564 | // tag_copy = yaml_strdup(tag) 565 | // if (!tag_copy) goto error 566 | // 567 | // if (length < 0) { 568 | // length = strlen((char *)value) 569 | // } 570 | // 571 | // if (!yaml_check_utf8(value, length)) goto error 572 | // value_copy = yaml_malloc(length+1) 573 | // if (!value_copy) goto error 574 | // memcpy(value_copy, value, length) 575 | // value_copy[length] = '\0' 576 | // 577 | // SCALAR_NODE_INIT(node, tag_copy, value_copy, length, style, mark, mark) 578 | // if (!PUSH(&context, document.nodes, node)) goto error 579 | // 580 | // return document.nodes.top - document.nodes.start 581 | // 582 | //error: 583 | // yaml_free(tag_copy) 584 | // yaml_free(value_copy) 585 | // 586 | // return 0 587 | //} 588 | // 589 | ///* 590 | // * Add a sequence node to a document. 
591 | // */ 592 | // 593 | //YAML_DECLARE(int) 594 | //yaml_document_add_sequence(document *yaml_document_t, 595 | // tag *yaml_char_t, style yaml_sequence_style_t) 596 | //{ 597 | // struct { 598 | // error yaml_error_type_t 599 | // } context 600 | // mark yaml_mark_t = { 0, 0, 0 } 601 | // tag_copy *yaml_char_t = NULL 602 | // struct { 603 | // start *yaml_node_item_t 604 | // end *yaml_node_item_t 605 | // top *yaml_node_item_t 606 | // } items = { NULL, NULL, NULL } 607 | // node yaml_node_t 608 | // 609 | // assert(document) // Non-NULL document object is expected. 610 | // 611 | // if (!tag) { 612 | // tag = (yaml_char_t *)YAML_DEFAULT_SEQUENCE_TAG 613 | // } 614 | // 615 | // if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error 616 | // tag_copy = yaml_strdup(tag) 617 | // if (!tag_copy) goto error 618 | // 619 | // if (!STACK_INIT(&context, items, INITIAL_STACK_SIZE)) goto error 620 | // 621 | // SEQUENCE_NODE_INIT(node, tag_copy, items.start, items.end, 622 | // style, mark, mark) 623 | // if (!PUSH(&context, document.nodes, node)) goto error 624 | // 625 | // return document.nodes.top - document.nodes.start 626 | // 627 | //error: 628 | // STACK_DEL(&context, items) 629 | // yaml_free(tag_copy) 630 | // 631 | // return 0 632 | //} 633 | // 634 | ///* 635 | // * Add a mapping node to a document. 636 | // */ 637 | // 638 | //YAML_DECLARE(int) 639 | //yaml_document_add_mapping(document *yaml_document_t, 640 | // tag *yaml_char_t, style yaml_mapping_style_t) 641 | //{ 642 | // struct { 643 | // error yaml_error_type_t 644 | // } context 645 | // mark yaml_mark_t = { 0, 0, 0 } 646 | // tag_copy *yaml_char_t = NULL 647 | // struct { 648 | // start *yaml_node_pair_t 649 | // end *yaml_node_pair_t 650 | // top *yaml_node_pair_t 651 | // } pairs = { NULL, NULL, NULL } 652 | // node yaml_node_t 653 | // 654 | // assert(document) // Non-NULL document object is expected. 655 | // 656 | // if (!tag) { 657 | // tag = (yaml_char_t *)YAML_DEFAULT_MAPPING_TAG 658 | // } 659 | // 660 | // if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error 661 | // tag_copy = yaml_strdup(tag) 662 | // if (!tag_copy) goto error 663 | // 664 | // if (!STACK_INIT(&context, pairs, INITIAL_STACK_SIZE)) goto error 665 | // 666 | // MAPPING_NODE_INIT(node, tag_copy, pairs.start, pairs.end, 667 | // style, mark, mark) 668 | // if (!PUSH(&context, document.nodes, node)) goto error 669 | // 670 | // return document.nodes.top - document.nodes.start 671 | // 672 | //error: 673 | // STACK_DEL(&context, pairs) 674 | // yaml_free(tag_copy) 675 | // 676 | // return 0 677 | //} 678 | // 679 | ///* 680 | // * Append an item to a sequence node. 681 | // */ 682 | // 683 | //YAML_DECLARE(int) 684 | //yaml_document_append_sequence_item(document *yaml_document_t, 685 | // sequence int, item int) 686 | //{ 687 | // struct { 688 | // error yaml_error_type_t 689 | // } context 690 | // 691 | // assert(document) // Non-NULL document is required. 692 | // assert(sequence > 0 693 | // && document.nodes.start + sequence <= document.nodes.top) 694 | // // Valid sequence id is required. 695 | // assert(document.nodes.start[sequence-1].type == YAML_SEQUENCE_NODE) 696 | // // A sequence node is required. 697 | // assert(item > 0 && document.nodes.start + item <= document.nodes.top) 698 | // // Valid item id is required. 
699 | // 700 | // if (!PUSH(&context, 701 | // document.nodes.start[sequence-1].data.sequence.items, item)) 702 | // return 0 703 | // 704 | // return 1 705 | //} 706 | // 707 | ///* 708 | // * Append a pair of a key and a value to a mapping node. 709 | // */ 710 | // 711 | //YAML_DECLARE(int) 712 | //yaml_document_append_mapping_pair(document *yaml_document_t, 713 | // mapping int, key int, value int) 714 | //{ 715 | // struct { 716 | // error yaml_error_type_t 717 | // } context 718 | // 719 | // pair yaml_node_pair_t 720 | // 721 | // assert(document) // Non-NULL document is required. 722 | // assert(mapping > 0 723 | // && document.nodes.start + mapping <= document.nodes.top) 724 | // // Valid mapping id is required. 725 | // assert(document.nodes.start[mapping-1].type == YAML_MAPPING_NODE) 726 | // // A mapping node is required. 727 | // assert(key > 0 && document.nodes.start + key <= document.nodes.top) 728 | // // Valid key id is required. 729 | // assert(value > 0 && document.nodes.start + value <= document.nodes.top) 730 | // // Valid value id is required. 731 | // 732 | // pair.key = key 733 | // pair.value = value 734 | // 735 | // if (!PUSH(&context, 736 | // document.nodes.start[mapping-1].data.mapping.pairs, pair)) 737 | // return 0 738 | // 739 | // return 1 740 | //} 741 | // 742 | // 743 | -------------------------------------------------------------------------------- /vendor/gopkg.in/yaml.v2/decode.go: -------------------------------------------------------------------------------- 1 | package yaml 2 | 3 | import ( 4 | "encoding" 5 | "encoding/base64" 6 | "fmt" 7 | "math" 8 | "reflect" 9 | "strconv" 10 | "time" 11 | ) 12 | 13 | const ( 14 | documentNode = 1 << iota 15 | mappingNode 16 | sequenceNode 17 | scalarNode 18 | aliasNode 19 | ) 20 | 21 | type node struct { 22 | kind int 23 | line, column int 24 | tag string 25 | value string 26 | implicit bool 27 | children []*node 28 | anchors map[string]*node 29 | } 30 | 31 | // ---------------------------------------------------------------------------- 32 | // Parser, produces a node tree out of a libyaml event stream. 
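The constants and node struct above are the decoder's entire intermediate representation: the parser turns libyaml events into a tree of these nodes, and the decoder further down maps that tree onto Go values. For orientation, a minimal sketch of what this machinery looks like from the package's public API; the `Config` type and its fields are invented for the example.

```go
package main

import (
	"fmt"

	"gopkg.in/yaml.v2"
)

// Config is a hypothetical target type used only for this illustration.
type Config struct {
	Name  string `yaml:"name"`
	Ports []int  `yaml:"ports"`
}

func main() {
	// One document node holding a mapping node; "ports" is a sequence
	// node of scalar nodes, which the decoder converts into []int.
	data := []byte("name: web\nports:\n  - 80\n  - 443\n")

	var c Config
	if err := yaml.Unmarshal(data, &c); err != nil {
		panic(err)
	}
	fmt.Println(c.Name, c.Ports) // web [80 443]
}
```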
33 | 34 | type parser struct { 35 | parser yaml_parser_t 36 | event yaml_event_t 37 | doc *node 38 | } 39 | 40 | func newParser(b []byte) *parser { 41 | p := parser{} 42 | if !yaml_parser_initialize(&p.parser) { 43 | panic("failed to initialize YAML emitter") 44 | } 45 | 46 | if len(b) == 0 { 47 | b = []byte{'\n'} 48 | } 49 | 50 | yaml_parser_set_input_string(&p.parser, b) 51 | 52 | p.skip() 53 | if p.event.typ != yaml_STREAM_START_EVENT { 54 | panic("expected stream start event, got " + strconv.Itoa(int(p.event.typ))) 55 | } 56 | p.skip() 57 | return &p 58 | } 59 | 60 | func (p *parser) destroy() { 61 | if p.event.typ != yaml_NO_EVENT { 62 | yaml_event_delete(&p.event) 63 | } 64 | yaml_parser_delete(&p.parser) 65 | } 66 | 67 | func (p *parser) skip() { 68 | if p.event.typ != yaml_NO_EVENT { 69 | if p.event.typ == yaml_STREAM_END_EVENT { 70 | failf("attempted to go past the end of stream; corrupted value?") 71 | } 72 | yaml_event_delete(&p.event) 73 | } 74 | if !yaml_parser_parse(&p.parser, &p.event) { 75 | p.fail() 76 | } 77 | } 78 | 79 | func (p *parser) fail() { 80 | var where string 81 | var line int 82 | if p.parser.problem_mark.line != 0 { 83 | line = p.parser.problem_mark.line 84 | } else if p.parser.context_mark.line != 0 { 85 | line = p.parser.context_mark.line 86 | } 87 | if line != 0 { 88 | where = "line " + strconv.Itoa(line) + ": " 89 | } 90 | var msg string 91 | if len(p.parser.problem) > 0 { 92 | msg = p.parser.problem 93 | } else { 94 | msg = "unknown problem parsing YAML content" 95 | } 96 | failf("%s%s", where, msg) 97 | } 98 | 99 | func (p *parser) anchor(n *node, anchor []byte) { 100 | if anchor != nil { 101 | p.doc.anchors[string(anchor)] = n 102 | } 103 | } 104 | 105 | func (p *parser) parse() *node { 106 | switch p.event.typ { 107 | case yaml_SCALAR_EVENT: 108 | return p.scalar() 109 | case yaml_ALIAS_EVENT: 110 | return p.alias() 111 | case yaml_MAPPING_START_EVENT: 112 | return p.mapping() 113 | case yaml_SEQUENCE_START_EVENT: 114 | return p.sequence() 115 | case yaml_DOCUMENT_START_EVENT: 116 | return p.document() 117 | case yaml_STREAM_END_EVENT: 118 | // Happens when attempting to decode an empty buffer. 
119 | return nil 120 | default: 121 | panic("attempted to parse unknown event: " + strconv.Itoa(int(p.event.typ))) 122 | } 123 | panic("unreachable") 124 | } 125 | 126 | func (p *parser) node(kind int) *node { 127 | return &node{ 128 | kind: kind, 129 | line: p.event.start_mark.line, 130 | column: p.event.start_mark.column, 131 | } 132 | } 133 | 134 | func (p *parser) document() *node { 135 | n := p.node(documentNode) 136 | n.anchors = make(map[string]*node) 137 | p.doc = n 138 | p.skip() 139 | n.children = append(n.children, p.parse()) 140 | if p.event.typ != yaml_DOCUMENT_END_EVENT { 141 | panic("expected end of document event but got " + strconv.Itoa(int(p.event.typ))) 142 | } 143 | p.skip() 144 | return n 145 | } 146 | 147 | func (p *parser) alias() *node { 148 | n := p.node(aliasNode) 149 | n.value = string(p.event.anchor) 150 | p.skip() 151 | return n 152 | } 153 | 154 | func (p *parser) scalar() *node { 155 | n := p.node(scalarNode) 156 | n.value = string(p.event.value) 157 | n.tag = string(p.event.tag) 158 | n.implicit = p.event.implicit 159 | p.anchor(n, p.event.anchor) 160 | p.skip() 161 | return n 162 | } 163 | 164 | func (p *parser) sequence() *node { 165 | n := p.node(sequenceNode) 166 | p.anchor(n, p.event.anchor) 167 | p.skip() 168 | for p.event.typ != yaml_SEQUENCE_END_EVENT { 169 | n.children = append(n.children, p.parse()) 170 | } 171 | p.skip() 172 | return n 173 | } 174 | 175 | func (p *parser) mapping() *node { 176 | n := p.node(mappingNode) 177 | p.anchor(n, p.event.anchor) 178 | p.skip() 179 | for p.event.typ != yaml_MAPPING_END_EVENT { 180 | n.children = append(n.children, p.parse(), p.parse()) 181 | } 182 | p.skip() 183 | return n 184 | } 185 | 186 | // ---------------------------------------------------------------------------- 187 | // Decoder, unmarshals a node into a provided value. 188 | 189 | type decoder struct { 190 | doc *node 191 | aliases map[string]bool 192 | mapType reflect.Type 193 | terrors []string 194 | } 195 | 196 | var ( 197 | mapItemType = reflect.TypeOf(MapItem{}) 198 | durationType = reflect.TypeOf(time.Duration(0)) 199 | defaultMapType = reflect.TypeOf(map[interface{}]interface{}{}) 200 | ifaceType = defaultMapType.Elem() 201 | ) 202 | 203 | func newDecoder() *decoder { 204 | d := &decoder{mapType: defaultMapType} 205 | d.aliases = make(map[string]bool) 206 | return d 207 | } 208 | 209 | func (d *decoder) terror(n *node, tag string, out reflect.Value) { 210 | if n.tag != "" { 211 | tag = n.tag 212 | } 213 | value := n.value 214 | if tag != yaml_SEQ_TAG && tag != yaml_MAP_TAG { 215 | if len(value) > 10 { 216 | value = " `" + value[:7] + "...`" 217 | } else { 218 | value = " `" + value + "`" 219 | } 220 | } 221 | d.terrors = append(d.terrors, fmt.Sprintf("line %d: cannot unmarshal %s%s into %s", n.line+1, shortTag(tag), value, out.Type())) 222 | } 223 | 224 | func (d *decoder) callUnmarshaler(n *node, u Unmarshaler) (good bool) { 225 | terrlen := len(d.terrors) 226 | err := u.UnmarshalYAML(func(v interface{}) (err error) { 227 | defer handleErr(&err) 228 | d.unmarshal(n, reflect.ValueOf(v)) 229 | if len(d.terrors) > terrlen { 230 | issues := d.terrors[terrlen:] 231 | d.terrors = d.terrors[:terrlen] 232 | return &TypeError{issues} 233 | } 234 | return nil 235 | }) 236 | if e, ok := err.(*TypeError); ok { 237 | d.terrors = append(d.terrors, e.Errors...) 
238 | return false 239 | } 240 | if err != nil { 241 | fail(err) 242 | } 243 | return true 244 | } 245 | 246 | // d.prepare initializes and dereferences pointers and calls UnmarshalYAML 247 | // if a value is found to implement it. 248 | // It returns the initialized and dereferenced out value, whether 249 | // unmarshalling was already done by UnmarshalYAML, and if so whether 250 | // its types unmarshalled appropriately. 251 | // 252 | // If n holds a null value, prepare returns before doing anything. 253 | func (d *decoder) prepare(n *node, out reflect.Value) (newout reflect.Value, unmarshaled, good bool) { 254 | if n.tag == yaml_NULL_TAG || n.kind == scalarNode && n.tag == "" && (n.value == "null" || n.value == "") { 255 | return out, false, false 256 | } 257 | again := true 258 | for again { 259 | again = false 260 | if out.Kind() == reflect.Ptr { 261 | if out.IsNil() { 262 | out.Set(reflect.New(out.Type().Elem())) 263 | } 264 | out = out.Elem() 265 | again = true 266 | } 267 | if out.CanAddr() { 268 | if u, ok := out.Addr().Interface().(Unmarshaler); ok { 269 | good = d.callUnmarshaler(n, u) 270 | return out, true, good 271 | } 272 | } 273 | } 274 | return out, false, false 275 | } 276 | 277 | func (d *decoder) unmarshal(n *node, out reflect.Value) (good bool) { 278 | switch n.kind { 279 | case documentNode: 280 | return d.document(n, out) 281 | case aliasNode: 282 | return d.alias(n, out) 283 | } 284 | out, unmarshaled, good := d.prepare(n, out) 285 | if unmarshaled { 286 | return good 287 | } 288 | switch n.kind { 289 | case scalarNode: 290 | good = d.scalar(n, out) 291 | case mappingNode: 292 | good = d.mapping(n, out) 293 | case sequenceNode: 294 | good = d.sequence(n, out) 295 | default: 296 | panic("internal error: unknown node kind: " + strconv.Itoa(n.kind)) 297 | } 298 | return good 299 | } 300 | 301 | func (d *decoder) document(n *node, out reflect.Value) (good bool) { 302 | if len(n.children) == 1 { 303 | d.doc = n 304 | d.unmarshal(n.children[0], out) 305 | return true 306 | } 307 | return false 308 | } 309 | 310 | func (d *decoder) alias(n *node, out reflect.Value) (good bool) { 311 | an, ok := d.doc.anchors[n.value] 312 | if !ok { 313 | failf("unknown anchor '%s' referenced", n.value) 314 | } 315 | if d.aliases[n.value] { 316 | failf("anchor '%s' value contains itself", n.value) 317 | } 318 | d.aliases[n.value] = true 319 | good = d.unmarshal(an, out) 320 | delete(d.aliases, n.value) 321 | return good 322 | } 323 | 324 | var zeroValue reflect.Value 325 | 326 | func resetMap(out reflect.Value) { 327 | for _, k := range out.MapKeys() { 328 | out.SetMapIndex(k, zeroValue) 329 | } 330 | } 331 | 332 | func (d *decoder) scalar(n *node, out reflect.Value) (good bool) { 333 | var tag string 334 | var resolved interface{} 335 | if n.tag == "" && !n.implicit { 336 | tag = yaml_STR_TAG 337 | resolved = n.value 338 | } else { 339 | tag, resolved = resolve(n.tag, n.value) 340 | if tag == yaml_BINARY_TAG { 341 | data, err := base64.StdEncoding.DecodeString(resolved.(string)) 342 | if err != nil { 343 | failf("!!binary value contains invalid base64 data") 344 | } 345 | resolved = string(data) 346 | } 347 | } 348 | if resolved == nil { 349 | if out.Kind() == reflect.Map && !out.CanAddr() { 350 | resetMap(out) 351 | } else { 352 | out.Set(reflect.Zero(out.Type())) 353 | } 354 | return true 355 | } 356 | if s, ok := resolved.(string); ok && out.CanAddr() { 357 | if u, ok := out.Addr().Interface().(encoding.TextUnmarshaler); ok { 358 | err := u.UnmarshalText([]byte(s)) 359 | if err != nil 
{ 360 | fail(err) 361 | } 362 | return true 363 | } 364 | } 365 | switch out.Kind() { 366 | case reflect.String: 367 | if tag == yaml_BINARY_TAG { 368 | out.SetString(resolved.(string)) 369 | good = true 370 | } else if resolved != nil { 371 | out.SetString(n.value) 372 | good = true 373 | } 374 | case reflect.Interface: 375 | if resolved == nil { 376 | out.Set(reflect.Zero(out.Type())) 377 | } else { 378 | out.Set(reflect.ValueOf(resolved)) 379 | } 380 | good = true 381 | case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: 382 | switch resolved := resolved.(type) { 383 | case int: 384 | if !out.OverflowInt(int64(resolved)) { 385 | out.SetInt(int64(resolved)) 386 | good = true 387 | } 388 | case int64: 389 | if !out.OverflowInt(resolved) { 390 | out.SetInt(resolved) 391 | good = true 392 | } 393 | case uint64: 394 | if resolved <= math.MaxInt64 && !out.OverflowInt(int64(resolved)) { 395 | out.SetInt(int64(resolved)) 396 | good = true 397 | } 398 | case float64: 399 | if resolved <= math.MaxInt64 && !out.OverflowInt(int64(resolved)) { 400 | out.SetInt(int64(resolved)) 401 | good = true 402 | } 403 | case string: 404 | if out.Type() == durationType { 405 | d, err := time.ParseDuration(resolved) 406 | if err == nil { 407 | out.SetInt(int64(d)) 408 | good = true 409 | } 410 | } 411 | } 412 | case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: 413 | switch resolved := resolved.(type) { 414 | case int: 415 | if resolved >= 0 && !out.OverflowUint(uint64(resolved)) { 416 | out.SetUint(uint64(resolved)) 417 | good = true 418 | } 419 | case int64: 420 | if resolved >= 0 && !out.OverflowUint(uint64(resolved)) { 421 | out.SetUint(uint64(resolved)) 422 | good = true 423 | } 424 | case uint64: 425 | if !out.OverflowUint(uint64(resolved)) { 426 | out.SetUint(uint64(resolved)) 427 | good = true 428 | } 429 | case float64: 430 | if resolved <= math.MaxUint64 && !out.OverflowUint(uint64(resolved)) { 431 | out.SetUint(uint64(resolved)) 432 | good = true 433 | } 434 | } 435 | case reflect.Bool: 436 | switch resolved := resolved.(type) { 437 | case bool: 438 | out.SetBool(resolved) 439 | good = true 440 | } 441 | case reflect.Float32, reflect.Float64: 442 | switch resolved := resolved.(type) { 443 | case int: 444 | out.SetFloat(float64(resolved)) 445 | good = true 446 | case int64: 447 | out.SetFloat(float64(resolved)) 448 | good = true 449 | case uint64: 450 | out.SetFloat(float64(resolved)) 451 | good = true 452 | case float64: 453 | out.SetFloat(resolved) 454 | good = true 455 | } 456 | case reflect.Ptr: 457 | if out.Type().Elem() == reflect.TypeOf(resolved) { 458 | // TODO DOes this make sense? When is out a Ptr except when decoding a nil value? 459 | elem := reflect.New(out.Type().Elem()) 460 | elem.Elem().Set(reflect.ValueOf(resolved)) 461 | out.Set(elem) 462 | good = true 463 | } 464 | } 465 | if !good { 466 | d.terror(n, tag, out) 467 | } 468 | return good 469 | } 470 | 471 | func settableValueOf(i interface{}) reflect.Value { 472 | v := reflect.ValueOf(i) 473 | sv := reflect.New(v.Type()).Elem() 474 | sv.Set(v) 475 | return sv 476 | } 477 | 478 | func (d *decoder) sequence(n *node, out reflect.Value) (good bool) { 479 | l := len(n.children) 480 | 481 | var iface reflect.Value 482 | switch out.Kind() { 483 | case reflect.Slice: 484 | out.Set(reflect.MakeSlice(out.Type(), l, l)) 485 | case reflect.Interface: 486 | // No type hints. Will have to use a generic sequence. 
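The prepare/callUnmarshaler path above is also how user-defined types hook into decoding: any addressable value implementing yaml.Unmarshaler (or encoding.TextUnmarshaler) is handed the raw node before the generic scalar/mapping/sequence handling runs. A small, hypothetical example of that hook; the `LogLevel` type is invented.

```go
package main

import (
	"fmt"
	"strings"

	"gopkg.in/yaml.v2"
)

// LogLevel is a made-up type illustrating the Unmarshaler hook that
// callUnmarshaler/prepare dispatch to above.
type LogLevel struct {
	Value string
}

// UnmarshalYAML receives a callback that decodes the underlying node into
// whatever Go value we ask for; here we normalise a plain scalar.
func (l *LogLevel) UnmarshalYAML(unmarshal func(interface{}) error) error {
	var raw string
	if err := unmarshal(&raw); err != nil {
		return err
	}
	l.Value = strings.ToLower(strings.TrimSpace(raw))
	return nil
}

func main() {
	var out struct {
		Level LogLevel `yaml:"level"`
	}
	if err := yaml.Unmarshal([]byte("level: '  WARNING  '"), &out); err != nil {
		panic(err)
	}
	fmt.Println(out.Level.Value) // warning
}
```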
487 | iface = out 488 | out = settableValueOf(make([]interface{}, l)) 489 | default: 490 | d.terror(n, yaml_SEQ_TAG, out) 491 | return false 492 | } 493 | et := out.Type().Elem() 494 | 495 | j := 0 496 | for i := 0; i < l; i++ { 497 | e := reflect.New(et).Elem() 498 | if ok := d.unmarshal(n.children[i], e); ok { 499 | out.Index(j).Set(e) 500 | j++ 501 | } 502 | } 503 | out.Set(out.Slice(0, j)) 504 | if iface.IsValid() { 505 | iface.Set(out) 506 | } 507 | return true 508 | } 509 | 510 | func (d *decoder) mapping(n *node, out reflect.Value) (good bool) { 511 | switch out.Kind() { 512 | case reflect.Struct: 513 | return d.mappingStruct(n, out) 514 | case reflect.Slice: 515 | return d.mappingSlice(n, out) 516 | case reflect.Map: 517 | // okay 518 | case reflect.Interface: 519 | if d.mapType.Kind() == reflect.Map { 520 | iface := out 521 | out = reflect.MakeMap(d.mapType) 522 | iface.Set(out) 523 | } else { 524 | slicev := reflect.New(d.mapType).Elem() 525 | if !d.mappingSlice(n, slicev) { 526 | return false 527 | } 528 | out.Set(slicev) 529 | return true 530 | } 531 | default: 532 | d.terror(n, yaml_MAP_TAG, out) 533 | return false 534 | } 535 | outt := out.Type() 536 | kt := outt.Key() 537 | et := outt.Elem() 538 | 539 | mapType := d.mapType 540 | if outt.Key() == ifaceType && outt.Elem() == ifaceType { 541 | d.mapType = outt 542 | } 543 | 544 | if out.IsNil() { 545 | out.Set(reflect.MakeMap(outt)) 546 | } 547 | l := len(n.children) 548 | for i := 0; i < l; i += 2 { 549 | if isMerge(n.children[i]) { 550 | d.merge(n.children[i+1], out) 551 | continue 552 | } 553 | k := reflect.New(kt).Elem() 554 | if d.unmarshal(n.children[i], k) { 555 | kkind := k.Kind() 556 | if kkind == reflect.Interface { 557 | kkind = k.Elem().Kind() 558 | } 559 | if kkind == reflect.Map || kkind == reflect.Slice { 560 | failf("invalid map key: %#v", k.Interface()) 561 | } 562 | e := reflect.New(et).Elem() 563 | if d.unmarshal(n.children[i+1], e) { 564 | out.SetMapIndex(k, e) 565 | } 566 | } 567 | } 568 | d.mapType = mapType 569 | return true 570 | } 571 | 572 | func (d *decoder) mappingSlice(n *node, out reflect.Value) (good bool) { 573 | outt := out.Type() 574 | if outt.Elem() != mapItemType { 575 | d.terror(n, yaml_MAP_TAG, out) 576 | return false 577 | } 578 | 579 | mapType := d.mapType 580 | d.mapType = outt 581 | 582 | var slice []MapItem 583 | var l = len(n.children) 584 | for i := 0; i < l; i += 2 { 585 | if isMerge(n.children[i]) { 586 | d.merge(n.children[i+1], out) 587 | continue 588 | } 589 | item := MapItem{} 590 | k := reflect.ValueOf(&item.Key).Elem() 591 | if d.unmarshal(n.children[i], k) { 592 | v := reflect.ValueOf(&item.Value).Elem() 593 | if d.unmarshal(n.children[i+1], v) { 594 | slice = append(slice, item) 595 | } 596 | } 597 | } 598 | out.Set(reflect.ValueOf(slice)) 599 | d.mapType = mapType 600 | return true 601 | } 602 | 603 | func (d *decoder) mappingStruct(n *node, out reflect.Value) (good bool) { 604 | sinfo, err := getStructInfo(out.Type()) 605 | if err != nil { 606 | panic(err) 607 | } 608 | name := settableValueOf("") 609 | l := len(n.children) 610 | 611 | var inlineMap reflect.Value 612 | var elemType reflect.Type 613 | if sinfo.InlineMap != -1 { 614 | inlineMap = out.Field(sinfo.InlineMap) 615 | inlineMap.Set(reflect.New(inlineMap.Type()).Elem()) 616 | elemType = inlineMap.Type().Elem() 617 | } 618 | 619 | for i := 0; i < l; i += 2 { 620 | ni := n.children[i] 621 | if isMerge(ni) { 622 | d.merge(n.children[i+1], out) 623 | continue 624 | } 625 | if !d.unmarshal(ni, name) { 626 | continue 
627 | } 628 | if info, ok := sinfo.FieldsMap[name.String()]; ok { 629 | var field reflect.Value 630 | if info.Inline == nil { 631 | field = out.Field(info.Num) 632 | } else { 633 | field = out.FieldByIndex(info.Inline) 634 | } 635 | d.unmarshal(n.children[i+1], field) 636 | } else if sinfo.InlineMap != -1 { 637 | if inlineMap.IsNil() { 638 | inlineMap.Set(reflect.MakeMap(inlineMap.Type())) 639 | } 640 | value := reflect.New(elemType).Elem() 641 | d.unmarshal(n.children[i+1], value) 642 | inlineMap.SetMapIndex(name, value) 643 | } 644 | } 645 | return true 646 | } 647 | 648 | func failWantMap() { 649 | failf("map merge requires map or sequence of maps as the value") 650 | } 651 | 652 | func (d *decoder) merge(n *node, out reflect.Value) { 653 | switch n.kind { 654 | case mappingNode: 655 | d.unmarshal(n, out) 656 | case aliasNode: 657 | an, ok := d.doc.anchors[n.value] 658 | if ok && an.kind != mappingNode { 659 | failWantMap() 660 | } 661 | d.unmarshal(n, out) 662 | case sequenceNode: 663 | // Step backwards as earlier nodes take precedence. 664 | for i := len(n.children) - 1; i >= 0; i-- { 665 | ni := n.children[i] 666 | if ni.kind == aliasNode { 667 | an, ok := d.doc.anchors[ni.value] 668 | if ok && an.kind != mappingNode { 669 | failWantMap() 670 | } 671 | } else if ni.kind != mappingNode { 672 | failWantMap() 673 | } 674 | d.unmarshal(ni, out) 675 | } 676 | default: 677 | failWantMap() 678 | } 679 | } 680 | 681 | func isMerge(n *node) bool { 682 | return n.kind == scalarNode && n.value == "<<" && (n.implicit == true || n.tag == yaml_MERGE_TAG) 683 | } 684 | -------------------------------------------------------------------------------- /vendor/gopkg.in/yaml.v2/encode.go: -------------------------------------------------------------------------------- 1 | package yaml 2 | 3 | import ( 4 | "encoding" 5 | "fmt" 6 | "reflect" 7 | "regexp" 8 | "sort" 9 | "strconv" 10 | "strings" 11 | "time" 12 | ) 13 | 14 | type encoder struct { 15 | emitter yaml_emitter_t 16 | event yaml_event_t 17 | out []byte 18 | flow bool 19 | } 20 | 21 | func newEncoder() (e *encoder) { 22 | e = &encoder{} 23 | e.must(yaml_emitter_initialize(&e.emitter)) 24 | yaml_emitter_set_output_string(&e.emitter, &e.out) 25 | yaml_emitter_set_unicode(&e.emitter, true) 26 | e.must(yaml_stream_start_event_initialize(&e.event, yaml_UTF8_ENCODING)) 27 | e.emit() 28 | e.must(yaml_document_start_event_initialize(&e.event, nil, nil, true)) 29 | e.emit() 30 | return e 31 | } 32 | 33 | func (e *encoder) finish() { 34 | e.must(yaml_document_end_event_initialize(&e.event, true)) 35 | e.emit() 36 | e.emitter.open_ended = false 37 | e.must(yaml_stream_end_event_initialize(&e.event)) 38 | e.emit() 39 | } 40 | 41 | func (e *encoder) destroy() { 42 | yaml_emitter_delete(&e.emitter) 43 | } 44 | 45 | func (e *encoder) emit() { 46 | // This will internally delete the e.event value. 
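The merge handling at the end of decode.go above (isMerge and d.merge) is what makes YAML merge keys ("<<") work through Unmarshal: the anchored mapping is unmarshalled into the same destination, and entries written later in the current mapping overwrite merged ones. A minimal sketch, assuming plain string values; the key names are invented.

```go
package main

import (
	"fmt"

	"gopkg.in/yaml.v2"
)

func main() {
	// "<<: *base" pulls the keys of the anchored mapping into "service";
	// the explicit "mode" entry that follows then overrides the merged one.
	data := []byte(`
base: &base
  mode: strict
  region: us-east-1
service:
  <<: *base
  mode: relaxed
`)

	var doc map[string]map[string]string
	if err := yaml.Unmarshal(data, &doc); err != nil {
		panic(err)
	}
	fmt.Println(doc["service"]["mode"])   // relaxed (explicit key wins)
	fmt.Println(doc["service"]["region"]) // us-east-1 (merged from &base)
}
```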
47 | if !yaml_emitter_emit(&e.emitter, &e.event) && e.event.typ != yaml_DOCUMENT_END_EVENT && e.event.typ != yaml_STREAM_END_EVENT { 48 | e.must(false) 49 | } 50 | } 51 | 52 | func (e *encoder) must(ok bool) { 53 | if !ok { 54 | msg := e.emitter.problem 55 | if msg == "" { 56 | msg = "unknown problem generating YAML content" 57 | } 58 | failf("%s", msg) 59 | } 60 | } 61 | 62 | func (e *encoder) marshal(tag string, in reflect.Value) { 63 | if !in.IsValid() { 64 | e.nilv() 65 | return 66 | } 67 | iface := in.Interface() 68 | if m, ok := iface.(Marshaler); ok { 69 | v, err := m.MarshalYAML() 70 | if err != nil { 71 | fail(err) 72 | } 73 | if v == nil { 74 | e.nilv() 75 | return 76 | } 77 | in = reflect.ValueOf(v) 78 | } else if m, ok := iface.(encoding.TextMarshaler); ok { 79 | text, err := m.MarshalText() 80 | if err != nil { 81 | fail(err) 82 | } 83 | in = reflect.ValueOf(string(text)) 84 | } 85 | switch in.Kind() { 86 | case reflect.Interface: 87 | if in.IsNil() { 88 | e.nilv() 89 | } else { 90 | e.marshal(tag, in.Elem()) 91 | } 92 | case reflect.Map: 93 | e.mapv(tag, in) 94 | case reflect.Ptr: 95 | if in.IsNil() { 96 | e.nilv() 97 | } else { 98 | e.marshal(tag, in.Elem()) 99 | } 100 | case reflect.Struct: 101 | e.structv(tag, in) 102 | case reflect.Slice: 103 | if in.Type().Elem() == mapItemType { 104 | e.itemsv(tag, in) 105 | } else { 106 | e.slicev(tag, in) 107 | } 108 | case reflect.String: 109 | e.stringv(tag, in) 110 | case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: 111 | if in.Type() == durationType { 112 | e.stringv(tag, reflect.ValueOf(iface.(time.Duration).String())) 113 | } else { 114 | e.intv(tag, in) 115 | } 116 | case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: 117 | e.uintv(tag, in) 118 | case reflect.Float32, reflect.Float64: 119 | e.floatv(tag, in) 120 | case reflect.Bool: 121 | e.boolv(tag, in) 122 | default: 123 | panic("cannot marshal type: " + in.Type().String()) 124 | } 125 | } 126 | 127 | func (e *encoder) mapv(tag string, in reflect.Value) { 128 | e.mappingv(tag, func() { 129 | keys := keyList(in.MapKeys()) 130 | sort.Sort(keys) 131 | for _, k := range keys { 132 | e.marshal("", k) 133 | e.marshal("", in.MapIndex(k)) 134 | } 135 | }) 136 | } 137 | 138 | func (e *encoder) itemsv(tag string, in reflect.Value) { 139 | e.mappingv(tag, func() { 140 | slice := in.Convert(reflect.TypeOf([]MapItem{})).Interface().([]MapItem) 141 | for _, item := range slice { 142 | e.marshal("", reflect.ValueOf(item.Key)) 143 | e.marshal("", reflect.ValueOf(item.Value)) 144 | } 145 | }) 146 | } 147 | 148 | func (e *encoder) structv(tag string, in reflect.Value) { 149 | sinfo, err := getStructInfo(in.Type()) 150 | if err != nil { 151 | panic(err) 152 | } 153 | e.mappingv(tag, func() { 154 | for _, info := range sinfo.FieldsList { 155 | var value reflect.Value 156 | if info.Inline == nil { 157 | value = in.Field(info.Num) 158 | } else { 159 | value = in.FieldByIndex(info.Inline) 160 | } 161 | if info.OmitEmpty && isZero(value) { 162 | continue 163 | } 164 | e.marshal("", reflect.ValueOf(info.Key)) 165 | e.flow = info.Flow 166 | e.marshal("", value) 167 | } 168 | if sinfo.InlineMap >= 0 { 169 | m := in.Field(sinfo.InlineMap) 170 | if m.Len() > 0 { 171 | e.flow = false 172 | keys := keyList(m.MapKeys()) 173 | sort.Sort(keys) 174 | for _, k := range keys { 175 | if _, found := sinfo.FieldsMap[k.String()]; found { 176 | panic(fmt.Sprintf("Can't have key %q in inlined map; conflicts with struct field", k.String())) 177 
| } 178 | e.marshal("", k) 179 | e.flow = false 180 | e.marshal("", m.MapIndex(k)) 181 | } 182 | } 183 | } 184 | }) 185 | } 186 | 187 | func (e *encoder) mappingv(tag string, f func()) { 188 | implicit := tag == "" 189 | style := yaml_BLOCK_MAPPING_STYLE 190 | if e.flow { 191 | e.flow = false 192 | style = yaml_FLOW_MAPPING_STYLE 193 | } 194 | e.must(yaml_mapping_start_event_initialize(&e.event, nil, []byte(tag), implicit, style)) 195 | e.emit() 196 | f() 197 | e.must(yaml_mapping_end_event_initialize(&e.event)) 198 | e.emit() 199 | } 200 | 201 | func (e *encoder) slicev(tag string, in reflect.Value) { 202 | implicit := tag == "" 203 | style := yaml_BLOCK_SEQUENCE_STYLE 204 | if e.flow { 205 | e.flow = false 206 | style = yaml_FLOW_SEQUENCE_STYLE 207 | } 208 | e.must(yaml_sequence_start_event_initialize(&e.event, nil, []byte(tag), implicit, style)) 209 | e.emit() 210 | n := in.Len() 211 | for i := 0; i < n; i++ { 212 | e.marshal("", in.Index(i)) 213 | } 214 | e.must(yaml_sequence_end_event_initialize(&e.event)) 215 | e.emit() 216 | } 217 | 218 | // isBase60 returns whether s is in base 60 notation as defined in YAML 1.1. 219 | // 220 | // The base 60 float notation in YAML 1.1 is a terrible idea and is unsupported 221 | // in YAML 1.2 and by this package, but these should be marshalled quoted for 222 | // the time being for compatibility with other parsers. 223 | func isBase60Float(s string) (result bool) { 224 | // Fast path. 225 | if s == "" { 226 | return false 227 | } 228 | c := s[0] 229 | if !(c == '+' || c == '-' || c >= '0' && c <= '9') || strings.IndexByte(s, ':') < 0 { 230 | return false 231 | } 232 | // Do the full match. 233 | return base60float.MatchString(s) 234 | } 235 | 236 | // From http://yaml.org/type/float.html, except the regular expression there 237 | // is bogus. In practice parsers do not enforce the "\.[0-9_]*" suffix. 238 | var base60float = regexp.MustCompile(`^[-+]?[0-9][0-9_]*(?::[0-5]?[0-9])+(?:\.[0-9_]*)?$`) 239 | 240 | func (e *encoder) stringv(tag string, in reflect.Value) { 241 | var style yaml_scalar_style_t 242 | s := in.String() 243 | rtag, rs := resolve("", s) 244 | if rtag == yaml_BINARY_TAG { 245 | if tag == "" || tag == yaml_STR_TAG { 246 | tag = rtag 247 | s = rs.(string) 248 | } else if tag == yaml_BINARY_TAG { 249 | failf("explicitly tagged !!binary data must be base64-encoded") 250 | } else { 251 | failf("cannot marshal invalid UTF-8 data as %s", shortTag(tag)) 252 | } 253 | } 254 | if tag == "" && (rtag != yaml_STR_TAG || isBase60Float(s)) { 255 | style = yaml_DOUBLE_QUOTED_SCALAR_STYLE 256 | } else if strings.Contains(s, "\n") { 257 | style = yaml_LITERAL_SCALAR_STYLE 258 | } else { 259 | style = yaml_PLAIN_SCALAR_STYLE 260 | } 261 | e.emitScalar(s, "", tag, style) 262 | } 263 | 264 | func (e *encoder) boolv(tag string, in reflect.Value) { 265 | var s string 266 | if in.Bool() { 267 | s = "true" 268 | } else { 269 | s = "false" 270 | } 271 | e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE) 272 | } 273 | 274 | func (e *encoder) intv(tag string, in reflect.Value) { 275 | s := strconv.FormatInt(in.Int(), 10) 276 | e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE) 277 | } 278 | 279 | func (e *encoder) uintv(tag string, in reflect.Value) { 280 | s := strconv.FormatUint(in.Uint(), 10) 281 | e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE) 282 | } 283 | 284 | func (e *encoder) floatv(tag string, in reflect.Value) { 285 | // FIXME: Handle 64 bits here. 
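The style selection in stringv above has two cases worth seeing end to end: strings that merely look like YAML 1.1 base-60 floats (isBase60Float) are forced into double quotes so other parsers cannot reinterpret them, and strings containing newlines are emitted as literal blocks. A small sketch; the exact rendering may vary slightly between versions.

```go
package main

import (
	"fmt"

	"gopkg.in/yaml.v2"
)

func main() {
	out, err := yaml.Marshal(map[string]string{
		"duration": "1:30",               // looks like a base-60 float, so it should come out double-quoted
		"banner":   "line one\nline two", // contains a newline, so it should use a literal block
	})
	if err != nil {
		panic(err)
	}
	fmt.Print(string(out))
}
```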
286 | s := strconv.FormatFloat(float64(in.Float()), 'g', -1, 32) 287 | switch s { 288 | case "+Inf": 289 | s = ".inf" 290 | case "-Inf": 291 | s = "-.inf" 292 | case "NaN": 293 | s = ".nan" 294 | } 295 | e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE) 296 | } 297 | 298 | func (e *encoder) nilv() { 299 | e.emitScalar("null", "", "", yaml_PLAIN_SCALAR_STYLE) 300 | } 301 | 302 | func (e *encoder) emitScalar(value, anchor, tag string, style yaml_scalar_style_t) { 303 | implicit := tag == "" 304 | e.must(yaml_scalar_event_initialize(&e.event, []byte(anchor), []byte(tag), []byte(value), implicit, implicit, style)) 305 | e.emit() 306 | } 307 | -------------------------------------------------------------------------------- /vendor/gopkg.in/yaml.v2/parserc.go: -------------------------------------------------------------------------------- 1 | package yaml 2 | 3 | import ( 4 | "bytes" 5 | ) 6 | 7 | // The parser implements the following grammar: 8 | // 9 | // stream ::= STREAM-START implicit_document? explicit_document* STREAM-END 10 | // implicit_document ::= block_node DOCUMENT-END* 11 | // explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END* 12 | // block_node_or_indentless_sequence ::= 13 | // ALIAS 14 | // | properties (block_content | indentless_block_sequence)? 15 | // | block_content 16 | // | indentless_block_sequence 17 | // block_node ::= ALIAS 18 | // | properties block_content? 19 | // | block_content 20 | // flow_node ::= ALIAS 21 | // | properties flow_content? 22 | // | flow_content 23 | // properties ::= TAG ANCHOR? | ANCHOR TAG? 24 | // block_content ::= block_collection | flow_collection | SCALAR 25 | // flow_content ::= flow_collection | SCALAR 26 | // block_collection ::= block_sequence | block_mapping 27 | // flow_collection ::= flow_sequence | flow_mapping 28 | // block_sequence ::= BLOCK-SEQUENCE-START (BLOCK-ENTRY block_node?)* BLOCK-END 29 | // indentless_sequence ::= (BLOCK-ENTRY block_node?)+ 30 | // block_mapping ::= BLOCK-MAPPING_START 31 | // ((KEY block_node_or_indentless_sequence?)? 32 | // (VALUE block_node_or_indentless_sequence?)?)* 33 | // BLOCK-END 34 | // flow_sequence ::= FLOW-SEQUENCE-START 35 | // (flow_sequence_entry FLOW-ENTRY)* 36 | // flow_sequence_entry? 37 | // FLOW-SEQUENCE-END 38 | // flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? 39 | // flow_mapping ::= FLOW-MAPPING-START 40 | // (flow_mapping_entry FLOW-ENTRY)* 41 | // flow_mapping_entry? 42 | // FLOW-MAPPING-END 43 | // flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? 44 | 45 | // Peek the next token in the token queue. 46 | func peek_token(parser *yaml_parser_t) *yaml_token_t { 47 | if parser.token_available || yaml_parser_fetch_more_tokens(parser) { 48 | return &parser.tokens[parser.tokens_head] 49 | } 50 | return nil 51 | } 52 | 53 | // Remove the next token from the queue (must be called after peek_token). 54 | func skip_token(parser *yaml_parser_t) { 55 | parser.token_available = false 56 | parser.tokens_parsed++ 57 | parser.stream_end_produced = parser.tokens[parser.tokens_head].typ == yaml_STREAM_END_TOKEN 58 | parser.tokens_head++ 59 | } 60 | 61 | // Get the next event. 62 | func yaml_parser_parse(parser *yaml_parser_t, event *yaml_event_t) bool { 63 | // Erase the event object. 64 | *event = yaml_event_t{} 65 | 66 | // No events after the end of the stream or error. 
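parserc.go is the low-level, token-to-event half of the pipeline; decode.go's parser type earlier in this package simply loops over yaml_parser_parse one event at a time. Since all of these identifiers are unexported, the following is only an in-package sketch of driving that loop by hand; `dumpEvents` is an invented name.

```go
// Illustration only: this sketch would have to live inside the yaml package,
// since yaml_parser_t and friends are unexported. It drives the parser's
// state machine by hand until the stream ends.
func dumpEvents(doc []byte) {
	var p yaml_parser_t
	if !yaml_parser_initialize(&p) {
		panic("failed to initialize YAML parser")
	}
	defer yaml_parser_delete(&p)
	yaml_parser_set_input_string(&p, doc)

	for {
		var ev yaml_event_t
		if !yaml_parser_parse(&p, &ev) {
			panic(p.problem)
		}
		typ := ev.typ
		yaml_event_delete(&ev)
		if typ == yaml_STREAM_END_EVENT {
			return
		}
		// For input like "a: 1" the loop observes, in order:
		// STREAM-START, DOCUMENT-START, MAPPING-START,
		// SCALAR("a"), SCALAR("1"), MAPPING-END, DOCUMENT-END,
		// and finally STREAM-END, which terminates the loop.
	}
}
```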
67 | if parser.stream_end_produced || parser.error != yaml_NO_ERROR || parser.state == yaml_PARSE_END_STATE { 68 | return true 69 | } 70 | 71 | // Generate the next event. 72 | return yaml_parser_state_machine(parser, event) 73 | } 74 | 75 | // Set parser error. 76 | func yaml_parser_set_parser_error(parser *yaml_parser_t, problem string, problem_mark yaml_mark_t) bool { 77 | parser.error = yaml_PARSER_ERROR 78 | parser.problem = problem 79 | parser.problem_mark = problem_mark 80 | return false 81 | } 82 | 83 | func yaml_parser_set_parser_error_context(parser *yaml_parser_t, context string, context_mark yaml_mark_t, problem string, problem_mark yaml_mark_t) bool { 84 | parser.error = yaml_PARSER_ERROR 85 | parser.context = context 86 | parser.context_mark = context_mark 87 | parser.problem = problem 88 | parser.problem_mark = problem_mark 89 | return false 90 | } 91 | 92 | // State dispatcher. 93 | func yaml_parser_state_machine(parser *yaml_parser_t, event *yaml_event_t) bool { 94 | //trace("yaml_parser_state_machine", "state:", parser.state.String()) 95 | 96 | switch parser.state { 97 | case yaml_PARSE_STREAM_START_STATE: 98 | return yaml_parser_parse_stream_start(parser, event) 99 | 100 | case yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE: 101 | return yaml_parser_parse_document_start(parser, event, true) 102 | 103 | case yaml_PARSE_DOCUMENT_START_STATE: 104 | return yaml_parser_parse_document_start(parser, event, false) 105 | 106 | case yaml_PARSE_DOCUMENT_CONTENT_STATE: 107 | return yaml_parser_parse_document_content(parser, event) 108 | 109 | case yaml_PARSE_DOCUMENT_END_STATE: 110 | return yaml_parser_parse_document_end(parser, event) 111 | 112 | case yaml_PARSE_BLOCK_NODE_STATE: 113 | return yaml_parser_parse_node(parser, event, true, false) 114 | 115 | case yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE: 116 | return yaml_parser_parse_node(parser, event, true, true) 117 | 118 | case yaml_PARSE_FLOW_NODE_STATE: 119 | return yaml_parser_parse_node(parser, event, false, false) 120 | 121 | case yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE: 122 | return yaml_parser_parse_block_sequence_entry(parser, event, true) 123 | 124 | case yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE: 125 | return yaml_parser_parse_block_sequence_entry(parser, event, false) 126 | 127 | case yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE: 128 | return yaml_parser_parse_indentless_sequence_entry(parser, event) 129 | 130 | case yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE: 131 | return yaml_parser_parse_block_mapping_key(parser, event, true) 132 | 133 | case yaml_PARSE_BLOCK_MAPPING_KEY_STATE: 134 | return yaml_parser_parse_block_mapping_key(parser, event, false) 135 | 136 | case yaml_PARSE_BLOCK_MAPPING_VALUE_STATE: 137 | return yaml_parser_parse_block_mapping_value(parser, event) 138 | 139 | case yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE: 140 | return yaml_parser_parse_flow_sequence_entry(parser, event, true) 141 | 142 | case yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE: 143 | return yaml_parser_parse_flow_sequence_entry(parser, event, false) 144 | 145 | case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE: 146 | return yaml_parser_parse_flow_sequence_entry_mapping_key(parser, event) 147 | 148 | case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE: 149 | return yaml_parser_parse_flow_sequence_entry_mapping_value(parser, event) 150 | 151 | case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE: 152 | return yaml_parser_parse_flow_sequence_entry_mapping_end(parser, event) 153 | 154 | case yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE: 155 | 
return yaml_parser_parse_flow_mapping_key(parser, event, true) 156 | 157 | case yaml_PARSE_FLOW_MAPPING_KEY_STATE: 158 | return yaml_parser_parse_flow_mapping_key(parser, event, false) 159 | 160 | case yaml_PARSE_FLOW_MAPPING_VALUE_STATE: 161 | return yaml_parser_parse_flow_mapping_value(parser, event, false) 162 | 163 | case yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE: 164 | return yaml_parser_parse_flow_mapping_value(parser, event, true) 165 | 166 | default: 167 | panic("invalid parser state") 168 | } 169 | return false 170 | } 171 | 172 | // Parse the production: 173 | // stream ::= STREAM-START implicit_document? explicit_document* STREAM-END 174 | // ************ 175 | func yaml_parser_parse_stream_start(parser *yaml_parser_t, event *yaml_event_t) bool { 176 | token := peek_token(parser) 177 | if token == nil { 178 | return false 179 | } 180 | if token.typ != yaml_STREAM_START_TOKEN { 181 | return yaml_parser_set_parser_error(parser, "did not find expected <stream-start>", token.start_mark) 182 | } 183 | parser.state = yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE 184 | *event = yaml_event_t{ 185 | typ: yaml_STREAM_START_EVENT, 186 | start_mark: token.start_mark, 187 | end_mark: token.end_mark, 188 | encoding: token.encoding, 189 | } 190 | skip_token(parser) 191 | return true 192 | } 193 | 194 | // Parse the productions: 195 | // implicit_document ::= block_node DOCUMENT-END* 196 | // * 197 | // explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END* 198 | // ************************* 199 | func yaml_parser_parse_document_start(parser *yaml_parser_t, event *yaml_event_t, implicit bool) bool { 200 | 201 | token := peek_token(parser) 202 | if token == nil { 203 | return false 204 | } 205 | 206 | // Parse extra document end indicators. 207 | if !implicit { 208 | for token.typ == yaml_DOCUMENT_END_TOKEN { 209 | skip_token(parser) 210 | token = peek_token(parser) 211 | if token == nil { 212 | return false 213 | } 214 | } 215 | } 216 | 217 | if implicit && token.typ != yaml_VERSION_DIRECTIVE_TOKEN && 218 | token.typ != yaml_TAG_DIRECTIVE_TOKEN && 219 | token.typ != yaml_DOCUMENT_START_TOKEN && 220 | token.typ != yaml_STREAM_END_TOKEN { 221 | // Parse an implicit document. 222 | if !yaml_parser_process_directives(parser, nil, nil) { 223 | return false 224 | } 225 | parser.states = append(parser.states, yaml_PARSE_DOCUMENT_END_STATE) 226 | parser.state = yaml_PARSE_BLOCK_NODE_STATE 227 | 228 | *event = yaml_event_t{ 229 | typ: yaml_DOCUMENT_START_EVENT, 230 | start_mark: token.start_mark, 231 | end_mark: token.end_mark, 232 | } 233 | 234 | } else if token.typ != yaml_STREAM_END_TOKEN { 235 | // Parse an explicit document.
236 | var version_directive *yaml_version_directive_t 237 | var tag_directives []yaml_tag_directive_t 238 | start_mark := token.start_mark 239 | if !yaml_parser_process_directives(parser, &version_directive, &tag_directives) { 240 | return false 241 | } 242 | token = peek_token(parser) 243 | if token == nil { 244 | return false 245 | } 246 | if token.typ != yaml_DOCUMENT_START_TOKEN { 247 | yaml_parser_set_parser_error(parser, 248 | "did not find expected <document start>", token.start_mark) 249 | return false 250 | } 251 | parser.states = append(parser.states, yaml_PARSE_DOCUMENT_END_STATE) 252 | parser.state = yaml_PARSE_DOCUMENT_CONTENT_STATE 253 | end_mark := token.end_mark 254 | 255 | *event = yaml_event_t{ 256 | typ: yaml_DOCUMENT_START_EVENT, 257 | start_mark: start_mark, 258 | end_mark: end_mark, 259 | version_directive: version_directive, 260 | tag_directives: tag_directives, 261 | implicit: false, 262 | } 263 | skip_token(parser) 264 | 265 | } else { 266 | // Parse the stream end. 267 | parser.state = yaml_PARSE_END_STATE 268 | *event = yaml_event_t{ 269 | typ: yaml_STREAM_END_EVENT, 270 | start_mark: token.start_mark, 271 | end_mark: token.end_mark, 272 | } 273 | skip_token(parser) 274 | } 275 | 276 | return true 277 | } 278 | 279 | // Parse the productions: 280 | // explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END* 281 | // *********** 282 | // 283 | func yaml_parser_parse_document_content(parser *yaml_parser_t, event *yaml_event_t) bool { 284 | token := peek_token(parser) 285 | if token == nil { 286 | return false 287 | } 288 | if token.typ == yaml_VERSION_DIRECTIVE_TOKEN || 289 | token.typ == yaml_TAG_DIRECTIVE_TOKEN || 290 | token.typ == yaml_DOCUMENT_START_TOKEN || 291 | token.typ == yaml_DOCUMENT_END_TOKEN || 292 | token.typ == yaml_STREAM_END_TOKEN { 293 | parser.state = parser.states[len(parser.states)-1] 294 | parser.states = parser.states[:len(parser.states)-1] 295 | return yaml_parser_process_empty_scalar(parser, event, 296 | token.start_mark) 297 | } 298 | return yaml_parser_parse_node(parser, event, true, false) 299 | } 300 | 301 | // Parse the productions: 302 | // implicit_document ::= block_node DOCUMENT-END* 303 | // ************* 304 | // explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END* 305 | // 306 | func yaml_parser_parse_document_end(parser *yaml_parser_t, event *yaml_event_t) bool { 307 | token := peek_token(parser) 308 | if token == nil { 309 | return false 310 | } 311 | 312 | start_mark := token.start_mark 313 | end_mark := token.start_mark 314 | 315 | implicit := true 316 | if token.typ == yaml_DOCUMENT_END_TOKEN { 317 | end_mark = token.end_mark 318 | skip_token(parser) 319 | implicit = false 320 | } 321 | 322 | parser.tag_directives = parser.tag_directives[:0] 323 | 324 | parser.state = yaml_PARSE_DOCUMENT_START_STATE 325 | *event = yaml_event_t{ 326 | typ: yaml_DOCUMENT_END_EVENT, 327 | start_mark: start_mark, 328 | end_mark: end_mark, 329 | implicit: implicit, 330 | } 331 | return true 332 | } 333 | 334 | // Parse the productions: 335 | // block_node_or_indentless_sequence ::= 336 | // ALIAS 337 | // ***** 338 | // | properties (block_content | indentless_block_sequence)? 339 | // ********** * 340 | // | block_content | indentless_block_sequence 341 | // * 342 | // block_node ::= ALIAS 343 | // ***** 344 | // | properties block_content? 345 | // ********** * 346 | // | block_content 347 | // * 348 | // flow_node ::= ALIAS 349 | // ***** 350 | // | properties flow_content?
351 | // ********** * 352 | // | flow_content 353 | // * 354 | // properties ::= TAG ANCHOR? | ANCHOR TAG? 355 | // ************************* 356 | // block_content ::= block_collection | flow_collection | SCALAR 357 | // ****** 358 | // flow_content ::= flow_collection | SCALAR 359 | // ****** 360 | func yaml_parser_parse_node(parser *yaml_parser_t, event *yaml_event_t, block, indentless_sequence bool) bool { 361 | //defer trace("yaml_parser_parse_node", "block:", block, "indentless_sequence:", indentless_sequence)() 362 | 363 | token := peek_token(parser) 364 | if token == nil { 365 | return false 366 | } 367 | 368 | if token.typ == yaml_ALIAS_TOKEN { 369 | parser.state = parser.states[len(parser.states)-1] 370 | parser.states = parser.states[:len(parser.states)-1] 371 | *event = yaml_event_t{ 372 | typ: yaml_ALIAS_EVENT, 373 | start_mark: token.start_mark, 374 | end_mark: token.end_mark, 375 | anchor: token.value, 376 | } 377 | skip_token(parser) 378 | return true 379 | } 380 | 381 | start_mark := token.start_mark 382 | end_mark := token.start_mark 383 | 384 | var tag_token bool 385 | var tag_handle, tag_suffix, anchor []byte 386 | var tag_mark yaml_mark_t 387 | if token.typ == yaml_ANCHOR_TOKEN { 388 | anchor = token.value 389 | start_mark = token.start_mark 390 | end_mark = token.end_mark 391 | skip_token(parser) 392 | token = peek_token(parser) 393 | if token == nil { 394 | return false 395 | } 396 | if token.typ == yaml_TAG_TOKEN { 397 | tag_token = true 398 | tag_handle = token.value 399 | tag_suffix = token.suffix 400 | tag_mark = token.start_mark 401 | end_mark = token.end_mark 402 | skip_token(parser) 403 | token = peek_token(parser) 404 | if token == nil { 405 | return false 406 | } 407 | } 408 | } else if token.typ == yaml_TAG_TOKEN { 409 | tag_token = true 410 | tag_handle = token.value 411 | tag_suffix = token.suffix 412 | start_mark = token.start_mark 413 | tag_mark = token.start_mark 414 | end_mark = token.end_mark 415 | skip_token(parser) 416 | token = peek_token(parser) 417 | if token == nil { 418 | return false 419 | } 420 | if token.typ == yaml_ANCHOR_TOKEN { 421 | anchor = token.value 422 | end_mark = token.end_mark 423 | skip_token(parser) 424 | token = peek_token(parser) 425 | if token == nil { 426 | return false 427 | } 428 | } 429 | } 430 | 431 | var tag []byte 432 | if tag_token { 433 | if len(tag_handle) == 0 { 434 | tag = tag_suffix 435 | tag_suffix = nil 436 | } else { 437 | for i := range parser.tag_directives { 438 | if bytes.Equal(parser.tag_directives[i].handle, tag_handle) { 439 | tag = append([]byte(nil), parser.tag_directives[i].prefix...) 440 | tag = append(tag, tag_suffix...) 
441 | break 442 | } 443 | } 444 | if len(tag) == 0 { 445 | yaml_parser_set_parser_error_context(parser, 446 | "while parsing a node", start_mark, 447 | "found undefined tag handle", tag_mark) 448 | return false 449 | } 450 | } 451 | } 452 | 453 | implicit := len(tag) == 0 454 | if indentless_sequence && token.typ == yaml_BLOCK_ENTRY_TOKEN { 455 | end_mark = token.end_mark 456 | parser.state = yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE 457 | *event = yaml_event_t{ 458 | typ: yaml_SEQUENCE_START_EVENT, 459 | start_mark: start_mark, 460 | end_mark: end_mark, 461 | anchor: anchor, 462 | tag: tag, 463 | implicit: implicit, 464 | style: yaml_style_t(yaml_BLOCK_SEQUENCE_STYLE), 465 | } 466 | return true 467 | } 468 | if token.typ == yaml_SCALAR_TOKEN { 469 | var plain_implicit, quoted_implicit bool 470 | end_mark = token.end_mark 471 | if (len(tag) == 0 && token.style == yaml_PLAIN_SCALAR_STYLE) || (len(tag) == 1 && tag[0] == '!') { 472 | plain_implicit = true 473 | } else if len(tag) == 0 { 474 | quoted_implicit = true 475 | } 476 | parser.state = parser.states[len(parser.states)-1] 477 | parser.states = parser.states[:len(parser.states)-1] 478 | 479 | *event = yaml_event_t{ 480 | typ: yaml_SCALAR_EVENT, 481 | start_mark: start_mark, 482 | end_mark: end_mark, 483 | anchor: anchor, 484 | tag: tag, 485 | value: token.value, 486 | implicit: plain_implicit, 487 | quoted_implicit: quoted_implicit, 488 | style: yaml_style_t(token.style), 489 | } 490 | skip_token(parser) 491 | return true 492 | } 493 | if token.typ == yaml_FLOW_SEQUENCE_START_TOKEN { 494 | // [Go] Some of the events below can be merged as they differ only on style. 495 | end_mark = token.end_mark 496 | parser.state = yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE 497 | *event = yaml_event_t{ 498 | typ: yaml_SEQUENCE_START_EVENT, 499 | start_mark: start_mark, 500 | end_mark: end_mark, 501 | anchor: anchor, 502 | tag: tag, 503 | implicit: implicit, 504 | style: yaml_style_t(yaml_FLOW_SEQUENCE_STYLE), 505 | } 506 | return true 507 | } 508 | if token.typ == yaml_FLOW_MAPPING_START_TOKEN { 509 | end_mark = token.end_mark 510 | parser.state = yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE 511 | *event = yaml_event_t{ 512 | typ: yaml_MAPPING_START_EVENT, 513 | start_mark: start_mark, 514 | end_mark: end_mark, 515 | anchor: anchor, 516 | tag: tag, 517 | implicit: implicit, 518 | style: yaml_style_t(yaml_FLOW_MAPPING_STYLE), 519 | } 520 | return true 521 | } 522 | if block && token.typ == yaml_BLOCK_SEQUENCE_START_TOKEN { 523 | end_mark = token.end_mark 524 | parser.state = yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE 525 | *event = yaml_event_t{ 526 | typ: yaml_SEQUENCE_START_EVENT, 527 | start_mark: start_mark, 528 | end_mark: end_mark, 529 | anchor: anchor, 530 | tag: tag, 531 | implicit: implicit, 532 | style: yaml_style_t(yaml_BLOCK_SEQUENCE_STYLE), 533 | } 534 | return true 535 | } 536 | if block && token.typ == yaml_BLOCK_MAPPING_START_TOKEN { 537 | end_mark = token.end_mark 538 | parser.state = yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE 539 | *event = yaml_event_t{ 540 | typ: yaml_MAPPING_START_EVENT, 541 | start_mark: start_mark, 542 | end_mark: end_mark, 543 | anchor: anchor, 544 | tag: tag, 545 | implicit: implicit, 546 | style: yaml_style_t(yaml_BLOCK_MAPPING_STYLE), 547 | } 548 | return true 549 | } 550 | if len(anchor) > 0 || len(tag) > 0 { 551 | parser.state = parser.states[len(parser.states)-1] 552 | parser.states = parser.states[:len(parser.states)-1] 553 | 554 | *event = yaml_event_t{ 555 | typ: yaml_SCALAR_EVENT, 556 | start_mark: 
start_mark, 557 | end_mark: end_mark, 558 | anchor: anchor, 559 | tag: tag, 560 | implicit: implicit, 561 | quoted_implicit: false, 562 | style: yaml_style_t(yaml_PLAIN_SCALAR_STYLE), 563 | } 564 | return true 565 | } 566 | 567 | context := "while parsing a flow node" 568 | if block { 569 | context = "while parsing a block node" 570 | } 571 | yaml_parser_set_parser_error_context(parser, context, start_mark, 572 | "did not find expected node content", token.start_mark) 573 | return false 574 | } 575 | 576 | // Parse the productions: 577 | // block_sequence ::= BLOCK-SEQUENCE-START (BLOCK-ENTRY block_node?)* BLOCK-END 578 | // ******************** *********** * ********* 579 | // 580 | func yaml_parser_parse_block_sequence_entry(parser *yaml_parser_t, event *yaml_event_t, first bool) bool { 581 | if first { 582 | token := peek_token(parser) 583 | parser.marks = append(parser.marks, token.start_mark) 584 | skip_token(parser) 585 | } 586 | 587 | token := peek_token(parser) 588 | if token == nil { 589 | return false 590 | } 591 | 592 | if token.typ == yaml_BLOCK_ENTRY_TOKEN { 593 | mark := token.end_mark 594 | skip_token(parser) 595 | token = peek_token(parser) 596 | if token == nil { 597 | return false 598 | } 599 | if token.typ != yaml_BLOCK_ENTRY_TOKEN && token.typ != yaml_BLOCK_END_TOKEN { 600 | parser.states = append(parser.states, yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE) 601 | return yaml_parser_parse_node(parser, event, true, false) 602 | } else { 603 | parser.state = yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE 604 | return yaml_parser_process_empty_scalar(parser, event, mark) 605 | } 606 | } 607 | if token.typ == yaml_BLOCK_END_TOKEN { 608 | parser.state = parser.states[len(parser.states)-1] 609 | parser.states = parser.states[:len(parser.states)-1] 610 | parser.marks = parser.marks[:len(parser.marks)-1] 611 | 612 | *event = yaml_event_t{ 613 | typ: yaml_SEQUENCE_END_EVENT, 614 | start_mark: token.start_mark, 615 | end_mark: token.end_mark, 616 | } 617 | 618 | skip_token(parser) 619 | return true 620 | } 621 | 622 | context_mark := parser.marks[len(parser.marks)-1] 623 | parser.marks = parser.marks[:len(parser.marks)-1] 624 | return yaml_parser_set_parser_error_context(parser, 625 | "while parsing a block collection", context_mark, 626 | "did not find expected '-' indicator", token.start_mark) 627 | } 628 | 629 | // Parse the productions: 630 | // indentless_sequence ::= (BLOCK-ENTRY block_node?)+ 631 | // *********** * 632 | func yaml_parser_parse_indentless_sequence_entry(parser *yaml_parser_t, event *yaml_event_t) bool { 633 | token := peek_token(parser) 634 | if token == nil { 635 | return false 636 | } 637 | 638 | if token.typ == yaml_BLOCK_ENTRY_TOKEN { 639 | mark := token.end_mark 640 | skip_token(parser) 641 | token = peek_token(parser) 642 | if token == nil { 643 | return false 644 | } 645 | if token.typ != yaml_BLOCK_ENTRY_TOKEN && 646 | token.typ != yaml_KEY_TOKEN && 647 | token.typ != yaml_VALUE_TOKEN && 648 | token.typ != yaml_BLOCK_END_TOKEN { 649 | parser.states = append(parser.states, yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE) 650 | return yaml_parser_parse_node(parser, event, true, false) 651 | } 652 | parser.state = yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE 653 | return yaml_parser_process_empty_scalar(parser, event, mark) 654 | } 655 | parser.state = parser.states[len(parser.states)-1] 656 | parser.states = parser.states[:len(parser.states)-1] 657 | 658 | *event = yaml_event_t{ 659 | typ: yaml_SEQUENCE_END_EVENT, 660 | start_mark: token.start_mark, 661 | end_mark: 
token.start_mark, // [Go] Shouldn't this be token.end_mark? 662 | } 663 | return true 664 | } 665 | 666 | // Parse the productions: 667 | // block_mapping ::= BLOCK-MAPPING_START 668 | // ******************* 669 | // ((KEY block_node_or_indentless_sequence?)? 670 | // *** * 671 | // (VALUE block_node_or_indentless_sequence?)?)* 672 | // 673 | // BLOCK-END 674 | // ********* 675 | // 676 | func yaml_parser_parse_block_mapping_key(parser *yaml_parser_t, event *yaml_event_t, first bool) bool { 677 | if first { 678 | token := peek_token(parser) 679 | parser.marks = append(parser.marks, token.start_mark) 680 | skip_token(parser) 681 | } 682 | 683 | token := peek_token(parser) 684 | if token == nil { 685 | return false 686 | } 687 | 688 | if token.typ == yaml_KEY_TOKEN { 689 | mark := token.end_mark 690 | skip_token(parser) 691 | token = peek_token(parser) 692 | if token == nil { 693 | return false 694 | } 695 | if token.typ != yaml_KEY_TOKEN && 696 | token.typ != yaml_VALUE_TOKEN && 697 | token.typ != yaml_BLOCK_END_TOKEN { 698 | parser.states = append(parser.states, yaml_PARSE_BLOCK_MAPPING_VALUE_STATE) 699 | return yaml_parser_parse_node(parser, event, true, true) 700 | } else { 701 | parser.state = yaml_PARSE_BLOCK_MAPPING_VALUE_STATE 702 | return yaml_parser_process_empty_scalar(parser, event, mark) 703 | } 704 | } else if token.typ == yaml_BLOCK_END_TOKEN { 705 | parser.state = parser.states[len(parser.states)-1] 706 | parser.states = parser.states[:len(parser.states)-1] 707 | parser.marks = parser.marks[:len(parser.marks)-1] 708 | *event = yaml_event_t{ 709 | typ: yaml_MAPPING_END_EVENT, 710 | start_mark: token.start_mark, 711 | end_mark: token.end_mark, 712 | } 713 | skip_token(parser) 714 | return true 715 | } 716 | 717 | context_mark := parser.marks[len(parser.marks)-1] 718 | parser.marks = parser.marks[:len(parser.marks)-1] 719 | return yaml_parser_set_parser_error_context(parser, 720 | "while parsing a block mapping", context_mark, 721 | "did not find expected key", token.start_mark) 722 | } 723 | 724 | // Parse the productions: 725 | // block_mapping ::= BLOCK-MAPPING_START 726 | // 727 | // ((KEY block_node_or_indentless_sequence?)? 728 | // 729 | // (VALUE block_node_or_indentless_sequence?)?)* 730 | // ***** * 731 | // BLOCK-END 732 | // 733 | // 734 | func yaml_parser_parse_block_mapping_value(parser *yaml_parser_t, event *yaml_event_t) bool { 735 | token := peek_token(parser) 736 | if token == nil { 737 | return false 738 | } 739 | if token.typ == yaml_VALUE_TOKEN { 740 | mark := token.end_mark 741 | skip_token(parser) 742 | token = peek_token(parser) 743 | if token == nil { 744 | return false 745 | } 746 | if token.typ != yaml_KEY_TOKEN && 747 | token.typ != yaml_VALUE_TOKEN && 748 | token.typ != yaml_BLOCK_END_TOKEN { 749 | parser.states = append(parser.states, yaml_PARSE_BLOCK_MAPPING_KEY_STATE) 750 | return yaml_parser_parse_node(parser, event, true, true) 751 | } 752 | parser.state = yaml_PARSE_BLOCK_MAPPING_KEY_STATE 753 | return yaml_parser_process_empty_scalar(parser, event, mark) 754 | } 755 | parser.state = yaml_PARSE_BLOCK_MAPPING_KEY_STATE 756 | return yaml_parser_process_empty_scalar(parser, event, token.start_mark) 757 | } 758 | 759 | // Parse the productions: 760 | // flow_sequence ::= FLOW-SEQUENCE-START 761 | // ******************* 762 | // (flow_sequence_entry FLOW-ENTRY)* 763 | // * ********** 764 | // flow_sequence_entry? 765 | // * 766 | // FLOW-SEQUENCE-END 767 | // ***************** 768 | // flow_sequence_entry ::= flow_node | KEY flow_node? 
(VALUE flow_node?)? 769 | // * 770 | // 771 | func yaml_parser_parse_flow_sequence_entry(parser *yaml_parser_t, event *yaml_event_t, first bool) bool { 772 | if first { 773 | token := peek_token(parser) 774 | parser.marks = append(parser.marks, token.start_mark) 775 | skip_token(parser) 776 | } 777 | token := peek_token(parser) 778 | if token == nil { 779 | return false 780 | } 781 | if token.typ != yaml_FLOW_SEQUENCE_END_TOKEN { 782 | if !first { 783 | if token.typ == yaml_FLOW_ENTRY_TOKEN { 784 | skip_token(parser) 785 | token = peek_token(parser) 786 | if token == nil { 787 | return false 788 | } 789 | } else { 790 | context_mark := parser.marks[len(parser.marks)-1] 791 | parser.marks = parser.marks[:len(parser.marks)-1] 792 | return yaml_parser_set_parser_error_context(parser, 793 | "while parsing a flow sequence", context_mark, 794 | "did not find expected ',' or ']'", token.start_mark) 795 | } 796 | } 797 | 798 | if token.typ == yaml_KEY_TOKEN { 799 | parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE 800 | *event = yaml_event_t{ 801 | typ: yaml_MAPPING_START_EVENT, 802 | start_mark: token.start_mark, 803 | end_mark: token.end_mark, 804 | implicit: true, 805 | style: yaml_style_t(yaml_FLOW_MAPPING_STYLE), 806 | } 807 | skip_token(parser) 808 | return true 809 | } else if token.typ != yaml_FLOW_SEQUENCE_END_TOKEN { 810 | parser.states = append(parser.states, yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE) 811 | return yaml_parser_parse_node(parser, event, false, false) 812 | } 813 | } 814 | 815 | parser.state = parser.states[len(parser.states)-1] 816 | parser.states = parser.states[:len(parser.states)-1] 817 | parser.marks = parser.marks[:len(parser.marks)-1] 818 | 819 | *event = yaml_event_t{ 820 | typ: yaml_SEQUENCE_END_EVENT, 821 | start_mark: token.start_mark, 822 | end_mark: token.end_mark, 823 | } 824 | 825 | skip_token(parser) 826 | return true 827 | } 828 | 829 | // 830 | // Parse the productions: 831 | // flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? 832 | // *** * 833 | // 834 | func yaml_parser_parse_flow_sequence_entry_mapping_key(parser *yaml_parser_t, event *yaml_event_t) bool { 835 | token := peek_token(parser) 836 | if token == nil { 837 | return false 838 | } 839 | if token.typ != yaml_VALUE_TOKEN && 840 | token.typ != yaml_FLOW_ENTRY_TOKEN && 841 | token.typ != yaml_FLOW_SEQUENCE_END_TOKEN { 842 | parser.states = append(parser.states, yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE) 843 | return yaml_parser_parse_node(parser, event, false, false) 844 | } 845 | mark := token.end_mark 846 | skip_token(parser) 847 | parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE 848 | return yaml_parser_process_empty_scalar(parser, event, mark) 849 | } 850 | 851 | // Parse the productions: 852 | // flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? 
853 | // ***** * 854 | // 855 | func yaml_parser_parse_flow_sequence_entry_mapping_value(parser *yaml_parser_t, event *yaml_event_t) bool { 856 | token := peek_token(parser) 857 | if token == nil { 858 | return false 859 | } 860 | if token.typ == yaml_VALUE_TOKEN { 861 | skip_token(parser) 862 | token := peek_token(parser) 863 | if token == nil { 864 | return false 865 | } 866 | if token.typ != yaml_FLOW_ENTRY_TOKEN && token.typ != yaml_FLOW_SEQUENCE_END_TOKEN { 867 | parser.states = append(parser.states, yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE) 868 | return yaml_parser_parse_node(parser, event, false, false) 869 | } 870 | } 871 | parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE 872 | return yaml_parser_process_empty_scalar(parser, event, token.start_mark) 873 | } 874 | 875 | // Parse the productions: 876 | // flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? 877 | // * 878 | // 879 | func yaml_parser_parse_flow_sequence_entry_mapping_end(parser *yaml_parser_t, event *yaml_event_t) bool { 880 | token := peek_token(parser) 881 | if token == nil { 882 | return false 883 | } 884 | parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE 885 | *event = yaml_event_t{ 886 | typ: yaml_MAPPING_END_EVENT, 887 | start_mark: token.start_mark, 888 | end_mark: token.start_mark, // [Go] Shouldn't this be end_mark? 889 | } 890 | return true 891 | } 892 | 893 | // Parse the productions: 894 | // flow_mapping ::= FLOW-MAPPING-START 895 | // ****************** 896 | // (flow_mapping_entry FLOW-ENTRY)* 897 | // * ********** 898 | // flow_mapping_entry? 899 | // ****************** 900 | // FLOW-MAPPING-END 901 | // **************** 902 | // flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? 903 | // * *** * 904 | // 905 | func yaml_parser_parse_flow_mapping_key(parser *yaml_parser_t, event *yaml_event_t, first bool) bool { 906 | if first { 907 | token := peek_token(parser) 908 | parser.marks = append(parser.marks, token.start_mark) 909 | skip_token(parser) 910 | } 911 | 912 | token := peek_token(parser) 913 | if token == nil { 914 | return false 915 | } 916 | 917 | if token.typ != yaml_FLOW_MAPPING_END_TOKEN { 918 | if !first { 919 | if token.typ == yaml_FLOW_ENTRY_TOKEN { 920 | skip_token(parser) 921 | token = peek_token(parser) 922 | if token == nil { 923 | return false 924 | } 925 | } else { 926 | context_mark := parser.marks[len(parser.marks)-1] 927 | parser.marks = parser.marks[:len(parser.marks)-1] 928 | return yaml_parser_set_parser_error_context(parser, 929 | "while parsing a flow mapping", context_mark, 930 | "did not find expected ',' or '}'", token.start_mark) 931 | } 932 | } 933 | 934 | if token.typ == yaml_KEY_TOKEN { 935 | skip_token(parser) 936 | token = peek_token(parser) 937 | if token == nil { 938 | return false 939 | } 940 | if token.typ != yaml_VALUE_TOKEN && 941 | token.typ != yaml_FLOW_ENTRY_TOKEN && 942 | token.typ != yaml_FLOW_MAPPING_END_TOKEN { 943 | parser.states = append(parser.states, yaml_PARSE_FLOW_MAPPING_VALUE_STATE) 944 | return yaml_parser_parse_node(parser, event, false, false) 945 | } else { 946 | parser.state = yaml_PARSE_FLOW_MAPPING_VALUE_STATE 947 | return yaml_parser_process_empty_scalar(parser, event, token.start_mark) 948 | } 949 | } else if token.typ != yaml_FLOW_MAPPING_END_TOKEN { 950 | parser.states = append(parser.states, yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE) 951 | return yaml_parser_parse_node(parser, event, false, false) 952 | } 953 | } 954 | 955 | parser.state = 
parser.states[len(parser.states)-1] 956 | parser.states = parser.states[:len(parser.states)-1] 957 | parser.marks = parser.marks[:len(parser.marks)-1] 958 | *event = yaml_event_t{ 959 | typ: yaml_MAPPING_END_EVENT, 960 | start_mark: token.start_mark, 961 | end_mark: token.end_mark, 962 | } 963 | skip_token(parser) 964 | return true 965 | } 966 | 967 | // Parse the productions: 968 | // flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? 969 | // * ***** * 970 | // 971 | func yaml_parser_parse_flow_mapping_value(parser *yaml_parser_t, event *yaml_event_t, empty bool) bool { 972 | token := peek_token(parser) 973 | if token == nil { 974 | return false 975 | } 976 | if empty { 977 | parser.state = yaml_PARSE_FLOW_MAPPING_KEY_STATE 978 | return yaml_parser_process_empty_scalar(parser, event, token.start_mark) 979 | } 980 | if token.typ == yaml_VALUE_TOKEN { 981 | skip_token(parser) 982 | token = peek_token(parser) 983 | if token == nil { 984 | return false 985 | } 986 | if token.typ != yaml_FLOW_ENTRY_TOKEN && token.typ != yaml_FLOW_MAPPING_END_TOKEN { 987 | parser.states = append(parser.states, yaml_PARSE_FLOW_MAPPING_KEY_STATE) 988 | return yaml_parser_parse_node(parser, event, false, false) 989 | } 990 | } 991 | parser.state = yaml_PARSE_FLOW_MAPPING_KEY_STATE 992 | return yaml_parser_process_empty_scalar(parser, event, token.start_mark) 993 | } 994 | 995 | // Generate an empty scalar event. 996 | func yaml_parser_process_empty_scalar(parser *yaml_parser_t, event *yaml_event_t, mark yaml_mark_t) bool { 997 | *event = yaml_event_t{ 998 | typ: yaml_SCALAR_EVENT, 999 | start_mark: mark, 1000 | end_mark: mark, 1001 | value: nil, // Empty 1002 | implicit: true, 1003 | style: yaml_style_t(yaml_PLAIN_SCALAR_STYLE), 1004 | } 1005 | return true 1006 | } 1007 | 1008 | var default_tag_directives = []yaml_tag_directive_t{ 1009 | {[]byte("!"), []byte("!")}, 1010 | {[]byte("!!"), []byte("tag:yaml.org,2002:")}, 1011 | } 1012 | 1013 | // Parse directives. 
1014 | func yaml_parser_process_directives(parser *yaml_parser_t, 1015 | version_directive_ref **yaml_version_directive_t, 1016 | tag_directives_ref *[]yaml_tag_directive_t) bool { 1017 | 1018 | var version_directive *yaml_version_directive_t 1019 | var tag_directives []yaml_tag_directive_t 1020 | 1021 | token := peek_token(parser) 1022 | if token == nil { 1023 | return false 1024 | } 1025 | 1026 | for token.typ == yaml_VERSION_DIRECTIVE_TOKEN || token.typ == yaml_TAG_DIRECTIVE_TOKEN { 1027 | if token.typ == yaml_VERSION_DIRECTIVE_TOKEN { 1028 | if version_directive != nil { 1029 | yaml_parser_set_parser_error(parser, 1030 | "found duplicate %YAML directive", token.start_mark) 1031 | return false 1032 | } 1033 | if token.major != 1 || token.minor != 1 { 1034 | yaml_parser_set_parser_error(parser, 1035 | "found incompatible YAML document", token.start_mark) 1036 | return false 1037 | } 1038 | version_directive = &yaml_version_directive_t{ 1039 | major: token.major, 1040 | minor: token.minor, 1041 | } 1042 | } else if token.typ == yaml_TAG_DIRECTIVE_TOKEN { 1043 | value := yaml_tag_directive_t{ 1044 | handle: token.value, 1045 | prefix: token.prefix, 1046 | } 1047 | if !yaml_parser_append_tag_directive(parser, value, false, token.start_mark) { 1048 | return false 1049 | } 1050 | tag_directives = append(tag_directives, value) 1051 | } 1052 | 1053 | skip_token(parser) 1054 | token = peek_token(parser) 1055 | if token == nil { 1056 | return false 1057 | } 1058 | } 1059 | 1060 | for i := range default_tag_directives { 1061 | if !yaml_parser_append_tag_directive(parser, default_tag_directives[i], true, token.start_mark) { 1062 | return false 1063 | } 1064 | } 1065 | 1066 | if version_directive_ref != nil { 1067 | *version_directive_ref = version_directive 1068 | } 1069 | if tag_directives_ref != nil { 1070 | *tag_directives_ref = tag_directives 1071 | } 1072 | return true 1073 | } 1074 | 1075 | // Append a tag directive to the directives stack. 1076 | func yaml_parser_append_tag_directive(parser *yaml_parser_t, value yaml_tag_directive_t, allow_duplicates bool, mark yaml_mark_t) bool { 1077 | for i := range parser.tag_directives { 1078 | if bytes.Equal(value.handle, parser.tag_directives[i].handle) { 1079 | if allow_duplicates { 1080 | return true 1081 | } 1082 | return yaml_parser_set_parser_error(parser, "found duplicate %TAG directive", mark) 1083 | } 1084 | } 1085 | 1086 | // [Go] I suspect the copy is unnecessary. This was likely done 1087 | // because there was no way to track ownership of the data. 1088 | value_copy := yaml_tag_directive_t{ 1089 | handle: make([]byte, len(value.handle)), 1090 | prefix: make([]byte, len(value.prefix)), 1091 | } 1092 | copy(value_copy.handle, value.handle) 1093 | copy(value_copy.prefix, value.prefix) 1094 | parser.tag_directives = append(parser.tag_directives, value_copy) 1095 | return true 1096 | } 1097 | -------------------------------------------------------------------------------- /vendor/gopkg.in/yaml.v2/readerc.go: -------------------------------------------------------------------------------- 1 | package yaml 2 | 3 | import ( 4 | "io" 5 | ) 6 | 7 | // Set the reader error and return 0. 8 | func yaml_parser_set_reader_error(parser *yaml_parser_t, problem string, offset int, value int) bool { 9 | parser.error = yaml_READER_ERROR 10 | parser.problem = problem 11 | parser.problem_offset = offset 12 | parser.problem_value = value 13 | return false 14 | } 15 | 16 | // Byte order marks. 
17 | const ( 18 | bom_UTF8 = "\xef\xbb\xbf" 19 | bom_UTF16LE = "\xff\xfe" 20 | bom_UTF16BE = "\xfe\xff" 21 | ) 22 | 23 | // Determine the input stream encoding by checking the BOM symbol. If no BOM is 24 | // found, the UTF-8 encoding is assumed. Return 1 on success, 0 on failure. 25 | func yaml_parser_determine_encoding(parser *yaml_parser_t) bool { 26 | // Ensure that we had enough bytes in the raw buffer. 27 | for !parser.eof && len(parser.raw_buffer)-parser.raw_buffer_pos < 3 { 28 | if !yaml_parser_update_raw_buffer(parser) { 29 | return false 30 | } 31 | } 32 | 33 | // Determine the encoding. 34 | buf := parser.raw_buffer 35 | pos := parser.raw_buffer_pos 36 | avail := len(buf) - pos 37 | if avail >= 2 && buf[pos] == bom_UTF16LE[0] && buf[pos+1] == bom_UTF16LE[1] { 38 | parser.encoding = yaml_UTF16LE_ENCODING 39 | parser.raw_buffer_pos += 2 40 | parser.offset += 2 41 | } else if avail >= 2 && buf[pos] == bom_UTF16BE[0] && buf[pos+1] == bom_UTF16BE[1] { 42 | parser.encoding = yaml_UTF16BE_ENCODING 43 | parser.raw_buffer_pos += 2 44 | parser.offset += 2 45 | } else if avail >= 3 && buf[pos] == bom_UTF8[0] && buf[pos+1] == bom_UTF8[1] && buf[pos+2] == bom_UTF8[2] { 46 | parser.encoding = yaml_UTF8_ENCODING 47 | parser.raw_buffer_pos += 3 48 | parser.offset += 3 49 | } else { 50 | parser.encoding = yaml_UTF8_ENCODING 51 | } 52 | return true 53 | } 54 | 55 | // Update the raw buffer. 56 | func yaml_parser_update_raw_buffer(parser *yaml_parser_t) bool { 57 | size_read := 0 58 | 59 | // Return if the raw buffer is full. 60 | if parser.raw_buffer_pos == 0 && len(parser.raw_buffer) == cap(parser.raw_buffer) { 61 | return true 62 | } 63 | 64 | // Return on EOF. 65 | if parser.eof { 66 | return true 67 | } 68 | 69 | // Move the remaining bytes in the raw buffer to the beginning. 70 | if parser.raw_buffer_pos > 0 && parser.raw_buffer_pos < len(parser.raw_buffer) { 71 | copy(parser.raw_buffer, parser.raw_buffer[parser.raw_buffer_pos:]) 72 | } 73 | parser.raw_buffer = parser.raw_buffer[:len(parser.raw_buffer)-parser.raw_buffer_pos] 74 | parser.raw_buffer_pos = 0 75 | 76 | // Call the read handler to fill the buffer. 77 | size_read, err := parser.read_handler(parser, parser.raw_buffer[len(parser.raw_buffer):cap(parser.raw_buffer)]) 78 | parser.raw_buffer = parser.raw_buffer[:len(parser.raw_buffer)+size_read] 79 | if err == io.EOF { 80 | parser.eof = true 81 | } else if err != nil { 82 | return yaml_parser_set_reader_error(parser, "input error: "+err.Error(), parser.offset, -1) 83 | } 84 | return true 85 | } 86 | 87 | // Ensure that the buffer contains at least `length` characters. 88 | // Return true on success, false on failure. 89 | // 90 | // The length is supposed to be significantly less that the buffer size. 91 | func yaml_parser_update_buffer(parser *yaml_parser_t, length int) bool { 92 | if parser.read_handler == nil { 93 | panic("read handler must be set") 94 | } 95 | 96 | // If the EOF flag is set and the raw buffer is empty, do nothing. 97 | if parser.eof && parser.raw_buffer_pos == len(parser.raw_buffer) { 98 | return true 99 | } 100 | 101 | // Return if the buffer contains enough characters. 102 | if parser.unread >= length { 103 | return true 104 | } 105 | 106 | // Determine the input encoding if it is not known yet. 107 | if parser.encoding == yaml_ANY_ENCODING { 108 | if !yaml_parser_determine_encoding(parser) { 109 | return false 110 | } 111 | } 112 | 113 | // Move the unread characters to the beginning of the buffer. 
114 | buffer_len := len(parser.buffer) 115 | if parser.buffer_pos > 0 && parser.buffer_pos < buffer_len { 116 | copy(parser.buffer, parser.buffer[parser.buffer_pos:]) 117 | buffer_len -= parser.buffer_pos 118 | parser.buffer_pos = 0 119 | } else if parser.buffer_pos == buffer_len { 120 | buffer_len = 0 121 | parser.buffer_pos = 0 122 | } 123 | 124 | // Open the whole buffer for writing, and cut it before returning. 125 | parser.buffer = parser.buffer[:cap(parser.buffer)] 126 | 127 | // Fill the buffer until it has enough characters. 128 | first := true 129 | for parser.unread < length { 130 | 131 | // Fill the raw buffer if necessary. 132 | if !first || parser.raw_buffer_pos == len(parser.raw_buffer) { 133 | if !yaml_parser_update_raw_buffer(parser) { 134 | parser.buffer = parser.buffer[:buffer_len] 135 | return false 136 | } 137 | } 138 | first = false 139 | 140 | // Decode the raw buffer. 141 | inner: 142 | for parser.raw_buffer_pos != len(parser.raw_buffer) { 143 | var value rune 144 | var width int 145 | 146 | raw_unread := len(parser.raw_buffer) - parser.raw_buffer_pos 147 | 148 | // Decode the next character. 149 | switch parser.encoding { 150 | case yaml_UTF8_ENCODING: 151 | // Decode a UTF-8 character. Check RFC 3629 152 | // (http://www.ietf.org/rfc/rfc3629.txt) for more details. 153 | // 154 | // The following table (taken from the RFC) is used for 155 | // decoding. 156 | // 157 | // Char. number range | UTF-8 octet sequence 158 | // (hexadecimal) | (binary) 159 | // --------------------+------------------------------------ 160 | // 0000 0000-0000 007F | 0xxxxxxx 161 | // 0000 0080-0000 07FF | 110xxxxx 10xxxxxx 162 | // 0000 0800-0000 FFFF | 1110xxxx 10xxxxxx 10xxxxxx 163 | // 0001 0000-0010 FFFF | 11110xxx 10xxxxxx 10xxxxxx 10xxxxxx 164 | // 165 | // Additionally, the characters in the range 0xD800-0xDFFF 166 | // are prohibited as they are reserved for use with UTF-16 167 | // surrogate pairs. 168 | 169 | // Determine the length of the UTF-8 sequence. 170 | octet := parser.raw_buffer[parser.raw_buffer_pos] 171 | switch { 172 | case octet&0x80 == 0x00: 173 | width = 1 174 | case octet&0xE0 == 0xC0: 175 | width = 2 176 | case octet&0xF0 == 0xE0: 177 | width = 3 178 | case octet&0xF8 == 0xF0: 179 | width = 4 180 | default: 181 | // The leading octet is invalid. 182 | return yaml_parser_set_reader_error(parser, 183 | "invalid leading UTF-8 octet", 184 | parser.offset, int(octet)) 185 | } 186 | 187 | // Check if the raw buffer contains an incomplete character. 188 | if width > raw_unread { 189 | if parser.eof { 190 | return yaml_parser_set_reader_error(parser, 191 | "incomplete UTF-8 octet sequence", 192 | parser.offset, -1) 193 | } 194 | break inner 195 | } 196 | 197 | // Decode the leading octet. 198 | switch { 199 | case octet&0x80 == 0x00: 200 | value = rune(octet & 0x7F) 201 | case octet&0xE0 == 0xC0: 202 | value = rune(octet & 0x1F) 203 | case octet&0xF0 == 0xE0: 204 | value = rune(octet & 0x0F) 205 | case octet&0xF8 == 0xF0: 206 | value = rune(octet & 0x07) 207 | default: 208 | value = 0 209 | } 210 | 211 | // Check and decode the trailing octets. 212 | for k := 1; k < width; k++ { 213 | octet = parser.raw_buffer[parser.raw_buffer_pos+k] 214 | 215 | // Check if the octet is valid. 216 | if (octet & 0xC0) != 0x80 { 217 | return yaml_parser_set_reader_error(parser, 218 | "invalid trailing UTF-8 octet", 219 | parser.offset+k, int(octet)) 220 | } 221 | 222 | // Decode the octet. 
223 | value = (value << 6) + rune(octet&0x3F) 224 | } 225 | 226 | // Check the length of the sequence against the value. 227 | switch { 228 | case width == 1: 229 | case width == 2 && value >= 0x80: 230 | case width == 3 && value >= 0x800: 231 | case width == 4 && value >= 0x10000: 232 | default: 233 | return yaml_parser_set_reader_error(parser, 234 | "invalid length of a UTF-8 sequence", 235 | parser.offset, -1) 236 | } 237 | 238 | // Check the range of the value. 239 | if value >= 0xD800 && value <= 0xDFFF || value > 0x10FFFF { 240 | return yaml_parser_set_reader_error(parser, 241 | "invalid Unicode character", 242 | parser.offset, int(value)) 243 | } 244 | 245 | case yaml_UTF16LE_ENCODING, yaml_UTF16BE_ENCODING: 246 | var low, high int 247 | if parser.encoding == yaml_UTF16LE_ENCODING { 248 | low, high = 0, 1 249 | } else { 250 | low, high = 1, 0 251 | } 252 | 253 | // The UTF-16 encoding is not as simple as one might 254 | // naively think. Check RFC 2781 255 | // (http://www.ietf.org/rfc/rfc2781.txt). 256 | // 257 | // Normally, two subsequent bytes describe a Unicode 258 | // character. However a special technique (called a 259 | // surrogate pair) is used for specifying character 260 | // values larger than 0xFFFF. 261 | // 262 | // A surrogate pair consists of two pseudo-characters: 263 | // high surrogate area (0xD800-0xDBFF) 264 | // low surrogate area (0xDC00-0xDFFF) 265 | // 266 | // The following formulas are used for decoding 267 | // and encoding characters using surrogate pairs: 268 | // 269 | // U = U' + 0x10000 (0x01 00 00 <= U <= 0x10 FF FF) 270 | // U' = yyyyyyyyyyxxxxxxxxxx (0 <= U' <= 0x0F FF FF) 271 | // W1 = 110110yyyyyyyyyy 272 | // W2 = 110111xxxxxxxxxx 273 | // 274 | // where U is the character value, W1 is the high surrogate 275 | // area, W2 is the low surrogate area. 276 | 277 | // Check for incomplete UTF-16 character. 278 | if raw_unread < 2 { 279 | if parser.eof { 280 | return yaml_parser_set_reader_error(parser, 281 | "incomplete UTF-16 character", 282 | parser.offset, -1) 283 | } 284 | break inner 285 | } 286 | 287 | // Get the character. 288 | value = rune(parser.raw_buffer[parser.raw_buffer_pos+low]) + 289 | (rune(parser.raw_buffer[parser.raw_buffer_pos+high]) << 8) 290 | 291 | // Check for unexpected low surrogate area. 292 | if value&0xFC00 == 0xDC00 { 293 | return yaml_parser_set_reader_error(parser, 294 | "unexpected low surrogate area", 295 | parser.offset, int(value)) 296 | } 297 | 298 | // Check for a high surrogate area. 299 | if value&0xFC00 == 0xD800 { 300 | width = 4 301 | 302 | // Check for incomplete surrogate pair. 303 | if raw_unread < 4 { 304 | if parser.eof { 305 | return yaml_parser_set_reader_error(parser, 306 | "incomplete UTF-16 surrogate pair", 307 | parser.offset, -1) 308 | } 309 | break inner 310 | } 311 | 312 | // Get the next character. 313 | value2 := rune(parser.raw_buffer[parser.raw_buffer_pos+low+2]) + 314 | (rune(parser.raw_buffer[parser.raw_buffer_pos+high+2]) << 8) 315 | 316 | // Check for a low surrogate area. 317 | if value2&0xFC00 != 0xDC00 { 318 | return yaml_parser_set_reader_error(parser, 319 | "expected low surrogate area", 320 | parser.offset+2, int(value2)) 321 | } 322 | 323 | // Generate the value of the surrogate pair. 
324 | value = 0x10000 + ((value & 0x3FF) << 10) + (value2 & 0x3FF) 325 | } else { 326 | width = 2 327 | } 328 | 329 | default: 330 | panic("impossible") 331 | } 332 | 333 | // Check if the character is in the allowed range: 334 | // #x9 | #xA | #xD | [#x20-#x7E] (8 bit) 335 | // | #x85 | [#xA0-#xD7FF] | [#xE000-#xFFFD] (16 bit) 336 | // | [#x10000-#x10FFFF] (32 bit) 337 | switch { 338 | case value == 0x09: 339 | case value == 0x0A: 340 | case value == 0x0D: 341 | case value >= 0x20 && value <= 0x7E: 342 | case value == 0x85: 343 | case value >= 0xA0 && value <= 0xD7FF: 344 | case value >= 0xE000 && value <= 0xFFFD: 345 | case value >= 0x10000 && value <= 0x10FFFF: 346 | default: 347 | return yaml_parser_set_reader_error(parser, 348 | "control characters are not allowed", 349 | parser.offset, int(value)) 350 | } 351 | 352 | // Move the raw pointers. 353 | parser.raw_buffer_pos += width 354 | parser.offset += width 355 | 356 | // Finally put the character into the buffer. 357 | if value <= 0x7F { 358 | // 0000 0000-0000 007F . 0xxxxxxx 359 | parser.buffer[buffer_len+0] = byte(value) 360 | buffer_len += 1 361 | } else if value <= 0x7FF { 362 | // 0000 0080-0000 07FF . 110xxxxx 10xxxxxx 363 | parser.buffer[buffer_len+0] = byte(0xC0 + (value >> 6)) 364 | parser.buffer[buffer_len+1] = byte(0x80 + (value & 0x3F)) 365 | buffer_len += 2 366 | } else if value <= 0xFFFF { 367 | // 0000 0800-0000 FFFF . 1110xxxx 10xxxxxx 10xxxxxx 368 | parser.buffer[buffer_len+0] = byte(0xE0 + (value >> 12)) 369 | parser.buffer[buffer_len+1] = byte(0x80 + ((value >> 6) & 0x3F)) 370 | parser.buffer[buffer_len+2] = byte(0x80 + (value & 0x3F)) 371 | buffer_len += 3 372 | } else { 373 | // 0001 0000-0010 FFFF . 11110xxx 10xxxxxx 10xxxxxx 10xxxxxx 374 | parser.buffer[buffer_len+0] = byte(0xF0 + (value >> 18)) 375 | parser.buffer[buffer_len+1] = byte(0x80 + ((value >> 12) & 0x3F)) 376 | parser.buffer[buffer_len+2] = byte(0x80 + ((value >> 6) & 0x3F)) 377 | parser.buffer[buffer_len+3] = byte(0x80 + (value & 0x3F)) 378 | buffer_len += 4 379 | } 380 | 381 | parser.unread++ 382 | } 383 | 384 | // On EOF, put NUL into the buffer and return. 385 | if parser.eof { 386 | parser.buffer[buffer_len] = 0 387 | buffer_len++ 388 | parser.unread++ 389 | break 390 | } 391 | } 392 | parser.buffer = parser.buffer[:buffer_len] 393 | return true 394 | } 395 | -------------------------------------------------------------------------------- /vendor/gopkg.in/yaml.v2/resolve.go: -------------------------------------------------------------------------------- 1 | package yaml 2 | 3 | import ( 4 | "encoding/base64" 5 | "math" 6 | "strconv" 7 | "strings" 8 | "unicode/utf8" 9 | ) 10 | 11 | type resolveMapItem struct { 12 | value interface{} 13 | tag string 14 | } 15 | 16 | var resolveTable = make([]byte, 256) 17 | var resolveMap = make(map[string]resolveMapItem) 18 | 19 | func init() { 20 | t := resolveTable 21 | t[int('+')] = 'S' // Sign 22 | t[int('-')] = 'S' 23 | for _, c := range "0123456789" { 24 | t[int(c)] = 'D' // Digit 25 | } 26 | for _, c := range "yYnNtTfFoO~" { 27 | t[int(c)] = 'M' // In map 28 | } 29 | t[int('.')] = '.' 
// Float (potentially in map) 30 | 31 | var resolveMapList = []struct { 32 | v interface{} 33 | tag string 34 | l []string 35 | }{ 36 | {true, yaml_BOOL_TAG, []string{"y", "Y", "yes", "Yes", "YES"}}, 37 | {true, yaml_BOOL_TAG, []string{"true", "True", "TRUE"}}, 38 | {true, yaml_BOOL_TAG, []string{"on", "On", "ON"}}, 39 | {false, yaml_BOOL_TAG, []string{"n", "N", "no", "No", "NO"}}, 40 | {false, yaml_BOOL_TAG, []string{"false", "False", "FALSE"}}, 41 | {false, yaml_BOOL_TAG, []string{"off", "Off", "OFF"}}, 42 | {nil, yaml_NULL_TAG, []string{"", "~", "null", "Null", "NULL"}}, 43 | {math.NaN(), yaml_FLOAT_TAG, []string{".nan", ".NaN", ".NAN"}}, 44 | {math.Inf(+1), yaml_FLOAT_TAG, []string{".inf", ".Inf", ".INF"}}, 45 | {math.Inf(+1), yaml_FLOAT_TAG, []string{"+.inf", "+.Inf", "+.INF"}}, 46 | {math.Inf(-1), yaml_FLOAT_TAG, []string{"-.inf", "-.Inf", "-.INF"}}, 47 | {"<<", yaml_MERGE_TAG, []string{"<<"}}, 48 | } 49 | 50 | m := resolveMap 51 | for _, item := range resolveMapList { 52 | for _, s := range item.l { 53 | m[s] = resolveMapItem{item.v, item.tag} 54 | } 55 | } 56 | } 57 | 58 | const longTagPrefix = "tag:yaml.org,2002:" 59 | 60 | func shortTag(tag string) string { 61 | // TODO This can easily be made faster and produce less garbage. 62 | if strings.HasPrefix(tag, longTagPrefix) { 63 | return "!!" + tag[len(longTagPrefix):] 64 | } 65 | return tag 66 | } 67 | 68 | func longTag(tag string) string { 69 | if strings.HasPrefix(tag, "!!") { 70 | return longTagPrefix + tag[2:] 71 | } 72 | return tag 73 | } 74 | 75 | func resolvableTag(tag string) bool { 76 | switch tag { 77 | case "", yaml_STR_TAG, yaml_BOOL_TAG, yaml_INT_TAG, yaml_FLOAT_TAG, yaml_NULL_TAG: 78 | return true 79 | } 80 | return false 81 | } 82 | 83 | func resolve(tag string, in string) (rtag string, out interface{}) { 84 | if !resolvableTag(tag) { 85 | return tag, in 86 | } 87 | 88 | defer func() { 89 | switch tag { 90 | case "", rtag, yaml_STR_TAG, yaml_BINARY_TAG: 91 | return 92 | } 93 | failf("cannot decode %s `%s` as a %s", shortTag(rtag), in, shortTag(tag)) 94 | }() 95 | 96 | // Any data is accepted as a !!str or !!binary. 97 | // Otherwise, the prefix is enough of a hint about what it might be. 98 | hint := byte('N') 99 | if in != "" { 100 | hint = resolveTable[in[0]] 101 | } 102 | if hint != 0 && tag != yaml_STR_TAG && tag != yaml_BINARY_TAG { 103 | // Handle things we can lookup in a map. 104 | if item, ok := resolveMap[in]; ok { 105 | return item.tag, item.value 106 | } 107 | 108 | // Base 60 floats are a bad idea, were dropped in YAML 1.2, and 109 | // are purposefully unsupported here. They're still quoted on 110 | // the way out for compatibility with other parser, though. 111 | 112 | switch hint { 113 | case 'M': 114 | // We've already checked the map above. 115 | 116 | case '.': 117 | // Not in the map, so maybe a normal float. 118 | floatv, err := strconv.ParseFloat(in, 64) 119 | if err == nil { 120 | return yaml_FLOAT_TAG, floatv 121 | } 122 | 123 | case 'D', 'S': 124 | // Int, float, or timestamp. 
125 | plain := strings.Replace(in, "_", "", -1) 126 | intv, err := strconv.ParseInt(plain, 0, 64) 127 | if err == nil { 128 | if intv == int64(int(intv)) { 129 | return yaml_INT_TAG, int(intv) 130 | } else { 131 | return yaml_INT_TAG, intv 132 | } 133 | } 134 | uintv, err := strconv.ParseUint(plain, 0, 64) 135 | if err == nil { 136 | return yaml_INT_TAG, uintv 137 | } 138 | floatv, err := strconv.ParseFloat(plain, 64) 139 | if err == nil { 140 | return yaml_FLOAT_TAG, floatv 141 | } 142 | if strings.HasPrefix(plain, "0b") { 143 | intv, err := strconv.ParseInt(plain[2:], 2, 64) 144 | if err == nil { 145 | if intv == int64(int(intv)) { 146 | return yaml_INT_TAG, int(intv) 147 | } else { 148 | return yaml_INT_TAG, intv 149 | } 150 | } 151 | uintv, err := strconv.ParseUint(plain[2:], 2, 64) 152 | if err == nil { 153 | return yaml_INT_TAG, uintv 154 | } 155 | } else if strings.HasPrefix(plain, "-0b") { 156 | intv, err := strconv.ParseInt(plain[3:], 2, 64) 157 | if err == nil { 158 | if intv == int64(int(intv)) { 159 | return yaml_INT_TAG, -int(intv) 160 | } else { 161 | return yaml_INT_TAG, -intv 162 | } 163 | } 164 | } 165 | // XXX Handle timestamps here. 166 | 167 | default: 168 | panic("resolveTable item not yet handled: " + string(rune(hint)) + " (with " + in + ")") 169 | } 170 | } 171 | if tag == yaml_BINARY_TAG { 172 | return yaml_BINARY_TAG, in 173 | } 174 | if utf8.ValidString(in) { 175 | return yaml_STR_TAG, in 176 | } 177 | return yaml_BINARY_TAG, encodeBase64(in) 178 | } 179 | 180 | // encodeBase64 encodes s as base64 that is broken up into multiple lines 181 | // as appropriate for the resulting length. 182 | func encodeBase64(s string) string { 183 | const lineLen = 70 184 | encLen := base64.StdEncoding.EncodedLen(len(s)) 185 | lines := encLen/lineLen + 1 186 | buf := make([]byte, encLen*2+lines) 187 | in := buf[0:encLen] 188 | out := buf[encLen:] 189 | base64.StdEncoding.Encode(in, []byte(s)) 190 | k := 0 191 | for i := 0; i < len(in); i += lineLen { 192 | j := i + lineLen 193 | if j > len(in) { 194 | j = len(in) 195 | } 196 | k += copy(out[k:], in[i:j]) 197 | if lines > 1 { 198 | out[k] = '\n' 199 | k++ 200 | } 201 | } 202 | return string(out[:k]) 203 | } 204 | -------------------------------------------------------------------------------- /vendor/gopkg.in/yaml.v2/sorter.go: -------------------------------------------------------------------------------- 1 | package yaml 2 | 3 | import ( 4 | "reflect" 5 | "unicode" 6 | ) 7 | 8 | type keyList []reflect.Value 9 | 10 | func (l keyList) Len() int { return len(l) } 11 | func (l keyList) Swap(i, j int) { l[i], l[j] = l[j], l[i] } 12 | func (l keyList) Less(i, j int) bool { 13 | a := l[i] 14 | b := l[j] 15 | ak := a.Kind() 16 | bk := b.Kind() 17 | for (ak == reflect.Interface || ak == reflect.Ptr) && !a.IsNil() { 18 | a = a.Elem() 19 | ak = a.Kind() 20 | } 21 | for (bk == reflect.Interface || bk == reflect.Ptr) && !b.IsNil() { 22 | b = b.Elem() 23 | bk = b.Kind() 24 | } 25 | af, aok := keyFloat(a) 26 | bf, bok := keyFloat(b) 27 | if aok && bok { 28 | if af != bf { 29 | return af < bf 30 | } 31 | if ak != bk { 32 | return ak < bk 33 | } 34 | return numLess(a, b) 35 | } 36 | if ak != reflect.String || bk != reflect.String { 37 | return ak < bk 38 | } 39 | ar, br := []rune(a.String()), []rune(b.String()) 40 | for i := 0; i < len(ar) && i < len(br); i++ { 41 | if ar[i] == br[i] { 42 | continue 43 | } 44 | al := unicode.IsLetter(ar[i]) 45 | bl := unicode.IsLetter(br[i]) 46 | if al && bl { 47 | return ar[i] < br[i] 48 | } 49 | if al || bl { 
50 | return bl 51 | } 52 | var ai, bi int 53 | var an, bn int64 54 | for ai = i; ai < len(ar) && unicode.IsDigit(ar[ai]); ai++ { 55 | an = an*10 + int64(ar[ai]-'0') 56 | } 57 | for bi = i; bi < len(br) && unicode.IsDigit(br[bi]); bi++ { 58 | bn = bn*10 + int64(br[bi]-'0') 59 | } 60 | if an != bn { 61 | return an < bn 62 | } 63 | if ai != bi { 64 | return ai < bi 65 | } 66 | return ar[i] < br[i] 67 | } 68 | return len(ar) < len(br) 69 | } 70 | 71 | // keyFloat returns a float value for v if it is a number/bool 72 | // and whether it is a number/bool or not. 73 | func keyFloat(v reflect.Value) (f float64, ok bool) { 74 | switch v.Kind() { 75 | case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: 76 | return float64(v.Int()), true 77 | case reflect.Float32, reflect.Float64: 78 | return v.Float(), true 79 | case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: 80 | return float64(v.Uint()), true 81 | case reflect.Bool: 82 | if v.Bool() { 83 | return 1, true 84 | } 85 | return 0, true 86 | } 87 | return 0, false 88 | } 89 | 90 | // numLess returns whether a < b. 91 | // a and b must necessarily have the same kind. 92 | func numLess(a, b reflect.Value) bool { 93 | switch a.Kind() { 94 | case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: 95 | return a.Int() < b.Int() 96 | case reflect.Float32, reflect.Float64: 97 | return a.Float() < b.Float() 98 | case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: 99 | return a.Uint() < b.Uint() 100 | case reflect.Bool: 101 | return !a.Bool() && b.Bool() 102 | } 103 | panic("not a number") 104 | } 105 | -------------------------------------------------------------------------------- /vendor/gopkg.in/yaml.v2/writerc.go: -------------------------------------------------------------------------------- 1 | package yaml 2 | 3 | // Set the writer error and return false. 4 | func yaml_emitter_set_writer_error(emitter *yaml_emitter_t, problem string) bool { 5 | emitter.error = yaml_WRITER_ERROR 6 | emitter.problem = problem 7 | return false 8 | } 9 | 10 | // Flush the output buffer. 11 | func yaml_emitter_flush(emitter *yaml_emitter_t) bool { 12 | if emitter.write_handler == nil { 13 | panic("write handler not set") 14 | } 15 | 16 | // Check if the buffer is empty. 17 | if emitter.buffer_pos == 0 { 18 | return true 19 | } 20 | 21 | // If the output encoding is UTF-8, we don't need to recode the buffer. 22 | if emitter.encoding == yaml_UTF8_ENCODING { 23 | if err := emitter.write_handler(emitter, emitter.buffer[:emitter.buffer_pos]); err != nil { 24 | return yaml_emitter_set_writer_error(emitter, "write error: "+err.Error()) 25 | } 26 | emitter.buffer_pos = 0 27 | return true 28 | } 29 | 30 | // Recode the buffer into the raw buffer. 31 | var low, high int 32 | if emitter.encoding == yaml_UTF16LE_ENCODING { 33 | low, high = 0, 1 34 | } else { 35 | high, low = 1, 0 36 | } 37 | 38 | pos := 0 39 | for pos < emitter.buffer_pos { 40 | // See the "reader.c" code for more details on UTF-8 encoding. Note 41 | // that we assume that the buffer contains a valid UTF-8 sequence. 42 | 43 | // Read the next UTF-8 character. 
44 | octet := emitter.buffer[pos] 45 | 46 | var w int 47 | var value rune 48 | switch { 49 | case octet&0x80 == 0x00: 50 | w, value = 1, rune(octet&0x7F) 51 | case octet&0xE0 == 0xC0: 52 | w, value = 2, rune(octet&0x1F) 53 | case octet&0xF0 == 0xE0: 54 | w, value = 3, rune(octet&0x0F) 55 | case octet&0xF8 == 0xF0: 56 | w, value = 4, rune(octet&0x07) 57 | } 58 | for k := 1; k < w; k++ { 59 | octet = emitter.buffer[pos+k] 60 | value = (value << 6) + (rune(octet) & 0x3F) 61 | } 62 | pos += w 63 | 64 | // Write the character. 65 | if value < 0x10000 { 66 | var b [2]byte 67 | b[high] = byte(value >> 8) 68 | b[low] = byte(value & 0xFF) 69 | emitter.raw_buffer = append(emitter.raw_buffer, b[0], b[1]) 70 | } else { 71 | // Write the character using a surrogate pair (check "reader.c"). 72 | var b [4]byte 73 | value -= 0x10000 74 | b[high] = byte(0xD8 + (value >> 18)) 75 | b[low] = byte((value >> 10) & 0xFF) 76 | b[high+2] = byte(0xDC + ((value >> 8) & 0xFF)) 77 | b[low+2] = byte(value & 0xFF) 78 | emitter.raw_buffer = append(emitter.raw_buffer, b[0], b[1], b[2], b[3]) 79 | } 80 | } 81 | 82 | // Write the raw buffer. 83 | if err := emitter.write_handler(emitter, emitter.raw_buffer); err != nil { 84 | return yaml_emitter_set_writer_error(emitter, "write error: "+err.Error()) 85 | } 86 | emitter.buffer_pos = 0 87 | emitter.raw_buffer = emitter.raw_buffer[:0] 88 | return true 89 | } 90 | -------------------------------------------------------------------------------- /vendor/gopkg.in/yaml.v2/yaml.go: -------------------------------------------------------------------------------- 1 | // Package yaml implements YAML support for the Go language. 2 | // 3 | // Source code and other details for the project are available at GitHub: 4 | // 5 | // https://github.com/go-yaml/yaml 6 | // 7 | package yaml 8 | 9 | import ( 10 | "errors" 11 | "fmt" 12 | "reflect" 13 | "strings" 14 | "sync" 15 | ) 16 | 17 | // MapSlice encodes and decodes as a YAML map. 18 | // The order of keys is preserved when encoding and decoding. 19 | type MapSlice []MapItem 20 | 21 | // MapItem is an item in a MapSlice. 22 | type MapItem struct { 23 | Key, Value interface{} 24 | } 25 | 26 | // The Unmarshaler interface may be implemented by types to customize their 27 | // behavior when being unmarshaled from a YAML document. The UnmarshalYAML 28 | // method receives a function that may be called to unmarshal the original 29 | // YAML value into a field or variable. It is safe to call the unmarshal 30 | // function parameter more than once if necessary. 31 | type Unmarshaler interface { 32 | UnmarshalYAML(unmarshal func(interface{}) error) error 33 | } 34 | 35 | // The Marshaler interface may be implemented by types to customize their 36 | // behavior when being marshaled into a YAML document. The returned value 37 | // is marshaled in place of the original value implementing Marshaler. 38 | // 39 | // If an error is returned by MarshalYAML, the marshaling procedure stops 40 | // and returns with the provided error. 41 | type Marshaler interface { 42 | MarshalYAML() (interface{}, error) 43 | } 44 | 45 | // Unmarshal decodes the first document found within the in byte slice 46 | // and assigns decoded values into the out value. 47 | // 48 | // Maps and pointers (to a struct, string, int, etc) are accepted as out 49 | // values. If an internal pointer within a struct is not initialized, 50 | // the yaml package will initialize it if necessary for unmarshalling 51 | // the provided data. The out parameter must not be nil. 
52 | // 53 | // The type of the decoded values should be compatible with the respective 54 | // values in out. If one or more values cannot be decoded due to a type 55 | // mismatches, decoding continues partially until the end of the YAML 56 | // content, and a *yaml.TypeError is returned with details for all 57 | // missed values. 58 | // 59 | // Struct fields are only unmarshalled if they are exported (have an 60 | // upper case first letter), and are unmarshalled using the field name 61 | // lowercased as the default key. Custom keys may be defined via the 62 | // "yaml" name in the field tag: the content preceding the first comma 63 | // is used as the key, and the following comma-separated options are 64 | // used to tweak the marshalling process (see Marshal). 65 | // Conflicting names result in a runtime error. 66 | // 67 | // For example: 68 | // 69 | // type T struct { 70 | // F int `yaml:"a,omitempty"` 71 | // B int 72 | // } 73 | // var t T 74 | // yaml.Unmarshal([]byte("a: 1\nb: 2"), &t) 75 | // 76 | // See the documentation of Marshal for the format of tags and a list of 77 | // supported tag options. 78 | // 79 | func Unmarshal(in []byte, out interface{}) (err error) { 80 | defer handleErr(&err) 81 | d := newDecoder() 82 | p := newParser(in) 83 | defer p.destroy() 84 | node := p.parse() 85 | if node != nil { 86 | v := reflect.ValueOf(out) 87 | if v.Kind() == reflect.Ptr && !v.IsNil() { 88 | v = v.Elem() 89 | } 90 | d.unmarshal(node, v) 91 | } 92 | if len(d.terrors) > 0 { 93 | return &TypeError{d.terrors} 94 | } 95 | return nil 96 | } 97 | 98 | // Marshal serializes the value provided into a YAML document. The structure 99 | // of the generated document will reflect the structure of the value itself. 100 | // Maps and pointers (to struct, string, int, etc) are accepted as the in value. 101 | // 102 | // Struct fields are only unmarshalled if they are exported (have an upper case 103 | // first letter), and are unmarshalled using the field name lowercased as the 104 | // default key. Custom keys may be defined via the "yaml" name in the field 105 | // tag: the content preceding the first comma is used as the key, and the 106 | // following comma-separated options are used to tweak the marshalling process. 107 | // Conflicting names result in a runtime error. 108 | // 109 | // The field tag format accepted is: 110 | // 111 | // `(...) yaml:"[][,[,]]" (...)` 112 | // 113 | // The following flags are currently supported: 114 | // 115 | // omitempty Only include the field if it's not set to the zero 116 | // value for the type or to empty slices or maps. 117 | // Does not apply to zero valued structs. 118 | // 119 | // flow Marshal using a flow style (useful for structs, 120 | // sequences and maps). 121 | // 122 | // inline Inline the field, which must be a struct or a map, 123 | // causing all of its fields or keys to be processed as if 124 | // they were part of the outer struct. For maps, keys must 125 | // not conflict with the yaml keys of other struct fields. 126 | // 127 | // In addition, if the key is "-", the field is ignored. 
128 | // 129 | // For example: 130 | // 131 | // type T struct { 132 | // F int "a,omitempty" 133 | // B int 134 | // } 135 | // yaml.Marshal(&T{B: 2}) // Returns "b: 2\n" 136 | // yaml.Marshal(&T{F: 1}} // Returns "a: 1\nb: 0\n" 137 | // 138 | func Marshal(in interface{}) (out []byte, err error) { 139 | defer handleErr(&err) 140 | e := newEncoder() 141 | defer e.destroy() 142 | e.marshal("", reflect.ValueOf(in)) 143 | e.finish() 144 | out = e.out 145 | return 146 | } 147 | 148 | func handleErr(err *error) { 149 | if v := recover(); v != nil { 150 | if e, ok := v.(yamlError); ok { 151 | *err = e.err 152 | } else { 153 | panic(v) 154 | } 155 | } 156 | } 157 | 158 | type yamlError struct { 159 | err error 160 | } 161 | 162 | func fail(err error) { 163 | panic(yamlError{err}) 164 | } 165 | 166 | func failf(format string, args ...interface{}) { 167 | panic(yamlError{fmt.Errorf("yaml: "+format, args...)}) 168 | } 169 | 170 | // A TypeError is returned by Unmarshal when one or more fields in 171 | // the YAML document cannot be properly decoded into the requested 172 | // types. When this error is returned, the value is still 173 | // unmarshaled partially. 174 | type TypeError struct { 175 | Errors []string 176 | } 177 | 178 | func (e *TypeError) Error() string { 179 | return fmt.Sprintf("yaml: unmarshal errors:\n %s", strings.Join(e.Errors, "\n ")) 180 | } 181 | 182 | // -------------------------------------------------------------------------- 183 | // Maintain a mapping of keys to structure field indexes 184 | 185 | // The code in this section was copied from mgo/bson. 186 | 187 | // structInfo holds details for the serialization of fields of 188 | // a given struct. 189 | type structInfo struct { 190 | FieldsMap map[string]fieldInfo 191 | FieldsList []fieldInfo 192 | 193 | // InlineMap is the number of the field in the struct that 194 | // contains an ,inline map, or -1 if there's none. 195 | InlineMap int 196 | } 197 | 198 | type fieldInfo struct { 199 | Key string 200 | Num int 201 | OmitEmpty bool 202 | Flow bool 203 | 204 | // Inline holds the field index if the field is part of an inlined struct. 
205 | Inline []int 206 | } 207 | 208 | var structMap = make(map[reflect.Type]*structInfo) 209 | var fieldMapMutex sync.RWMutex 210 | 211 | func getStructInfo(st reflect.Type) (*structInfo, error) { 212 | fieldMapMutex.RLock() 213 | sinfo, found := structMap[st] 214 | fieldMapMutex.RUnlock() 215 | if found { 216 | return sinfo, nil 217 | } 218 | 219 | n := st.NumField() 220 | fieldsMap := make(map[string]fieldInfo) 221 | fieldsList := make([]fieldInfo, 0, n) 222 | inlineMap := -1 223 | for i := 0; i != n; i++ { 224 | field := st.Field(i) 225 | if field.PkgPath != "" && !field.Anonymous { 226 | continue // Private field 227 | } 228 | 229 | info := fieldInfo{Num: i} 230 | 231 | tag := field.Tag.Get("yaml") 232 | if tag == "" && strings.Index(string(field.Tag), ":") < 0 { 233 | tag = string(field.Tag) 234 | } 235 | if tag == "-" { 236 | continue 237 | } 238 | 239 | inline := false 240 | fields := strings.Split(tag, ",") 241 | if len(fields) > 1 { 242 | for _, flag := range fields[1:] { 243 | switch flag { 244 | case "omitempty": 245 | info.OmitEmpty = true 246 | case "flow": 247 | info.Flow = true 248 | case "inline": 249 | inline = true 250 | default: 251 | return nil, errors.New(fmt.Sprintf("Unsupported flag %q in tag %q of type %s", flag, tag, st)) 252 | } 253 | } 254 | tag = fields[0] 255 | } 256 | 257 | if inline { 258 | switch field.Type.Kind() { 259 | case reflect.Map: 260 | if inlineMap >= 0 { 261 | return nil, errors.New("Multiple ,inline maps in struct " + st.String()) 262 | } 263 | if field.Type.Key() != reflect.TypeOf("") { 264 | return nil, errors.New("Option ,inline needs a map with string keys in struct " + st.String()) 265 | } 266 | inlineMap = info.Num 267 | case reflect.Struct: 268 | sinfo, err := getStructInfo(field.Type) 269 | if err != nil { 270 | return nil, err 271 | } 272 | for _, finfo := range sinfo.FieldsList { 273 | if _, found := fieldsMap[finfo.Key]; found { 274 | msg := "Duplicated key '" + finfo.Key + "' in struct " + st.String() 275 | return nil, errors.New(msg) 276 | } 277 | if finfo.Inline == nil { 278 | finfo.Inline = []int{i, finfo.Num} 279 | } else { 280 | finfo.Inline = append([]int{i}, finfo.Inline...) 
281 | } 282 | fieldsMap[finfo.Key] = finfo 283 | fieldsList = append(fieldsList, finfo) 284 | } 285 | default: 286 | //return nil, errors.New("Option ,inline needs a struct value or map field") 287 | return nil, errors.New("Option ,inline needs a struct value field") 288 | } 289 | continue 290 | } 291 | 292 | if tag != "" { 293 | info.Key = tag 294 | } else { 295 | info.Key = strings.ToLower(field.Name) 296 | } 297 | 298 | if _, found = fieldsMap[info.Key]; found { 299 | msg := "Duplicated key '" + info.Key + "' in struct " + st.String() 300 | return nil, errors.New(msg) 301 | } 302 | 303 | fieldsList = append(fieldsList, info) 304 | fieldsMap[info.Key] = info 305 | } 306 | 307 | sinfo = &structInfo{fieldsMap, fieldsList, inlineMap} 308 | 309 | fieldMapMutex.Lock() 310 | structMap[st] = sinfo 311 | fieldMapMutex.Unlock() 312 | return sinfo, nil 313 | } 314 | 315 | func isZero(v reflect.Value) bool { 316 | switch v.Kind() { 317 | case reflect.String: 318 | return len(v.String()) == 0 319 | case reflect.Interface, reflect.Ptr: 320 | return v.IsNil() 321 | case reflect.Slice: 322 | return v.Len() == 0 323 | case reflect.Map: 324 | return v.Len() == 0 325 | case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: 326 | return v.Int() == 0 327 | case reflect.Float32, reflect.Float64: 328 | return v.Float() == 0 329 | case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: 330 | return v.Uint() == 0 331 | case reflect.Bool: 332 | return !v.Bool() 333 | case reflect.Struct: 334 | vt := v.Type() 335 | for i := v.NumField() - 1; i >= 0; i-- { 336 | if vt.Field(i).PkgPath != "" { 337 | continue // Private field 338 | } 339 | if !isZero(v.Field(i)) { 340 | return false 341 | } 342 | } 343 | return true 344 | } 345 | return false 346 | } 347 | -------------------------------------------------------------------------------- /vendor/gopkg.in/yaml.v2/yamlh.go: -------------------------------------------------------------------------------- 1 | package yaml 2 | 3 | import ( 4 | "io" 5 | ) 6 | 7 | // The version directive data. 8 | type yaml_version_directive_t struct { 9 | major int8 // The major version number. 10 | minor int8 // The minor version number. 11 | } 12 | 13 | // The tag directive data. 14 | type yaml_tag_directive_t struct { 15 | handle []byte // The tag handle. 16 | prefix []byte // The tag prefix. 17 | } 18 | 19 | type yaml_encoding_t int 20 | 21 | // The stream encoding. 22 | const ( 23 | // Let the parser choose the encoding. 24 | yaml_ANY_ENCODING yaml_encoding_t = iota 25 | 26 | yaml_UTF8_ENCODING // The default UTF-8 encoding. 27 | yaml_UTF16LE_ENCODING // The UTF-16-LE encoding with BOM. 28 | yaml_UTF16BE_ENCODING // The UTF-16-BE encoding with BOM. 29 | ) 30 | 31 | type yaml_break_t int 32 | 33 | // Line break types. 34 | const ( 35 | // Let the parser choose the break type. 36 | yaml_ANY_BREAK yaml_break_t = iota 37 | 38 | yaml_CR_BREAK // Use CR for line breaks (Mac style). 39 | yaml_LN_BREAK // Use LN for line breaks (Unix style). 40 | yaml_CRLN_BREAK // Use CR LN for line breaks (DOS style). 41 | ) 42 | 43 | type yaml_error_type_t int 44 | 45 | // Many bad things could happen with the parser and emitter. 46 | const ( 47 | // No error is produced. 48 | yaml_NO_ERROR yaml_error_type_t = iota 49 | 50 | yaml_MEMORY_ERROR // Cannot allocate or reallocate a block of memory. 51 | yaml_READER_ERROR // Cannot read or decode the input stream. 52 | yaml_SCANNER_ERROR // Cannot scan the input stream. 
53 | yaml_PARSER_ERROR // Cannot parse the input stream. 54 | yaml_COMPOSER_ERROR // Cannot compose a YAML document. 55 | yaml_WRITER_ERROR // Cannot write to the output stream. 56 | yaml_EMITTER_ERROR // Cannot emit a YAML stream. 57 | ) 58 | 59 | // The pointer position. 60 | type yaml_mark_t struct { 61 | index int // The position index. 62 | line int // The position line. 63 | column int // The position column. 64 | } 65 | 66 | // Node Styles 67 | 68 | type yaml_style_t int8 69 | 70 | type yaml_scalar_style_t yaml_style_t 71 | 72 | // Scalar styles. 73 | const ( 74 | // Let the emitter choose the style. 75 | yaml_ANY_SCALAR_STYLE yaml_scalar_style_t = iota 76 | 77 | yaml_PLAIN_SCALAR_STYLE // The plain scalar style. 78 | yaml_SINGLE_QUOTED_SCALAR_STYLE // The single-quoted scalar style. 79 | yaml_DOUBLE_QUOTED_SCALAR_STYLE // The double-quoted scalar style. 80 | yaml_LITERAL_SCALAR_STYLE // The literal scalar style. 81 | yaml_FOLDED_SCALAR_STYLE // The folded scalar style. 82 | ) 83 | 84 | type yaml_sequence_style_t yaml_style_t 85 | 86 | // Sequence styles. 87 | const ( 88 | // Let the emitter choose the style. 89 | yaml_ANY_SEQUENCE_STYLE yaml_sequence_style_t = iota 90 | 91 | yaml_BLOCK_SEQUENCE_STYLE // The block sequence style. 92 | yaml_FLOW_SEQUENCE_STYLE // The flow sequence style. 93 | ) 94 | 95 | type yaml_mapping_style_t yaml_style_t 96 | 97 | // Mapping styles. 98 | const ( 99 | // Let the emitter choose the style. 100 | yaml_ANY_MAPPING_STYLE yaml_mapping_style_t = iota 101 | 102 | yaml_BLOCK_MAPPING_STYLE // The block mapping style. 103 | yaml_FLOW_MAPPING_STYLE // The flow mapping style. 104 | ) 105 | 106 | // Tokens 107 | 108 | type yaml_token_type_t int 109 | 110 | // Token types. 111 | const ( 112 | // An empty token. 113 | yaml_NO_TOKEN yaml_token_type_t = iota 114 | 115 | yaml_STREAM_START_TOKEN // A STREAM-START token. 116 | yaml_STREAM_END_TOKEN // A STREAM-END token. 117 | 118 | yaml_VERSION_DIRECTIVE_TOKEN // A VERSION-DIRECTIVE token. 119 | yaml_TAG_DIRECTIVE_TOKEN // A TAG-DIRECTIVE token. 120 | yaml_DOCUMENT_START_TOKEN // A DOCUMENT-START token. 121 | yaml_DOCUMENT_END_TOKEN // A DOCUMENT-END token. 122 | 123 | yaml_BLOCK_SEQUENCE_START_TOKEN // A BLOCK-SEQUENCE-START token. 124 | yaml_BLOCK_MAPPING_START_TOKEN // A BLOCK-SEQUENCE-END token. 125 | yaml_BLOCK_END_TOKEN // A BLOCK-END token. 126 | 127 | yaml_FLOW_SEQUENCE_START_TOKEN // A FLOW-SEQUENCE-START token. 128 | yaml_FLOW_SEQUENCE_END_TOKEN // A FLOW-SEQUENCE-END token. 129 | yaml_FLOW_MAPPING_START_TOKEN // A FLOW-MAPPING-START token. 130 | yaml_FLOW_MAPPING_END_TOKEN // A FLOW-MAPPING-END token. 131 | 132 | yaml_BLOCK_ENTRY_TOKEN // A BLOCK-ENTRY token. 133 | yaml_FLOW_ENTRY_TOKEN // A FLOW-ENTRY token. 134 | yaml_KEY_TOKEN // A KEY token. 135 | yaml_VALUE_TOKEN // A VALUE token. 136 | 137 | yaml_ALIAS_TOKEN // An ALIAS token. 138 | yaml_ANCHOR_TOKEN // An ANCHOR token. 139 | yaml_TAG_TOKEN // A TAG token. 140 | yaml_SCALAR_TOKEN // A SCALAR token. 
141 | ) 142 | 143 | func (tt yaml_token_type_t) String() string { 144 | switch tt { 145 | case yaml_NO_TOKEN: 146 | return "yaml_NO_TOKEN" 147 | case yaml_STREAM_START_TOKEN: 148 | return "yaml_STREAM_START_TOKEN" 149 | case yaml_STREAM_END_TOKEN: 150 | return "yaml_STREAM_END_TOKEN" 151 | case yaml_VERSION_DIRECTIVE_TOKEN: 152 | return "yaml_VERSION_DIRECTIVE_TOKEN" 153 | case yaml_TAG_DIRECTIVE_TOKEN: 154 | return "yaml_TAG_DIRECTIVE_TOKEN" 155 | case yaml_DOCUMENT_START_TOKEN: 156 | return "yaml_DOCUMENT_START_TOKEN" 157 | case yaml_DOCUMENT_END_TOKEN: 158 | return "yaml_DOCUMENT_END_TOKEN" 159 | case yaml_BLOCK_SEQUENCE_START_TOKEN: 160 | return "yaml_BLOCK_SEQUENCE_START_TOKEN" 161 | case yaml_BLOCK_MAPPING_START_TOKEN: 162 | return "yaml_BLOCK_MAPPING_START_TOKEN" 163 | case yaml_BLOCK_END_TOKEN: 164 | return "yaml_BLOCK_END_TOKEN" 165 | case yaml_FLOW_SEQUENCE_START_TOKEN: 166 | return "yaml_FLOW_SEQUENCE_START_TOKEN" 167 | case yaml_FLOW_SEQUENCE_END_TOKEN: 168 | return "yaml_FLOW_SEQUENCE_END_TOKEN" 169 | case yaml_FLOW_MAPPING_START_TOKEN: 170 | return "yaml_FLOW_MAPPING_START_TOKEN" 171 | case yaml_FLOW_MAPPING_END_TOKEN: 172 | return "yaml_FLOW_MAPPING_END_TOKEN" 173 | case yaml_BLOCK_ENTRY_TOKEN: 174 | return "yaml_BLOCK_ENTRY_TOKEN" 175 | case yaml_FLOW_ENTRY_TOKEN: 176 | return "yaml_FLOW_ENTRY_TOKEN" 177 | case yaml_KEY_TOKEN: 178 | return "yaml_KEY_TOKEN" 179 | case yaml_VALUE_TOKEN: 180 | return "yaml_VALUE_TOKEN" 181 | case yaml_ALIAS_TOKEN: 182 | return "yaml_ALIAS_TOKEN" 183 | case yaml_ANCHOR_TOKEN: 184 | return "yaml_ANCHOR_TOKEN" 185 | case yaml_TAG_TOKEN: 186 | return "yaml_TAG_TOKEN" 187 | case yaml_SCALAR_TOKEN: 188 | return "yaml_SCALAR_TOKEN" 189 | } 190 | return "" 191 | } 192 | 193 | // The token structure. 194 | type yaml_token_t struct { 195 | // The token type. 196 | typ yaml_token_type_t 197 | 198 | // The start/end of the token. 199 | start_mark, end_mark yaml_mark_t 200 | 201 | // The stream encoding (for yaml_STREAM_START_TOKEN). 202 | encoding yaml_encoding_t 203 | 204 | // The alias/anchor/scalar value or tag/tag directive handle 205 | // (for yaml_ALIAS_TOKEN, yaml_ANCHOR_TOKEN, yaml_SCALAR_TOKEN, yaml_TAG_TOKEN, yaml_TAG_DIRECTIVE_TOKEN). 206 | value []byte 207 | 208 | // The tag suffix (for yaml_TAG_TOKEN). 209 | suffix []byte 210 | 211 | // The tag directive prefix (for yaml_TAG_DIRECTIVE_TOKEN). 212 | prefix []byte 213 | 214 | // The scalar style (for yaml_SCALAR_TOKEN). 215 | style yaml_scalar_style_t 216 | 217 | // The version directive major/minor (for yaml_VERSION_DIRECTIVE_TOKEN). 218 | major, minor int8 219 | } 220 | 221 | // Events 222 | 223 | type yaml_event_type_t int8 224 | 225 | // Event types. 226 | const ( 227 | // An empty event. 228 | yaml_NO_EVENT yaml_event_type_t = iota 229 | 230 | yaml_STREAM_START_EVENT // A STREAM-START event. 231 | yaml_STREAM_END_EVENT // A STREAM-END event. 232 | yaml_DOCUMENT_START_EVENT // A DOCUMENT-START event. 233 | yaml_DOCUMENT_END_EVENT // A DOCUMENT-END event. 234 | yaml_ALIAS_EVENT // An ALIAS event. 235 | yaml_SCALAR_EVENT // A SCALAR event. 236 | yaml_SEQUENCE_START_EVENT // A SEQUENCE-START event. 237 | yaml_SEQUENCE_END_EVENT // A SEQUENCE-END event. 238 | yaml_MAPPING_START_EVENT // A MAPPING-START event. 239 | yaml_MAPPING_END_EVENT // A MAPPING-END event. 240 | ) 241 | 242 | // The event structure. 243 | type yaml_event_t struct { 244 | 245 | // The event type. 246 | typ yaml_event_type_t 247 | 248 | // The start and end of the event. 
249 | start_mark, end_mark yaml_mark_t 250 | 251 | // The document encoding (for yaml_STREAM_START_EVENT). 252 | encoding yaml_encoding_t 253 | 254 | // The version directive (for yaml_DOCUMENT_START_EVENT). 255 | version_directive *yaml_version_directive_t 256 | 257 | // The list of tag directives (for yaml_DOCUMENT_START_EVENT). 258 | tag_directives []yaml_tag_directive_t 259 | 260 | // The anchor (for yaml_SCALAR_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT, yaml_ALIAS_EVENT). 261 | anchor []byte 262 | 263 | // The tag (for yaml_SCALAR_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT). 264 | tag []byte 265 | 266 | // The scalar value (for yaml_SCALAR_EVENT). 267 | value []byte 268 | 269 | // Is the document start/end indicator implicit, or the tag optional? 270 | // (for yaml_DOCUMENT_START_EVENT, yaml_DOCUMENT_END_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT, yaml_SCALAR_EVENT). 271 | implicit bool 272 | 273 | // Is the tag optional for any non-plain style? (for yaml_SCALAR_EVENT). 274 | quoted_implicit bool 275 | 276 | // The style (for yaml_SCALAR_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT). 277 | style yaml_style_t 278 | } 279 | 280 | func (e *yaml_event_t) scalar_style() yaml_scalar_style_t { return yaml_scalar_style_t(e.style) } 281 | func (e *yaml_event_t) sequence_style() yaml_sequence_style_t { return yaml_sequence_style_t(e.style) } 282 | func (e *yaml_event_t) mapping_style() yaml_mapping_style_t { return yaml_mapping_style_t(e.style) } 283 | 284 | // Nodes 285 | 286 | const ( 287 | yaml_NULL_TAG = "tag:yaml.org,2002:null" // The tag !!null with the only possible value: null. 288 | yaml_BOOL_TAG = "tag:yaml.org,2002:bool" // The tag !!bool with the values: true and false. 289 | yaml_STR_TAG = "tag:yaml.org,2002:str" // The tag !!str for string values. 290 | yaml_INT_TAG = "tag:yaml.org,2002:int" // The tag !!int for integer values. 291 | yaml_FLOAT_TAG = "tag:yaml.org,2002:float" // The tag !!float for float values. 292 | yaml_TIMESTAMP_TAG = "tag:yaml.org,2002:timestamp" // The tag !!timestamp for date and time values. 293 | 294 | yaml_SEQ_TAG = "tag:yaml.org,2002:seq" // The tag !!seq is used to denote sequences. 295 | yaml_MAP_TAG = "tag:yaml.org,2002:map" // The tag !!map is used to denote mapping. 296 | 297 | // Not in original libyaml. 298 | yaml_BINARY_TAG = "tag:yaml.org,2002:binary" 299 | yaml_MERGE_TAG = "tag:yaml.org,2002:merge" 300 | 301 | yaml_DEFAULT_SCALAR_TAG = yaml_STR_TAG // The default scalar tag is !!str. 302 | yaml_DEFAULT_SEQUENCE_TAG = yaml_SEQ_TAG // The default sequence tag is !!seq. 303 | yaml_DEFAULT_MAPPING_TAG = yaml_MAP_TAG // The default mapping tag is !!map. 304 | ) 305 | 306 | type yaml_node_type_t int 307 | 308 | // Node types. 309 | const ( 310 | // An empty node. 311 | yaml_NO_NODE yaml_node_type_t = iota 312 | 313 | yaml_SCALAR_NODE // A scalar node. 314 | yaml_SEQUENCE_NODE // A sequence node. 315 | yaml_MAPPING_NODE // A mapping node. 316 | ) 317 | 318 | // An element of a sequence node. 319 | type yaml_node_item_t int 320 | 321 | // An element of a mapping node. 322 | type yaml_node_pair_t struct { 323 | key int // The key of the element. 324 | value int // The value of the element. 325 | } 326 | 327 | // The node structure. 328 | type yaml_node_t struct { 329 | typ yaml_node_type_t // The node type. 330 | tag []byte // The node tag. 331 | 332 | // The node data. 333 | 334 | // The scalar parameters (for yaml_SCALAR_NODE). 335 | scalar struct { 336 | value []byte // The scalar value. 
337 | length int // The length of the scalar value. 338 | style yaml_scalar_style_t // The scalar style. 339 | } 340 | 341 | // The sequence parameters (for YAML_SEQUENCE_NODE). 342 | sequence struct { 343 | items_data []yaml_node_item_t // The stack of sequence items. 344 | style yaml_sequence_style_t // The sequence style. 345 | } 346 | 347 | // The mapping parameters (for yaml_MAPPING_NODE). 348 | mapping struct { 349 | pairs_data []yaml_node_pair_t // The stack of mapping pairs (key, value). 350 | pairs_start *yaml_node_pair_t // The beginning of the stack. 351 | pairs_end *yaml_node_pair_t // The end of the stack. 352 | pairs_top *yaml_node_pair_t // The top of the stack. 353 | style yaml_mapping_style_t // The mapping style. 354 | } 355 | 356 | start_mark yaml_mark_t // The beginning of the node. 357 | end_mark yaml_mark_t // The end of the node. 358 | 359 | } 360 | 361 | // The document structure. 362 | type yaml_document_t struct { 363 | 364 | // The document nodes. 365 | nodes []yaml_node_t 366 | 367 | // The version directive. 368 | version_directive *yaml_version_directive_t 369 | 370 | // The list of tag directives. 371 | tag_directives_data []yaml_tag_directive_t 372 | tag_directives_start int // The beginning of the tag directives list. 373 | tag_directives_end int // The end of the tag directives list. 374 | 375 | start_implicit int // Is the document start indicator implicit? 376 | end_implicit int // Is the document end indicator implicit? 377 | 378 | // The start/end of the document. 379 | start_mark, end_mark yaml_mark_t 380 | } 381 | 382 | // The prototype of a read handler. 383 | // 384 | // The read handler is called when the parser needs to read more bytes from the 385 | // source. The handler should write not more than size bytes to the buffer. 386 | // The number of written bytes should be set to the size_read variable. 387 | // 388 | // [in,out] data A pointer to an application data specified by 389 | // yaml_parser_set_input(). 390 | // [out] buffer The buffer to write the data from the source. 391 | // [in] size The size of the buffer. 392 | // [out] size_read The actual number of bytes read from the source. 393 | // 394 | // On success, the handler should return 1. If the handler failed, 395 | // the returned value should be 0. On EOF, the handler should set the 396 | // size_read to 0 and return 1. 397 | type yaml_read_handler_t func(parser *yaml_parser_t, buffer []byte) (n int, err error) 398 | 399 | // This structure holds information about a potential simple key. 400 | type yaml_simple_key_t struct { 401 | possible bool // Is a simple key possible? 402 | required bool // Is a simple key required? 403 | token_number int // The number of the token. 404 | mark yaml_mark_t // The position mark. 405 | } 406 | 407 | // The states of the parser. 408 | type yaml_parser_state_t int 409 | 410 | const ( 411 | yaml_PARSE_STREAM_START_STATE yaml_parser_state_t = iota 412 | 413 | yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE // Expect the beginning of an implicit document. 414 | yaml_PARSE_DOCUMENT_START_STATE // Expect DOCUMENT-START. 415 | yaml_PARSE_DOCUMENT_CONTENT_STATE // Expect the content of a document. 416 | yaml_PARSE_DOCUMENT_END_STATE // Expect DOCUMENT-END. 417 | yaml_PARSE_BLOCK_NODE_STATE // Expect a block node. 418 | yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE // Expect a block node or indentless sequence. 419 | yaml_PARSE_FLOW_NODE_STATE // Expect a flow node. 
420 | yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE // Expect the first entry of a block sequence. 421 | yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE // Expect an entry of a block sequence. 422 | yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE // Expect an entry of an indentless sequence. 423 | yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE // Expect the first key of a block mapping. 424 | yaml_PARSE_BLOCK_MAPPING_KEY_STATE // Expect a block mapping key. 425 | yaml_PARSE_BLOCK_MAPPING_VALUE_STATE // Expect a block mapping value. 426 | yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE // Expect the first entry of a flow sequence. 427 | yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE // Expect an entry of a flow sequence. 428 | yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE // Expect a key of an ordered mapping. 429 | yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE // Expect a value of an ordered mapping. 430 | yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE // Expect the and of an ordered mapping entry. 431 | yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE // Expect the first key of a flow mapping. 432 | yaml_PARSE_FLOW_MAPPING_KEY_STATE // Expect a key of a flow mapping. 433 | yaml_PARSE_FLOW_MAPPING_VALUE_STATE // Expect a value of a flow mapping. 434 | yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE // Expect an empty value of a flow mapping. 435 | yaml_PARSE_END_STATE // Expect nothing. 436 | ) 437 | 438 | func (ps yaml_parser_state_t) String() string { 439 | switch ps { 440 | case yaml_PARSE_STREAM_START_STATE: 441 | return "yaml_PARSE_STREAM_START_STATE" 442 | case yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE: 443 | return "yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE" 444 | case yaml_PARSE_DOCUMENT_START_STATE: 445 | return "yaml_PARSE_DOCUMENT_START_STATE" 446 | case yaml_PARSE_DOCUMENT_CONTENT_STATE: 447 | return "yaml_PARSE_DOCUMENT_CONTENT_STATE" 448 | case yaml_PARSE_DOCUMENT_END_STATE: 449 | return "yaml_PARSE_DOCUMENT_END_STATE" 450 | case yaml_PARSE_BLOCK_NODE_STATE: 451 | return "yaml_PARSE_BLOCK_NODE_STATE" 452 | case yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE: 453 | return "yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE" 454 | case yaml_PARSE_FLOW_NODE_STATE: 455 | return "yaml_PARSE_FLOW_NODE_STATE" 456 | case yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE: 457 | return "yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE" 458 | case yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE: 459 | return "yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE" 460 | case yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE: 461 | return "yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE" 462 | case yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE: 463 | return "yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE" 464 | case yaml_PARSE_BLOCK_MAPPING_KEY_STATE: 465 | return "yaml_PARSE_BLOCK_MAPPING_KEY_STATE" 466 | case yaml_PARSE_BLOCK_MAPPING_VALUE_STATE: 467 | return "yaml_PARSE_BLOCK_MAPPING_VALUE_STATE" 468 | case yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE: 469 | return "yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE" 470 | case yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE: 471 | return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE" 472 | case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE: 473 | return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE" 474 | case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE: 475 | return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE" 476 | case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE: 477 | return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE" 478 | case yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE: 479 | return "yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE" 
480 | case yaml_PARSE_FLOW_MAPPING_KEY_STATE: 481 | return "yaml_PARSE_FLOW_MAPPING_KEY_STATE" 482 | case yaml_PARSE_FLOW_MAPPING_VALUE_STATE: 483 | return "yaml_PARSE_FLOW_MAPPING_VALUE_STATE" 484 | case yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE: 485 | return "yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE" 486 | case yaml_PARSE_END_STATE: 487 | return "yaml_PARSE_END_STATE" 488 | } 489 | return "" 490 | } 491 | 492 | // This structure holds aliases data. 493 | type yaml_alias_data_t struct { 494 | anchor []byte // The anchor. 495 | index int // The node id. 496 | mark yaml_mark_t // The anchor mark. 497 | } 498 | 499 | // The parser structure. 500 | // 501 | // All members are internal. Manage the structure using the 502 | // yaml_parser_ family of functions. 503 | type yaml_parser_t struct { 504 | 505 | // Error handling 506 | 507 | error yaml_error_type_t // Error type. 508 | 509 | problem string // Error description. 510 | 511 | // The byte about which the problem occured. 512 | problem_offset int 513 | problem_value int 514 | problem_mark yaml_mark_t 515 | 516 | // The error context. 517 | context string 518 | context_mark yaml_mark_t 519 | 520 | // Reader stuff 521 | 522 | read_handler yaml_read_handler_t // Read handler. 523 | 524 | input_file io.Reader // File input data. 525 | input []byte // String input data. 526 | input_pos int 527 | 528 | eof bool // EOF flag 529 | 530 | buffer []byte // The working buffer. 531 | buffer_pos int // The current position of the buffer. 532 | 533 | unread int // The number of unread characters in the buffer. 534 | 535 | raw_buffer []byte // The raw buffer. 536 | raw_buffer_pos int // The current position of the buffer. 537 | 538 | encoding yaml_encoding_t // The input encoding. 539 | 540 | offset int // The offset of the current position (in bytes). 541 | mark yaml_mark_t // The mark of the current position. 542 | 543 | // Scanner stuff 544 | 545 | stream_start_produced bool // Have we started to scan the input stream? 546 | stream_end_produced bool // Have we reached the end of the input stream? 547 | 548 | flow_level int // The number of unclosed '[' and '{' indicators. 549 | 550 | tokens []yaml_token_t // The tokens queue. 551 | tokens_head int // The head of the tokens queue. 552 | tokens_parsed int // The number of tokens fetched from the queue. 553 | token_available bool // Does the tokens queue contain a token ready for dequeueing. 554 | 555 | indent int // The current indentation level. 556 | indents []int // The indentation levels stack. 557 | 558 | simple_key_allowed bool // May a simple key occur at the current position? 559 | simple_keys []yaml_simple_key_t // The stack of simple keys. 560 | 561 | // Parser stuff 562 | 563 | state yaml_parser_state_t // The current parser state. 564 | states []yaml_parser_state_t // The parser states stack. 565 | marks []yaml_mark_t // The stack of marks. 566 | tag_directives []yaml_tag_directive_t // The list of TAG directives. 567 | 568 | // Dumper stuff 569 | 570 | aliases []yaml_alias_data_t // The alias data. 571 | 572 | document *yaml_document_t // The currently parsed document. 573 | } 574 | 575 | // Emitter Definitions 576 | 577 | // The prototype of a write handler. 578 | // 579 | // The write handler is called when the emitter needs to flush the accumulated 580 | // characters to the output. The handler should write @a size bytes of the 581 | // @a buffer to the output. 582 | // 583 | // @param[in,out] data A pointer to an application data specified by 584 | // yaml_emitter_set_output(). 
585 | // @param[in] buffer The buffer with bytes to be written. 586 | // @param[in] size The size of the buffer. 587 | // 588 | // @returns On success, the handler should return @c 1. If the handler failed, 589 | // the returned value should be @c 0. 590 | // 591 | type yaml_write_handler_t func(emitter *yaml_emitter_t, buffer []byte) error 592 | 593 | type yaml_emitter_state_t int 594 | 595 | // The emitter states. 596 | const ( 597 | // Expect STREAM-START. 598 | yaml_EMIT_STREAM_START_STATE yaml_emitter_state_t = iota 599 | 600 | yaml_EMIT_FIRST_DOCUMENT_START_STATE // Expect the first DOCUMENT-START or STREAM-END. 601 | yaml_EMIT_DOCUMENT_START_STATE // Expect DOCUMENT-START or STREAM-END. 602 | yaml_EMIT_DOCUMENT_CONTENT_STATE // Expect the content of a document. 603 | yaml_EMIT_DOCUMENT_END_STATE // Expect DOCUMENT-END. 604 | yaml_EMIT_FLOW_SEQUENCE_FIRST_ITEM_STATE // Expect the first item of a flow sequence. 605 | yaml_EMIT_FLOW_SEQUENCE_ITEM_STATE // Expect an item of a flow sequence. 606 | yaml_EMIT_FLOW_MAPPING_FIRST_KEY_STATE // Expect the first key of a flow mapping. 607 | yaml_EMIT_FLOW_MAPPING_KEY_STATE // Expect a key of a flow mapping. 608 | yaml_EMIT_FLOW_MAPPING_SIMPLE_VALUE_STATE // Expect a value for a simple key of a flow mapping. 609 | yaml_EMIT_FLOW_MAPPING_VALUE_STATE // Expect a value of a flow mapping. 610 | yaml_EMIT_BLOCK_SEQUENCE_FIRST_ITEM_STATE // Expect the first item of a block sequence. 611 | yaml_EMIT_BLOCK_SEQUENCE_ITEM_STATE // Expect an item of a block sequence. 612 | yaml_EMIT_BLOCK_MAPPING_FIRST_KEY_STATE // Expect the first key of a block mapping. 613 | yaml_EMIT_BLOCK_MAPPING_KEY_STATE // Expect the key of a block mapping. 614 | yaml_EMIT_BLOCK_MAPPING_SIMPLE_VALUE_STATE // Expect a value for a simple key of a block mapping. 615 | yaml_EMIT_BLOCK_MAPPING_VALUE_STATE // Expect a value of a block mapping. 616 | yaml_EMIT_END_STATE // Expect nothing. 617 | ) 618 | 619 | // The emitter structure. 620 | // 621 | // All members are internal. Manage the structure using the @c yaml_emitter_ 622 | // family of functions. 623 | type yaml_emitter_t struct { 624 | 625 | // Error handling 626 | 627 | error yaml_error_type_t // Error type. 628 | problem string // Error description. 629 | 630 | // Writer stuff 631 | 632 | write_handler yaml_write_handler_t // Write handler. 633 | 634 | output_buffer *[]byte // String output data. 635 | output_file io.Writer // File output data. 636 | 637 | buffer []byte // The working buffer. 638 | buffer_pos int // The current position of the buffer. 639 | 640 | raw_buffer []byte // The raw buffer. 641 | raw_buffer_pos int // The current position of the buffer. 642 | 643 | encoding yaml_encoding_t // The stream encoding. 644 | 645 | // Emitter stuff 646 | 647 | canonical bool // If the output is in the canonical style? 648 | best_indent int // The number of indentation spaces. 649 | best_width int // The preferred width of the output lines. 650 | unicode bool // Allow unescaped non-ASCII characters? 651 | line_break yaml_break_t // The preferred line break. 652 | 653 | state yaml_emitter_state_t // The current emitter state. 654 | states []yaml_emitter_state_t // The stack of states. 655 | 656 | events []yaml_event_t // The event queue. 657 | events_head int // The head of the event queue. 658 | 659 | indents []int // The stack of indentation levels. 660 | 661 | tag_directives []yaml_tag_directive_t // The list of tag directives. 662 | 663 | indent int // The current indentation level. 
664 | 665 | flow_level int // The current flow level. 666 | 667 | root_context bool // Is it the document root context? 668 | sequence_context bool // Is it a sequence context? 669 | mapping_context bool // Is it a mapping context? 670 | simple_key_context bool // Is it a simple mapping key context? 671 | 672 | line int // The current line. 673 | column int // The current column. 674 | whitespace bool // If the last character was a whitespace? 675 | indention bool // If the last character was an indentation character (' ', '-', '?', ':')? 676 | open_ended bool // If an explicit document end is required? 677 | 678 | // Anchor analysis. 679 | anchor_data struct { 680 | anchor []byte // The anchor value. 681 | alias bool // Is it an alias? 682 | } 683 | 684 | // Tag analysis. 685 | tag_data struct { 686 | handle []byte // The tag handle. 687 | suffix []byte // The tag suffix. 688 | } 689 | 690 | // Scalar analysis. 691 | scalar_data struct { 692 | value []byte // The scalar value. 693 | multiline bool // Does the scalar contain line breaks? 694 | flow_plain_allowed bool // Can the scalar be expessed in the flow plain style? 695 | block_plain_allowed bool // Can the scalar be expressed in the block plain style? 696 | single_quoted_allowed bool // Can the scalar be expressed in the single quoted style? 697 | block_allowed bool // Can the scalar be expressed in the literal or folded styles? 698 | style yaml_scalar_style_t // The output style. 699 | } 700 | 701 | // Dumper stuff 702 | 703 | opened bool // If the stream was already opened? 704 | closed bool // If the stream was already closed? 705 | 706 | // The information associated with the document nodes. 707 | anchors *struct { 708 | references int // The number of references. 709 | anchor int // The anchor id. 710 | serialized bool // If the node has been emitted? 711 | } 712 | 713 | last_anchor_id int // The last assigned anchor id. 714 | 715 | document *yaml_document_t // The currently emitted document. 716 | } 717 | -------------------------------------------------------------------------------- /vendor/gopkg.in/yaml.v2/yamlprivateh.go: -------------------------------------------------------------------------------- 1 | package yaml 2 | 3 | const ( 4 | // The size of the input raw buffer. 5 | input_raw_buffer_size = 512 6 | 7 | // The size of the input buffer. 8 | // It should be possible to decode the whole raw buffer. 9 | input_buffer_size = input_raw_buffer_size * 3 10 | 11 | // The size of the output buffer. 12 | output_buffer_size = 128 13 | 14 | // The size of the output raw buffer. 15 | // It should be possible to encode the whole output buffer. 16 | output_raw_buffer_size = (output_buffer_size*2 + 2) 17 | 18 | // The size of other stacks and queues. 19 | initial_stack_size = 16 20 | initial_queue_size = 16 21 | initial_string_size = 16 22 | ) 23 | 24 | // Check if the character at the specified position is an alphabetical 25 | // character, a digit, '_', or '-'. 26 | func is_alpha(b []byte, i int) bool { 27 | return b[i] >= '0' && b[i] <= '9' || b[i] >= 'A' && b[i] <= 'Z' || b[i] >= 'a' && b[i] <= 'z' || b[i] == '_' || b[i] == '-' 28 | } 29 | 30 | // Check if the character at the specified position is a digit. 31 | func is_digit(b []byte, i int) bool { 32 | return b[i] >= '0' && b[i] <= '9' 33 | } 34 | 35 | // Get the value of a digit. 36 | func as_digit(b []byte, i int) int { 37 | return int(b[i]) - '0' 38 | } 39 | 40 | // Check if the character at the specified position is a hex-digit. 
41 | func is_hex(b []byte, i int) bool { 42 | return b[i] >= '0' && b[i] <= '9' || b[i] >= 'A' && b[i] <= 'F' || b[i] >= 'a' && b[i] <= 'f' 43 | } 44 | 45 | // Get the value of a hex-digit. 46 | func as_hex(b []byte, i int) int { 47 | bi := b[i] 48 | if bi >= 'A' && bi <= 'F' { 49 | return int(bi) - 'A' + 10 50 | } 51 | if bi >= 'a' && bi <= 'f' { 52 | return int(bi) - 'a' + 10 53 | } 54 | return int(bi) - '0' 55 | } 56 | 57 | // Check if the character is ASCII. 58 | func is_ascii(b []byte, i int) bool { 59 | return b[i] <= 0x7F 60 | } 61 | 62 | // Check if the character at the start of the buffer can be printed unescaped. 63 | func is_printable(b []byte, i int) bool { 64 | return ((b[i] == 0x0A) || // . == #x0A 65 | (b[i] >= 0x20 && b[i] <= 0x7E) || // #x20 <= . <= #x7E 66 | (b[i] == 0xC2 && b[i+1] >= 0xA0) || // #0xA0 <= . <= #xD7FF 67 | (b[i] > 0xC2 && b[i] < 0xED) || 68 | (b[i] == 0xED && b[i+1] < 0xA0) || 69 | (b[i] == 0xEE) || 70 | (b[i] == 0xEF && // #xE000 <= . <= #xFFFD 71 | !(b[i+1] == 0xBB && b[i+2] == 0xBF) && // && . != #xFEFF 72 | !(b[i+1] == 0xBF && (b[i+2] == 0xBE || b[i+2] == 0xBF)))) 73 | } 74 | 75 | // Check if the character at the specified position is NUL. 76 | func is_z(b []byte, i int) bool { 77 | return b[i] == 0x00 78 | } 79 | 80 | // Check if the beginning of the buffer is a BOM. 81 | func is_bom(b []byte, i int) bool { 82 | return b[0] == 0xEF && b[1] == 0xBB && b[2] == 0xBF 83 | } 84 | 85 | // Check if the character at the specified position is space. 86 | func is_space(b []byte, i int) bool { 87 | return b[i] == ' ' 88 | } 89 | 90 | // Check if the character at the specified position is tab. 91 | func is_tab(b []byte, i int) bool { 92 | return b[i] == '\t' 93 | } 94 | 95 | // Check if the character at the specified position is blank (space or tab). 96 | func is_blank(b []byte, i int) bool { 97 | //return is_space(b, i) || is_tab(b, i) 98 | return b[i] == ' ' || b[i] == '\t' 99 | } 100 | 101 | // Check if the character at the specified position is a line break. 102 | func is_break(b []byte, i int) bool { 103 | return (b[i] == '\r' || // CR (#xD) 104 | b[i] == '\n' || // LF (#xA) 105 | b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85) 106 | b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028) 107 | b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9) // PS (#x2029) 108 | } 109 | 110 | func is_crlf(b []byte, i int) bool { 111 | return b[i] == '\r' && b[i+1] == '\n' 112 | } 113 | 114 | // Check if the character is a line break or NUL. 115 | func is_breakz(b []byte, i int) bool { 116 | //return is_break(b, i) || is_z(b, i) 117 | return ( // is_break: 118 | b[i] == '\r' || // CR (#xD) 119 | b[i] == '\n' || // LF (#xA) 120 | b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85) 121 | b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028) 122 | b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9 || // PS (#x2029) 123 | // is_z: 124 | b[i] == 0) 125 | } 126 | 127 | // Check if the character is a line break, space, or NUL. 128 | func is_spacez(b []byte, i int) bool { 129 | //return is_space(b, i) || is_breakz(b, i) 130 | return ( // is_space: 131 | b[i] == ' ' || 132 | // is_breakz: 133 | b[i] == '\r' || // CR (#xD) 134 | b[i] == '\n' || // LF (#xA) 135 | b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85) 136 | b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028) 137 | b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9 || // PS (#x2029) 138 | b[i] == 0) 139 | } 140 | 141 | // Check if the character is a line break, space, tab, or NUL. 
142 | func is_blankz(b []byte, i int) bool { 143 | //return is_blank(b, i) || is_breakz(b, i) 144 | return ( // is_blank: 145 | b[i] == ' ' || b[i] == '\t' || 146 | // is_breakz: 147 | b[i] == '\r' || // CR (#xD) 148 | b[i] == '\n' || // LF (#xA) 149 | b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85) 150 | b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028) 151 | b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9 || // PS (#x2029) 152 | b[i] == 0) 153 | } 154 | 155 | // Determine the width of the character. 156 | func width(b byte) int { 157 | // Don't replace these by a switch without first 158 | // confirming that it is being inlined. 159 | if b&0x80 == 0x00 { 160 | return 1 161 | } 162 | if b&0xE0 == 0xC0 { 163 | return 2 164 | } 165 | if b&0xF0 == 0xE0 { 166 | return 3 167 | } 168 | if b&0xF8 == 0xF0 { 169 | return 4 170 | } 171 | return 0 172 | 173 | } 174 | -------------------------------------------------------------------------------- /vendor/vendor.json: -------------------------------------------------------------------------------- 1 | { 2 | "comment": "", 3 | "ignore": "test", 4 | "package": [ 5 | { 6 | "checksumSHA1": "ZqrAOPSXJz9yifSWZ62OcKnDUyk=", 7 | "path": "github.com/deckarep/golang-set", 8 | "revision": "1f0f4ff8d3fbef9328522993ce71c35890f67554", 9 | "revisionTime": "2016-02-20T23:57:50Z" 10 | }, 11 | { 12 | "checksumSHA1": "+OgOXBoiQ+X+C2dsAeiOHwBIEH0=", 13 | "path": "gopkg.in/yaml.v2", 14 | "revision": "a83829b6f1293c91addabc89d0571c246397bbf4", 15 | "revisionTime": "2016-03-01T20:40:22Z" 16 | } 17 | ], 18 | "rootPath": "github.com/nevins-b/falco2seccomp" 19 | } 20 | --------------------------------------------------------------------------------
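
---

**Editor's illustrative notes on the vendored yaml.v2 sources above** (sketches only; none of the types, field names, or sample inputs below exist in this repository — they are assumptions made up for illustration).

The struct-introspection code shown earlier (the field map/list construction, the `,inline` handling, and `isZero`, which backs `omitempty` during encoding) surfaces through the package's public `Marshal`/`Unmarshal` API. A minimal sketch, assuming hypothetical `Base` and `Profile` types:

```go
package main

import (
	"fmt"
	"log"

	"gopkg.in/yaml.v2"
)

// Base is a hypothetical type used only to illustrate the ",inline" option
// handled by the struct-introspection code above.
type Base struct {
	Name string `yaml:"name"`
}

// Profile illustrates "omitempty", which relies on the isZero helper above:
// zero-valued fields are skipped when the document is emitted.
type Profile struct {
	Base     `yaml:",inline"`
	Comment  string   `yaml:"comment,omitempty"`
	Syscalls []string `yaml:"syscalls,omitempty"`
}

func main() {
	p := Profile{Base: Base{Name: "example"}, Syscalls: []string{"read", "write"}}

	// Encoding: Comment is zero-valued, so it is omitted from the output.
	out, err := yaml.Marshal(&p)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Print(string(out))

	// Decoding: the inline fields of Base are read from the top level of the map.
	var back Profile
	if err := yaml.Unmarshal(out, &back); err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%+v\n", back)
}
```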
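The parser definitions in yamlh.go (in particular `yaml_mark_t` and the `problem`/`problem_mark` fields of `yaml_parser_t`) are what give the package its line-numbered error messages. A small sketch of how that shows up at the public API level, using made-up malformed inputs:

```go
package main

import (
	"fmt"

	"gopkg.in/yaml.v2"
)

func main() {
	// Malformed input: the scanner records a problem mark, and Unmarshal
	// reports it as an error that includes the offending line number.
	bad := []byte("key: [unterminated")
	var out map[string]interface{}
	if err := yaml.Unmarshal(bad, &out); err != nil {
		fmt.Println("parse error:", err)
	}

	// A type mismatch is reported as a *yaml.TypeError, also carrying line info.
	var n struct {
		Count int `yaml:"count"`
	}
	if err := yaml.Unmarshal([]byte("count: not-a-number"), &n); err != nil {
		if te, ok := err.(*yaml.TypeError); ok {
			fmt.Println("type error:", te.Errors)
		}
	}
}
```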
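Finally, the `width` helper at the end of yamlprivateh.go classifies UTF-8 sequence lengths from the leading byte (`0xxxxxxx` = 1 byte, `110xxxxx` = 2, `1110xxxx` = 3, `11110xxx` = 4). The sketch below restates that classification and cross-checks it against the standard library; the sample runes are arbitrary:

```go
package main

import (
	"fmt"
	"unicode/utf8"
)

// width mirrors the leading-byte masks used by the vendored helper above.
func width(b byte) int {
	switch {
	case b&0x80 == 0x00: // 0xxxxxxx: ASCII, 1 byte
		return 1
	case b&0xE0 == 0xC0: // 110xxxxx: 2-byte sequence
		return 2
	case b&0xF0 == 0xE0: // 1110xxxx: 3-byte sequence
		return 3
	case b&0xF8 == 0xF0: // 11110xxx: 4-byte sequence
		return 4
	}
	return 0 // continuation byte or invalid leading byte
}

func main() {
	for _, r := range []rune{'a', 'é', '世', '🙂'} {
		buf := make([]byte, 4)
		utf8.EncodeRune(buf, r)
		// Both values agree for well-formed input.
		fmt.Printf("%q: utf8.RuneLen=%d width(first byte)=%d\n", r, utf8.RuneLen(r), width(buf[0]))
	}
}
```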