├── .github
│   └── workflows
│       └── build.yml
├── .gitignore
├── LICENSE
├── Makefile
├── README.md
├── api.go
├── cache.go
├── cmd.go
├── config.go
├── db.go
├── duplicate-finder.cfg
├── duplicate-finder.yml
├── go.mod
├── go.sum
├── graphql.go
├── internal
│   └── plugin
│       ├── common
│       │   ├── doc.go
│       │   ├── log
│       │   │   └── log.go
│       │   ├── msg.go
│       │   └── rpc.go
│       └── util
│           └── client.go
├── matching.go
└── vendor
    ├── github.com
    │   ├── natefinch
    │   │   └── pie
    │   │       ├── .gitignore
    │   │       ├── LICENSE
    │   │       ├── README.md
    │   │       ├── doc.go
    │   │       └── pie.go
    │   ├── nfnt
    │   │   └── resize
    │   │       ├── .travis.yml
    │   │       ├── LICENSE
    │   │       ├── README.md
    │   │       ├── converter.go
    │   │       ├── filters.go
    │   │       ├── nearest.go
    │   │       ├── resize.go
    │   │       ├── thumbnail.go
    │   │       └── ycc.go
    │   ├── rivo
    │   │   └── duplo
    │   │       ├── LICENSE.txt
    │   │       ├── README.md
    │   │       ├── candidate.go
    │   │       ├── doc.go
    │   │       ├── haar
    │   │       │   └── haar.go
    │   │       ├── hamming.go
    │   │       ├── hash.go
    │   │       ├── match.go
    │   │       └── store.go
    │   └── shurcooL
    │       └── graphql
    │           ├── .travis.yml
    │           ├── LICENSE
    │           ├── README.md
    │           ├── doc.go
    │           ├── graphql.go
    │           ├── ident
    │           │   └── ident.go
    │           ├── internal
    │           │   └── jsonutil
    │           │       └── graphql.go
    │           ├── query.go
    │           └── scalar.go
    ├── golang.org
    │   └── x
    │       └── net
    │           ├── AUTHORS
    │           ├── CONTRIBUTORS
    │           ├── LICENSE
    │           ├── PATENTS
    │           └── context
    │               └── ctxhttp
    │                   └── ctxhttp.go
    ├── gopkg.in
    │   └── yaml.v2
    │       ├── .travis.yml
    │       ├── LICENSE
    │       ├── LICENSE.libyaml
    │       ├── NOTICE
    │       ├── README.md
    │       ├── apic.go
    │       ├── decode.go
    │       ├── emitterc.go
    │       ├── encode.go
    │       ├── go.mod
    │       ├── parserc.go
    │       ├── readerc.go
    │       ├── resolve.go
    │       ├── scannerc.go
    │       ├── sorter.go
    │       ├── writerc.go
    │       ├── yaml.go
    │       ├── yamlh.go
    │       └── yamlprivateh.go
    └── modules.txt
/.github/workflows/build.yml:
--------------------------------------------------------------------------------
1 | name: Build
2 |
3 | on:
4 | push:
5 | branches: [ master ]
6 | pull_request:
7 | branches: [ master ]
8 | release:
9 | types: [ published ]
10 |
11 | jobs:
12 | build:
13 | runs-on: ubuntu-20.04
14 | steps:
15 | - uses: actions/checkout@v2
16 |
17 | - name: Checkout
18 | run: git fetch --prune --unshallow --tags
19 |
20 | - name: Cross Compile
21 | run: make build-release-docker
22 |
23 | - name: Upload Windows release
24 | # only upload binaries for pull requests
25 | if: ${{ github.event_name == 'pull_request'}}
26 | uses: actions/upload-artifact@v2
27 | with:
28 | name: stash-plugin-duplicate-finder-win.tar
29 | path: dist/stash-plugin-duplicate-finder-win.tar
30 |
31 | - name: Upload OSX release
32 | # only upload binaries for pull requests
33 | if: ${{ github.event_name == 'pull_request'}}
34 | uses: actions/upload-artifact@v2
35 | with:
36 | name: stash-plugin-duplicate-finder-osx.tar
37 | path: dist/stash-plugin-duplicate-finder-osx.tar
38 |
39 | - name: Upload Linux release
40 | # only upload binaries for pull requests
41 | if: ${{ github.event_name == 'pull_request'}}
42 | uses: actions/upload-artifact@v2
43 | with:
44 | name: stash-plugin-duplicate-finder-linux.tar
45 | path: dist/stash-plugin-duplicate-finder-linux.tar
46 |
47 | - name: Master release
48 | if: ${{ github.event_name == 'release' }}
49 | uses: meeDamian/github-release@2.0
50 | with:
51 | token: "${{ secrets.GITHUB_TOKEN }}"
52 | allow_override: true
53 | files: |
54 | dist/stash-plugin-duplicate-finder-win.tar
55 | dist/stash-plugin-duplicate-finder-osx.tar
56 | dist/stash-plugin-duplicate-finder-linux.tar
57 | dist/stash-plugin-duplicate-finder-pi.tar
58 | gzip: false
59 |
60 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | dist
2 |
3 | *.exe
4 | stash-plugin-duplicate-finder
5 | .vscode
6 |
--------------------------------------------------------------------------------
/Makefile:
--------------------------------------------------------------------------------
1 | ifeq ($(OS), Windows_NT)
2 | EXT := .exe
3 | endif
4 |
5 | OUTPUT := plugin_duplicate_finder
6 |
7 | .PHONY: build pre-cross-compile cross-compile-win cross-compile-osx cross-compile-linux cross-compile-pi cross-compile-all cross-compile-docker
8 |
9 | build:
10 | go build -o $(OUTPUT)$(EXT)
11 |
12 | LDFLAGS := -ldflags "-extldflags '-static -s -w'"
13 | LDFLAGS_WIN := -ldflags "-extldflags '-static -s -w'"
14 |
15 | cross-compile-win:
16 | GOOS=windows GOARCH=amd64 CC=x86_64-w64-mingw32-gcc CXX=x86_64-w64-mingw32-g++ go build $(LDFLAGS_WIN) -mod=vendor -o "dist/win/$(OUTPUT).exe"
17 |
18 | cross-compile-osx:
19 | GOOS=darwin GOARCH=amd64 CC=o64-clang CXX=o64-clang++ go build $(LDFLAGS) -mod=vendor -o "dist/osx/$(OUTPUT)"
20 |
21 | cross-compile-linux:
22 | go build -tags "osusergo netgo" $(LDFLAGS) -mod=vendor -o "dist/linux/$(OUTPUT)"
23 |
24 | cross-compile-pi:
25 | GOOS=linux GOARCH=arm GOARM=5 CC=arm-linux-gnueabi-gcc go build -tags "osusergo netgo" $(LDFLAGS) -mod=vendor -o "dist/pi/$(OUTPUT)"
26 |
27 | cross-compile-all: cross-compile-win cross-compile-osx cross-compile-linux cross-compile-pi
28 |
29 | pre-docker:
30 | docker pull stashapp/compiler:develop
31 |
32 | RELEASE_INC := duplicate-finder.yml duplicate-finder.cfg
33 |
34 | build-release-win: cross-compile-win
35 | tar -cf dist/stash-plugin-duplicate-finder-win.tar -C dist/win $(OUTPUT).exe
36 | tar -rf dist/stash-plugin-duplicate-finder-win.tar $(RELEASE_INC)
37 |
38 | build-release-osx: cross-compile-osx
39 | tar -cf dist/stash-plugin-duplicate-finder-osx.tar -C dist/osx $(OUTPUT)
40 | tar -rf dist/stash-plugin-duplicate-finder-osx.tar $(RELEASE_INC)
41 |
42 | build-release-linux: cross-compile-linux
43 | tar -cf dist/stash-plugin-duplicate-finder-linux.tar -C dist/linux $(OUTPUT)
44 | tar -rf dist/stash-plugin-duplicate-finder-linux.tar $(RELEASE_INC)
45 |
46 | build-release-pi: cross-compile-pi
47 | tar -cf dist/stash-plugin-duplicate-finder-pi.tar -C dist/pi $(OUTPUT)
48 | tar -rf dist/stash-plugin-duplicate-finder-pi.tar $(RELEASE_INC)
49 |
50 | RUN_DOCKER := docker run --rm --mount type=bind,source="$(shell pwd)",target=/stash -w /stash stashapp/compiler:4 /bin/bash -c
51 |
52 | cross-compile-docker:
53 | $(RUN_DOCKER) "make cross-compile-all"
54 |
55 | build-release-docker:
56 | $(RUN_DOCKER) "make build-release-win build-release-osx build-release-linux build-release-pi"
57 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # Stash plugin: Duplicate finder
2 |
3 | This is a plugin for stash. It adds a `Find duplicate scenes` task. This task processes the vtt sprite files in your stash library, computing a perceptual hash for each. Any duplicate scenes that it detects are output in the plugin log.
4 |
5 | Optionally, it can tag duplicate scenes with an (existing) tag, and it can populate the details field of each duplicate scene with the ids of its duplicates.
6 |
7 | # How to use
8 |
9 | Untar the release for your platform into your stash `plugins` directory and reload plugins (or restart stash). A new task should appear on the Tasks page.
10 |
11 | A documented default configuration file is included.
12 |
13 | *NOTE:* the plugin uses the sprite files to find duplicates. This means that if you remove a file from your stash library but do not remove the generated files (specifically the generated sprite file), then the plugin will continue to use the sprite file for duplicate detection.
14 |
15 | # How to build
16 |
17 | - `make build` - builds the plugin executable for your platform
18 | - `make build-release-docker` - performs cross compilation in the `stashapp/compiler:develop` docker image and builds release tars
19 |
20 | # Command-line mode
21 |
22 | Command-line mode can be run by providing the sprite directory as a command-line parameter. In this mode, the plugin outputs a `duplicates.csv` file containing matching checksums with the match score. It is intended for debugging and for fine-tuning the sensitivity. Execution can be stopped safely by touching a `.stop` file in the current working directory.
23 |
--------------------------------------------------------------------------------
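As a rough illustration of the flow the README describes, the sketch below hashes a couple of sprite images with the vendored rivo/duplo library, queries the store, and reports close matches. The file names and threshold here are placeholders; the real logic (hash-store persistence, stale-match cleanup, GraphQL updates) lives in api.go, db.go and graphql.go below.

```go
package main

import (
	"fmt"
	"image/jpeg"
	"os"

	"github.com/rivo/duplo"
)

func main() {
	// Hypothetical checksum -> sprite file mapping; the plugin derives these
	// from stash's generated "vtt" directory and the <checksum>_sprite.jpg names.
	sprites := map[string]string{
		"aaa": "aaa_sprite.jpg",
		"bbb": "bbb_sprite.jpg",
	}

	store := duplo.New()
	threshold := 50.0 // default from duplicate-finder.cfg

	for checksum, path := range sprites {
		f, err := os.Open(path)
		if err != nil {
			continue
		}
		img, err := jpeg.Decode(f)
		f.Close()
		if err != nil {
			continue
		}

		// Perceptual hash of the sprite image.
		hash, _ := duplo.CreateHash(img)

		// Query existing hashes; lower (more negative) scores mean closer matches.
		for _, m := range store.Query(hash) {
			if m.ID != checksum && m.Score <= -threshold {
				fmt.Printf("%s looks like a duplicate of %s (score %.f)\n", checksum, m.ID, -m.Score)
			}
		}

		store.Add(checksum, hash)
	}
}
```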
/api.go:
--------------------------------------------------------------------------------
1 | package main
2 |
3 | import (
4 | "fmt"
5 | "io/ioutil"
6 | "os"
7 | "path/filepath"
8 | "regexp"
9 | "runtime/debug"
10 | "strings"
11 |
12 | "stash-plugin-duplicate-finder/internal/plugin/common"
13 | "stash-plugin-duplicate-finder/internal/plugin/common/log"
14 | "stash-plugin-duplicate-finder/internal/plugin/util"
15 |
16 | "github.com/rivo/duplo"
17 | "github.com/shurcooL/graphql"
18 | )
19 |
20 | const spriteSuffix = "_sprite.jpg"
21 |
22 | type api struct {
23 | stopping bool
24 | cfg config
25 | client *graphql.Client
26 | cache *sceneCache
27 | duplicateTagID *graphql.ID
28 | }
29 |
30 | func main() {
31 | if len(os.Args) > 1 {
32 | cmdMain()
33 | return
34 | }
35 |
36 | // serves the plugin, providing an object that satisfies the
37 | // common.RPCRunner interface
38 | err := common.ServePlugin(&api{})
39 | if err != nil {
40 | panic(err)
41 | }
42 | }
43 |
44 | func (a *api) Stop(input struct{}, output *bool) error {
45 | log.Info("Stopping...")
46 | a.stopping = true
47 | *output = true
48 | return nil
49 | }
50 |
51 | // Run is the main work function of the plugin. It interprets the input and
52 | // acts accordingly.
53 | func (a *api) Run(input common.PluginInput, output *common.PluginOutput) error {
54 | err := a.runImpl(input)
55 |
56 | if err != nil {
57 | errStr := err.Error()
58 | *output = common.PluginOutput{
59 | Error: &errStr,
60 | }
61 | return nil
62 | }
63 |
64 | outputStr := "ok"
65 | *output = common.PluginOutput{
66 | Output: &outputStr,
67 | }
68 |
69 | return nil
70 | }
71 |
72 | func (a *api) runImpl(input common.PluginInput) (err error) {
73 | defer func() {
74 | // handle panic
75 | if r := recover(); r != nil {
76 | err = fmt.Errorf("panic: %v\nstacktrace: %s", r, string(debug.Stack()))
77 | }
78 | }()
79 |
80 | pluginDir := input.ServerConnection.PluginDir
81 | cfg, err := readConfig(filepath.Join(pluginDir, "duplicate-finder.cfg"))
82 | if err != nil {
83 | return fmt.Errorf("error reading configuration file: %s", err.Error())
84 | }
85 |
86 | a.cfg = *cfg
87 | if !filepath.IsAbs(a.cfg.DBFilename) {
88 | a.cfg.DBFilename = filepath.Join(pluginDir, a.cfg.DBFilename)
89 | }
90 |
91 | // HACK - get the server address from the server config file
92 | serverCfg, err := readServerConfig(filepath.Join(input.ServerConnection.Dir, "config.yml"))
93 | if err != nil {
94 | return fmt.Errorf("error reading server configuration file: %s", err.Error())
95 | }
96 |
97 | a.client = util.NewClient(input.ServerConnection, serverCfg.Host)
98 | a.cache = newSceneCache(a.client)
99 |
100 | if cfg.AddTagName != "" {
101 | tagID, err := getDuplicateTagId(a.client, cfg.AddTagName)
102 | if err != nil {
103 | return err
104 | }
105 |
106 | if tagID == nil {
107 | return fmt.Errorf("could not find tag with name %s", cfg.AddTagName)
108 | }
109 |
110 | a.duplicateTagID = tagID
111 | log.Debugf("Duplicate tag id = %v", *a.duplicateTagID)
112 | }
113 |
114 | // find where the generated sprite files are stored
115 | path, err := getSpriteDir(a.client)
116 | if err != nil {
117 | return err
118 | }
119 |
120 | log.Debugf("Sprite directory is: %s", path)
121 |
122 | log.Info("Processing files for perceptual hashes...")
123 | m := make(matchInfoMap)
124 | foundDupes := 0
125 |
126 | hdFunc := func(checksum string, matches duplo.Matches) {
127 | if len(matches) > 0 {
128 | foundDupes++
129 | for _, match := range matches {
130 | m.add(checksum, match.ID.(string), match.Score)
131 | a.logDuplicate(checksum, match)
132 | a.handleDuplicate(m, checksum, true)
133 | }
134 | }
135 | }
136 |
137 | err = a.processFiles(path, hdFunc)
138 | if err != nil {
139 | return err
140 | }
141 |
142 | log.Infof("Found %d duplicate scenes", foundDupes)
143 | return nil
144 | }
145 |
146 | type handleDuplicatesFunc func(checksum string, matches duplo.Matches)
147 |
148 | func (a *api) processFiles(path string, hdFunc handleDuplicatesFunc) error {
149 | files, err := ioutil.ReadDir(path)
150 | if err != nil {
151 | return err
152 | }
153 |
154 | // read the store
155 | store := duplo.New()
156 | readDB(store, a.cfg.DBFilename)
157 | total := len(files)
158 |
159 | for i, f := range files {
160 | if a.stopping {
161 | break
162 | }
163 |
164 | log.Progress(float64(i) / float64(total))
165 |
166 | fn := filepath.Join(path, f.Name())
167 | if err := a.processFile(fn, store, hdFunc); err != nil {
168 | log.Errorf("Error processing file %s: %s", f.Name(), err.Error())
169 | }
170 | }
171 |
172 | storeDB(store, a.cfg.DBFilename)
173 |
174 | return nil
175 | }
176 |
177 | func (a *api) processFile(fn string, store *duplo.Store, hdFunc handleDuplicatesFunc) error {
178 | if !isSpriteFile(fn) {
179 | return nil
180 | }
181 |
182 | checksum := getChecksum(fn)
183 | existing := store.Has(checksum)
184 | if existing && a.cfg.NewOnly {
185 | return nil
186 | }
187 |
188 | hash, err := getImageHash(fn)
189 | if err != nil {
190 | return err
191 | }
192 |
193 | matches := getHashMatches(store, checksum, *hash, a.cfg.Threshold)
194 |
195 | // remove any matches that no longer exist
196 | var filteredMatches duplo.Matches
197 | path := filepath.Dir(fn)
198 | for _, m := range matches {
199 | dupeSprite := getSpriteFilename(path, m.ID.(string))
200 | if _, err := os.Stat(dupeSprite); os.IsNotExist(err) {
201 | store.Delete(m.ID)
202 | } else {
203 | filteredMatches = append(filteredMatches, m)
204 | }
205 | }
206 |
207 | hdFunc(checksum, filteredMatches)
208 |
209 | if !existing {
210 | store.Add(checksum, *hash)
211 | }
212 |
213 | return nil
214 | }
215 |
216 | func (a *api) logDuplicate(checksum string, match *duplo.Match) {
217 | subject, err := a.cache.get(checksum)
218 | if err != nil {
219 | log.Errorf("error getting scene with checksum %s: %s", checksum, err.Error())
220 | return
221 | }
222 |
223 | s, err := a.cache.get(match.ID.(string))
224 | if err != nil {
225 | log.Errorf("error getting scene with checksum %s: %s", match.ID.(string), err.Error())
226 | return
227 | }
228 |
229 | log.Infof("Duplicate: %s - %s (score: %.f)", subject.ID, s.ID, -match.Score)
230 | }
231 |
232 | func (a *api) handleDuplicate(m matchInfoMap, checksum string, recurse bool) {
233 | matches := m[checksum]
234 | subject, err := a.cache.get(checksum)
235 | if err != nil {
236 | log.Errorf("error getting scene with checksum %s: %s", checksum, err.Error())
237 | return
238 | }
239 |
240 | newDetails := "=== Duplicate finder plugin ==="
241 | for _, match := range matches {
242 | s, err := a.cache.get(match.other)
243 | if err != nil {
244 | 			log.Errorf("error getting scene with checksum %s: %s", match.other, err.Error())
245 | continue
246 | }
247 |
248 | newDetails += fmt.Sprintf("\nDuplicate ID: %s (score: %.f)", s.ID, -match.score)
249 |
250 | if recurse {
251 | a.handleDuplicate(m, match.other, false)
252 | }
253 | }
254 | newDetails += "\n=== End Duplicate finder plugin ==="
255 |
256 | if a.cfg.AddDetails || a.duplicateTagID != nil {
257 | details := ""
258 | if subject.Details != nil {
259 | details = string(*subject.Details)
260 | }
261 |
262 | if a.cfg.AddDetails {
263 | newDetails = addDuplicateDetails(details, newDetails)
264 | } else {
265 | newDetails = string(details)
266 | }
267 |
268 | err = updateScene(a.client, *subject, newDetails, a.duplicateTagID)
269 | if err != nil {
270 | log.Errorf("Error updating scene: %s", err.Error())
271 | }
272 | }
273 | }
274 |
275 | func addDuplicateDetails(origDetails, newDetails string) string {
276 | re := regexp.MustCompile("(?s)=== Duplicate finder plugin ===.*=== End Duplicate finder plugin ===")
277 | found := re.FindStringIndex(origDetails)
278 | if found == nil {
279 | if len(origDetails) > 0 {
280 | return origDetails + "\n" + newDetails
281 | }
282 |
283 | return newDetails
284 | }
285 |
286 | // replace existing
287 | return re.ReplaceAllString(origDetails, newDetails)
288 | }
289 |
290 | func isSpriteFile(fn string) bool {
291 | return strings.HasSuffix(fn, spriteSuffix)
292 | }
293 |
294 | func getChecksum(fn string) string {
295 | baseName := filepath.Base(fn)
296 | return strings.Replace(baseName, spriteSuffix, "", -1)
297 | }
298 |
299 | func getSpriteFilename(path, checksum string) string {
300 | return filepath.Join(path, checksum+spriteSuffix)
301 | }
302 |
--------------------------------------------------------------------------------
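The details-field handling above is worth a concrete example: addDuplicateDetails keeps any user-written text and replaces only the block between the plugin markers, so repeated runs do not pile up duplicate blocks. A hypothetical test in the same package could check this:

```go
package main

import "testing"

// Hypothetical test (same package as api.go) sketching the marker-replacement
// behaviour of addDuplicateDetails.
func TestAddDuplicateDetailsReplacesExistingBlock(t *testing.T) {
	orig := "my notes\n" +
		"=== Duplicate finder plugin ===\nDuplicate ID: 1 (score: 60)\n=== End Duplicate finder plugin ==="
	updated := addDuplicateDetails(orig,
		"=== Duplicate finder plugin ===\nDuplicate ID: 2 (score: 70)\n=== End Duplicate finder plugin ===")

	want := "my notes\n" +
		"=== Duplicate finder plugin ===\nDuplicate ID: 2 (score: 70)\n=== End Duplicate finder plugin ==="
	if updated != want {
		t.Fatalf("unexpected details:\n%s", updated)
	}
}
```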
/cache.go:
--------------------------------------------------------------------------------
1 | package main
2 |
3 | import (
4 | "fmt"
5 |
6 | "github.com/shurcooL/graphql"
7 | )
8 |
9 | type sceneCache struct {
10 | scenes map[string]*Scene
11 | client *graphql.Client
12 | }
13 |
14 | func newSceneCache(client *graphql.Client) *sceneCache {
15 | return &sceneCache{
16 | scenes: make(map[string]*Scene),
17 | client: client,
18 | }
19 | }
20 |
21 | func (c *sceneCache) get(hash string) (*Scene, error) {
22 | if c.scenes[hash] != nil {
23 | return c.scenes[hash], nil
24 | }
25 |
26 | var ret *Scene
27 | var err error
28 | if len(hash) == 32 {
29 | ret, err = findSceneFromChecksum(c.client, hash)
30 | if err != nil {
31 | return nil, err
32 | }
33 | } else if len(hash) == 16 {
34 | ret, err = findSceneFromOshash(c.client, hash)
35 | if err != nil {
36 | return nil, err
37 | }
38 | }
39 |
40 | if ret == nil {
41 | return nil, fmt.Errorf("scene with hash %s is nil", hash)
42 | }
43 |
44 | c.scenes[hash] = ret
45 | return ret, nil
46 | }
47 |
--------------------------------------------------------------------------------
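sceneCache.get picks the GraphQL lookup by hash length: 32 hexadecimal characters are treated as an MD5 checksum and 16 as an oshash. A tiny, self-contained illustration (the hash values are made up):

```go
package main

import "fmt"

func main() {
	// Hypothetical hashes showing the length heuristic used by sceneCache.get.
	md5sum := "9e107d9d372bb6826bd81d3542a419d6" // 32 chars -> findSceneFromChecksum
	oshash := "a1b2c3d4e5f60718"                 // 16 chars -> findSceneFromOshash
	fmt.Println(len(md5sum), len(oshash))        // 32 16
}
```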
/cmd.go:
--------------------------------------------------------------------------------
1 | package main
2 |
3 | import (
4 | "fmt"
5 | "os"
6 | "time"
7 |
8 | "github.com/rivo/duplo"
9 | )
10 |
11 | func cmdMain() {
12 | // default is to accept sprite directory and output csv of all matches
13 | path := os.Args[1]
14 |
15 | fmt.Fprintln(os.Stderr, "Outputting duplicates to csv")
16 |
17 | f, err := os.Create("duplicates.csv")
18 | if err != nil {
19 | panic(err)
20 | }
21 | defer f.Close()
22 |
23 | 	// write matches to the csv file and echo them to stdout
24 | a := api{}
25 | a.cfg.DBFilename = "df-hashstore.db"
26 | hdFunc := func(checksum string, matches duplo.Matches) {
27 | if len(matches) > 0 {
28 | match := matches[0]
29 | fmt.Fprintf(f, "%s,%s,%.f\n", checksum, match.ID.(string), -match.Score)
30 | fmt.Printf("%s - %s [%.f]\n", checksum, match.ID.(string), -match.Score)
31 | }
32 | }
33 |
34 | c := make(chan bool, 1)
35 |
36 | go func() {
37 | err = a.processFiles(path, hdFunc)
38 | if err != nil {
39 | panic(err)
40 | }
41 | c <- true
42 | }()
43 |
44 | for {
45 | select {
46 | case <-c:
47 | return
48 | default:
49 | _, err := os.Stat(".stop")
50 | if err == nil {
51 | a.stopping = true
52 | }
53 | time.Sleep(5 * time.Second)
54 | }
55 | }
56 | }
57 |
--------------------------------------------------------------------------------
/config.go:
--------------------------------------------------------------------------------
1 | package main
2 |
3 | import (
4 | "os"
5 |
6 | "gopkg.in/yaml.v2"
7 | )
8 |
9 | type config struct {
10 | DBFilename string `yaml:"db_filename"`
11 | Threshold int `yaml:"threshold"`
12 | AddTagName string `yaml:"add_tag_name"`
13 | AddDetails bool `yaml:"add_details"`
14 | NewOnly bool `yaml:"new_only"`
15 | }
16 |
17 | func readConfig(fn string) (*config, error) {
18 | ret := &config{
19 | DBFilename: "df-hashstore.db",
20 | Threshold: 50,
21 | }
22 |
23 | _, err := os.Stat(fn)
24 | if err != nil {
25 | if os.IsNotExist(err) {
26 | // just return default config
27 | return ret, nil
28 | }
29 |
30 | return nil, err
31 | }
32 |
33 | 	file, err := os.Open(fn)
34 | 	if err != nil {
35 | 		return nil, err
36 | 	}
37 | 	defer file.Close()
38 | parser := yaml.NewDecoder(file)
39 | parser.SetStrict(true)
40 | err = parser.Decode(&ret)
41 | if err != nil {
42 | return nil, err
43 | }
44 |
45 | return ret, nil
46 | }
47 |
48 | // HACK - read the host from the server config - this should be provided
49 | // by the server itself
50 | type serverConfig struct {
51 | Host string `yaml:"host"`
52 | }
53 |
54 | func readServerConfig(fn string) (*serverConfig, error) {
55 | ret := &serverConfig{}
56 |
57 | 	file, err := os.Open(fn)
58 | 	if err != nil {
59 | 		return nil, err
60 | 	}
61 | 	defer file.Close()
62 | parser := yaml.NewDecoder(file)
63 | if err := parser.Decode(&ret); err != nil {
64 | return nil, err
65 | }
66 |
67 | return ret, nil
68 | }
69 |
--------------------------------------------------------------------------------
/db.go:
--------------------------------------------------------------------------------
1 | package main
2 |
3 | import (
4 | "image/jpeg"
5 | "io/ioutil"
6 | "os"
7 | "sort"
8 |
9 | "stash-plugin-duplicate-finder/internal/plugin/common/log"
10 |
11 | "github.com/rivo/duplo"
12 | )
13 |
14 | func storeDB(store *duplo.Store, filename string) error {
15 | data, err := store.GobEncode()
16 | if err != nil {
17 | return err
18 | }
19 |
20 | err = ioutil.WriteFile(filename, data, 0644)
21 | if err != nil {
22 | return err
23 | }
24 |
25 | return nil
26 | }
27 |
28 | func readDB(store *duplo.Store, filename string) error {
29 | 	data, err := ioutil.ReadFile(filename)
30 | 	if err != nil {
31 | 		// no existing db file - start from scratch with an empty store
32 | 		log.Info("Assuming no existing db file. Starting from scratch...")
33 | 		return nil
34 | 	}
35 | err = store.GobDecode(data)
36 | if err != nil {
37 | return err
38 | }
39 |
40 | log.Infof("Read store from file: %d hashes loaded", store.Size())
41 | return nil
42 | }
43 |
44 | func getImageHash(fn string) (*duplo.Hash, error) {
45 | f, err := os.Open(fn)
46 | if err != nil {
47 | return nil, err
48 | 	}
49 | 	defer f.Close()
50 | img, err := jpeg.Decode(f)
51 | if err != nil {
52 | return nil, err
53 | }
54 |
55 | hash, _ := duplo.CreateHash(img)
56 | return &hash, nil
57 | }
58 |
59 | func getHashMatches(store *duplo.Store, checksum string, hash duplo.Hash, threshold int) duplo.Matches {
60 | ret := duplo.Matches{}
61 |
62 | matches := store.Query(hash)
63 | sort.Sort(matches)
64 |
65 | for _, m := range matches {
66 | // exclude same id
67 | if checksum == m.ID {
68 | continue
69 | }
70 |
71 | if m.Score <= float64(-threshold) {
72 | ret = append(ret, m)
73 | }
74 | }
75 |
76 | return ret
77 | }
78 |
--------------------------------------------------------------------------------
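The scoring in getHashMatches can be confusing at first glance: the code treats lower (more negative) duplo scores as closer matches, keeps anything at or below -threshold, and reports the negated value as a positive score. A small sketch of the same predicate with made-up scores:

```go
package main

import "fmt"

func main() {
	// Hypothetical scores as returned by store.Query for a single hash.
	scores := []float64{-12.4, -63.2, -77.9}
	threshold := 50 // default from duplicate-finder.cfg

	for _, s := range scores {
		// Same predicate as getHashMatches: keep matches at or below -threshold.
		if s <= float64(-threshold) {
			fmt.Printf("kept, reported as score %.f\n", -s) // 63 and 78 are kept
		}
	}
}
```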
/duplicate-finder.cfg:
--------------------------------------------------------------------------------
1 | # filename of the image hash database. Default is shown. If not absolute, then
2 | # path is relative to the directory containing the plugin yml file
3 | db_filename: df-hashstore.db
4 |
5 | # threshold for image matches. Default is shown. Lower values may result in
6 | # more (and possibly more false positive) duplicate results. Higher values
7 | # will make matching more stringent.
8 | threshold: 50
9 |
10 | # if present, tags duplicate scenes with the named tag. The tag must already
11 | # exist in the system
12 | # add_tag_name: duplicate
13 |
14 | # if true, adds the ids of duplicate scenes to the details field of each duplicate scene
15 | add_details: false
16 |
17 | # if true, only check files that are not already stored in the image hash database.
18 | new_only: false
19 |
--------------------------------------------------------------------------------
/duplicate-finder.yml:
--------------------------------------------------------------------------------
1 | name: Duplicate File Finder
2 | description: Finds perceptually duplicate scenes using sprite files.
3 | version: 0.1.5
4 | url: https://github.com/WithoutPants/stash-plugin-duplicate-finder
5 | exec:
6 | - plugin_duplicate_finder
7 | interface: rpc
8 | tasks:
9 | - name: Find duplicate scenes
10 | description: Finds perceptually duplicate scenes
11 |
--------------------------------------------------------------------------------
/go.mod:
--------------------------------------------------------------------------------
1 | module stash-plugin-duplicate-finder
2 |
3 | go 1.11
4 |
5 | require (
6 | github.com/natefinch/pie v0.0.0-20170715172608-9a0d72014007
7 | github.com/nfnt/resize v0.0.0-20180221191011-83c6a9932646 // indirect
8 | github.com/rivo/duplo v0.0.0-20180323201418-c4ec823d58cd
9 | github.com/shurcooL/graphql v0.0.0-20181231061246-d48a9a75455f
10 | golang.org/x/net v0.0.0-20200707034311-ab3426394381 // indirect
11 | gopkg.in/yaml.v2 v2.3.0
12 | )
13 |
--------------------------------------------------------------------------------
/graphql.go:
--------------------------------------------------------------------------------
1 | package main
2 |
3 | import (
4 | "context"
5 | "fmt"
6 | "path/filepath"
7 |
8 | "github.com/shurcooL/graphql"
9 | )
10 |
11 | type Tag struct {
12 | ID graphql.ID `graphql:"id"`
13 | Name graphql.String `graphql:"name"`
14 | }
15 |
16 | type Scene struct {
17 | ID graphql.ID
18 | Title *graphql.String
19 | Path graphql.String
20 | Details *graphql.String
21 | Tags []Tag
22 | }
23 |
24 | func (s Scene) getTagIds() []graphql.ID {
25 | ret := []graphql.ID{}
26 |
27 | for _, t := range s.Tags {
28 | ret = append(ret, t.ID)
29 | }
30 |
31 | return ret
32 | }
33 |
34 | type ConfigGeneralResult struct {
35 | GeneratedPath graphql.String `graphql:"generatedPath"`
36 | }
37 |
38 | type ConfigResult struct {
39 | General ConfigGeneralResult `graphql:"general"`
40 | }
41 |
42 | func getSpriteDir(client *graphql.Client) (string, error) {
43 | var m struct {
44 | Configuration *ConfigResult `graphql:"configuration"`
45 | }
46 |
47 | err := client.Query(context.Background(), &m, nil)
48 | if err != nil {
49 | return "", fmt.Errorf("Error getting sprite directory from configuration: %s", err.Error())
50 | }
51 |
52 | ret := filepath.Join(string(m.Configuration.General.GeneratedPath), "vtt")
53 | return ret, nil
54 | }
55 |
56 | func addTagId(tagIds []graphql.ID, tagId graphql.ID) []graphql.ID {
57 | for _, t := range tagIds {
58 | if t == tagId {
59 | return tagIds
60 | }
61 | }
62 |
63 | tagIds = append(tagIds, tagId)
64 | return tagIds
65 | }
66 |
67 | func findSceneFromChecksum(client *graphql.Client, checksum string) (*Scene, error) {
68 | var m struct {
69 | FindScene *Scene `graphql:"findScene(checksum: $c)"`
70 | }
71 |
72 | vars := map[string]interface{}{
73 | "c": graphql.String(checksum),
74 | }
75 |
76 | err := client.Query(context.Background(), &m, vars)
77 | if err != nil {
78 | return nil, err
79 | }
80 |
81 | return m.FindScene, nil
82 | }
83 |
84 | type SceneHashInput struct {
85 | Oshash *graphql.String `graphql:"oshash" json:"oshash"`
86 | }
87 |
88 | func findSceneFromOshash(client *graphql.Client, oshash string) (*Scene, error) {
89 | var m struct {
90 | FindScene *Scene `graphql:"findSceneByHash(input: $i)"`
91 | }
92 |
93 | input := SceneHashInput{
94 | Oshash: graphql.NewString(graphql.String(oshash)),
95 | }
96 |
97 | vars := map[string]interface{}{
98 | "i": input,
99 | }
100 |
101 | err := client.Query(context.Background(), &m, vars)
102 | if err != nil {
103 | return nil, err
104 | }
105 |
106 | return m.FindScene, nil
107 | }
108 |
109 | type SceneUpdate struct {
110 | ID graphql.ID `graphql:"id"`
111 | }
112 |
113 | type BulkUpdateIds struct {
114 | IDs []graphql.ID `graphql:"ids" json:"ids"`
115 | Mode graphql.String `graphql:"mode" json:"mode"`
116 | }
117 |
118 | func updateScene(client *graphql.Client, s Scene, details string, duplicateTagID *graphql.ID) error {
119 | // use BulkSceneUpdateInput since sceneUpdate requires performers, etc.
120 | var m struct {
121 | SceneUpdate []SceneUpdate `graphql:"bulkSceneUpdate(input: {ids: $ids, details: $details, tag_ids: $tag_ids})"`
122 | }
123 |
124 | ids := []graphql.ID{s.ID}
125 | detailsInput := graphql.String(details)
126 | tagIds := &BulkUpdateIds{}
127 |
128 | if duplicateTagID != nil {
129 | tagIds.Mode = "ADD"
130 | tagIds.IDs = addTagId(tagIds.IDs, *duplicateTagID)
131 | }
132 |
133 | vars := map[string]interface{}{
134 | "ids": ids,
135 | "details": detailsInput,
136 | "tag_ids": tagIds,
137 | }
138 |
139 | err := client.Mutate(context.Background(), &m, vars)
140 | if err != nil {
141 | return err
142 | }
143 |
144 | return nil
145 | }
146 |
147 | func getDuplicateTagId(client *graphql.Client, tagName string) (*graphql.ID, error) {
148 | var m struct {
149 | AllTags []Tag `graphql:"allTags"`
150 | }
151 |
152 | err := client.Query(context.Background(), &m, nil)
153 | if err != nil {
154 | fmt.Printf("Error getting tags: %s\n", err.Error())
155 | return nil, err
156 | }
157 |
158 | for _, t := range m.AllTags {
159 | if string(t.Name) == tagName {
160 | id := t.ID
161 | return &id, nil
162 | }
163 | }
164 |
165 | 	return nil, nil
166 | }
167 |
--------------------------------------------------------------------------------
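The queries above are built by shurcooL/graphql from struct shapes and `graphql:"..."` tags rather than from hand-written query strings. A minimal sketch of the same pattern as getDuplicateTagId, using a hypothetical endpoint (in the plugin the client comes from util.NewClient):

```go
package main

import (
	"context"
	"fmt"
	"net/http"

	"github.com/shurcooL/graphql"
)

type Tag struct {
	ID   graphql.ID     `graphql:"id"`
	Name graphql.String `graphql:"name"`
}

func main() {
	// Hypothetical endpoint; the plugin builds its client from the stash
	// server connection details instead.
	client := graphql.NewClient("http://localhost:9999/graphql", http.DefaultClient)

	// The struct shape drives the generated document, roughly:
	//   query { allTags { id name } }
	var q struct {
		AllTags []Tag `graphql:"allTags"`
	}
	if err := client.Query(context.Background(), &q, nil); err != nil {
		fmt.Println("query failed:", err)
		return
	}
	for _, t := range q.AllTags {
		fmt.Println(t.ID, t.Name)
	}
}
```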
/internal/plugin/common/doc.go:
--------------------------------------------------------------------------------
1 | // Package common encapsulates data structures and functions that will be used
2 | // by plugin executables and the plugin subsystem in the stash server.
3 | package common
4 |
--------------------------------------------------------------------------------
/internal/plugin/common/log/log.go:
--------------------------------------------------------------------------------
1 | // Package log provides a number of logging utility functions for encoding and
2 | // decoding log messages between a stash server and a plugin instance.
3 | //
4 | // Log messages sent from a plugin instance are transmitted via stderr and are
5 | // encoded with a prefix consisting of special character SOH, then the log
6 | // level (one of t, d, i, w, e, or p - corresponding to trace, debug, info,
7 | // warning, error and progress levels respectively), then special character
8 | // STX.
9 | //
10 | // The Trace, Debug, Info, Warning, and Error methods, and their equivalent
11 | // formatted methods are intended for use by plugin instances to transmit log
12 | // messages. The Progress method is also intended for sending progress data.
13 | //
14 | // Conversely, LevelFromName and DetectLogLevel are intended for use by the
15 | // stash server.
16 | package log
17 |
18 | import (
19 | "fmt"
20 | "math"
21 | "os"
22 | "strings"
23 | )
24 |
25 | // Level represents a logging level for plugin outputs.
26 | type Level struct {
27 | char byte
28 | Name string
29 | }
30 |
31 | // Valid Level values.
32 | var (
33 | TraceLevel = Level{
34 | char: 't',
35 | Name: "trace",
36 | }
37 | DebugLevel = Level{
38 | char: 'd',
39 | Name: "debug",
40 | }
41 | InfoLevel = Level{
42 | char: 'i',
43 | Name: "info",
44 | }
45 | WarningLevel = Level{
46 | char: 'w',
47 | Name: "warning",
48 | }
49 | ErrorLevel = Level{
50 | char: 'e',
51 | Name: "error",
52 | }
53 | ProgressLevel = Level{
54 | char: 'p',
55 | }
56 | NoneLevel = Level{
57 | Name: "none",
58 | }
59 | )
60 |
61 | var validLevels = []Level{
62 | TraceLevel,
63 | DebugLevel,
64 | InfoLevel,
65 | WarningLevel,
66 | ErrorLevel,
67 | ProgressLevel,
68 | NoneLevel,
69 | }
70 |
71 | const startLevelChar byte = 1
72 | const endLevelChar byte = 2
73 |
74 | func (l Level) prefix() string {
75 | return string([]byte{
76 | startLevelChar,
77 | byte(l.char),
78 | endLevelChar,
79 | })
80 | }
81 |
82 | func (l Level) log(args ...interface{}) {
83 | if l.char == 0 {
84 | return
85 | }
86 |
87 | argsToUse := []interface{}{
88 | l.prefix(),
89 | }
90 | argsToUse = append(argsToUse, args...)
91 | fmt.Fprintln(os.Stderr, argsToUse...)
92 | }
93 |
94 | func (l Level) logf(format string, args ...interface{}) {
95 | if l.char == 0 {
96 | return
97 | }
98 |
99 | formatToUse := string(l.prefix()) + format + "\n"
100 | fmt.Fprintf(os.Stderr, formatToUse, args...)
101 | }
102 |
103 | // Trace outputs a trace logging message to os.Stderr. Message is encoded with a
104 | // prefix that signifies to the server that it is a trace message.
105 | func Trace(args ...interface{}) {
106 | TraceLevel.log(args...)
107 | }
108 |
109 | // Tracef is the equivalent of Printf outputting as a trace logging message.
110 | func Tracef(format string, args ...interface{}) {
111 | TraceLevel.logf(format, args...)
112 | }
113 |
114 | // Debug outputs a debug logging message to os.Stderr. Message is encoded with a
115 | // prefix that signifies to the server that it is a debug message.
116 | func Debug(args ...interface{}) {
117 | DebugLevel.log(args...)
118 | }
119 |
120 | // Debugf is the equivalent of Printf outputting as a debug logging message.
121 | func Debugf(format string, args ...interface{}) {
122 | DebugLevel.logf(format, args...)
123 | }
124 |
125 | // Info outputs an info logging message to os.Stderr. Message is encoded with a
126 | // prefix that signifies to the server that it is an info message.
127 | func Info(args ...interface{}) {
128 | InfoLevel.log(args...)
129 | }
130 |
131 | // Infof is the equivalent of Printf outputting as an info logging message.
132 | func Infof(format string, args ...interface{}) {
133 | InfoLevel.logf(format, args...)
134 | }
135 |
136 | // Warn outputs a warning logging message to os.Stderr. Message is encoded with a
137 | // prefix that signifies to the server that it is a warning message.
138 | func Warn(args ...interface{}) {
139 | WarningLevel.log(args...)
140 | }
141 |
142 | // Warnf is the equivalent of Printf outputting as a warning logging message.
143 | func Warnf(format string, args ...interface{}) {
144 | WarningLevel.logf(format, args...)
145 | }
146 |
147 | // Error outputs an error logging message to os.Stderr. Message is encoded with a
148 | // prefix that signifies to the server that it is an error message.
149 | func Error(args ...interface{}) {
150 | ErrorLevel.log(args...)
151 | }
152 |
153 | // Errorf is the equivalent of Printf outputting as an error logging message.
154 | func Errorf(format string, args ...interface{}) {
155 | ErrorLevel.logf(format, args...)
156 | }
157 |
158 | // Progress logs the current progress value. The progress value should be
159 | // between 0 and 1.0 inclusively, with 1 representing that the task is
160 | // complete. Values outside of this range will be clamped to be within it.
161 | func Progress(progress float64) {
162 | progress = math.Min(math.Max(0, progress), 1)
163 | ProgressLevel.log(progress)
164 | }
165 |
166 | // LevelFromName returns the Level that matches the provided name or nil if
167 | // the name does not match a valid value.
168 | func LevelFromName(name string) *Level {
169 | for _, l := range validLevels {
170 | if l.Name == name {
171 | return &l
172 | }
173 | }
174 |
175 | return nil
176 | }
177 |
178 | // DetectLogLevel returns the Level and the logging string for a provided line
179 | // of plugin output. It parses the string for logging control characters and
180 | // determines the log level, if present. If not present, the plugin output
181 | // is returned unchanged with a nil Level.
182 | func DetectLogLevel(line string) (*Level, string) {
183 | if len(line) < 4 || line[0] != startLevelChar || line[2] != endLevelChar {
184 | return nil, line
185 | }
186 |
187 | char := line[1]
188 | var level *Level
189 | for _, l := range validLevels {
190 | if l.char == char {
191 | level = &l
192 | break
193 | }
194 | }
195 |
196 | if level == nil {
197 | return nil, line
198 | }
199 |
200 | line = strings.TrimSpace(line[3:])
201 |
202 | return level, line
203 | }
204 |
--------------------------------------------------------------------------------
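To make the SOH/STX framing described in the package doc concrete, the snippet below builds a raw log line the way the plugin side emits it and decodes it with DetectLogLevel as the server side would (the message text is made up):

```go
package main

import (
	"fmt"

	"stash-plugin-duplicate-finder/internal/plugin/common/log"
)

func main() {
	// A raw plugin log line: SOH (0x01), the level character, STX (0x02),
	// then the message. log.Info writes exactly this framing to stderr.
	raw := string([]byte{1, 'i', 2}) + " hello from the plugin"

	level, msg := log.DetectLogLevel(raw)
	if level != nil {
		fmt.Printf("level=%s message=%q\n", level.Name, msg) // level=info message="hello from the plugin"
	}
}
```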
/internal/plugin/common/msg.go:
--------------------------------------------------------------------------------
1 | package common
2 |
3 | import "net/http"
4 |
5 | // StashServerConnection represents the connection details needed for a
6 | // plugin instance to connect to its parent stash server.
7 | type StashServerConnection struct {
8 | // http or https
9 | Scheme string
10 |
11 | Port int
12 |
13 | // Cookie for authentication purposes
14 | SessionCookie *http.Cookie
15 |
16 | // Dir specifies the directory containing the stash server's configuration
17 | // file.
18 | Dir string
19 |
20 | // PluginDir specifies the directory containing the plugin configuration
21 | // file.
22 | PluginDir string
23 | }
24 |
25 | // PluginArgValue represents a single value parameter for plugin operations.
26 | type PluginArgValue interface{}
27 |
28 | // ArgsMap is a map of argument key to value.
29 | type ArgsMap map[string]PluginArgValue
30 |
31 | // String returns the string value for the given key, or an empty string if
32 | // the key is not present or is not a string
33 | func (m ArgsMap) String(key string) string {
34 | v, found := m[key]
35 | var ret string
36 | if !found {
37 | return ret
38 | }
39 | ret, _ = v.(string)
40 | return ret
41 | }
42 |
43 | // Int returns the int value for the given key, or 0 if it is not present or not an int
44 | func (m ArgsMap) Int(key string) int {
45 | v, found := m[key]
46 | var ret int
47 | if !found {
48 | return ret
49 | }
50 | ret, _ = v.(int)
51 | return ret
52 | }
53 |
54 | // Bool returns the bool value for the given key, or false if it is not present or not a bool
55 | func (m ArgsMap) Bool(key string) bool {
56 | v, found := m[key]
57 | var ret bool
58 | if !found {
59 | return ret
60 | }
61 | ret, _ = v.(bool)
62 | return ret
63 | }
64 |
65 | // Float returns the float64 value for the given key, or 0 if it is not present or not a float
66 | func (m ArgsMap) Float(key string) float64 {
67 | v, found := m[key]
68 | var ret float64
69 | if !found {
70 | return ret
71 | }
72 | ret, _ = v.(float64)
73 | return ret
74 | }
75 |
76 | // PluginInput is the data structure that is sent to plugin instances when they
77 | // are spawned.
78 | type PluginInput struct {
79 | // Server details to connect to the stash server.
80 | ServerConnection StashServerConnection `json:"server_connection"`
81 |
82 | // Arguments to the plugin operation.
83 | Args ArgsMap `json:"args"`
84 | }
85 |
86 | // PluginOutput is the data structure that is expected to be output by plugin
87 | // processes when execution has concluded. It is expected that this data will
88 | // be encoded as JSON.
89 | type PluginOutput struct {
90 | Error *string `json:"error"`
91 | Output interface{} `json:"output"`
92 | }
93 |
94 | // SetError is a convenience method that sets the Error field based on the
95 | // provided error.
96 | func (o *PluginOutput) SetError(err error) {
97 | errStr := err.Error()
98 | o.Error = &errStr
99 | }
100 |
--------------------------------------------------------------------------------
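Since PluginInput arrives as JSON on stdin and PluginOutput is returned the same way, a short round-trip sketch may help; the argument name and values here are hypothetical:

```go
package main

import (
	"encoding/json"
	"fmt"

	"stash-plugin-duplicate-finder/internal/plugin/common"
)

func main() {
	// Hypothetical input as the stash server might send it to a spawned plugin.
	raw := []byte(`{
		"server_connection": {"Scheme": "http", "Port": 9999, "PluginDir": "/plugins/duplicate-finder"},
		"args": {"dry_run": true}
	}`)

	var in common.PluginInput
	if err := json.Unmarshal(raw, &in); err != nil {
		panic(err)
	}
	fmt.Println(in.ServerConnection.Scheme, in.Args.Bool("dry_run")) // http true

	// The output the plugin hands back when the operation finishes.
	out, _ := json.Marshal(common.PluginOutput{Output: "ok"})
	fmt.Println(string(out)) // {"error":null,"output":"ok"}
}
```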
/internal/plugin/common/rpc.go:
--------------------------------------------------------------------------------
1 | package common
2 |
3 | import (
4 | "net/rpc/jsonrpc"
5 |
6 | "github.com/natefinch/pie"
7 | )
8 |
9 | // RPCRunner is the interface that RPC plugins are expected to fulfil.
10 | type RPCRunner interface {
11 | // Perform the operation, using the provided input and populating the
12 | // output object.
13 | Run(input PluginInput, output *PluginOutput) error
14 |
15 | // Stop any running operations, if possible. No input is sent and any
16 | // output is ignored.
17 | Stop(input struct{}, output *bool) error
18 | }
19 |
20 | // ServePlugin is used by plugin instances to serve the plugin via RPC, using
21 | // the provided RPCRunner interface.
22 | func ServePlugin(iface RPCRunner) error {
23 | p := pie.NewProvider()
24 | if err := p.RegisterName("RPCRunner", iface); err != nil {
25 | return err
26 | }
27 |
28 | p.ServeCodec(jsonrpc.NewServerCodec)
29 | return nil
30 | }
31 |
--------------------------------------------------------------------------------
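For reference, a bare-bones plugin that satisfies RPCRunner and serves it with ServePlugin would look roughly like this (the real implementation is the api type in api.go):

```go
package main

import "stash-plugin-duplicate-finder/internal/plugin/common"

// minimalRunner is a sketch of the RPCRunner contract expected by ServePlugin.
type minimalRunner struct{}

func (minimalRunner) Run(input common.PluginInput, output *common.PluginOutput) error {
	msg := "nothing to do"
	output.Output = &msg
	return nil
}

func (minimalRunner) Stop(input struct{}, output *bool) error {
	*output = true
	return nil
}

func main() {
	// Blocks, serving JSON-RPC over stdin/stdout until the host hangs up.
	if err := common.ServePlugin(minimalRunner{}); err != nil {
		panic(err)
	}
}
```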
/internal/plugin/util/client.go:
--------------------------------------------------------------------------------
1 | // Package util implements utility and convenience methods for plugins. It is
2 | // not intended for the main stash code to access.
3 | package util
4 |
5 | import (
6 | "fmt"
7 | "net/http"
8 | "net/http/cookiejar"
9 | "net/url"
10 |
11 | "github.com/shurcooL/graphql"
12 |
13 | "stash-plugin-duplicate-finder/internal/plugin/common"
14 | )
15 |
16 | // NewClient creates a graphql Client connecting to the stash server using
17 | // the provided server connection details.
18 | func NewClient(provider common.StashServerConnection, addr string) *graphql.Client {
19 | u, _ := url.Parse(fmt.Sprintf("http://%s:%d/graphql", addr, provider.Port))
20 | u.Scheme = provider.Scheme
21 |
22 | cookieJar, _ := cookiejar.New(nil)
23 |
24 | cookie := provider.SessionCookie
25 | if cookie != nil {
26 | cookieJar.SetCookies(u, []*http.Cookie{
27 | cookie,
28 | })
29 | }
30 |
31 | httpClient := &http.Client{
32 | Jar: cookieJar,
33 | }
34 |
35 | return graphql.NewClient(u.String(), httpClient)
36 | }
37 |
--------------------------------------------------------------------------------
/matching.go:
--------------------------------------------------------------------------------
1 | package main
2 |
3 | type matchInfo struct {
4 | other string
5 | otherScene *Scene
6 | score float64
7 | }
8 |
9 | type matchInfoMap map[string][]matchInfo
10 |
11 | func (m *matchInfoMap) add(subject, match string, score float64) {
12 | existing := (*m)[subject]
13 | existing = append(existing, matchInfo{
14 | other: match,
15 | score: score,
16 | })
17 |
18 | (*m)[subject] = existing
19 |
20 | existing = (*m)[match]
21 | existing = append(existing, matchInfo{
22 | other: subject,
23 | score: score,
24 | })
25 |
26 | (*m)[match] = existing
27 | }
28 |
--------------------------------------------------------------------------------
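matchInfoMap.add records each match in both directions so that handleDuplicate can update either scene's details. A hypothetical test in the same package:

```go
package main

import "testing"

func TestMatchInfoMapAddIsSymmetric(t *testing.T) {
	m := make(matchInfoMap)
	m.add("aaa", "bbb", -63.2)

	// Both checksums should now reference each other with the same score.
	if m["aaa"][0].other != "bbb" || m["bbb"][0].other != "aaa" {
		t.Fatalf("expected symmetric entries, got %+v", m)
	}
}
```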
/vendor/github.com/natefinch/pie/.gitignore:
--------------------------------------------------------------------------------
1 | # Compiled Object files, Static and Dynamic libs (Shared Objects)
2 | *.o
3 | *.a
4 | *.so
5 |
6 | # Folders
7 | _obj
8 | _test
9 |
10 | # Architecture specific extensions/prefixes
11 | *.[568vq]
12 | [568vq].out
13 |
14 | *.cgo1.go
15 | *.cgo2.c
16 | _cgo_defun.c
17 | _cgo_gotypes.go
18 | _cgo_export.*
19 |
20 | _testmain.go
21 |
22 | *.exe
23 | *.test
24 | *.prof
25 |
--------------------------------------------------------------------------------
/vendor/github.com/natefinch/pie/LICENSE:
--------------------------------------------------------------------------------
1 | The MIT License (MIT)
2 |
3 | Copyright (c) 2015 Nate Finch
4 |
5 | Permission is hereby granted, free of charge, to any person obtaining a copy
6 | of this software and associated documentation files (the "Software"), to deal
7 | in the Software without restriction, including without limitation the rights
8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | copies of the Software, and to permit persons to whom the Software is
10 | furnished to do so, subject to the following conditions:
11 |
12 | The above copyright notice and this permission notice shall be included in all
13 | copies or substantial portions of the Software.
14 |
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | SOFTWARE.
22 |
23 |
--------------------------------------------------------------------------------
/vendor/github.com/natefinch/pie/README.md:
--------------------------------------------------------------------------------
1 | # pie [GoDoc](https://godoc.org/github.com/natefinch/pie) [Build Status](https://app.codeship.com/projects/232834)
2 |
3 | import "github.com/natefinch/pie"
4 |
5 | package pie provides a toolkit for creating plugins for Go applications.
6 |
7 | 
8 |
9 | **Why is it called pie?**
10 |
11 | Because if you pronounce API like "a pie", then all this consuming and serving
12 | of APIs becomes a lot more palatable. Also, pies are the ultimate pluggable
13 | interface - depending on what's inside, you can get dinner, dessert, a snack, or
14 | even breakfast. Plus, then I get to say that plugins in Go are as easy as...
15 | well, you know.
16 |
17 | If you have to explain it to your boss, just say it's an acronym for Plug In
18 | Executables. (but it's not, really)
19 |
20 | ## About Pie
21 |
22 | Plugins using this toolkit and the applications managing those plugins
23 | communicate via RPC over the plugin application's Stdin and Stdout.
24 |
25 | Functions in this package with the prefix `New` are intended to be used by the
26 | plugin to set up its end of the communication. Functions in this package
27 | with the prefix `Start` are intended to be used by the main application to set
28 | up its end of the communication and start a plugin executable.
29 |
30 |
31 |
32 | This package provides two conceptually different types of plugins, based on
33 | which side of the communication is the server and which is the client.
34 | Plugins which provide an API server for the main application to call are
35 | called Providers. Plugins which consume an API provided by the main
36 | application are called Consumers.
37 |
38 | The default codec for RPC for this package is Go's gob encoding, however you
39 | may provide your own codec, such as JSON-RPC provided by net/rpc/jsonrpc.
40 |
41 | There is no requirement that plugins for applications using this toolkit be
42 | written in Go. As long as the plugin application can consume or provide an
43 | RPC API of the correct codec, it can interoperate with main applications
44 | using this process. For example, if your main application uses JSON-RPC,
45 | many languages are capable of producing an executable that can provide a
46 | JSON-RPC API for your application to use.
47 |
48 | Included in this repo are some simple examples of a master process and a
49 | plugin process, to see how the library can be used. An example of the
50 | standard plugin that provides an API the master process consumes is in the
51 | examples/provider directory. master\_provider expects plugin\_provider to be
52 | in the same directory or in your $PATH. You can just go install both of
53 | them, and it'll work correctly.
54 |
55 | In addition to a regular plugin that provides an API, this package can be
56 | used for plugins that consume an API provided by the main process. To see an
57 | example of this, look in the examples/consumer folder.
58 |
59 |
60 | ## func NewConsumer
61 | ``` go
62 | func NewConsumer() *rpc.Client
63 | ```
64 | NewConsumer returns an rpc.Client that will consume an API from the host
65 | process over this application's Stdin and Stdout using gob encoding.
66 |
67 |
68 | ## func NewConsumerCodec
69 | ``` go
70 | func NewConsumerCodec(f func(io.ReadWriteCloser) rpc.ClientCodec) *rpc.Client
71 | ```
72 | NewConsumerCodec returns an rpc.Client that will consume an API from the host
73 | process over this application's Stdin and Stdout using the ClientCodec
74 | returned by f.
75 |
76 |
77 | ## func StartProvider
78 | ``` go
79 | func StartProvider(output io.Writer, path string, args ...string) (*rpc.Client, error)
80 | ```
81 | StartProvider start a provider-style plugin application at the given path and
82 | args, and returns an RPC client that communicates with the plugin using gob
83 | encoding over the plugin's Stdin and Stdout. The writer passed to output
84 | will receive output from the plugin's stderr. Closing the RPC client
85 | returned from this function will shut down the plugin application.
86 |
87 |
88 | ## func StartProviderCodec
89 | ``` go
90 | func StartProviderCodec(
91 | f func(io.ReadWriteCloser) rpc.ClientCodec,
92 | output io.Writer,
93 | path string,
94 | args ...string,
95 | ) (*rpc.Client, error)
96 | ```
97 | StartProviderCodec starts a provider-style plugin application at the given
98 | path and args, and returns an RPC client that communicates with the plugin
99 | using the ClientCodec returned by f over the plugin's Stdin and Stdout. The
100 | writer passed to output will receive output from the plugin's stderr.
101 | Closing the RPC client returned from this function will shut down the plugin
102 | application.
103 |
104 |
105 | ## type Server
106 | ``` go
107 | type Server struct {
108 | // contains filtered or unexported fields
109 | }
110 | ```
111 | Server is a type that represents an RPC server that serves an API over
112 | stdin/stdout.
113 |
114 |
115 | ### func NewProvider
116 | ``` go
117 | func NewProvider() Server
118 | ```
119 | NewProvider returns a Server that will serve RPC over this
120 | application's Stdin and Stdout. This method is intended to be run by the
121 | plugin application.
122 |
123 |
124 | ### func StartConsumer
125 | ``` go
126 | func StartConsumer(output io.Writer, path string, args ...string) (Server, error)
127 | ```
128 | StartConsumer starts a consumer-style plugin application with the given path
129 | and args, writing its stderr to output. The plugin consumes an API this
130 | application provides. The function returns the Server for this host
131 | application, which should be used to register APIs for the plugin to consume.
132 |
133 |
134 | ### func (Server) Close
135 | ``` go
136 | func (s Server) Close() error
137 | ```
138 | Close closes the connection with the client. If the client is a plugin
139 | process, the process will be stopped. Further communication using this
140 | Server will fail.
141 |
142 |
143 | ### func (Server) Register
144 | ``` go
145 | func (s Server) Register(rcvr interface{}) error
146 | ```
147 | Register publishes in the provider the set of methods of the receiver value
148 | that satisfy the following conditions:
149 |
150 |
151 | - exported method
152 | - two arguments, both of exported type
153 | - the second argument is a pointer
154 | - one return value, of type error
155 |
156 | It returns an error if the receiver is not an exported type or has no
157 | suitable methods. It also logs the error using package log. The client
158 | accesses each method using a string of the form "Type.Method", where Type is
159 | the receiver's concrete type.
160 |
161 |
162 | ### func (Server) RegisterName
163 | ``` go
164 | func (s Server) RegisterName(name string, rcvr interface{}) error
165 | ```
166 | RegisterName is like Register but uses the provided name for the type
167 | instead of the receiver's concrete type.
168 |
169 |
170 | ### func (Server) Serve
171 | ``` go
172 | func (s Server) Serve()
173 | ```
174 | Serve starts the Server's RPC server, serving via gob encoding. This call
175 | will block until the client hangs up.
176 |
177 |
178 | ### func (Server) ServeCodec
179 | ``` go
180 | func (s Server) ServeCodec(f func(io.ReadWriteCloser) rpc.ServerCodec)
181 | ```
182 | ServeCodec starts the Server's RPC server, serving via the encoding returned
183 | by f. This call will block until the client hangs up.
184 |
--------------------------------------------------------------------------------
/vendor/github.com/natefinch/pie/doc.go:
--------------------------------------------------------------------------------
1 | // Package pie provides a toolkit for creating plugins for Go applications.
2 | //
3 | // Plugins using this toolkit and the applications managing those plugins
4 | // communicate via RPC over the plugin application's Stdin and Stdout.
5 | //
6 | // Functions in this package with the prefix New are intended to be used by the
7 | // plugin to set up its end of the communication. Functions in this package
8 | // with the prefix Start are intended to be used by the main application to set
9 | // up its end of the communication and run a plugin executable.
10 | //
11 | // This package provides two conceptually different types of plugins, based on
12 | // which side of the communication is the server and which is the client.
13 | // Plugins which provide an API server for the main application to call are
14 | // called Providers. Plugins which consume an API provided by the main
15 | // application are called Consumers.
16 | //
17 | // The default codec for RPC for this package is Go's gob encoding, however you
18 | // may provide your own codec, such as JSON-RPC provided by net/rpc/jsonrpc.
19 | //
20 | // There is no requirement that plugins for applications using this toolkit be
21 | // written in Go. As long as the plugin application can consume or provide an
22 | // RPC API of the correct codec, it can interoperate with main applications
23 | // using this process. For example, if your main application uses JSON-RPC,
24 | // many languages are capable of producing an executable that can provide a
25 | // JSON-RPC API for your application to use.
26 | //
27 | // Included in this repo are some simple examples of a master process and a
28 | // plugin process, to see how the library can be used. An example of the
29 | // standard plugin that provides an API the master process consumes is in the
30 | // examples/provider directory. master_provider expects plugin_provider to be
31 | // in the same directory or in your $PATH. You can just go install both of
32 | // them, and it'll work correctly.
33 |
34 | // In addition to a regular plugin that provides an API, this package can be
35 | // used for plugins that consume an API provided by the main process. To see an
36 | // example of this, look in the examples/consumer folder.
37 | package pie
38 |
--------------------------------------------------------------------------------
/vendor/github.com/natefinch/pie/pie.go:
--------------------------------------------------------------------------------
1 | package pie
2 |
3 | import (
4 | "errors"
5 | "fmt"
6 | "io"
7 | "net/rpc"
8 | "os"
9 | "os/exec"
10 | "time"
11 | )
12 |
13 | var errProcStopTimeout = errors.New("process killed after timeout waiting for process to stop")
14 |
15 | // NewProvider returns a Server that will serve RPC over this
16 | // application's Stdin and Stdout. This method is intended to be run by the
17 | // plugin application.
18 | func NewProvider() Server {
19 | return Server{
20 | server: rpc.NewServer(),
21 | rwc: rwCloser{os.Stdin, os.Stdout},
22 | }
23 | }
24 |
25 | // Server is a type that represents an RPC server that serves an API over
26 | // stdin/stdout.
27 | type Server struct {
28 | server *rpc.Server
29 | rwc io.ReadWriteCloser
30 | codec rpc.ServerCodec
31 | }
32 |
33 | // Close closes the connection with the client. If the client is a plugin
34 | // process, the process will be stopped. Further communication using this
35 | // Server will fail.
36 | func (s Server) Close() error {
37 | if s.codec != nil {
38 | return s.codec.Close()
39 | }
40 | return s.rwc.Close()
41 | }
42 |
43 | // Serve starts the Server's RPC server, serving via gob encoding. This call
44 | // will block until the client hangs up.
45 | func (s Server) Serve() {
46 | s.server.ServeConn(s.rwc)
47 | }
48 |
49 | // ServeCodec starts the Server's RPC server, serving via the encoding returned
50 | // by f. This call will block until the client hangs up.
51 | func (s Server) ServeCodec(f func(io.ReadWriteCloser) rpc.ServerCodec) {
52 | s.server.ServeCodec(f(s.rwc))
53 | }
54 |
55 | // Register publishes in the provider the set of methods of the receiver value
56 | // that satisfy the following conditions:
57 | //
58 | // - exported method
59 | // - two arguments, both of exported type
60 | // - the second argument is a pointer
61 | // - one return value, of type error
62 | //
63 | // It returns an error if the receiver is not an exported type or has no
64 | // suitable methods. It also logs the error using package log. The client
65 | // accesses each method using a string of the form "Type.Method", where Type is
66 | // the receiver's concrete type.
67 | func (s Server) Register(rcvr interface{}) error {
68 | return s.server.Register(rcvr)
69 | }
70 |
71 | // RegisterName is like Register but uses the provided name for the type
72 | // instead of the receiver's concrete type.
73 | func (s Server) RegisterName(name string, rcvr interface{}) error {
74 | return s.server.RegisterName(name, rcvr)
75 | }
76 |
77 | // StartProvider start a provider-style plugin application at the given path and
78 | // args, and returns an RPC client that communicates with the plugin using gob
79 | // encoding over the plugin's Stdin and Stdout. The writer passed to output
80 | // will receive output from the plugin's stderr. Closing the RPC client
81 | // returned from this function will shut down the plugin application.
82 | func StartProvider(output io.Writer, path string, args ...string) (*rpc.Client, error) {
83 | pipe, err := start(makeCommand(output, path, args))
84 | if err != nil {
85 | return nil, err
86 | }
87 | return rpc.NewClient(pipe), nil
88 | }
89 |
90 | // StartProviderCodec starts a provider-style plugin application at the given
91 | // path and args, and returns an RPC client that communicates with the plugin
92 | // using the ClientCodec returned by f over the plugin's Stdin and Stdout. The
93 | // writer passed to output will receive output from the plugin's stderr.
94 | // Closing the RPC client returned from this function will shut down the plugin
95 | // application.
96 | func StartProviderCodec(
97 | f func(io.ReadWriteCloser) rpc.ClientCodec,
98 | output io.Writer,
99 | path string,
100 | args ...string,
101 | ) (*rpc.Client, error) {
102 | pipe, err := start(makeCommand(output, path, args))
103 | if err != nil {
104 | return nil, err
105 | }
106 | return rpc.NewClientWithCodec(f(pipe)), nil
107 | }
108 |
109 | // StartConsumer starts a consumer-style plugin application with the given path
110 | // and args, writing its stderr to output. The plugin consumes an API this
111 | // application provides. The function returns the Server for this host
112 | // application, which should be used to register APIs for the plugin to consume.
113 | func StartConsumer(output io.Writer, path string, args ...string) (Server, error) {
114 | pipe, err := start(makeCommand(output, path, args))
115 | if err != nil {
116 | return Server{}, err
117 | }
118 | return Server{
119 | server: rpc.NewServer(),
120 | rwc: pipe,
121 | }, nil
122 | }
123 |
124 | // NewConsumer returns an rpc.Client that will consume an API from the host
125 | // process over this application's Stdin and Stdout using gob encoding.
126 | func NewConsumer() *rpc.Client {
127 | return rpc.NewClient(rwCloser{os.Stdin, os.Stdout})
128 | }
129 |
130 | // NewConsumerCodec returns an rpc.Client that will consume an API from the host
131 | // process over this application's Stdin and Stdout using the ClientCodec
132 | // returned by f.
133 | func NewConsumerCodec(f func(io.ReadWriteCloser) rpc.ClientCodec) *rpc.Client {
134 | return rpc.NewClientWithCodec(f(rwCloser{os.Stdin, os.Stdout}))
135 | }
136 |
137 | // start runs the plugin and returns an ioPipe that can be used to control the
138 | // plugin.
139 | func start(cmd commander) (_ ioPipe, err error) {
140 | in, err := cmd.StdinPipe()
141 | if err != nil {
142 | return ioPipe{}, err
143 | }
144 | defer func() {
145 | if err != nil {
146 | in.Close()
147 | }
148 | }()
149 | out, err := cmd.StdoutPipe()
150 | if err != nil {
151 | return ioPipe{}, err
152 | }
153 | defer func() {
154 | if err != nil {
155 | out.Close()
156 | }
157 | }()
158 |
159 | proc, err := cmd.Start()
160 | if err != nil {
161 | return ioPipe{}, err
162 | }
163 | return ioPipe{out, in, proc}, nil
164 | }
165 |
166 | // makeCommand creates the exec.Cmd for the plugin, wiring its stderr to w.
167 | // It is a variable so that tests can substitute their own implementation.
168 | var makeCommand = func(w io.Writer, path string, args []string) commander {
169 | cmd := exec.Command(path, args...)
170 | cmd.Stderr = w
171 | return execCmd{cmd}
172 | }
173 |
174 | type execCmd struct {
175 | *exec.Cmd
176 | }
177 |
178 | func (e execCmd) Start() (osProcess, error) {
179 | if err := e.Cmd.Start(); err != nil {
180 | return nil, err
181 | }
182 | return e.Cmd.Process, nil
183 | }
184 |
185 | // commander is an interface that is fulfilled by exec.Cmd and makes our testing
186 | // a little easier.
187 | type commander interface {
188 | StdinPipe() (io.WriteCloser, error)
189 | StdoutPipe() (io.ReadCloser, error)
190 | // Start is like exec.Cmd's start, except it also returns the os.Process if
191 | // start succeeds.
192 | Start() (osProcess, error)
193 | }
194 |
195 | // osProcess is an interface that is fulfilled by *os.Process and makes our
196 | // testing a little easier.
197 | type osProcess interface {
198 | Wait() (*os.ProcessState, error)
199 | Kill() error
200 | Signal(os.Signal) error
201 | }
202 |
203 | // ioPipe simply wraps a ReadCloser, WriteCloser, and a Process, and coordinates
204 | // them so they all close together.
205 | type ioPipe struct {
206 | io.ReadCloser
207 | io.WriteCloser
208 | proc osProcess
209 | }
210 |
211 | // Close closes the pipe's ReadCloser, WriteCloser, and process.
212 | func (iop ioPipe) Close() error {
213 | err := iop.ReadCloser.Close()
214 | if writeErr := iop.WriteCloser.Close(); writeErr != nil {
215 | err = writeErr
216 | }
217 | if procErr := iop.closeProc(); procErr != nil {
218 | err = procErr
219 | }
220 | return err
221 | }
222 |
223 | // procTimeout is the timeout to wait for a process to stop after being
224 | // signalled. It is adjustable to keep tests fast.
225 | var procTimeout = time.Second
226 |
227 | // closeProc sends an interrupt signal to the pipe's process, and if it doesn't
228 | // stop within procTimeout, kills the process.
229 | func (iop ioPipe) closeProc() error {
230 | result := make(chan error, 1)
231 | go func() { _, err := iop.proc.Wait(); result <- err }()
232 | if err := iop.proc.Signal(os.Interrupt); err != nil {
233 | return err
234 | }
235 | select {
236 | case err := <-result:
237 | return err
238 | case <-time.After(procTimeout):
239 | if err := iop.proc.Kill(); err != nil {
240 | return fmt.Errorf("error killing process after timeout: %s", err)
241 | }
242 | return errProcStopTimeout
243 | }
244 | }
245 |
246 | // rwCloser just merges a ReadCloser and a WriteCloser into a ReadWriteCloser.
247 | type rwCloser struct {
248 | io.ReadCloser
249 | io.WriteCloser
250 | }
251 |
252 | // Close closes both the ReadCloser and the WriteCloser, returning the last
253 | // error from either.
254 | func (rw rwCloser) Close() error {
255 | err := rw.ReadCloser.Close()
256 | if err := rw.WriteCloser.Close(); err != nil {
257 | return err
258 | }
259 | return err
260 | }
261 |
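
As a quick orientation for how this vendored package is meant to be used: a plugin binary built on pie registers an exported receiver with a Server and then serves RPC over its own stdin/stdout. The sketch below is a hedged illustration only; the `DuplicateFinder` type, its `Run` method, and the choice of a JSON-RPC codec are assumptions made for the example, not code taken from this repository.

```go
package main

import (
	"log"
	"net/rpc/jsonrpc"

	"github.com/natefinch/pie"
)

// DuplicateFinder is a hypothetical receiver; pie (via net/rpc) requires its
// RPC methods to be exported, take two arguments with the second a pointer,
// and return a single error.
type DuplicateFinder struct{}

// Run is a hypothetical plugin entry point exposed over RPC.
func (DuplicateFinder) Run(args string, reply *string) error {
	*reply = "ran with " + args
	return nil
}

func main() {
	p := pie.NewProvider()
	if err := p.Register(DuplicateFinder{}); err != nil {
		log.Fatalf("failed to register plugin: %s", err)
	}
	// Serve over stdin/stdout; a JSON-RPC codec is used here instead of the
	// default gob encoding that Serve would use.
	p.ServeCodec(jsonrpc.NewServerCodec)
}
```

On the host side, `StartProviderCodec(jsonrpc.NewClientCodec, os.Stderr, pluginPath)` would launch such a binary and return an `*rpc.Client` for calling `DuplicateFinder.Run`.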
--------------------------------------------------------------------------------
/vendor/github.com/nfnt/resize/.travis.yml:
--------------------------------------------------------------------------------
1 | language: go
2 |
3 | go:
4 | - "1.x"
5 | - "1.1"
6 | - "1.4"
7 | - "1.10"
8 |
--------------------------------------------------------------------------------
/vendor/github.com/nfnt/resize/LICENSE:
--------------------------------------------------------------------------------
1 | Copyright (c) 2012, Jan Schlicht
2 |
3 | Permission to use, copy, modify, and/or distribute this software for any purpose
4 | with or without fee is hereby granted, provided that the above copyright notice
5 | and this permission notice appear in all copies.
6 |
7 | THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH
8 | REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND
9 | FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT,
10 | INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS
11 | OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
12 | TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF
13 | THIS SOFTWARE.
14 |
--------------------------------------------------------------------------------
/vendor/github.com/nfnt/resize/README.md:
--------------------------------------------------------------------------------
1 | # This package is no longer being updated! Please look for alternatives if that bothers you.
2 |
3 | Resize
4 | ======
5 |
6 | Image resizing for the [Go programming language](http://golang.org) with common interpolation methods.
7 |
8 | [Build Status](https://travis-ci.org/nfnt/resize)
9 |
10 | Installation
11 | ------------
12 |
13 | ```bash
14 | $ go get github.com/nfnt/resize
15 | ```
16 |
17 | It's that easy!
18 |
19 | Usage
20 | -----
21 |
22 | This package needs at least Go 1.1. Import package with
23 |
24 | ```go
25 | import "github.com/nfnt/resize"
26 | ```
27 |
28 | The resize package provides 2 functions:
29 |
30 | * `resize.Resize` creates a scaled image with new dimensions (`width`, `height`) using the interpolation function `interp`.
31 | If either `width` or `height` is set to 0, it will be set to an aspect ratio preserving value.
32 | * `resize.Thumbnail` downscales an image preserving its aspect ratio to the maximum dimensions (`maxWidth`, `maxHeight`).
33 | It returns the original image unchanged if its dimensions already fit within the provided maximums.
34 |
35 | ```go
36 | resize.Resize(width, height uint, img image.Image, interp resize.InterpolationFunction) image.Image
37 | resize.Thumbnail(maxWidth, maxHeight uint, img image.Image, interp resize.InterpolationFunction) image.Image
38 | ```
39 |
40 | The provided interpolation functions are (from fast to slow execution time)
41 |
42 | - `NearestNeighbor`: [Nearest-neighbor interpolation](http://en.wikipedia.org/wiki/Nearest-neighbor_interpolation)
43 | - `Bilinear`: [Bilinear interpolation](http://en.wikipedia.org/wiki/Bilinear_interpolation)
44 | - `Bicubic`: [Bicubic interpolation](http://en.wikipedia.org/wiki/Bicubic_interpolation)
45 | - `MitchellNetravali`: [Mitchell-Netravali interpolation](http://dl.acm.org/citation.cfm?id=378514)
46 | - `Lanczos2`: [Lanczos resampling](http://en.wikipedia.org/wiki/Lanczos_resampling) with a=2
47 | - `Lanczos3`: [Lanczos resampling](http://en.wikipedia.org/wiki/Lanczos_resampling) with a=3
48 |
49 | Which of these methods gives the best results depends on your use case.
50 |
51 | Sample usage:
52 |
53 | ```go
54 | package main
55 |
56 | import (
57 | "github.com/nfnt/resize"
58 | "image/jpeg"
59 | "log"
60 | "os"
61 | )
62 |
63 | func main() {
64 | // open "test.jpg"
65 | file, err := os.Open("test.jpg")
66 | if err != nil {
67 | log.Fatal(err)
68 | }
69 |
70 | // decode jpeg into image.Image
71 | img, err := jpeg.Decode(file)
72 | if err != nil {
73 | log.Fatal(err)
74 | }
75 | file.Close()
76 |
77 | // resize to width 1000 using Lanczos resampling
78 | // and preserve aspect ratio
79 | m := resize.Resize(1000, 0, img, resize.Lanczos3)
80 |
81 | out, err := os.Create("test_resized.jpg")
82 | if err != nil {
83 | log.Fatal(err)
84 | }
85 | defer out.Close()
86 |
87 | // write new image to file
88 | jpeg.Encode(out, m, nil)
89 | }
90 | ```
91 |
92 | Caveats
93 | -------
94 |
95 | * Optimized access routines are used for `image.RGBA`, `image.NRGBA`, `image.RGBA64`, `image.NRGBA64`, `image.YCbCr`, `image.Gray`, and `image.Gray16` types. All other image types are accessed in a generic way that will result in slow processing speed.
96 | * JPEG images are stored in `image.YCbCr`. This image format stores data in a way that will decrease processing speed. A resize may be up to 2 times slower than with `image.RGBA`.
97 |
98 |
99 | Downsizing Samples
100 | -------
101 |
102 | Downsizing is not as simple as it might seem. Images have to be filtered before they are scaled down, otherwise aliasing might occur.
103 | Filtering is highly subjective: applying too much blurs the whole image, while applying too little lets aliasing become apparent.
104 | Resize tries to provide sane defaults that should suffice in most cases.
105 |
106 | ### Artificial sample
107 |
108 | Original image
109 | [Comparison images omitted: downscaled with Nearest-Neighbor, Bilinear, Bicubic, Mitchell-Netravali, Lanczos2, and Lanczos3]
110 |
126 | ### Real-Life sample
127 |
128 | Original image
129 | [Comparison images omitted: downscaled with Nearest-Neighbor, Bilinear, Bicubic, Mitchell-Netravali, Lanczos2, and Lanczos3]
130 |
146 |
147 | License
148 | -------
149 |
150 | Copyright (c) 2012 Jan Schlicht
151 | Resize is released under an MIT-style license.
152 |
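
Following on from the README's sample: `Thumbnail` from the same package covers the bounded-downscale case described above. A minimal hedged sketch, mirroring the sample's illustrative `test.jpg` and using an arbitrary 320x240 bound:

```go
package main

import (
	"image/jpeg"
	"log"
	"os"

	"github.com/nfnt/resize"
)

func main() {
	// "test.jpg" is the same illustrative file name used in the sample above.
	file, err := os.Open("test.jpg")
	if err != nil {
		log.Fatal(err)
	}
	img, err := jpeg.Decode(file)
	if err != nil {
		log.Fatal(err)
	}
	file.Close()

	// Fit the image inside 320x240 while preserving its aspect ratio.
	// If it is already within those bounds, Thumbnail returns it unchanged.
	thumb := resize.Thumbnail(320, 240, img, resize.Lanczos3)

	out, err := os.Create("test_thumbnail.jpg")
	if err != nil {
		log.Fatal(err)
	}
	defer out.Close()
	jpeg.Encode(out, thumb, nil)
}
```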
--------------------------------------------------------------------------------
/vendor/github.com/nfnt/resize/converter.go:
--------------------------------------------------------------------------------
1 | /*
2 | Copyright (c) 2012, Jan Schlicht
3 |
4 | Permission to use, copy, modify, and/or distribute this software for any purpose
5 | with or without fee is hereby granted, provided that the above copyright notice
6 | and this permission notice appear in all copies.
7 |
8 | THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH
9 | REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND
10 | FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT,
11 | INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS
12 | OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
13 | TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF
14 | THIS SOFTWARE.
15 | */
16 |
17 | package resize
18 |
19 | import "image"
20 |
21 | // Keep value in [0,255] range.
22 | func clampUint8(in int32) uint8 {
23 | 	// Casting a negative int to a uint wraps to a very large value, so the
24 | 	// single unsigned comparison below handles both bounds at once. This
25 | 	// behavior is exploited here and in other functions for performance.
26 | if uint32(in) < 256 {
27 | return uint8(in)
28 | }
29 | if in > 255 {
30 | return 255
31 | }
32 | return 0
33 | }
34 |
35 | // Keep value in [0,65535] range.
36 | func clampUint16(in int64) uint16 {
37 | if uint64(in) < 65536 {
38 | return uint16(in)
39 | }
40 | if in > 65535 {
41 | return 65535
42 | }
43 | return 0
44 | }
45 |
46 | func resizeGeneric(in image.Image, out *image.RGBA64, scale float64, coeffs []int32, offset []int, filterLength int) {
47 | newBounds := out.Bounds()
48 | maxX := in.Bounds().Dx() - 1
49 |
50 | for x := newBounds.Min.X; x < newBounds.Max.X; x++ {
51 | for y := newBounds.Min.Y; y < newBounds.Max.Y; y++ {
52 | var rgba [4]int64
53 | var sum int64
54 | start := offset[y]
55 | ci := y * filterLength
56 | for i := 0; i < filterLength; i++ {
57 | coeff := coeffs[ci+i]
58 | if coeff != 0 {
59 | xi := start + i
60 | switch {
61 | case xi < 0:
62 | xi = 0
63 | case xi >= maxX:
64 | xi = maxX
65 | }
66 |
67 | r, g, b, a := in.At(xi+in.Bounds().Min.X, x+in.Bounds().Min.Y).RGBA()
68 |
69 | rgba[0] += int64(coeff) * int64(r)
70 | rgba[1] += int64(coeff) * int64(g)
71 | rgba[2] += int64(coeff) * int64(b)
72 | rgba[3] += int64(coeff) * int64(a)
73 | sum += int64(coeff)
74 | }
75 | }
76 |
77 | offset := (y-newBounds.Min.Y)*out.Stride + (x-newBounds.Min.X)*8
78 |
79 | value := clampUint16(rgba[0] / sum)
80 | out.Pix[offset+0] = uint8(value >> 8)
81 | out.Pix[offset+1] = uint8(value)
82 | value = clampUint16(rgba[1] / sum)
83 | out.Pix[offset+2] = uint8(value >> 8)
84 | out.Pix[offset+3] = uint8(value)
85 | value = clampUint16(rgba[2] / sum)
86 | out.Pix[offset+4] = uint8(value >> 8)
87 | out.Pix[offset+5] = uint8(value)
88 | value = clampUint16(rgba[3] / sum)
89 | out.Pix[offset+6] = uint8(value >> 8)
90 | out.Pix[offset+7] = uint8(value)
91 | }
92 | }
93 | }
94 |
95 | func resizeRGBA(in *image.RGBA, out *image.RGBA, scale float64, coeffs []int16, offset []int, filterLength int) {
96 | newBounds := out.Bounds()
97 | maxX := in.Bounds().Dx() - 1
98 |
99 | for x := newBounds.Min.X; x < newBounds.Max.X; x++ {
100 | row := in.Pix[x*in.Stride:]
101 | for y := newBounds.Min.Y; y < newBounds.Max.Y; y++ {
102 | var rgba [4]int32
103 | var sum int32
104 | start := offset[y]
105 | ci := y * filterLength
106 | for i := 0; i < filterLength; i++ {
107 | coeff := coeffs[ci+i]
108 | if coeff != 0 {
109 | xi := start + i
110 | switch {
111 | case uint(xi) < uint(maxX):
112 | xi *= 4
113 | case xi >= maxX:
114 | xi = 4 * maxX
115 | default:
116 | xi = 0
117 | }
118 |
119 | rgba[0] += int32(coeff) * int32(row[xi+0])
120 | rgba[1] += int32(coeff) * int32(row[xi+1])
121 | rgba[2] += int32(coeff) * int32(row[xi+2])
122 | rgba[3] += int32(coeff) * int32(row[xi+3])
123 | sum += int32(coeff)
124 | }
125 | }
126 |
127 | xo := (y-newBounds.Min.Y)*out.Stride + (x-newBounds.Min.X)*4
128 |
129 | out.Pix[xo+0] = clampUint8(rgba[0] / sum)
130 | out.Pix[xo+1] = clampUint8(rgba[1] / sum)
131 | out.Pix[xo+2] = clampUint8(rgba[2] / sum)
132 | out.Pix[xo+3] = clampUint8(rgba[3] / sum)
133 | }
134 | }
135 | }
136 |
137 | func resizeNRGBA(in *image.NRGBA, out *image.RGBA, scale float64, coeffs []int16, offset []int, filterLength int) {
138 | newBounds := out.Bounds()
139 | maxX := in.Bounds().Dx() - 1
140 |
141 | for x := newBounds.Min.X; x < newBounds.Max.X; x++ {
142 | row := in.Pix[x*in.Stride:]
143 | for y := newBounds.Min.Y; y < newBounds.Max.Y; y++ {
144 | var rgba [4]int32
145 | var sum int32
146 | start := offset[y]
147 | ci := y * filterLength
148 | for i := 0; i < filterLength; i++ {
149 | coeff := coeffs[ci+i]
150 | if coeff != 0 {
151 | xi := start + i
152 | switch {
153 | case uint(xi) < uint(maxX):
154 | xi *= 4
155 | case xi >= maxX:
156 | xi = 4 * maxX
157 | default:
158 | xi = 0
159 | }
160 |
161 | // Forward alpha-premultiplication
162 | a := int32(row[xi+3])
163 | r := int32(row[xi+0]) * a
164 | r /= 0xff
165 | g := int32(row[xi+1]) * a
166 | g /= 0xff
167 | b := int32(row[xi+2]) * a
168 | b /= 0xff
169 |
170 | rgba[0] += int32(coeff) * r
171 | rgba[1] += int32(coeff) * g
172 | rgba[2] += int32(coeff) * b
173 | rgba[3] += int32(coeff) * a
174 | sum += int32(coeff)
175 | }
176 | }
177 |
178 | xo := (y-newBounds.Min.Y)*out.Stride + (x-newBounds.Min.X)*4
179 |
180 | out.Pix[xo+0] = clampUint8(rgba[0] / sum)
181 | out.Pix[xo+1] = clampUint8(rgba[1] / sum)
182 | out.Pix[xo+2] = clampUint8(rgba[2] / sum)
183 | out.Pix[xo+3] = clampUint8(rgba[3] / sum)
184 | }
185 | }
186 | }
187 |
188 | func resizeRGBA64(in *image.RGBA64, out *image.RGBA64, scale float64, coeffs []int32, offset []int, filterLength int) {
189 | newBounds := out.Bounds()
190 | maxX := in.Bounds().Dx() - 1
191 |
192 | for x := newBounds.Min.X; x < newBounds.Max.X; x++ {
193 | row := in.Pix[x*in.Stride:]
194 | for y := newBounds.Min.Y; y < newBounds.Max.Y; y++ {
195 | var rgba [4]int64
196 | var sum int64
197 | start := offset[y]
198 | ci := y * filterLength
199 | for i := 0; i < filterLength; i++ {
200 | coeff := coeffs[ci+i]
201 | if coeff != 0 {
202 | xi := start + i
203 | switch {
204 | case uint(xi) < uint(maxX):
205 | xi *= 8
206 | case xi >= maxX:
207 | xi = 8 * maxX
208 | default:
209 | xi = 0
210 | }
211 |
212 | rgba[0] += int64(coeff) * (int64(row[xi+0])<<8 | int64(row[xi+1]))
213 | rgba[1] += int64(coeff) * (int64(row[xi+2])<<8 | int64(row[xi+3]))
214 | rgba[2] += int64(coeff) * (int64(row[xi+4])<<8 | int64(row[xi+5]))
215 | rgba[3] += int64(coeff) * (int64(row[xi+6])<<8 | int64(row[xi+7]))
216 | sum += int64(coeff)
217 | }
218 | }
219 |
220 | xo := (y-newBounds.Min.Y)*out.Stride + (x-newBounds.Min.X)*8
221 |
222 | value := clampUint16(rgba[0] / sum)
223 | out.Pix[xo+0] = uint8(value >> 8)
224 | out.Pix[xo+1] = uint8(value)
225 | value = clampUint16(rgba[1] / sum)
226 | out.Pix[xo+2] = uint8(value >> 8)
227 | out.Pix[xo+3] = uint8(value)
228 | value = clampUint16(rgba[2] / sum)
229 | out.Pix[xo+4] = uint8(value >> 8)
230 | out.Pix[xo+5] = uint8(value)
231 | value = clampUint16(rgba[3] / sum)
232 | out.Pix[xo+6] = uint8(value >> 8)
233 | out.Pix[xo+7] = uint8(value)
234 | }
235 | }
236 | }
237 |
238 | func resizeNRGBA64(in *image.NRGBA64, out *image.RGBA64, scale float64, coeffs []int32, offset []int, filterLength int) {
239 | newBounds := out.Bounds()
240 | maxX := in.Bounds().Dx() - 1
241 |
242 | for x := newBounds.Min.X; x < newBounds.Max.X; x++ {
243 | row := in.Pix[x*in.Stride:]
244 | for y := newBounds.Min.Y; y < newBounds.Max.Y; y++ {
245 | var rgba [4]int64
246 | var sum int64
247 | start := offset[y]
248 | ci := y * filterLength
249 | for i := 0; i < filterLength; i++ {
250 | coeff := coeffs[ci+i]
251 | if coeff != 0 {
252 | xi := start + i
253 | switch {
254 | case uint(xi) < uint(maxX):
255 | xi *= 8
256 | case xi >= maxX:
257 | xi = 8 * maxX
258 | default:
259 | xi = 0
260 | }
261 |
262 | // Forward alpha-premultiplication
263 | a := int64(uint16(row[xi+6])<<8 | uint16(row[xi+7]))
264 | r := int64(uint16(row[xi+0])<<8|uint16(row[xi+1])) * a
265 | r /= 0xffff
266 | g := int64(uint16(row[xi+2])<<8|uint16(row[xi+3])) * a
267 | g /= 0xffff
268 | b := int64(uint16(row[xi+4])<<8|uint16(row[xi+5])) * a
269 | b /= 0xffff
270 |
271 | rgba[0] += int64(coeff) * r
272 | rgba[1] += int64(coeff) * g
273 | rgba[2] += int64(coeff) * b
274 | rgba[3] += int64(coeff) * a
275 | sum += int64(coeff)
276 | }
277 | }
278 |
279 | xo := (y-newBounds.Min.Y)*out.Stride + (x-newBounds.Min.X)*8
280 |
281 | value := clampUint16(rgba[0] / sum)
282 | out.Pix[xo+0] = uint8(value >> 8)
283 | out.Pix[xo+1] = uint8(value)
284 | value = clampUint16(rgba[1] / sum)
285 | out.Pix[xo+2] = uint8(value >> 8)
286 | out.Pix[xo+3] = uint8(value)
287 | value = clampUint16(rgba[2] / sum)
288 | out.Pix[xo+4] = uint8(value >> 8)
289 | out.Pix[xo+5] = uint8(value)
290 | value = clampUint16(rgba[3] / sum)
291 | out.Pix[xo+6] = uint8(value >> 8)
292 | out.Pix[xo+7] = uint8(value)
293 | }
294 | }
295 | }
296 |
297 | func resizeGray(in *image.Gray, out *image.Gray, scale float64, coeffs []int16, offset []int, filterLength int) {
298 | newBounds := out.Bounds()
299 | maxX := in.Bounds().Dx() - 1
300 |
301 | for x := newBounds.Min.X; x < newBounds.Max.X; x++ {
302 | row := in.Pix[(x-newBounds.Min.X)*in.Stride:]
303 | for y := newBounds.Min.Y; y < newBounds.Max.Y; y++ {
304 | var gray int32
305 | var sum int32
306 | start := offset[y]
307 | ci := y * filterLength
308 | for i := 0; i < filterLength; i++ {
309 | coeff := coeffs[ci+i]
310 | if coeff != 0 {
311 | xi := start + i
312 | switch {
313 | case xi < 0:
314 | xi = 0
315 | case xi >= maxX:
316 | xi = maxX
317 | }
318 | gray += int32(coeff) * int32(row[xi])
319 | sum += int32(coeff)
320 | }
321 | }
322 |
323 | offset := (y-newBounds.Min.Y)*out.Stride + (x - newBounds.Min.X)
324 | out.Pix[offset] = clampUint8(gray / sum)
325 | }
326 | }
327 | }
328 |
329 | func resizeGray16(in *image.Gray16, out *image.Gray16, scale float64, coeffs []int32, offset []int, filterLength int) {
330 | newBounds := out.Bounds()
331 | maxX := in.Bounds().Dx() - 1
332 |
333 | for x := newBounds.Min.X; x < newBounds.Max.X; x++ {
334 | row := in.Pix[x*in.Stride:]
335 | for y := newBounds.Min.Y; y < newBounds.Max.Y; y++ {
336 | var gray int64
337 | var sum int64
338 | start := offset[y]
339 | ci := y * filterLength
340 | for i := 0; i < filterLength; i++ {
341 | coeff := coeffs[ci+i]
342 | if coeff != 0 {
343 | xi := start + i
344 | switch {
345 | case uint(xi) < uint(maxX):
346 | xi *= 2
347 | case xi >= maxX:
348 | xi = 2 * maxX
349 | default:
350 | xi = 0
351 | }
352 | gray += int64(coeff) * int64(uint16(row[xi+0])<<8|uint16(row[xi+1]))
353 | sum += int64(coeff)
354 | }
355 | }
356 |
357 | offset := (y-newBounds.Min.Y)*out.Stride + (x-newBounds.Min.X)*2
358 | value := clampUint16(gray / sum)
359 | out.Pix[offset+0] = uint8(value >> 8)
360 | out.Pix[offset+1] = uint8(value)
361 | }
362 | }
363 | }
364 |
365 | func resizeYCbCr(in *ycc, out *ycc, scale float64, coeffs []int16, offset []int, filterLength int) {
366 | newBounds := out.Bounds()
367 | maxX := in.Bounds().Dx() - 1
368 |
369 | for x := newBounds.Min.X; x < newBounds.Max.X; x++ {
370 | row := in.Pix[x*in.Stride:]
371 | for y := newBounds.Min.Y; y < newBounds.Max.Y; y++ {
372 | var p [3]int32
373 | var sum int32
374 | start := offset[y]
375 | ci := y * filterLength
376 | for i := 0; i < filterLength; i++ {
377 | coeff := coeffs[ci+i]
378 | if coeff != 0 {
379 | xi := start + i
380 | switch {
381 | case uint(xi) < uint(maxX):
382 | xi *= 3
383 | case xi >= maxX:
384 | xi = 3 * maxX
385 | default:
386 | xi = 0
387 | }
388 | p[0] += int32(coeff) * int32(row[xi+0])
389 | p[1] += int32(coeff) * int32(row[xi+1])
390 | p[2] += int32(coeff) * int32(row[xi+2])
391 | sum += int32(coeff)
392 | }
393 | }
394 |
395 | xo := (y-newBounds.Min.Y)*out.Stride + (x-newBounds.Min.X)*3
396 | out.Pix[xo+0] = clampUint8(p[0] / sum)
397 | out.Pix[xo+1] = clampUint8(p[1] / sum)
398 | out.Pix[xo+2] = clampUint8(p[2] / sum)
399 | }
400 | }
401 | }
402 |
403 | func nearestYCbCr(in *ycc, out *ycc, scale float64, coeffs []bool, offset []int, filterLength int) {
404 | newBounds := out.Bounds()
405 | maxX := in.Bounds().Dx() - 1
406 |
407 | for x := newBounds.Min.X; x < newBounds.Max.X; x++ {
408 | row := in.Pix[x*in.Stride:]
409 | for y := newBounds.Min.Y; y < newBounds.Max.Y; y++ {
410 | var p [3]float32
411 | var sum float32
412 | start := offset[y]
413 | ci := y * filterLength
414 | for i := 0; i < filterLength; i++ {
415 | if coeffs[ci+i] {
416 | xi := start + i
417 | switch {
418 | case uint(xi) < uint(maxX):
419 | xi *= 3
420 | case xi >= maxX:
421 | xi = 3 * maxX
422 | default:
423 | xi = 0
424 | }
425 | p[0] += float32(row[xi+0])
426 | p[1] += float32(row[xi+1])
427 | p[2] += float32(row[xi+2])
428 | sum++
429 | }
430 | }
431 |
432 | xo := (y-newBounds.Min.Y)*out.Stride + (x-newBounds.Min.X)*3
433 | out.Pix[xo+0] = floatToUint8(p[0] / sum)
434 | out.Pix[xo+1] = floatToUint8(p[1] / sum)
435 | out.Pix[xo+2] = floatToUint8(p[2] / sum)
436 | }
437 | }
438 | }
439 |
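
A note on the clamping trick used throughout this file: converting a negative signed value to an unsigned type wraps it to a very large number, so one unsigned comparison covers both the lower and the upper bound check. A small standalone illustration (not part of the package):

```go
package main

import "fmt"

// clamp8 mirrors the idea behind clampUint8 above: the single unsigned
// comparison accepts 0..255, while negatives wrap to huge values and fail it.
func clamp8(in int32) uint8 {
	if uint32(in) < 256 {
		return uint8(in)
	}
	if in > 255 {
		return 255
	}
	return 0
}

func main() {
	fmt.Println(uint32(int32(-1)))                   // 4294967295: negatives wrap high
	fmt.Println(clamp8(-7), clamp8(42), clamp8(300)) // 0 42 255
}
```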
--------------------------------------------------------------------------------
/vendor/github.com/nfnt/resize/filters.go:
--------------------------------------------------------------------------------
1 | /*
2 | Copyright (c) 2012, Jan Schlicht
3 |
4 | Permission to use, copy, modify, and/or distribute this software for any purpose
5 | with or without fee is hereby granted, provided that the above copyright notice
6 | and this permission notice appear in all copies.
7 |
8 | THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH
9 | REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND
10 | FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT,
11 | INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS
12 | OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
13 | TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF
14 | THIS SOFTWARE.
15 | */
16 |
17 | package resize
18 |
19 | import (
20 | "math"
21 | )
22 |
23 | func nearest(in float64) float64 {
24 | if in >= -0.5 && in < 0.5 {
25 | return 1
26 | }
27 | return 0
28 | }
29 |
30 | func linear(in float64) float64 {
31 | in = math.Abs(in)
32 | if in <= 1 {
33 | return 1 - in
34 | }
35 | return 0
36 | }
37 |
38 | func cubic(in float64) float64 {
39 | in = math.Abs(in)
40 | if in <= 1 {
41 | return in*in*(1.5*in-2.5) + 1.0
42 | }
43 | if in <= 2 {
44 | return in*(in*(2.5-0.5*in)-4.0) + 2.0
45 | }
46 | return 0
47 | }
48 |
49 | func mitchellnetravali(in float64) float64 {
50 | in = math.Abs(in)
51 | if in <= 1 {
52 | return (7.0*in*in*in - 12.0*in*in + 5.33333333333) * 0.16666666666
53 | }
54 | if in <= 2 {
55 | return (-2.33333333333*in*in*in + 12.0*in*in - 20.0*in + 10.6666666667) * 0.16666666666
56 | }
57 | return 0
58 | }
59 |
60 | func sinc(x float64) float64 {
61 | x = math.Abs(x) * math.Pi
62 | if x >= 1.220703e-4 {
63 | return math.Sin(x) / x
64 | }
65 | return 1
66 | }
67 |
68 | func lanczos2(in float64) float64 {
69 | if in > -2 && in < 2 {
70 | return sinc(in) * sinc(in*0.5)
71 | }
72 | return 0
73 | }
74 |
75 | func lanczos3(in float64) float64 {
76 | if in > -3 && in < 3 {
77 | return sinc(in) * sinc(in*0.3333333333333333)
78 | }
79 | return 0
80 | }
81 |
82 | // range [-256,256]
83 | func createWeights8(dy, filterLength int, blur, scale float64, kernel func(float64) float64) ([]int16, []int, int) {
84 | filterLength = filterLength * int(math.Max(math.Ceil(blur*scale), 1))
85 | filterFactor := math.Min(1./(blur*scale), 1)
86 |
87 | coeffs := make([]int16, dy*filterLength)
88 | start := make([]int, dy)
89 | for y := 0; y < dy; y++ {
90 | interpX := scale*(float64(y)+0.5) - 0.5
91 | start[y] = int(interpX) - filterLength/2 + 1
92 | interpX -= float64(start[y])
93 | for i := 0; i < filterLength; i++ {
94 | in := (interpX - float64(i)) * filterFactor
95 | coeffs[y*filterLength+i] = int16(kernel(in) * 256)
96 | }
97 | }
98 |
99 | return coeffs, start, filterLength
100 | }
101 |
102 | // range [-65536,65536]
103 | func createWeights16(dy, filterLength int, blur, scale float64, kernel func(float64) float64) ([]int32, []int, int) {
104 | filterLength = filterLength * int(math.Max(math.Ceil(blur*scale), 1))
105 | filterFactor := math.Min(1./(blur*scale), 1)
106 |
107 | coeffs := make([]int32, dy*filterLength)
108 | start := make([]int, dy)
109 | for y := 0; y < dy; y++ {
110 | interpX := scale*(float64(y)+0.5) - 0.5
111 | start[y] = int(interpX) - filterLength/2 + 1
112 | interpX -= float64(start[y])
113 | for i := 0; i < filterLength; i++ {
114 | in := (interpX - float64(i)) * filterFactor
115 | coeffs[y*filterLength+i] = int32(kernel(in) * 65536)
116 | }
117 | }
118 |
119 | return coeffs, start, filterLength
120 | }
121 |
122 | func createWeightsNearest(dy, filterLength int, blur, scale float64) ([]bool, []int, int) {
123 | filterLength = filterLength * int(math.Max(math.Ceil(blur*scale), 1))
124 | filterFactor := math.Min(1./(blur*scale), 1)
125 |
126 | coeffs := make([]bool, dy*filterLength)
127 | start := make([]int, dy)
128 | for y := 0; y < dy; y++ {
129 | interpX := scale*(float64(y)+0.5) - 0.5
130 | start[y] = int(interpX) - filterLength/2 + 1
131 | interpX -= float64(start[y])
132 | for i := 0; i < filterLength; i++ {
133 | in := (interpX - float64(i)) * filterFactor
134 | if in >= -0.5 && in < 0.5 {
135 | coeffs[y*filterLength+i] = true
136 | } else {
137 | coeffs[y*filterLength+i] = false
138 | }
139 | }
140 | }
141 |
142 | return coeffs, start, filterLength
143 | }
144 |
--------------------------------------------------------------------------------
/vendor/github.com/nfnt/resize/nearest.go:
--------------------------------------------------------------------------------
1 | /*
2 | Copyright (c) 2014, Charlie Vieth
3 |
4 | Permission to use, copy, modify, and/or distribute this software for any purpose
5 | with or without fee is hereby granted, provided that the above copyright notice
6 | and this permission notice appear in all copies.
7 |
8 | THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH
9 | REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND
10 | FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT,
11 | INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS
12 | OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
13 | TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF
14 | THIS SOFTWARE.
15 | */
16 |
17 | package resize
18 |
19 | import "image"
20 |
21 | func floatToUint8(x float32) uint8 {
22 | 	// Nearest-neighbor values are always positive,
23 | 	// so there is no need to check the lower bound.
24 | if x > 0xfe {
25 | return 0xff
26 | }
27 | return uint8(x)
28 | }
29 |
30 | func floatToUint16(x float32) uint16 {
31 | if x > 0xfffe {
32 | return 0xffff
33 | }
34 | return uint16(x)
35 | }
36 |
37 | func nearestGeneric(in image.Image, out *image.RGBA64, scale float64, coeffs []bool, offset []int, filterLength int) {
38 | newBounds := out.Bounds()
39 | maxX := in.Bounds().Dx() - 1
40 |
41 | for x := newBounds.Min.X; x < newBounds.Max.X; x++ {
42 | for y := newBounds.Min.Y; y < newBounds.Max.Y; y++ {
43 | var rgba [4]float32
44 | var sum float32
45 | start := offset[y]
46 | ci := y * filterLength
47 | for i := 0; i < filterLength; i++ {
48 | if coeffs[ci+i] {
49 | xi := start + i
50 | switch {
51 | case xi < 0:
52 | xi = 0
53 | case xi >= maxX:
54 | xi = maxX
55 | }
56 | r, g, b, a := in.At(xi+in.Bounds().Min.X, x+in.Bounds().Min.Y).RGBA()
57 | rgba[0] += float32(r)
58 | rgba[1] += float32(g)
59 | rgba[2] += float32(b)
60 | rgba[3] += float32(a)
61 | sum++
62 | }
63 | }
64 |
65 | offset := (y-newBounds.Min.Y)*out.Stride + (x-newBounds.Min.X)*8
66 | value := floatToUint16(rgba[0] / sum)
67 | out.Pix[offset+0] = uint8(value >> 8)
68 | out.Pix[offset+1] = uint8(value)
69 | value = floatToUint16(rgba[1] / sum)
70 | out.Pix[offset+2] = uint8(value >> 8)
71 | out.Pix[offset+3] = uint8(value)
72 | value = floatToUint16(rgba[2] / sum)
73 | out.Pix[offset+4] = uint8(value >> 8)
74 | out.Pix[offset+5] = uint8(value)
75 | value = floatToUint16(rgba[3] / sum)
76 | out.Pix[offset+6] = uint8(value >> 8)
77 | out.Pix[offset+7] = uint8(value)
78 | }
79 | }
80 | }
81 |
82 | func nearestRGBA(in *image.RGBA, out *image.RGBA, scale float64, coeffs []bool, offset []int, filterLength int) {
83 | newBounds := out.Bounds()
84 | maxX := in.Bounds().Dx() - 1
85 |
86 | for x := newBounds.Min.X; x < newBounds.Max.X; x++ {
87 | row := in.Pix[x*in.Stride:]
88 | for y := newBounds.Min.Y; y < newBounds.Max.Y; y++ {
89 | var rgba [4]float32
90 | var sum float32
91 | start := offset[y]
92 | ci := y * filterLength
93 | for i := 0; i < filterLength; i++ {
94 | if coeffs[ci+i] {
95 | xi := start + i
96 | switch {
97 | case uint(xi) < uint(maxX):
98 | xi *= 4
99 | case xi >= maxX:
100 | xi = 4 * maxX
101 | default:
102 | xi = 0
103 | }
104 | rgba[0] += float32(row[xi+0])
105 | rgba[1] += float32(row[xi+1])
106 | rgba[2] += float32(row[xi+2])
107 | rgba[3] += float32(row[xi+3])
108 | sum++
109 | }
110 | }
111 |
112 | xo := (y-newBounds.Min.Y)*out.Stride + (x-newBounds.Min.X)*4
113 | out.Pix[xo+0] = floatToUint8(rgba[0] / sum)
114 | out.Pix[xo+1] = floatToUint8(rgba[1] / sum)
115 | out.Pix[xo+2] = floatToUint8(rgba[2] / sum)
116 | out.Pix[xo+3] = floatToUint8(rgba[3] / sum)
117 | }
118 | }
119 | }
120 |
121 | func nearestNRGBA(in *image.NRGBA, out *image.NRGBA, scale float64, coeffs []bool, offset []int, filterLength int) {
122 | newBounds := out.Bounds()
123 | maxX := in.Bounds().Dx() - 1
124 |
125 | for x := newBounds.Min.X; x < newBounds.Max.X; x++ {
126 | row := in.Pix[x*in.Stride:]
127 | for y := newBounds.Min.Y; y < newBounds.Max.Y; y++ {
128 | var rgba [4]float32
129 | var sum float32
130 | start := offset[y]
131 | ci := y * filterLength
132 | for i := 0; i < filterLength; i++ {
133 | if coeffs[ci+i] {
134 | xi := start + i
135 | switch {
136 | case uint(xi) < uint(maxX):
137 | xi *= 4
138 | case xi >= maxX:
139 | xi = 4 * maxX
140 | default:
141 | xi = 0
142 | }
143 | rgba[0] += float32(row[xi+0])
144 | rgba[1] += float32(row[xi+1])
145 | rgba[2] += float32(row[xi+2])
146 | rgba[3] += float32(row[xi+3])
147 | sum++
148 | }
149 | }
150 |
151 | xo := (y-newBounds.Min.Y)*out.Stride + (x-newBounds.Min.X)*4
152 | out.Pix[xo+0] = floatToUint8(rgba[0] / sum)
153 | out.Pix[xo+1] = floatToUint8(rgba[1] / sum)
154 | out.Pix[xo+2] = floatToUint8(rgba[2] / sum)
155 | out.Pix[xo+3] = floatToUint8(rgba[3] / sum)
156 | }
157 | }
158 | }
159 |
160 | func nearestRGBA64(in *image.RGBA64, out *image.RGBA64, scale float64, coeffs []bool, offset []int, filterLength int) {
161 | newBounds := out.Bounds()
162 | maxX := in.Bounds().Dx() - 1
163 |
164 | for x := newBounds.Min.X; x < newBounds.Max.X; x++ {
165 | row := in.Pix[x*in.Stride:]
166 | for y := newBounds.Min.Y; y < newBounds.Max.Y; y++ {
167 | var rgba [4]float32
168 | var sum float32
169 | start := offset[y]
170 | ci := y * filterLength
171 | for i := 0; i < filterLength; i++ {
172 | if coeffs[ci+i] {
173 | xi := start + i
174 | switch {
175 | case uint(xi) < uint(maxX):
176 | xi *= 8
177 | case xi >= maxX:
178 | xi = 8 * maxX
179 | default:
180 | xi = 0
181 | }
182 | rgba[0] += float32(uint16(row[xi+0])<<8 | uint16(row[xi+1]))
183 | rgba[1] += float32(uint16(row[xi+2])<<8 | uint16(row[xi+3]))
184 | rgba[2] += float32(uint16(row[xi+4])<<8 | uint16(row[xi+5]))
185 | rgba[3] += float32(uint16(row[xi+6])<<8 | uint16(row[xi+7]))
186 | sum++
187 | }
188 | }
189 |
190 | xo := (y-newBounds.Min.Y)*out.Stride + (x-newBounds.Min.X)*8
191 | value := floatToUint16(rgba[0] / sum)
192 | out.Pix[xo+0] = uint8(value >> 8)
193 | out.Pix[xo+1] = uint8(value)
194 | value = floatToUint16(rgba[1] / sum)
195 | out.Pix[xo+2] = uint8(value >> 8)
196 | out.Pix[xo+3] = uint8(value)
197 | value = floatToUint16(rgba[2] / sum)
198 | out.Pix[xo+4] = uint8(value >> 8)
199 | out.Pix[xo+5] = uint8(value)
200 | value = floatToUint16(rgba[3] / sum)
201 | out.Pix[xo+6] = uint8(value >> 8)
202 | out.Pix[xo+7] = uint8(value)
203 | }
204 | }
205 | }
206 |
207 | func nearestNRGBA64(in *image.NRGBA64, out *image.NRGBA64, scale float64, coeffs []bool, offset []int, filterLength int) {
208 | newBounds := out.Bounds()
209 | maxX := in.Bounds().Dx() - 1
210 |
211 | for x := newBounds.Min.X; x < newBounds.Max.X; x++ {
212 | row := in.Pix[x*in.Stride:]
213 | for y := newBounds.Min.Y; y < newBounds.Max.Y; y++ {
214 | var rgba [4]float32
215 | var sum float32
216 | start := offset[y]
217 | ci := y * filterLength
218 | for i := 0; i < filterLength; i++ {
219 | if coeffs[ci+i] {
220 | xi := start + i
221 | switch {
222 | case uint(xi) < uint(maxX):
223 | xi *= 8
224 | case xi >= maxX:
225 | xi = 8 * maxX
226 | default:
227 | xi = 0
228 | }
229 | rgba[0] += float32(uint16(row[xi+0])<<8 | uint16(row[xi+1]))
230 | rgba[1] += float32(uint16(row[xi+2])<<8 | uint16(row[xi+3]))
231 | rgba[2] += float32(uint16(row[xi+4])<<8 | uint16(row[xi+5]))
232 | rgba[3] += float32(uint16(row[xi+6])<<8 | uint16(row[xi+7]))
233 | sum++
234 | }
235 | }
236 |
237 | xo := (y-newBounds.Min.Y)*out.Stride + (x-newBounds.Min.X)*8
238 | value := floatToUint16(rgba[0] / sum)
239 | out.Pix[xo+0] = uint8(value >> 8)
240 | out.Pix[xo+1] = uint8(value)
241 | value = floatToUint16(rgba[1] / sum)
242 | out.Pix[xo+2] = uint8(value >> 8)
243 | out.Pix[xo+3] = uint8(value)
244 | value = floatToUint16(rgba[2] / sum)
245 | out.Pix[xo+4] = uint8(value >> 8)
246 | out.Pix[xo+5] = uint8(value)
247 | value = floatToUint16(rgba[3] / sum)
248 | out.Pix[xo+6] = uint8(value >> 8)
249 | out.Pix[xo+7] = uint8(value)
250 | }
251 | }
252 | }
253 |
254 | func nearestGray(in *image.Gray, out *image.Gray, scale float64, coeffs []bool, offset []int, filterLength int) {
255 | newBounds := out.Bounds()
256 | maxX := in.Bounds().Dx() - 1
257 |
258 | for x := newBounds.Min.X; x < newBounds.Max.X; x++ {
259 | row := in.Pix[x*in.Stride:]
260 | for y := newBounds.Min.Y; y < newBounds.Max.Y; y++ {
261 | var gray float32
262 | var sum float32
263 | start := offset[y]
264 | ci := y * filterLength
265 | for i := 0; i < filterLength; i++ {
266 | if coeffs[ci+i] {
267 | xi := start + i
268 | switch {
269 | case xi < 0:
270 | xi = 0
271 | case xi >= maxX:
272 | xi = maxX
273 | }
274 | gray += float32(row[xi])
275 | sum++
276 | }
277 | }
278 |
279 | offset := (y-newBounds.Min.Y)*out.Stride + (x - newBounds.Min.X)
280 | out.Pix[offset] = floatToUint8(gray / sum)
281 | }
282 | }
283 | }
284 |
285 | func nearestGray16(in *image.Gray16, out *image.Gray16, scale float64, coeffs []bool, offset []int, filterLength int) {
286 | newBounds := out.Bounds()
287 | maxX := in.Bounds().Dx() - 1
288 |
289 | for x := newBounds.Min.X; x < newBounds.Max.X; x++ {
290 | row := in.Pix[x*in.Stride:]
291 | for y := newBounds.Min.Y; y < newBounds.Max.Y; y++ {
292 | var gray float32
293 | var sum float32
294 | start := offset[y]
295 | ci := y * filterLength
296 | for i := 0; i < filterLength; i++ {
297 | if coeffs[ci+i] {
298 | xi := start + i
299 | switch {
300 | case uint(xi) < uint(maxX):
301 | xi *= 2
302 | case xi >= maxX:
303 | xi = 2 * maxX
304 | default:
305 | xi = 0
306 | }
307 | gray += float32(uint16(row[xi+0])<<8 | uint16(row[xi+1]))
308 | sum++
309 | }
310 | }
311 |
312 | offset := (y-newBounds.Min.Y)*out.Stride + (x-newBounds.Min.X)*2
313 | value := floatToUint16(gray / sum)
314 | out.Pix[offset+0] = uint8(value >> 8)
315 | out.Pix[offset+1] = uint8(value)
316 | }
317 | }
318 | }
319 |
--------------------------------------------------------------------------------
/vendor/github.com/nfnt/resize/thumbnail.go:
--------------------------------------------------------------------------------
1 | /*
2 | Copyright (c) 2012, Jan Schlicht
3 |
4 | Permission to use, copy, modify, and/or distribute this software for any purpose
5 | with or without fee is hereby granted, provided that the above copyright notice
6 | and this permission notice appear in all copies.
7 |
8 | THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH
9 | REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND
10 | FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT,
11 | INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS
12 | OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
13 | TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF
14 | THIS SOFTWARE.
15 | */
16 |
17 | package resize
18 |
19 | import (
20 | "image"
21 | )
22 |
23 | // Thumbnail downscales the provided image to the maximum width and height,
24 | // preserving the original aspect ratio and using the interpolation function interp.
25 | // It returns the original image, without processing it, if its dimensions
26 | // already fit within the provided constraints.
27 | func Thumbnail(maxWidth, maxHeight uint, img image.Image, interp InterpolationFunction) image.Image {
28 | origBounds := img.Bounds()
29 | origWidth := uint(origBounds.Dx())
30 | origHeight := uint(origBounds.Dy())
31 | newWidth, newHeight := origWidth, origHeight
32 |
33 | 	// Return the original image if it is the same size as or smaller than the constraints
34 | if maxWidth >= origWidth && maxHeight >= origHeight {
35 | return img
36 | }
37 |
38 | // Preserve aspect ratio
39 | if origWidth > maxWidth {
40 | newHeight = uint(origHeight * maxWidth / origWidth)
41 | if newHeight < 1 {
42 | newHeight = 1
43 | }
44 | newWidth = maxWidth
45 | }
46 |
47 | if newHeight > maxHeight {
48 | newWidth = uint(newWidth * maxHeight / newHeight)
49 | if newWidth < 1 {
50 | newWidth = 1
51 | }
52 | newHeight = maxHeight
53 | }
54 | return Resize(newWidth, newHeight, img, interp)
55 | }
56 |
--------------------------------------------------------------------------------
/vendor/github.com/nfnt/resize/ycc.go:
--------------------------------------------------------------------------------
1 | /*
2 | Copyright (c) 2014, Charlie Vieth
3 |
4 | Permission to use, copy, modify, and/or distribute this software for any purpose
5 | with or without fee is hereby granted, provided that the above copyright notice
6 | and this permission notice appear in all copies.
7 |
8 | THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH
9 | REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND
10 | FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT,
11 | INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS
12 | OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
13 | TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF
14 | THIS SOFTWARE.
15 | */
16 |
17 | package resize
18 |
19 | import (
20 | "image"
21 | "image/color"
22 | )
23 |
24 | // ycc is an in-memory YCbCr image. The Y, Cb and Cr samples are held in a
25 | // single slice to increase resizing performance.
26 | type ycc struct {
27 | // Pix holds the image's pixels, in Y, Cb, Cr order. The pixel at
28 | // (x, y) starts at Pix[(y-Rect.Min.Y)*Stride + (x-Rect.Min.X)*3].
29 | Pix []uint8
30 | // Stride is the Pix stride (in bytes) between vertically adjacent pixels.
31 | Stride int
32 | // Rect is the image's bounds.
33 | Rect image.Rectangle
34 | // SubsampleRatio is the subsample ratio of the original YCbCr image.
35 | SubsampleRatio image.YCbCrSubsampleRatio
36 | }
37 |
38 | // PixOffset returns the index of the first element of Pix that corresponds to
39 | // the pixel at (x, y).
40 | func (p *ycc) PixOffset(x, y int) int {
41 | return (y-p.Rect.Min.Y)*p.Stride + (x-p.Rect.Min.X)*3
42 | }
43 |
44 | func (p *ycc) Bounds() image.Rectangle {
45 | return p.Rect
46 | }
47 |
48 | func (p *ycc) ColorModel() color.Model {
49 | return color.YCbCrModel
50 | }
51 |
52 | func (p *ycc) At(x, y int) color.Color {
53 | if !(image.Point{x, y}.In(p.Rect)) {
54 | return color.YCbCr{}
55 | }
56 | i := p.PixOffset(x, y)
57 | return color.YCbCr{
58 | p.Pix[i+0],
59 | p.Pix[i+1],
60 | p.Pix[i+2],
61 | }
62 | }
63 |
64 | func (p *ycc) Opaque() bool {
65 | return true
66 | }
67 |
68 | // SubImage returns an image representing the portion of the image p visible
69 | // through r. The returned value shares pixels with the original image.
70 | func (p *ycc) SubImage(r image.Rectangle) image.Image {
71 | r = r.Intersect(p.Rect)
72 | if r.Empty() {
73 | return &ycc{SubsampleRatio: p.SubsampleRatio}
74 | }
75 | i := p.PixOffset(r.Min.X, r.Min.Y)
76 | return &ycc{
77 | Pix: p.Pix[i:],
78 | Stride: p.Stride,
79 | Rect: r,
80 | SubsampleRatio: p.SubsampleRatio,
81 | }
82 | }
83 |
84 | // newYCC returns a new ycc with the given bounds and subsample ratio.
85 | func newYCC(r image.Rectangle, s image.YCbCrSubsampleRatio) *ycc {
86 | w, h := r.Dx(), r.Dy()
87 | buf := make([]uint8, 3*w*h)
88 | return &ycc{Pix: buf, Stride: 3 * w, Rect: r, SubsampleRatio: s}
89 | }
90 |
91 | // Copy of image.YCbCrSubsampleRatio constants - this allows us to support
92 | // older versions of Go where these constants are not defined (i.e. Go 1.4)
93 | const (
94 | ycbcrSubsampleRatio444 image.YCbCrSubsampleRatio = iota
95 | ycbcrSubsampleRatio422
96 | ycbcrSubsampleRatio420
97 | ycbcrSubsampleRatio440
98 | ycbcrSubsampleRatio411
99 | ycbcrSubsampleRatio410
100 | )
101 |
102 | // YCbCr converts ycc to a YCbCr image with the same subsample ratio
103 | // as the YCbCr image that ycc was generated from.
104 | func (p *ycc) YCbCr() *image.YCbCr {
105 | ycbcr := image.NewYCbCr(p.Rect, p.SubsampleRatio)
106 | switch ycbcr.SubsampleRatio {
107 | case ycbcrSubsampleRatio422:
108 | return p.ycbcr422(ycbcr)
109 | case ycbcrSubsampleRatio420:
110 | return p.ycbcr420(ycbcr)
111 | case ycbcrSubsampleRatio440:
112 | return p.ycbcr440(ycbcr)
113 | case ycbcrSubsampleRatio444:
114 | return p.ycbcr444(ycbcr)
115 | case ycbcrSubsampleRatio411:
116 | return p.ycbcr411(ycbcr)
117 | case ycbcrSubsampleRatio410:
118 | return p.ycbcr410(ycbcr)
119 | }
120 | return ycbcr
121 | }
122 |
123 | // imageYCbCrToYCC converts a YCbCr image to a ycc image for resizing.
124 | func imageYCbCrToYCC(in *image.YCbCr) *ycc {
125 | w, h := in.Rect.Dx(), in.Rect.Dy()
126 | p := ycc{
127 | Pix: make([]uint8, 3*w*h),
128 | Stride: 3 * w,
129 | Rect: image.Rect(0, 0, w, h),
130 | SubsampleRatio: in.SubsampleRatio,
131 | }
132 | switch in.SubsampleRatio {
133 | case ycbcrSubsampleRatio422:
134 | return convertToYCC422(in, &p)
135 | case ycbcrSubsampleRatio420:
136 | return convertToYCC420(in, &p)
137 | case ycbcrSubsampleRatio440:
138 | return convertToYCC440(in, &p)
139 | case ycbcrSubsampleRatio444:
140 | return convertToYCC444(in, &p)
141 | case ycbcrSubsampleRatio411:
142 | return convertToYCC411(in, &p)
143 | case ycbcrSubsampleRatio410:
144 | return convertToYCC410(in, &p)
145 | }
146 | return &p
147 | }
148 |
149 | func (p *ycc) ycbcr422(ycbcr *image.YCbCr) *image.YCbCr {
150 | var off int
151 | Pix := p.Pix
152 | Y := ycbcr.Y
153 | Cb := ycbcr.Cb
154 | Cr := ycbcr.Cr
155 | for y := 0; y < ycbcr.Rect.Max.Y-ycbcr.Rect.Min.Y; y++ {
156 | yy := y * ycbcr.YStride
157 | cy := y * ycbcr.CStride
158 | for x := 0; x < ycbcr.Rect.Max.X-ycbcr.Rect.Min.X; x++ {
159 | ci := cy + x/2
160 | Y[yy+x] = Pix[off+0]
161 | Cb[ci] = Pix[off+1]
162 | Cr[ci] = Pix[off+2]
163 | off += 3
164 | }
165 | }
166 | return ycbcr
167 | }
168 |
169 | func (p *ycc) ycbcr420(ycbcr *image.YCbCr) *image.YCbCr {
170 | var off int
171 | Pix := p.Pix
172 | Y := ycbcr.Y
173 | Cb := ycbcr.Cb
174 | Cr := ycbcr.Cr
175 | for y := 0; y < ycbcr.Rect.Max.Y-ycbcr.Rect.Min.Y; y++ {
176 | yy := y * ycbcr.YStride
177 | cy := (y / 2) * ycbcr.CStride
178 | for x := 0; x < ycbcr.Rect.Max.X-ycbcr.Rect.Min.X; x++ {
179 | ci := cy + x/2
180 | Y[yy+x] = Pix[off+0]
181 | Cb[ci] = Pix[off+1]
182 | Cr[ci] = Pix[off+2]
183 | off += 3
184 | }
185 | }
186 | return ycbcr
187 | }
188 |
189 | func (p *ycc) ycbcr440(ycbcr *image.YCbCr) *image.YCbCr {
190 | var off int
191 | Pix := p.Pix
192 | Y := ycbcr.Y
193 | Cb := ycbcr.Cb
194 | Cr := ycbcr.Cr
195 | for y := 0; y < ycbcr.Rect.Max.Y-ycbcr.Rect.Min.Y; y++ {
196 | yy := y * ycbcr.YStride
197 | cy := (y / 2) * ycbcr.CStride
198 | for x := 0; x < ycbcr.Rect.Max.X-ycbcr.Rect.Min.X; x++ {
199 | ci := cy + x
200 | Y[yy+x] = Pix[off+0]
201 | Cb[ci] = Pix[off+1]
202 | Cr[ci] = Pix[off+2]
203 | off += 3
204 | }
205 | }
206 | return ycbcr
207 | }
208 |
209 | func (p *ycc) ycbcr444(ycbcr *image.YCbCr) *image.YCbCr {
210 | var off int
211 | Pix := p.Pix
212 | Y := ycbcr.Y
213 | Cb := ycbcr.Cb
214 | Cr := ycbcr.Cr
215 | for y := 0; y < ycbcr.Rect.Max.Y-ycbcr.Rect.Min.Y; y++ {
216 | yy := y * ycbcr.YStride
217 | cy := y * ycbcr.CStride
218 | for x := 0; x < ycbcr.Rect.Max.X-ycbcr.Rect.Min.X; x++ {
219 | ci := cy + x
220 | Y[yy+x] = Pix[off+0]
221 | Cb[ci] = Pix[off+1]
222 | Cr[ci] = Pix[off+2]
223 | off += 3
224 | }
225 | }
226 | return ycbcr
227 | }
228 |
229 | func (p *ycc) ycbcr411(ycbcr *image.YCbCr) *image.YCbCr {
230 | var off int
231 | Pix := p.Pix
232 | Y := ycbcr.Y
233 | Cb := ycbcr.Cb
234 | Cr := ycbcr.Cr
235 | for y := 0; y < ycbcr.Rect.Max.Y-ycbcr.Rect.Min.Y; y++ {
236 | yy := y * ycbcr.YStride
237 | cy := y * ycbcr.CStride
238 | for x := 0; x < ycbcr.Rect.Max.X-ycbcr.Rect.Min.X; x++ {
239 | ci := cy + x/4
240 | Y[yy+x] = Pix[off+0]
241 | Cb[ci] = Pix[off+1]
242 | Cr[ci] = Pix[off+2]
243 | off += 3
244 | }
245 | }
246 | return ycbcr
247 | }
248 |
249 | func (p *ycc) ycbcr410(ycbcr *image.YCbCr) *image.YCbCr {
250 | var off int
251 | Pix := p.Pix
252 | Y := ycbcr.Y
253 | Cb := ycbcr.Cb
254 | Cr := ycbcr.Cr
255 | for y := 0; y < ycbcr.Rect.Max.Y-ycbcr.Rect.Min.Y; y++ {
256 | yy := y * ycbcr.YStride
257 | cy := (y / 2) * ycbcr.CStride
258 | for x := 0; x < ycbcr.Rect.Max.X-ycbcr.Rect.Min.X; x++ {
259 | ci := cy + x/4
260 | Y[yy+x] = Pix[off+0]
261 | Cb[ci] = Pix[off+1]
262 | Cr[ci] = Pix[off+2]
263 | off += 3
264 | }
265 | }
266 | return ycbcr
267 | }
268 |
269 | func convertToYCC422(in *image.YCbCr, p *ycc) *ycc {
270 | var off int
271 | Pix := p.Pix
272 | Y := in.Y
273 | Cb := in.Cb
274 | Cr := in.Cr
275 | for y := 0; y < in.Rect.Max.Y-in.Rect.Min.Y; y++ {
276 | yy := y * in.YStride
277 | cy := y * in.CStride
278 | for x := 0; x < in.Rect.Max.X-in.Rect.Min.X; x++ {
279 | ci := cy + x/2
280 | Pix[off+0] = Y[yy+x]
281 | Pix[off+1] = Cb[ci]
282 | Pix[off+2] = Cr[ci]
283 | off += 3
284 | }
285 | }
286 | return p
287 | }
288 |
289 | func convertToYCC420(in *image.YCbCr, p *ycc) *ycc {
290 | var off int
291 | Pix := p.Pix
292 | Y := in.Y
293 | Cb := in.Cb
294 | Cr := in.Cr
295 | for y := 0; y < in.Rect.Max.Y-in.Rect.Min.Y; y++ {
296 | yy := y * in.YStride
297 | cy := (y / 2) * in.CStride
298 | for x := 0; x < in.Rect.Max.X-in.Rect.Min.X; x++ {
299 | ci := cy + x/2
300 | Pix[off+0] = Y[yy+x]
301 | Pix[off+1] = Cb[ci]
302 | Pix[off+2] = Cr[ci]
303 | off += 3
304 | }
305 | }
306 | return p
307 | }
308 |
309 | func convertToYCC440(in *image.YCbCr, p *ycc) *ycc {
310 | var off int
311 | Pix := p.Pix
312 | Y := in.Y
313 | Cb := in.Cb
314 | Cr := in.Cr
315 | for y := 0; y < in.Rect.Max.Y-in.Rect.Min.Y; y++ {
316 | yy := y * in.YStride
317 | cy := (y / 2) * in.CStride
318 | for x := 0; x < in.Rect.Max.X-in.Rect.Min.X; x++ {
319 | ci := cy + x
320 | Pix[off+0] = Y[yy+x]
321 | Pix[off+1] = Cb[ci]
322 | Pix[off+2] = Cr[ci]
323 | off += 3
324 | }
325 | }
326 | return p
327 | }
328 |
329 | func convertToYCC444(in *image.YCbCr, p *ycc) *ycc {
330 | var off int
331 | Pix := p.Pix
332 | Y := in.Y
333 | Cb := in.Cb
334 | Cr := in.Cr
335 | for y := 0; y < in.Rect.Max.Y-in.Rect.Min.Y; y++ {
336 | yy := y * in.YStride
337 | cy := y * in.CStride
338 | for x := 0; x < in.Rect.Max.X-in.Rect.Min.X; x++ {
339 | ci := cy + x
340 | Pix[off+0] = Y[yy+x]
341 | Pix[off+1] = Cb[ci]
342 | Pix[off+2] = Cr[ci]
343 | off += 3
344 | }
345 | }
346 | return p
347 | }
348 |
349 | func convertToYCC411(in *image.YCbCr, p *ycc) *ycc {
350 | var off int
351 | Pix := p.Pix
352 | Y := in.Y
353 | Cb := in.Cb
354 | Cr := in.Cr
355 | for y := 0; y < in.Rect.Max.Y-in.Rect.Min.Y; y++ {
356 | yy := y * in.YStride
357 | cy := y * in.CStride
358 | for x := 0; x < in.Rect.Max.X-in.Rect.Min.X; x++ {
359 | ci := cy + x/4
360 | Pix[off+0] = Y[yy+x]
361 | Pix[off+1] = Cb[ci]
362 | Pix[off+2] = Cr[ci]
363 | off += 3
364 | }
365 | }
366 | return p
367 | }
368 |
369 | func convertToYCC410(in *image.YCbCr, p *ycc) *ycc {
370 | var off int
371 | Pix := p.Pix
372 | Y := in.Y
373 | Cb := in.Cb
374 | Cr := in.Cr
375 | for y := 0; y < in.Rect.Max.Y-in.Rect.Min.Y; y++ {
376 | yy := y * in.YStride
377 | cy := (y / 2) * in.CStride
378 | for x := 0; x < in.Rect.Max.X-in.Rect.Min.X; x++ {
379 | ci := cy + x/4
380 | Pix[off+0] = Y[yy+x]
381 | Pix[off+1] = Cb[ci]
382 | Pix[off+2] = Cr[ci]
383 | off += 3
384 | }
385 | }
386 | return p
387 | }
388 |
--------------------------------------------------------------------------------
/vendor/github.com/rivo/duplo/LICENSE.txt:
--------------------------------------------------------------------------------
1 | The MIT License (MIT)
2 |
3 | Copyright (c) 2015 Oliver Rivo
4 |
5 | Permission is hereby granted, free of charge, to any person obtaining a copy
6 | of this software and associated documentation files (the "Software"), to deal
7 | in the Software without restriction, including without limitation the rights
8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | copies of the Software, and to permit persons to whom the Software is
10 | furnished to do so, subject to the following conditions:
11 |
12 | The above copyright notice and this permission notice shall be included in all
13 | copies or substantial portions of the Software.
14 |
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | SOFTWARE.
22 |
--------------------------------------------------------------------------------
/vendor/github.com/rivo/duplo/README.md:
--------------------------------------------------------------------------------
1 | # Duplo - Detect Similar or Duplicate Images
2 |
3 | [GoDoc](https://godoc.org/github.com/rivo/duplo)
4 | [Go Report Card](https://goreportcard.com/report/github.com/rivo/duplo)
5 |
6 | This Go library allows you to perform a visual query on a set of images, returning the results in order of similarity. This lets you effectively detect duplicates even when they differ by minor modifications (e.g. some colour correction or watermarks).
7 |
8 | It is an implementation of [Fast Multiresolution Image Querying](http://grail.cs.washington.edu/projects/query/mrquery.pdf) by Jacobs et al. which uses truncated Haar wavelet transforms to create visual hashes of the images. The same method has previously been used in the [imgSeek](http://www.imgseek.net) software and the [retrievr](http://labs.systemone.at/retrievr) website.
9 |
10 | ## Installation
11 |
12 | ```
13 | go get github.com/rivo/duplo
14 | ```
15 |
16 | ## Usage
17 |
18 | ```go
19 | import "github.com/rivo/duplo"
20 |
21 | // Create an empty store.
22 | store := duplo.New()
23 |
24 | // Add image "img" to the store.
25 | hash, _ := duplo.CreateHash(img)
26 | store.Add("myimage", hash)
27 |
28 | // Query the store based on image "query".
29 | hash, _ = duplo.CreateHash(query)
30 | matches := store.Query(hash)
31 | sort.Sort(matches)
32 | // matches[0] is the best match.
33 | ```
34 |
35 | ## Documentation
36 |
37 | http://godoc.org/github.com/rivo/duplo
38 |
39 | ## Possible Applications
40 |
41 | * Identify copyright violations
42 | * Save disk space by detecting and removing duplicate images
43 | * Search for images by similarity
44 |
45 | ## Projects Using This Package
46 |
47 | * [imgdup2go](https://github.com/rif/imgdup2go): A visual image duplicate finder.
48 |
49 | ## More Information
50 |
51 | For more information, please go to http://rentafounder.com/find-similar-images-with-duplo/ or get in touch.
52 |
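
Since this repository vendors both duplo and nfnt/resize, a hedged end-to-end sketch of how the two might be combined is shown below. The file names, the 128px pre-hash resize width, and the overall flow are illustrative assumptions, not code taken from this plugin.

```go
package main

import (
	"fmt"
	"image/jpeg"
	"log"
	"os"
	"sort"

	"github.com/nfnt/resize"
	"github.com/rivo/duplo"
)

// loadHash decodes a JPEG, shrinks it to speed up hashing (128px wide is an
// arbitrary illustrative choice), and returns its perceptual hash.
func loadHash(path string) (duplo.Hash, error) {
	f, err := os.Open(path)
	if err != nil {
		return duplo.Hash{}, err
	}
	defer f.Close()
	img, err := jpeg.Decode(f)
	if err != nil {
		return duplo.Hash{}, err
	}
	small := resize.Resize(128, 0, img, resize.Lanczos3)
	hash, _ := duplo.CreateHash(small)
	return hash, nil
}

func main() {
	store := duplo.New()
	for _, path := range []string{"a.jpg", "b.jpg"} { // hypothetical files
		hash, err := loadHash(path)
		if err != nil {
			log.Fatal(err)
		}
		store.Add(path, hash)
	}

	queryHash, err := loadHash("query.jpg") // hypothetical query image
	if err != nil {
		log.Fatal(err)
	}
	matches := store.Query(queryHash)
	sort.Sort(matches)
	if len(matches) == 0 {
		fmt.Println("no matches")
		return
	}
	// As in the usage snippet above, matches[0] is the best match.
	fmt.Printf("best match: %+v\n", matches[0])
}
```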
--------------------------------------------------------------------------------
/vendor/github.com/rivo/duplo/candidate.go:
--------------------------------------------------------------------------------
1 | package duplo
2 |
3 | import (
4 | "github.com/rivo/duplo/haar"
5 | )
6 |
7 | // candidate represents an image in the store or, rather, a candidate to be
8 | // selected as the winner in a similarity query.
9 | type candidate struct {
10 | // id is the unique ID that identifies an image.
11 | id interface{}
12 |
13 | 	// scaleCoef is the scaling function coefficient, i.e. the coefficient at index
14 | // (0,0) of the Haar matrix.
15 | scaleCoef haar.Coef
16 |
17 | // ratio is image width / image height.
18 | ratio float64
19 |
20 | // The dHash bit vector (see Hash for more information).
21 | dHash [2]uint64
22 |
23 | // The histogram bit vector (see Hash for more information).
24 | histogram uint64
25 |
26 | // The histogram maximum (see Hash for more information).
27 | histoMax [3]float32
28 | }
29 |
--------------------------------------------------------------------------------
/vendor/github.com/rivo/duplo/doc.go:
--------------------------------------------------------------------------------
1 | /*
2 | Package duplo provides tools to efficiently query large sets of images for
3 | visual duplicates. The technique is based on the paper "Fast Multiresolution
4 | Image Querying" by Charles E. Jacobs, Adam Finkelstein, and David H. Salesin,
5 | with a few modifications and additions, such as a width-to-height ratio,
6 | the dHash metric by Dr. Neal Krawetz, as well as some
7 | histogram-based metrics.
8 |
9 | Querying the data structure will return a list of potential matches, sorted by
10 | the score described in the main paper. The user can make searching for
11 | duplicates stricter, however, by filtering based on the additional metrics.
12 | */
13 | package duplo
14 |
--------------------------------------------------------------------------------
/vendor/github.com/rivo/duplo/haar/haar.go:
--------------------------------------------------------------------------------
1 | /*
2 | Package haar provides a Haar wavelet function for bitmap images.
3 | */
4 | package haar
5 |
6 | import (
7 | "image"
8 | "image/color"
9 | "math"
10 | )
11 |
12 | // ColourChannels is the number of channels for one colour. We will be using
13 | // three colour channels per pixel at all times.
14 | const ColourChannels = 3
15 |
16 | // Coef is the union of coefficients for all channels of the original image.
17 | type Coef [ColourChannels]float64
18 |
19 | // Add adds another coefficient in place.
20 | func (coef *Coef) Add(offset Coef) {
21 | for index := range coef {
22 | coef[index] += offset[index]
23 | }
24 | }
25 |
26 | // Subtract subtracts another coefficient in place.
27 | func (coef *Coef) Subtract(offset Coef) {
28 | for index := range coef {
29 | coef[index] -= offset[index]
30 | }
31 | }
32 |
33 | // Divide divides all elements of the coefficient by a value, in place.
34 | func (coef *Coef) Divide(value float64) {
35 | factor := 1.0 / value
36 | for index := range coef {
37 | coef[index] *= factor // Slightly faster.
38 | }
39 | }
40 |
41 | // Matrix is the result of the Haar transform, a two-dimensional matrix of
42 | // coefficients.
43 | type Matrix struct {
44 | // Coefs is the slice of coefficients resulting from a forward 2D Haar
45 | // transform. The position of a coefficient (x,y) is (y * Width + x).
46 | Coefs []Coef
47 |
48 | // The number of columns in the matrix.
49 | Width uint
50 |
51 | // The number of rows in the matrix.
52 | Height uint
53 | }
54 |
55 | // colorToCoef converts a native Color type into a YIQ Coef. We are using
56 | // YIQ because we only have weights for them. (Apart from the score weights,
57 | // the store is built to handle differently sized Coefs, so any length may be
58 | // returned.)
59 | func colorToCoef(gen color.Color) Coef {
60 | // Convert into YIQ. (We may want to convert from YCbCr directly one day.)
61 | r32, g32, b32, _ := gen.RGBA()
62 | r, g, b := float64(r32>>8), float64(g32>>8), float64(b32>>8)
63 | return Coef{
64 | (0.299900*r + 0.587000*g + 0.114000*b) / 0x100,
65 | (0.595716*r - 0.274453*g - 0.321263*b) / 0x100,
66 | (0.211456*r - 0.522591*g + 0.311135*b) / 0x100}
67 | }
68 |
69 | // Transform performs a forward 2D Haar transform on the provided image after
70 | // converting it to YIQ space.
71 | func Transform(img image.Image) Matrix {
72 | bounds := img.Bounds()
73 | width := bounds.Max.X - bounds.Min.X
74 | height := bounds.Max.Y - bounds.Min.Y
75 | if width > 2 {
76 | // We can't handle odd widths.
77 | width = width &^ 1
78 | }
79 | if height > 2 {
80 | // We can't handle odd heights.
81 | height = height &^ 1
82 | }
83 | matrix := Matrix{
84 | Coefs: make([]Coef, width*height),
85 | Width: uint(width),
86 | Height: uint(height)}
87 |
88 | // Convert colours to coefficients.
89 | for row := bounds.Min.Y; row < bounds.Min.Y+height; row++ {
90 | for column := bounds.Min.X; column < bounds.Min.X+width; column++ {
91 | matrix.Coefs[(row-bounds.Min.Y)*width+(column-bounds.Min.X)] = colorToCoef(img.At(column, row))
92 | }
93 | }
94 |
95 | // Apply 1D Haar transform on rows.
96 | tempRow := make([]Coef, width)
97 | for row := 0; row < height; row++ {
98 | for step := width / 2; step >= 1; step /= 2 {
99 | for column := 0; column < step; column++ {
100 | high := matrix.Coefs[row*width+2*column]
101 | low := high
102 | offset := matrix.Coefs[row*width+2*column+1]
103 | high.Add(offset)
104 | low.Subtract(offset)
105 | high.Divide(math.Sqrt2)
106 | low.Divide(math.Sqrt2)
107 | tempRow[column] = high
108 | tempRow[column+step] = low
109 | }
110 | for column := 0; column < width; column++ {
111 | matrix.Coefs[row*width+column] = tempRow[column]
112 | }
113 | }
114 | }
115 |
116 | // Apply 1D Haar transform on columns.
117 | tempColumn := make([]Coef, height)
118 | for column := 0; column < width; column++ {
119 | for step := height / 2; step >= 1; step /= 2 {
120 | for row := 0; row < step; row++ {
121 | high := matrix.Coefs[(2*row)*width+column]
122 | low := high
123 | offset := matrix.Coefs[(2*row+1)*width+column]
124 | high.Add(offset)
125 | low.Subtract(offset)
126 | high.Divide(math.Sqrt2)
127 | low.Divide(math.Sqrt2)
128 | tempColumn[row] = high
129 | tempColumn[row+step] = low
130 | }
131 | for row := 0; row < height; row++ {
132 | matrix.Coefs[row*width+column] = tempColumn[row]
133 | }
134 | }
135 | }
136 |
137 | return matrix
138 | }
139 |
--------------------------------------------------------------------------------
/vendor/github.com/rivo/duplo/hamming.go:
--------------------------------------------------------------------------------
1 | package duplo
2 |
3 | const (
4 | m1 = 0x5555555555555555 //binary: 0101...
5 | m2 = 0x3333333333333333 //binary: 00110011..
6 | m4 = 0x0f0f0f0f0f0f0f0f //binary: 4 zeros, 4 ones ...
7 | m8 = 0x00ff00ff00ff00ff //binary: 8 zeros, 8 ones ...
8 | m16 = 0x0000ffff0000ffff //binary: 16 zeros, 16 ones ...
9 | m32 = 0x00000000ffffffff //binary: 32 zeros, 32 ones
10 | hff = 0xffffffffffffffff //binary: all ones
11 | h01 = 0x0101010101010101 //the sum of 256 to the power of 0,1,2,3...
12 | )
13 |
14 | // hammingDistance calculates the hamming distance between two 64-bit values.
15 | // The implementation is based on the code found on:
16 | // http://en.wikipedia.org/wiki/Hamming_weight#Efficient_implementation
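// For example, hammingDistance(0x0F, 0xF0) == 8, since the two values differ in all eight low-order bits.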
17 | func hammingDistance(left, right uint64) int {
18 | x := left ^ right
19 | x -= (x >> 1) & m1 //put count of each 2 bits into those 2 bits
20 | x = (x & m2) + ((x >> 2) & m2) //put count of each 4 bits into those 4 bits
21 | x = (x + (x >> 4)) & m4 //put count of each 8 bits into those 8 bits
22 | return int((x * h01) >> 56) //returns left 8 bits of x + (x<<8) + (x<<16) + (x<<24) + ...
23 | }
24 |
--------------------------------------------------------------------------------
/vendor/github.com/rivo/duplo/hash.go:
--------------------------------------------------------------------------------
1 | package duplo
2 |
3 | import (
4 | "image"
5 | "image/color"
6 | "math"
7 | "math/rand"
8 | "sort"
9 |
10 | "github.com/nfnt/resize"
11 | "github.com/rivo/duplo/haar"
12 | )
13 |
14 | // Hash represents the visual hash of an image.
15 | type Hash struct {
16 | haar.Matrix
17 |
18 | 	// Thresholds contains the coefficient thresholds. If you discard all
19 | // coefficients with abs(coef) < threshold, you end up with TopCoefs
20 | // coefficients.
21 | Thresholds haar.Coef
22 |
23 | // Ratio is image width / image height or 0 if height is 0.
24 | Ratio float64
25 |
26 | 	// DHash is a 128-bit vector where each bit value depends on the monotonicity
27 | 	// of two adjacent pixels. The first 64 bits are based on an 8x8 version of
28 | 	// the Y colour channel. The other two 32-bit halves are each based on an 8x4
29 | 	// version of the Cb and Cr colour channels, respectively.
30 | DHash [2]uint64
31 |
32 | 	// Histogram is a histogram quantized into 64 bits (32 for Y and 16 each for
33 | 	// Cb and Cr). A bit is set to 1 if the intensity's occurrence count is larger
34 | 	// than the median (for that colour channel) and set to 0 otherwise.
35 | Histogram uint64
36 |
37 | // HistoMax is the maximum value of the histogram (for each channel Y, Cb,
38 | // and Cr).
39 | HistoMax [3]float32
40 | }
41 |
42 | // CreateHash calculates and returns the visual hash of the provided image as
43 | // well as a resized version of it (ImageScale x ImageScale) which may be
44 | // ignored if not needed anymore.
45 | func CreateHash(img image.Image) (Hash, image.Image) {
46 | // Determine image ratio.
47 | bounds := img.Bounds()
48 | width := bounds.Max.X - bounds.Min.X
49 | height := bounds.Max.Y - bounds.Min.Y
50 | var ratio float64
51 | if height > 0 {
52 | ratio = float64(width) / float64(height)
53 | }
54 |
55 | // Resize the image for the Wavelet transform.
56 | scaled := resize.Resize(ImageScale, ImageScale, img, resize.Bicubic)
57 |
58 | // Then perform a 2D Haar Wavelet transform.
59 | matrix := haar.Transform(scaled)
60 |
61 | // Find the kth largest coefficients for each colour channel.
62 | thresholds := coefThresholds(matrix.Coefs, TopCoefs)
63 |
64 | // Create the dHash bit vector.
65 | d := dHash(img)
66 |
67 | // Create histogram bit vector.
68 | h, hm := histogram(img)
69 |
70 | return Hash{haar.Matrix{
71 | Coefs: matrix.Coefs,
72 | Width: ImageScale,
73 | Height: ImageScale,
74 | }, thresholds, ratio, d, h, hm}, scaled
75 | }
76 |
77 | // coefThreshold returns, for the given coefficients, the kth largest absolute
78 | // value. Only the nth element in each Coef is considered. If you discard all
79 | // values v with abs(v) < threshold, you will end up with k values.
80 | func coefThreshold(coefs []haar.Coef, k int, n int) float64 {
81 | // It's the QuickSelect algorithm.
82 | randomIndex := rand.Intn(len(coefs))
83 | pivot := math.Abs(coefs[randomIndex][n])
84 | leftCoefs := make([]haar.Coef, 0, len(coefs))
85 | rightCoefs := make([]haar.Coef, 0, len(coefs))
86 |
87 | for _, coef := range coefs {
88 | if math.Abs(coef[n]) > pivot {
89 | leftCoefs = append(leftCoefs, coef)
90 | } else if math.Abs(coef[n]) < pivot {
91 | rightCoefs = append(rightCoefs, coef)
92 | }
93 | }
94 |
95 | if k <= len(leftCoefs) {
96 | return coefThreshold(leftCoefs, k, n)
97 | } else if k > len(coefs)-len(rightCoefs) {
98 | return coefThreshold(rightCoefs, k-(len(coefs)-len(rightCoefs)), n)
99 | } else {
100 | return pivot
101 | }
102 | }
103 |
104 | // coefThresholds returns, for the given coefficients, the kth largest absolute
105 | // values per colour channel. If you discard all values v with
106 | // abs(v) < threshold, you will end up with k values.
107 | func coefThresholds(coefs []haar.Coef, k int) haar.Coef {
108 | // No data, no thresholds.
109 | if len(coefs) == 0 {
110 | return haar.Coef{}
111 | }
112 |
113 | // Select thresholds.
114 | var thresholds haar.Coef
115 | for index := range thresholds {
116 | thresholds[index] = coefThreshold(coefs, k, index)
117 | }
118 |
119 | return thresholds
120 | }
121 |
122 | // ycbcr returns the YCbCr values for the given colour, converting to them if
123 | // necessary.
124 | func ycbcr(colour color.Color) (y, cb, cr uint8) {
125 | switch spec := colour.(type) {
126 | case color.YCbCr:
127 | return spec.Y, spec.Cb, spec.Cr
128 | default:
129 | r, g, b, _ := colour.RGBA()
130 | return color.RGBToYCbCr(uint8(r), uint8(g), uint8(b))
131 | }
132 | }
133 |
134 | // dHash computes a 128-bit vector by comparing adjacent pixels of a downsized
135 | // version of img. The first 64 bits correspond to an 8x8 version of the Y colour
136 | // channel. A bit is set to 1 if a pixel value is higher than that of its left
137 | // neighbour (the first bit is 1 if its colour value is > 0.5). The other two
138 | // 32-bit halves correspond to the Cb and Cr colour channels, based on an 8x4
139 | // version each.
140 | func dHash(img image.Image) (bits [2]uint64) {
141 | 	// Resize the image to 8x8.
142 | scaled := resize.Resize(8, 8, img, resize.Bicubic)
143 |
144 | // Scan it.
145 | yPos := uint(0)
146 | cbPos := uint(0)
147 | crPos := uint(32)
148 | for y := 0; y < 8; y++ {
149 | for x := 0; x < 8; x++ {
150 | yTR, cbTR, crTR := ycbcr(scaled.At(x, y))
151 | if x == 0 {
152 | // The first bit is a rough approximation of the colour value.
153 | if yTR&0x80 > 0 {
154 | bits[0] |= 1 << yPos
155 | yPos++
156 | }
157 | if y&1 == 0 {
158 | _, cbBR, crBR := ycbcr(scaled.At(x, y+1))
159 | if (cbBR+cbTR)>>1&0x80 > 0 {
160 | bits[1] |= 1 << cbPos
161 | cbPos++
162 | }
163 | if (crBR+crTR)>>1&0x80 > 0 {
164 | bits[1] |= 1 << crPos
165 | crPos++
166 | }
167 | }
168 | } else {
169 | // Use a rough first derivative for the other bits.
170 | yTL, cbTL, crTL := ycbcr(scaled.At(x-1, y))
171 | if yTR > yTL {
172 | bits[0] |= 1 << yPos
173 | yPos++
174 | }
175 | if y&1 == 0 {
176 | _, cbBR, crBR := ycbcr(scaled.At(x, y+1))
177 | _, cbBL, crBL := ycbcr(scaled.At(x-1, y+1))
178 | if (cbBR+cbTR)>>1 > (cbBL+cbTL)>>1 {
179 | bits[1] |= 1 << cbPos
180 | cbPos++
181 | }
182 | if (crBR+crTR)>>1 > (crBL+crTL)>>1 {
183 | bits[1] |= 1 << crPos
184 | crPos++
185 | }
186 | }
187 | }
188 | }
189 | }
190 |
191 | return
192 | }
193 |
194 | // histogram calculates a histogram based on the YCbCr values of img and returns
195 | // a rough approximation of it in 64 bits. For each colour channel, a bit is
196 | // set if a histogram value is greater than the median. The Y channel gets 32
197 | // bits, the Cb and Cr values each get 16 bits.
198 | func histogram(img image.Image) (bits uint64, histoMax [3]float32) {
199 | h := new([64]int)
200 |
201 | // Create histogram.
202 | bounds := img.Bounds()
203 | for y := bounds.Min.Y; y < bounds.Max.Y; y++ {
204 | for x := bounds.Min.X; x < bounds.Max.X; x++ {
205 | y, cb, cr := ycbcr(img.At(x, y))
206 | h[y>>3]++
207 | h[32+cb>>4]++
208 | h[48+cr>>4]++
209 | }
210 | }
211 |
212 | // Calculate medians and maximums.
213 | median := func(v []int) (int, float32) {
214 | sorted := make([]int, len(v))
215 | copy(sorted, v)
216 | sort.Ints(sorted)
217 | return sorted[len(v)/2], float32(sorted[len(v)-1]) /
218 | float32((bounds.Max.X-bounds.Min.X)*(bounds.Max.Y-bounds.Min.Y))
219 | }
220 | my, yMax := median(h[:32])
221 | mcb, cbMax := median(h[32:48])
222 | mcr, crMax := median(h[48:])
223 | histoMax[0] = yMax
224 | histoMax[1] = cbMax
225 | histoMax[2] = crMax
226 |
227 | // Quantize histogram.
228 | for index, value := range h {
229 | if index < 32 {
230 | if value > my {
231 | bits |= 1 << uint(index)
232 | }
233 | } else if index < 48 {
234 | if value > mcb {
235 | bits |= 1 << uint(index-32)
236 | }
237 | } else {
238 | if value > mcr {
239 | bits |= 1 << uint(index-32)
240 | }
241 | }
242 | }
243 |
244 | return
245 | }
246 |
--------------------------------------------------------------------------------
/vendor/github.com/rivo/duplo/match.go:
--------------------------------------------------------------------------------
1 | package duplo
2 |
3 | import (
4 | "fmt"
5 | )
6 |
7 | // Match represents an image matched by a similarity query.
8 | type Match struct {
9 | 	// The ID of the matched image, as specified in the Store.Add() function.
10 | ID interface{}
11 |
12 | // The score calculated during the similarity query. The lower, the better
13 | // the match.
14 | Score float64
15 |
16 | // The absolute difference between the two image ratios' log values.
17 | RatioDiff float64
18 |
19 | // The hamming distance between the two dHash bit vectors.
20 | DHashDistance int
21 |
22 | // The hamming distance between the two histogram bit vectors.
23 | HistogramDistance int
24 | }
25 |
26 | // Matches is a slice of match results.
27 | type Matches []*Match
28 |
29 | func (m Matches) Len() int { return len(m) }
30 | func (m Matches) Swap(i, j int) { m[i], m[j] = m[j], m[i] }
31 | func (m Matches) Less(i, j int) bool { return m[j] == nil || (m[i] != nil && m[i].Score < m[j].Score) }
32 |
33 | func (m *Match) String() string {
34 | return fmt.Sprintf("%s: score=%.4f, ratio-diff=%.1f, dHash-dist=%d, histDist=%d",
35 | m.ID, m.Score, m.RatioDiff, m.DHashDistance, m.HistogramDistance)
36 | }
37 |
--------------------------------------------------------------------------------
/vendor/github.com/shurcooL/graphql/.travis.yml:
--------------------------------------------------------------------------------
1 | sudo: false
2 | language: go
3 | go:
4 | - 1.x
5 | - master
6 | matrix:
7 | allow_failures:
8 | - go: master
9 | fast_finish: true
10 | install:
11 | - # Do nothing. This is needed to prevent default install action "go get -t -v ./..." from happening here (we want it to happen inside script step).
12 | script:
13 | - go get -t -v ./...
14 | - diff -u <(echo -n) <(gofmt -d -s .)
15 | - go tool vet .
16 | - go test -v -race ./...
17 |
--------------------------------------------------------------------------------
/vendor/github.com/shurcooL/graphql/LICENSE:
--------------------------------------------------------------------------------
1 | MIT License
2 |
3 | Copyright (c) 2017 Dmitri Shuralyov
4 |
5 | Permission is hereby granted, free of charge, to any person obtaining a copy
6 | of this software and associated documentation files (the "Software"), to deal
7 | in the Software without restriction, including without limitation the rights
8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | copies of the Software, and to permit persons to whom the Software is
10 | furnished to do so, subject to the following conditions:
11 |
12 | The above copyright notice and this permission notice shall be included in all
13 | copies or substantial portions of the Software.
14 |
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | SOFTWARE.
22 |
--------------------------------------------------------------------------------
/vendor/github.com/shurcooL/graphql/README.md:
--------------------------------------------------------------------------------
1 | graphql
2 | =======
3 |
4 | [](https://travis-ci.org/shurcooL/graphql) [](https://godoc.org/github.com/shurcooL/graphql)
5 |
6 | Package `graphql` provides a GraphQL client implementation.
7 |
8 | For more information, see package [`github.com/shurcooL/githubv4`](https://github.com/shurcooL/githubv4), which is a specialized version targeting GitHub GraphQL API v4. That package is driving the feature development.
9 |
10 | **Status:** In active early research and development. The API will change when opportunities for improvement are discovered; it is not yet frozen.
11 |
12 | Installation
13 | ------------
14 |
15 | `graphql` requires Go version 1.8 or later.
16 |
17 | ```bash
18 | go get -u github.com/shurcooL/graphql
19 | ```
20 |
21 | Usage
22 | -----
23 |
24 | Construct a GraphQL client, specifying the GraphQL server URL. Then, you can use it to make GraphQL queries and mutations.
25 |
26 | ```Go
27 | client := graphql.NewClient("https://example.com/graphql", nil)
28 | // Use client...
29 | ```
30 |
31 | ### Authentication
32 |
33 | Some GraphQL servers may require authentication. The `graphql` package does not directly handle authentication. Instead, when creating a new client, you're expected to pass an `http.Client` that performs authentication. The easiest and recommended way to do this is to use the [`golang.org/x/oauth2`](https://golang.org/x/oauth2) package. You'll need an OAuth token with the right scopes. Then:
34 |
35 | ```Go
36 | import "golang.org/x/oauth2"
37 |
38 | func main() {
39 | src := oauth2.StaticTokenSource(
40 | &oauth2.Token{AccessToken: os.Getenv("GRAPHQL_TOKEN")},
41 | )
42 | httpClient := oauth2.NewClient(context.Background(), src)
43 |
44 | client := graphql.NewClient("https://example.com/graphql", httpClient)
45 | 	// Use client...
}
46 | ```
47 |
48 | ### Simple Query
49 |
50 | To make a GraphQL query, you need to define a corresponding Go type.
51 |
52 | For example, to make the following GraphQL query:
53 |
54 | ```GraphQL
55 | query {
56 | me {
57 | name
58 | }
59 | }
60 | ```
61 |
62 | You can define this variable:
63 |
64 | ```Go
65 | var query struct {
66 | Me struct {
67 | Name graphql.String
68 | }
69 | }
70 | ```
71 |
72 | Then call `client.Query`, passing a pointer to it:
73 |
74 | ```Go
75 | err := client.Query(context.Background(), &query, nil)
76 | if err != nil {
77 | // Handle error.
78 | }
79 | fmt.Println(query.Me.Name)
80 |
81 | // Output: Luke Skywalker
82 | ```
83 |
84 | ### Arguments and Variables
85 |
86 | Often, you'll want to specify arguments on some fields. You can use the `graphql` struct field tag for this.
87 |
88 | For example, to make the following GraphQL query:
89 |
90 | ```GraphQL
91 | {
92 | human(id: "1000") {
93 | name
94 | height(unit: METER)
95 | }
96 | }
97 | ```
98 |
99 | You can define this variable:
100 |
101 | ```Go
102 | var q struct {
103 | Human struct {
104 | Name graphql.String
105 | Height graphql.Float `graphql:"height(unit: METER)"`
106 | } `graphql:"human(id: \"1000\")"`
107 | }
108 | ```
109 |
110 | Then call `client.Query`:
111 |
112 | ```Go
113 | err := client.Query(context.Background(), &q, nil)
114 | if err != nil {
115 | // Handle error.
116 | }
117 | fmt.Println(q.Human.Name)
118 | fmt.Println(q.Human.Height)
119 |
120 | // Output:
121 | // Luke Skywalker
122 | // 1.72
123 | ```
124 |
125 | However, that'll only work if the arguments are constant and known in advance. Otherwise, you will need to make use of variables. Replace the constants in the struct field tag with variable names:
126 |
127 | ```Go
128 | var q struct {
129 | Human struct {
130 | Name graphql.String
131 | Height graphql.Float `graphql:"height(unit: $unit)"`
132 | } `graphql:"human(id: $id)"`
133 | }
134 | ```
135 |
136 | Then, define a `variables` map with their values:
137 |
138 | ```Go
139 | variables := map[string]interface{}{
140 | "id": graphql.ID(id),
141 | "unit": starwars.LengthUnit("METER"),
142 | }
143 | ```
144 |
145 | Finally, call `client.Query` providing `variables`:
146 |
147 | ```Go
148 | err := client.Query(context.Background(), &q, variables)
149 | if err != nil {
150 | // Handle error.
151 | }
152 | ```
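
As an aside, grounded in how this package constructs queries: variables backed by Go value types are declared as required GraphQL types with a trailing `!`, while variables backed by pointer types, such as values built with the `graphql.NewBoolean` helper, are declared as optional types without it. A small illustrative sketch (the variable names are made up for this example):

```Go
variables := map[string]interface{}{
	"first": graphql.Int(10),          // value type, declared as $first:Int!
	"draft": graphql.NewBoolean(true), // pointer type, declared as $draft:Boolean
}
```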
153 |
154 | ### Inline Fragments
155 |
156 | Some GraphQL queries contain inline fragments. You can use the `graphql` struct field tag to express them.
157 |
158 | For example, to make the following GraphQL query:
159 |
160 | ```GraphQL
161 | {
162 | hero(episode: "JEDI") {
163 | name
164 | ... on Droid {
165 | primaryFunction
166 | }
167 | ... on Human {
168 | height
169 | }
170 | }
171 | }
172 | ```
173 |
174 | You can define this variable:
175 |
176 | ```Go
177 | var q struct {
178 | Hero struct {
179 | Name graphql.String
180 | Droid struct {
181 | PrimaryFunction graphql.String
182 | } `graphql:"... on Droid"`
183 | Human struct {
184 | Height graphql.Float
185 | } `graphql:"... on Human"`
186 | } `graphql:"hero(episode: \"JEDI\")"`
187 | }
188 | ```
189 |
190 | Alternatively, you can define the struct types corresponding to inline fragments, and use them as embedded fields in your query:
191 |
192 | ```Go
193 | type (
194 | DroidFragment struct {
195 | PrimaryFunction graphql.String
196 | }
197 | HumanFragment struct {
198 | Height graphql.Float
199 | }
200 | )
201 |
202 | var q struct {
203 | Hero struct {
204 | Name graphql.String
205 | DroidFragment `graphql:"... on Droid"`
206 | HumanFragment `graphql:"... on Human"`
207 | } `graphql:"hero(episode: \"JEDI\")"`
208 | }
209 | ```
210 |
211 | Then call `client.Query`:
212 |
213 | ```Go
214 | err := client.Query(context.Background(), &q, nil)
215 | if err != nil {
216 | // Handle error.
217 | }
218 | fmt.Println(q.Hero.Name)
219 | fmt.Println(q.Hero.PrimaryFunction)
220 | fmt.Println(q.Hero.Height)
221 |
222 | // Output:
223 | // R2-D2
224 | // Astromech
225 | // 0
226 | ```
227 |
228 | ### Mutations
229 |
230 | Mutations often require information that you can only find out by performing a query first. Let's suppose you've already done that.
231 |
232 | For example, to make the following GraphQL mutation:
233 |
234 | ```GraphQL
235 | mutation($ep: Episode!, $review: ReviewInput!) {
236 | createReview(episode: $ep, review: $review) {
237 | stars
238 | commentary
239 | }
240 | }
241 | variables {
242 | "ep": "JEDI",
243 | "review": {
244 | "stars": 5,
245 | "commentary": "This is a great movie!"
246 | }
247 | }
248 | ```
249 |
250 | You can define:
251 |
252 | ```Go
253 | var m struct {
254 | CreateReview struct {
255 | Stars graphql.Int
256 | Commentary graphql.String
257 | } `graphql:"createReview(episode: $ep, review: $review)"`
258 | }
259 | variables := map[string]interface{}{
260 | "ep": starwars.Episode("JEDI"),
261 | "review": starwars.ReviewInput{
262 | Stars: graphql.Int(5),
263 | Commentary: graphql.String("This is a great movie!"),
264 | },
265 | }
266 | ```
267 |
268 | Then call `client.Mutate`:
269 |
270 | ```Go
271 | err := client.Mutate(context.Background(), &m, variables)
272 | if err != nil {
273 | // Handle error.
274 | }
275 | fmt.Printf("Created a %v star review: %v\n", m.CreateReview.Stars, m.CreateReview.Commentary)
276 |
277 | // Output:
278 | // Created a 5 star review: This is a great movie!
279 | ```
280 |
281 | Directories
282 | -----------
283 |
284 | | Path | Synopsis |
285 | |----------------------------------------------------------------------------------------|-----------------------------------------------------------------------------------------------------------------|
286 | | [example/graphqldev](https://godoc.org/github.com/shurcooL/graphql/example/graphqldev)  | graphqldev is a test program currently being used for developing the graphql package.                            |
287 | | [ident](https://godoc.org/github.com/shurcooL/graphql/ident)                            | Package ident provides functions for parsing and converting identifier names between various naming conventions. |
288 | | [internal/jsonutil](https://godoc.org/github.com/shurcooL/graphql/internal/jsonutil) | Package jsonutil provides a function for decoding JSON into a GraphQL query data structure. |
289 |
290 | License
291 | -------
292 |
293 | - [MIT License](LICENSE)
294 |
--------------------------------------------------------------------------------
/vendor/github.com/shurcooL/graphql/doc.go:
--------------------------------------------------------------------------------
1 | // Package graphql provides a GraphQL client implementation.
2 | //
3 | // For more information, see package github.com/shurcooL/githubv4,
4 | // which is a specialized version targeting GitHub GraphQL API v4.
5 | // That package is driving the feature development.
6 | //
7 | // Status: In active early research and development. The API will change when
8 | // opportunities for improvement are discovered; it is not yet frozen.
9 | //
10 | // For now, see README for more details.
11 | package graphql // import "github.com/shurcooL/graphql"
12 |
--------------------------------------------------------------------------------
/vendor/github.com/shurcooL/graphql/graphql.go:
--------------------------------------------------------------------------------
1 | package graphql
2 |
3 | import (
4 | "bytes"
5 | "context"
6 | "encoding/json"
7 | "fmt"
8 | "io/ioutil"
9 | "net/http"
10 |
11 | "github.com/shurcooL/graphql/internal/jsonutil"
12 | "golang.org/x/net/context/ctxhttp"
13 | )
14 |
15 | // Client is a GraphQL client.
16 | type Client struct {
17 | url string // GraphQL server URL.
18 | httpClient *http.Client
19 | }
20 |
21 | // NewClient creates a GraphQL client targeting the specified GraphQL server URL.
22 | // If httpClient is nil, then http.DefaultClient is used.
23 | func NewClient(url string, httpClient *http.Client) *Client {
24 | if httpClient == nil {
25 | httpClient = http.DefaultClient
26 | }
27 | return &Client{
28 | url: url,
29 | httpClient: httpClient,
30 | }
31 | }
32 |
33 | // Query executes a single GraphQL query request,
34 | // with a query derived from q, populating the response into it.
35 | // q should be a pointer to struct that corresponds to the GraphQL schema.
36 | func (c *Client) Query(ctx context.Context, q interface{}, variables map[string]interface{}) error {
37 | return c.do(ctx, queryOperation, q, variables)
38 | }
39 |
40 | // Mutate executes a single GraphQL mutation request,
41 | // with a mutation derived from m, populating the response into it.
42 | // m should be a pointer to struct that corresponds to the GraphQL schema.
43 | func (c *Client) Mutate(ctx context.Context, m interface{}, variables map[string]interface{}) error {
44 | return c.do(ctx, mutationOperation, m, variables)
45 | }
46 |
47 | // do executes a single GraphQL operation.
48 | func (c *Client) do(ctx context.Context, op operationType, v interface{}, variables map[string]interface{}) error {
49 | var query string
50 | switch op {
51 | case queryOperation:
52 | query = constructQuery(v, variables)
53 | case mutationOperation:
54 | query = constructMutation(v, variables)
55 | }
56 | in := struct {
57 | Query string `json:"query"`
58 | Variables map[string]interface{} `json:"variables,omitempty"`
59 | }{
60 | Query: query,
61 | Variables: variables,
62 | }
63 | var buf bytes.Buffer
64 | err := json.NewEncoder(&buf).Encode(in)
65 | if err != nil {
66 | return err
67 | }
68 | resp, err := ctxhttp.Post(ctx, c.httpClient, c.url, "application/json", &buf)
69 | if err != nil {
70 | return err
71 | }
72 | defer resp.Body.Close()
73 | if resp.StatusCode != http.StatusOK {
74 | body, _ := ioutil.ReadAll(resp.Body)
75 | return fmt.Errorf("non-200 OK status code: %v body: %q", resp.Status, body)
76 | }
77 | var out struct {
78 | Data *json.RawMessage
79 | Errors errors
80 | //Extensions interface{} // Unused.
81 | }
82 | err = json.NewDecoder(resp.Body).Decode(&out)
83 | if err != nil {
84 | // TODO: Consider including response body in returned error, if deemed helpful.
85 | return err
86 | }
87 | if out.Data != nil {
88 | err := jsonutil.UnmarshalGraphQL(*out.Data, v)
89 | if err != nil {
90 | // TODO: Consider including response body in returned error, if deemed helpful.
91 | return err
92 | }
93 | }
94 | if len(out.Errors) > 0 {
95 | return out.Errors
96 | }
97 | return nil
98 | }
99 |
100 | // errors represents the "errors" array in a response from a GraphQL server.
101 | // If returned via error interface, the slice is expected to contain at least 1 element.
102 | //
103 | // Specification: https://facebook.github.io/graphql/#sec-Errors.
104 | type errors []struct {
105 | Message string
106 | Locations []struct {
107 | Line int
108 | Column int
109 | }
110 | }
111 |
112 | // Error implements error interface.
113 | func (e errors) Error() string {
114 | return e[0].Message
115 | }
116 |
117 | type operationType uint8
118 |
119 | const (
120 | queryOperation operationType = iota
121 | mutationOperation
122 | //subscriptionOperation // Unused.
123 | )
124 |
--------------------------------------------------------------------------------
/vendor/github.com/shurcooL/graphql/ident/ident.go:
--------------------------------------------------------------------------------
1 | // Package ident provides functions for parsing and converting identifier names
2 | // between various naming conventions. It has support for MixedCaps, lowerCamelCase,
3 | // and SCREAMING_SNAKE_CASE naming conventions.
4 | package ident
5 |
6 | import (
7 | "strings"
8 | "unicode"
9 | "unicode/utf8"
10 | )
11 |
12 | // ParseMixedCaps parses a MixedCaps identifier name.
13 | //
14 | // E.g., "ClientMutationID" -> {"Client", "Mutation", "ID"}.
15 | func ParseMixedCaps(name string) Name {
16 | var words Name
17 |
18 | // Split name at any lower -> Upper or Upper -> Upper,lower transitions.
19 | // Check each word for initialisms.
20 | runes := []rune(name)
21 | w, i := 0, 0 // Index of start of word, scan.
22 | for i+1 <= len(runes) {
23 | eow := false // Whether we hit the end of a word.
24 | if i+1 == len(runes) {
25 | eow = true
26 | } else if unicode.IsLower(runes[i]) && unicode.IsUpper(runes[i+1]) {
27 | // lower -> Upper.
28 | eow = true
29 | } else if i+2 < len(runes) && unicode.IsUpper(runes[i]) && unicode.IsUpper(runes[i+1]) && unicode.IsLower(runes[i+2]) {
30 | // Upper -> Upper,lower. End of acronym, followed by a word.
31 | eow = true
32 |
33 | if string(runes[i:i+3]) == "IDs" { // Special case, plural form of ID initialism.
34 | eow = false
35 | }
36 | }
37 | i++
38 | if !eow {
39 | continue
40 | }
41 |
42 | // [w, i) is a word.
43 | word := string(runes[w:i])
44 | if initialism, ok := isInitialism(word); ok {
45 | words = append(words, initialism)
46 | } else if i1, i2, ok := isTwoInitialisms(word); ok {
47 | words = append(words, i1, i2)
48 | } else {
49 | words = append(words, word)
50 | }
51 | w = i
52 | }
53 | return words
54 | }
55 |
56 | // ParseLowerCamelCase parses a lowerCamelCase identifier name.
57 | //
58 | // E.g., "clientMutationId" -> {"client", "Mutation", "Id"}.
59 | func ParseLowerCamelCase(name string) Name {
60 | var words Name
61 |
62 | // Split name at any Upper letters.
63 | runes := []rune(name)
64 | w, i := 0, 0 // Index of start of word, scan.
65 | for i+1 <= len(runes) {
66 | eow := false // Whether we hit the end of a word.
67 | if i+1 == len(runes) {
68 | eow = true
69 | } else if unicode.IsUpper(runes[i+1]) {
70 | // Upper letter.
71 | eow = true
72 | }
73 | i++
74 | if !eow {
75 | continue
76 | }
77 |
78 | // [w, i) is a word.
79 | words = append(words, string(runes[w:i]))
80 | w = i
81 | }
82 | return words
83 | }
84 |
85 | // ParseScreamingSnakeCase parses a SCREAMING_SNAKE_CASE identifier name.
86 | //
87 | // E.g., "CLIENT_MUTATION_ID" -> {"CLIENT", "MUTATION", "ID"}.
88 | func ParseScreamingSnakeCase(name string) Name {
89 | var words Name
90 |
91 | // Split name at '_' characters.
92 | runes := []rune(name)
93 | w, i := 0, 0 // Index of start of word, scan.
94 | for i+1 <= len(runes) {
95 | eow := false // Whether we hit the end of a word.
96 | if i+1 == len(runes) {
97 | eow = true
98 | } else if runes[i+1] == '_' {
99 | // Underscore.
100 | eow = true
101 | }
102 | i++
103 | if !eow {
104 | continue
105 | }
106 |
107 | // [w, i) is a word.
108 | words = append(words, string(runes[w:i]))
109 | if i < len(runes) && runes[i] == '_' {
110 | // Skip underscore.
111 | i++
112 | }
113 | w = i
114 | }
115 | return words
116 | }
117 |
118 | // Name is an identifier name, broken up into individual words.
119 | type Name []string
120 |
121 | // ToMixedCaps expresses the identifier name in the MixedCaps naming convention.
122 | //
123 | // E.g., "ClientMutationID".
124 | func (n Name) ToMixedCaps() string {
125 | for i, word := range n {
126 | if strings.EqualFold(word, "IDs") { // Special case, plural form of ID initialism.
127 | n[i] = "IDs"
128 | continue
129 | }
130 | if initialism, ok := isInitialism(word); ok {
131 | n[i] = initialism
132 | continue
133 | }
134 | if brand, ok := isBrand(word); ok {
135 | n[i] = brand
136 | continue
137 | }
138 | r, size := utf8.DecodeRuneInString(word)
139 | n[i] = string(unicode.ToUpper(r)) + strings.ToLower(word[size:])
140 | }
141 | return strings.Join(n, "")
142 | }
143 |
144 | // ToLowerCamelCase expresses the identifier name in the lowerCamelCase naming convention.
145 | //
146 | // E.g., "clientMutationId".
147 | func (n Name) ToLowerCamelCase() string {
148 | for i, word := range n {
149 | if i == 0 {
150 | n[i] = strings.ToLower(word)
151 | continue
152 | }
153 | r, size := utf8.DecodeRuneInString(word)
154 | n[i] = string(unicode.ToUpper(r)) + strings.ToLower(word[size:])
155 | }
156 | return strings.Join(n, "")
157 | }
158 |
159 | // isInitialism reports whether word is an initialism.
160 | func isInitialism(word string) (string, bool) {
161 | initialism := strings.ToUpper(word)
162 | _, ok := initialisms[initialism]
163 | return initialism, ok
164 | }
165 |
166 | // isTwoInitialisms reports whether word is two initialisms.
167 | func isTwoInitialisms(word string) (string, string, bool) {
168 | word = strings.ToUpper(word)
169 | for i := 2; i <= len(word)-2; i++ { // Shortest initialism is 2 characters long.
170 | _, ok1 := initialisms[word[:i]]
171 | _, ok2 := initialisms[word[i:]]
172 | if ok1 && ok2 {
173 | return word[:i], word[i:], true
174 | }
175 | }
176 | return "", "", false
177 | }
178 |
179 | // initialisms is the set of initialisms in the MixedCaps naming convention.
180 | // Only add entries that are highly unlikely to be non-initialisms.
181 | // For instance, "ID" is fine (Freudian code is rare), but "AND" is not.
182 | var initialisms = map[string]struct{}{
183 | // These are the common initialisms from golint. Keep them in sync
184 | // with https://gotools.org/github.com/golang/lint#commonInitialisms.
185 | "ACL": {},
186 | "API": {},
187 | "ASCII": {},
188 | "CPU": {},
189 | "CSS": {},
190 | "DNS": {},
191 | "EOF": {},
192 | "GUID": {},
193 | "HTML": {},
194 | "HTTP": {},
195 | "HTTPS": {},
196 | "ID": {},
197 | "IP": {},
198 | "JSON": {},
199 | "LHS": {},
200 | "QPS": {},
201 | "RAM": {},
202 | "RHS": {},
203 | "RPC": {},
204 | "SLA": {},
205 | "SMTP": {},
206 | "SQL": {},
207 | "SSH": {},
208 | "TCP": {},
209 | "TLS": {},
210 | "TTL": {},
211 | "UDP": {},
212 | "UI": {},
213 | "UID": {},
214 | "UUID": {},
215 | "URI": {},
216 | "URL": {},
217 | "UTF8": {},
218 | "VM": {},
219 | "XML": {},
220 | "XMPP": {},
221 | "XSRF": {},
222 | "XSS": {},
223 |
224 | // Additional common initialisms.
225 | "RSS": {},
226 | }
227 |
228 | // isBrand reports whether word is a brand.
229 | func isBrand(word string) (string, bool) {
230 | brand, ok := brands[strings.ToLower(word)]
231 | return brand, ok
232 | }
233 |
234 | // brands is the map of brands in the MixedCaps naming convention;
235 | // see https://dmitri.shuralyov.com/idiomatic-go#for-brands-or-words-with-more-than-1-capital-letter-lowercase-all-letters.
236 | // Key is the lower case version of the brand, value is the canonical brand spelling.
237 | // Only add entries that are highly unlikely to be non-brands.
238 | var brands = map[string]string{
239 | "github": "GitHub",
240 | }
241 |
--------------------------------------------------------------------------------
/vendor/github.com/shurcooL/graphql/internal/jsonutil/graphql.go:
--------------------------------------------------------------------------------
1 | // Package jsonutil provides a function for decoding JSON
2 | // into a GraphQL query data structure.
3 | package jsonutil
4 |
5 | import (
6 | "bytes"
7 | "encoding/json"
8 | "errors"
9 | "fmt"
10 | "io"
11 | "reflect"
12 | "strings"
13 | )
14 |
15 | // UnmarshalGraphQL parses the JSON-encoded GraphQL response data and stores
16 | // the result in the GraphQL query data structure pointed to by v.
17 | //
18 | // The implementation is created on top of the JSON tokenizer available
19 | // in "encoding/json".Decoder.
20 | func UnmarshalGraphQL(data []byte, v interface{}) error {
21 | dec := json.NewDecoder(bytes.NewReader(data))
22 | dec.UseNumber()
23 | err := (&decoder{tokenizer: dec}).Decode(v)
24 | if err != nil {
25 | return err
26 | }
27 | tok, err := dec.Token()
28 | switch err {
29 | case io.EOF:
30 | // Expect to get io.EOF. There shouldn't be any more
31 | // tokens left after we've decoded v successfully.
32 | return nil
33 | case nil:
34 | return fmt.Errorf("invalid token '%v' after top-level value", tok)
35 | default:
36 | return err
37 | }
38 | }
39 |
40 | // decoder is a JSON decoder that performs custom unmarshaling behavior
41 | // for GraphQL query data structures. It's implemented on top of a JSON tokenizer.
42 | type decoder struct {
43 | tokenizer interface {
44 | Token() (json.Token, error)
45 | }
46 |
47 | // Stack of what part of input JSON we're in the middle of - objects, arrays.
48 | parseState []json.Delim
49 |
50 | // Stacks of values where to unmarshal.
51 | // The top of each stack is the reflect.Value where to unmarshal next JSON value.
52 | //
53 | // The reason there's more than one stack is because we might be unmarshaling
54 | // a single JSON value into multiple GraphQL fragments or embedded structs, so
55 | // we keep track of them all.
56 | vs [][]reflect.Value
57 | }
58 |
59 | // Decode decodes a single JSON value from d.tokenizer into v.
60 | func (d *decoder) Decode(v interface{}) error {
61 | rv := reflect.ValueOf(v)
62 | if rv.Kind() != reflect.Ptr {
63 | return fmt.Errorf("cannot decode into non-pointer %T", v)
64 | }
65 | d.vs = [][]reflect.Value{{rv.Elem()}}
66 | return d.decode()
67 | }
68 |
69 | // decode decodes a single JSON value from d.tokenizer into d.vs.
70 | func (d *decoder) decode() error {
71 | // The loop invariant is that the top of each d.vs stack
72 | // is where we try to unmarshal the next JSON value we see.
73 | for len(d.vs) > 0 {
74 | tok, err := d.tokenizer.Token()
75 | if err == io.EOF {
76 | return errors.New("unexpected end of JSON input")
77 | } else if err != nil {
78 | return err
79 | }
80 |
81 | switch {
82 |
83 | // Are we inside an object and seeing next key (rather than end of object)?
84 | case d.state() == '{' && tok != json.Delim('}'):
85 | key, ok := tok.(string)
86 | if !ok {
87 | return errors.New("unexpected non-key in JSON input")
88 | }
89 | someFieldExist := false
90 | for i := range d.vs {
91 | v := d.vs[i][len(d.vs[i])-1]
92 | if v.Kind() == reflect.Ptr {
93 | v = v.Elem()
94 | }
95 | var f reflect.Value
96 | if v.Kind() == reflect.Struct {
97 | f = fieldByGraphQLName(v, key)
98 | if f.IsValid() {
99 | someFieldExist = true
100 | }
101 | }
102 | d.vs[i] = append(d.vs[i], f)
103 | }
104 | if !someFieldExist {
105 | return fmt.Errorf("struct field for %q doesn't exist in any of %v places to unmarshal", key, len(d.vs))
106 | }
107 |
108 | // We've just consumed the current token, which was the key.
109 | // Read the next token, which should be the value, and let the rest of code process it.
110 | tok, err = d.tokenizer.Token()
111 | if err == io.EOF {
112 | return errors.New("unexpected end of JSON input")
113 | } else if err != nil {
114 | return err
115 | }
116 |
117 | // Are we inside an array and seeing next value (rather than end of array)?
118 | case d.state() == '[' && tok != json.Delim(']'):
119 | someSliceExist := false
120 | for i := range d.vs {
121 | v := d.vs[i][len(d.vs[i])-1]
122 | if v.Kind() == reflect.Ptr {
123 | v = v.Elem()
124 | }
125 | var f reflect.Value
126 | if v.Kind() == reflect.Slice {
127 | v.Set(reflect.Append(v, reflect.Zero(v.Type().Elem()))) // v = append(v, T).
128 | f = v.Index(v.Len() - 1)
129 | someSliceExist = true
130 | }
131 | d.vs[i] = append(d.vs[i], f)
132 | }
133 | if !someSliceExist {
134 | return fmt.Errorf("slice doesn't exist in any of %v places to unmarshal", len(d.vs))
135 | }
136 | }
137 |
138 | switch tok := tok.(type) {
139 | case string, json.Number, bool, nil:
140 | // Value.
141 |
142 | for i := range d.vs {
143 | v := d.vs[i][len(d.vs[i])-1]
144 | if !v.IsValid() {
145 | continue
146 | }
147 | err := unmarshalValue(tok, v)
148 | if err != nil {
149 | return err
150 | }
151 | }
152 | d.popAllVs()
153 |
154 | case json.Delim:
155 | switch tok {
156 | case '{':
157 | // Start of object.
158 |
159 | d.pushState(tok)
160 |
161 | frontier := make([]reflect.Value, len(d.vs)) // Places to look for GraphQL fragments/embedded structs.
162 | for i := range d.vs {
163 | v := d.vs[i][len(d.vs[i])-1]
164 | frontier[i] = v
165 | // TODO: Do this recursively or not? Add a test case if needed.
166 | if v.Kind() == reflect.Ptr && v.IsNil() {
167 | v.Set(reflect.New(v.Type().Elem())) // v = new(T).
168 | }
169 | }
170 | // Find GraphQL fragments/embedded structs recursively, adding to frontier
171 | // as new ones are discovered and exploring them further.
172 | for len(frontier) > 0 {
173 | v := frontier[0]
174 | frontier = frontier[1:]
175 | if v.Kind() == reflect.Ptr {
176 | v = v.Elem()
177 | }
178 | if v.Kind() != reflect.Struct {
179 | continue
180 | }
181 | for i := 0; i < v.NumField(); i++ {
182 | if isGraphQLFragment(v.Type().Field(i)) || v.Type().Field(i).Anonymous {
183 | // Add GraphQL fragment or embedded struct.
184 | d.vs = append(d.vs, []reflect.Value{v.Field(i)})
185 | frontier = append(frontier, v.Field(i))
186 | }
187 | }
188 | }
189 | case '[':
190 | // Start of array.
191 |
192 | d.pushState(tok)
193 |
194 | for i := range d.vs {
195 | v := d.vs[i][len(d.vs[i])-1]
196 | // TODO: Confirm this is needed, write a test case.
197 | //if v.Kind() == reflect.Ptr && v.IsNil() {
198 | // v.Set(reflect.New(v.Type().Elem())) // v = new(T).
199 | //}
200 |
201 | // Reset slice to empty (in case it had non-zero initial value).
202 | if v.Kind() == reflect.Ptr {
203 | v = v.Elem()
204 | }
205 | if v.Kind() != reflect.Slice {
206 | continue
207 | }
208 | v.Set(reflect.MakeSlice(v.Type(), 0, 0)) // v = make(T, 0, 0).
209 | }
210 | case '}', ']':
211 | // End of object or array.
212 | d.popAllVs()
213 | d.popState()
214 | default:
215 | return errors.New("unexpected delimiter in JSON input")
216 | }
217 | default:
218 | return errors.New("unexpected token in JSON input")
219 | }
220 | }
221 | return nil
222 | }
223 |
224 | // pushState pushes a new parse state s onto the stack.
225 | func (d *decoder) pushState(s json.Delim) {
226 | d.parseState = append(d.parseState, s)
227 | }
228 |
229 | // popState pops a parse state (already obtained) off the stack.
230 | // The stack must be non-empty.
231 | func (d *decoder) popState() {
232 | d.parseState = d.parseState[:len(d.parseState)-1]
233 | }
234 |
235 | // state reports the parse state on top of stack, or 0 if empty.
236 | func (d *decoder) state() json.Delim {
237 | if len(d.parseState) == 0 {
238 | return 0
239 | }
240 | return d.parseState[len(d.parseState)-1]
241 | }
242 |
243 | // popAllVs pops from all d.vs stacks, keeping only non-empty ones.
244 | func (d *decoder) popAllVs() {
245 | var nonEmpty [][]reflect.Value
246 | for i := range d.vs {
247 | d.vs[i] = d.vs[i][:len(d.vs[i])-1]
248 | if len(d.vs[i]) > 0 {
249 | nonEmpty = append(nonEmpty, d.vs[i])
250 | }
251 | }
252 | d.vs = nonEmpty
253 | }
254 |
255 | // fieldByGraphQLName returns an exported struct field of struct v
256 | // that matches GraphQL name, or invalid reflect.Value if none found.
257 | func fieldByGraphQLName(v reflect.Value, name string) reflect.Value {
258 | for i := 0; i < v.NumField(); i++ {
259 | if v.Type().Field(i).PkgPath != "" {
260 | // Skip unexported field.
261 | continue
262 | }
263 | if hasGraphQLName(v.Type().Field(i), name) {
264 | return v.Field(i)
265 | }
266 | }
267 | return reflect.Value{}
268 | }
269 |
270 | // hasGraphQLName reports whether struct field f has GraphQL name.
271 | func hasGraphQLName(f reflect.StructField, name string) bool {
272 | value, ok := f.Tag.Lookup("graphql")
273 | if !ok {
274 | // TODO: caseconv package is relatively slow. Optimize it, then consider using it here.
275 | //return caseconv.MixedCapsToLowerCamelCase(f.Name) == name
276 | return strings.EqualFold(f.Name, name)
277 | }
278 | value = strings.TrimSpace(value) // TODO: Parse better.
279 | if strings.HasPrefix(value, "...") {
280 | // GraphQL fragment. It doesn't have a name.
281 | return false
282 | }
283 | if i := strings.Index(value, "("); i != -1 {
284 | value = value[:i]
285 | }
286 | if i := strings.Index(value, ":"); i != -1 {
287 | value = value[:i]
288 | }
289 | return strings.TrimSpace(value) == name
290 | }
291 |
292 | // isGraphQLFragment reports whether struct field f is a GraphQL fragment.
293 | func isGraphQLFragment(f reflect.StructField) bool {
294 | value, ok := f.Tag.Lookup("graphql")
295 | if !ok {
296 | return false
297 | }
298 | value = strings.TrimSpace(value) // TODO: Parse better.
299 | return strings.HasPrefix(value, "...")
300 | }
301 |
302 | // unmarshalValue unmarshals JSON value into v.
303 | // v must be addressable and not obtained by the use of unexported
304 | // struct fields, otherwise unmarshalValue will panic.
305 | func unmarshalValue(value json.Token, v reflect.Value) error {
306 | b, err := json.Marshal(value) // TODO: Short-circuit (if profiling says it's worth it).
307 | if err != nil {
308 | return err
309 | }
310 | return json.Unmarshal(b, v.Addr().Interface())
311 | }
312 |
--------------------------------------------------------------------------------
/vendor/github.com/shurcooL/graphql/query.go:
--------------------------------------------------------------------------------
1 | package graphql
2 |
3 | import (
4 | "bytes"
5 | "encoding/json"
6 | "io"
7 | "reflect"
8 | "sort"
9 |
10 | "github.com/shurcooL/graphql/ident"
11 | )
12 |
13 | func constructQuery(v interface{}, variables map[string]interface{}) string {
14 | query := query(v)
15 | if len(variables) > 0 {
16 | return "query(" + queryArguments(variables) + ")" + query
17 | }
18 | return query
19 | }
20 |
21 | func constructMutation(v interface{}, variables map[string]interface{}) string {
22 | query := query(v)
23 | if len(variables) > 0 {
24 | return "mutation(" + queryArguments(variables) + ")" + query
25 | }
26 | return "mutation" + query
27 | }
28 |
29 | // queryArguments constructs a minified arguments string for variables.
30 | //
31 | // E.g., map[string]interface{}{"a": Int(123), "b": NewBoolean(true)} -> "$a:Int!$b:Boolean".
32 | func queryArguments(variables map[string]interface{}) string {
33 | // Sort keys in order to produce deterministic output for testing purposes.
34 | // TODO: If tests can be made to work with non-deterministic output, then no need to sort.
35 | keys := make([]string, 0, len(variables))
36 | for k := range variables {
37 | keys = append(keys, k)
38 | }
39 | sort.Strings(keys)
40 |
41 | var buf bytes.Buffer
42 | for _, k := range keys {
43 | io.WriteString(&buf, "$")
44 | io.WriteString(&buf, k)
45 | io.WriteString(&buf, ":")
46 | writeArgumentType(&buf, reflect.TypeOf(variables[k]), true)
47 | // Don't insert a comma here.
48 | // Commas in GraphQL are insignificant, and we want minified output.
49 | // See https://facebook.github.io/graphql/October2016/#sec-Insignificant-Commas.
50 | }
51 | return buf.String()
52 | }
53 |
54 | // writeArgumentType writes a minified GraphQL type for t to w.
55 | // value indicates whether t is a value (required) type or pointer (optional) type.
56 | // If value is true, then "!" is written at the end of t.
57 | func writeArgumentType(w io.Writer, t reflect.Type, value bool) {
58 | if t.Kind() == reflect.Ptr {
59 | // Pointer is an optional type, so no "!" at the end of the pointer's underlying type.
60 | writeArgumentType(w, t.Elem(), false)
61 | return
62 | }
63 |
64 | switch t.Kind() {
65 | case reflect.Slice, reflect.Array:
66 | // List. E.g., "[Int]".
67 | io.WriteString(w, "[")
68 | writeArgumentType(w, t.Elem(), true)
69 | io.WriteString(w, "]")
70 | default:
71 | // Named type. E.g., "Int".
72 | name := t.Name()
73 | if name == "string" { // HACK: Workaround for https://github.com/shurcooL/githubv4/issues/12.
74 | name = "ID"
75 | }
76 | io.WriteString(w, name)
77 | }
78 |
79 | if value {
80 | // Value is a required type, so add "!" to the end.
81 | io.WriteString(w, "!")
82 | }
83 | }
84 |
85 | // query uses writeQuery to recursively construct
86 | // a minified query string from the provided struct v.
87 | //
88 | // E.g., struct{Foo Int, BarBaz *Boolean} -> "{foo,barBaz}".
89 | func query(v interface{}) string {
90 | var buf bytes.Buffer
91 | writeQuery(&buf, reflect.TypeOf(v), false)
92 | return buf.String()
93 | }
94 |
95 | // writeQuery writes a minified query for t to w.
96 | // If inline is true, the struct fields of t are inlined into parent struct.
97 | func writeQuery(w io.Writer, t reflect.Type, inline bool) {
98 | switch t.Kind() {
99 | case reflect.Ptr, reflect.Slice:
100 | writeQuery(w, t.Elem(), false)
101 | case reflect.Struct:
102 | // If the type implements json.Unmarshaler, it's a scalar. Don't expand it.
103 | if reflect.PtrTo(t).Implements(jsonUnmarshaler) {
104 | return
105 | }
106 | if !inline {
107 | io.WriteString(w, "{")
108 | }
109 | for i := 0; i < t.NumField(); i++ {
110 | if i != 0 {
111 | io.WriteString(w, ",")
112 | }
113 | f := t.Field(i)
114 | value, ok := f.Tag.Lookup("graphql")
115 | inlineField := f.Anonymous && !ok
116 | if !inlineField {
117 | if ok {
118 | io.WriteString(w, value)
119 | } else {
120 | io.WriteString(w, ident.ParseMixedCaps(f.Name).ToLowerCamelCase())
121 | }
122 | }
123 | writeQuery(w, f.Type, inlineField)
124 | }
125 | if !inline {
126 | io.WriteString(w, "}")
127 | }
128 | }
129 | }
130 |
131 | var jsonUnmarshaler = reflect.TypeOf((*json.Unmarshaler)(nil)).Elem()
132 |
--------------------------------------------------------------------------------
/vendor/github.com/shurcooL/graphql/scalar.go:
--------------------------------------------------------------------------------
1 | package graphql
2 |
3 | // Note: These custom types are meant to be used in queries for now.
4 | // But the plan is to switch to using native Go types (string, int, bool, time.Time, etc.).
5 | // See https://github.com/shurcooL/githubv4/issues/9 for details.
6 | //
7 | // These custom types currently provide documentation, and their use
8 | // is required for sending outbound queries. However, native Go types
9 | // can be used for unmarshaling. Once https://github.com/shurcooL/githubv4/issues/9
10 | // is resolved, native Go types can completely replace these.
11 |
12 | type (
13 | // Boolean represents true or false values.
14 | Boolean bool
15 |
16 | // Float represents signed double-precision fractional values as
17 | // specified by IEEE 754.
18 | Float float64
19 |
20 | // ID represents a unique identifier that is Base64 obfuscated. It
21 | // is often used to refetch an object or as key for a cache. The ID
22 | // type appears in a JSON response as a String; however, it is not
23 | // intended to be human-readable. When expected as an input type,
24 | // any string (such as "VXNlci0xMA==") or integer (such as 4) input
25 | // value will be accepted as an ID.
26 | ID interface{}
27 |
28 | // Int represents non-fractional signed whole numeric values.
29 | // Int can represent values between -(2^31) and 2^31 - 1.
30 | Int int32
31 |
32 | // String represents textual data as UTF-8 character sequences.
33 | // This type is most often used by GraphQL to represent free-form
34 | // human-readable text.
35 | String string
36 | )
37 |
38 | // NewBoolean is a helper to make a new *Boolean.
39 | func NewBoolean(v Boolean) *Boolean { return &v }
40 |
41 | // NewFloat is a helper to make a new *Float.
42 | func NewFloat(v Float) *Float { return &v }
43 |
44 | // NewID is a helper to make a new *ID.
45 | func NewID(v ID) *ID { return &v }
46 |
47 | // NewInt is a helper to make a new *Int.
48 | func NewInt(v Int) *Int { return &v }
49 |
50 | // NewString is a helper to make a new *String.
51 | func NewString(v String) *String { return &v }
52 |
--------------------------------------------------------------------------------
/vendor/golang.org/x/net/AUTHORS:
--------------------------------------------------------------------------------
1 | # This source code refers to The Go Authors for copyright purposes.
2 | # The master list of authors is in the main Go distribution,
3 | # visible at http://tip.golang.org/AUTHORS.
4 |
--------------------------------------------------------------------------------
/vendor/golang.org/x/net/CONTRIBUTORS:
--------------------------------------------------------------------------------
1 | # This source code was written by the Go contributors.
2 | # The master list of contributors is in the main Go distribution,
3 | # visible at http://tip.golang.org/CONTRIBUTORS.
4 |
--------------------------------------------------------------------------------
/vendor/golang.org/x/net/LICENSE:
--------------------------------------------------------------------------------
1 | Copyright (c) 2009 The Go Authors. All rights reserved.
2 |
3 | Redistribution and use in source and binary forms, with or without
4 | modification, are permitted provided that the following conditions are
5 | met:
6 |
7 | * Redistributions of source code must retain the above copyright
8 | notice, this list of conditions and the following disclaimer.
9 | * Redistributions in binary form must reproduce the above
10 | copyright notice, this list of conditions and the following disclaimer
11 | in the documentation and/or other materials provided with the
12 | distribution.
13 | * Neither the name of Google Inc. nor the names of its
14 | contributors may be used to endorse or promote products derived from
15 | this software without specific prior written permission.
16 |
17 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
18 | "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
19 | LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
20 | A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
21 | OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
22 | SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
23 | LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
24 | DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
25 | THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
26 | (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
27 | OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
28 |
--------------------------------------------------------------------------------
/vendor/golang.org/x/net/PATENTS:
--------------------------------------------------------------------------------
1 | Additional IP Rights Grant (Patents)
2 |
3 | "This implementation" means the copyrightable works distributed by
4 | Google as part of the Go project.
5 |
6 | Google hereby grants to You a perpetual, worldwide, non-exclusive,
7 | no-charge, royalty-free, irrevocable (except as stated in this section)
8 | patent license to make, have made, use, offer to sell, sell, import,
9 | transfer and otherwise run, modify and propagate the contents of this
10 | implementation of Go, where such license applies only to those patent
11 | claims, both currently owned or controlled by Google and acquired in
12 | the future, licensable by Google that are necessarily infringed by this
13 | implementation of Go. This grant does not include claims that would be
14 | infringed only as a consequence of further modification of this
15 | implementation. If you or your agent or exclusive licensee institute or
16 | order or agree to the institution of patent litigation against any
17 | entity (including a cross-claim or counterclaim in a lawsuit) alleging
18 | that this implementation of Go or any code incorporated within this
19 | implementation of Go constitutes direct or contributory patent
20 | infringement, or inducement of patent infringement, then any patent
21 | rights granted to you under this License for this implementation of Go
22 | shall terminate as of the date such litigation is filed.
23 |
--------------------------------------------------------------------------------
/vendor/golang.org/x/net/context/ctxhttp/ctxhttp.go:
--------------------------------------------------------------------------------
1 | // Copyright 2016 The Go Authors. All rights reserved.
2 | // Use of this source code is governed by a BSD-style
3 | // license that can be found in the LICENSE file.
4 |
5 | // Package ctxhttp provides helper functions for performing context-aware HTTP requests.
6 | package ctxhttp // import "golang.org/x/net/context/ctxhttp"
7 |
8 | import (
9 | "context"
10 | "io"
11 | "net/http"
12 | "net/url"
13 | "strings"
14 | )
15 |
16 | // Do sends an HTTP request with the provided http.Client and returns
17 | // an HTTP response.
18 | //
19 | // If the client is nil, http.DefaultClient is used.
20 | //
21 | // The provided ctx must be non-nil. If it is canceled or times out,
22 | // ctx.Err() will be returned.
23 | func Do(ctx context.Context, client *http.Client, req *http.Request) (*http.Response, error) {
24 | if client == nil {
25 | client = http.DefaultClient
26 | }
27 | resp, err := client.Do(req.WithContext(ctx))
28 | // If we got an error, and the context has been canceled,
29 | // the context's error is probably more useful.
30 | if err != nil {
31 | select {
32 | case <-ctx.Done():
33 | err = ctx.Err()
34 | default:
35 | }
36 | }
37 | return resp, err
38 | }
39 |
40 | // Get issues a GET request via the Do function.
41 | func Get(ctx context.Context, client *http.Client, url string) (*http.Response, error) {
42 | req, err := http.NewRequest("GET", url, nil)
43 | if err != nil {
44 | return nil, err
45 | }
46 | return Do(ctx, client, req)
47 | }
48 |
49 | // Head issues a HEAD request via the Do function.
50 | func Head(ctx context.Context, client *http.Client, url string) (*http.Response, error) {
51 | req, err := http.NewRequest("HEAD", url, nil)
52 | if err != nil {
53 | return nil, err
54 | }
55 | return Do(ctx, client, req)
56 | }
57 |
58 | // Post issues a POST request via the Do function.
59 | func Post(ctx context.Context, client *http.Client, url string, bodyType string, body io.Reader) (*http.Response, error) {
60 | req, err := http.NewRequest("POST", url, body)
61 | if err != nil {
62 | return nil, err
63 | }
64 | req.Header.Set("Content-Type", bodyType)
65 | return Do(ctx, client, req)
66 | }
67 |
68 | // PostForm issues a POST request via the Do function.
69 | func PostForm(ctx context.Context, client *http.Client, url string, data url.Values) (*http.Response, error) {
70 | return Post(ctx, client, url, "application/x-www-form-urlencoded", strings.NewReader(data.Encode()))
71 | }
72 |
--------------------------------------------------------------------------------
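
A small usage sketch for the helpers above: the request is tied to a context, and on cancellation or timeout the returned error is ctx.Err() rather than the raw transport error (the URL below is illustrative only):

```go
package main

import (
	"context"
	"io"
	"log"
	"os"
	"time"

	"golang.org/x/net/context/ctxhttp"
)

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()

	// A nil client falls back to http.DefaultClient, per Do above.
	resp, err := ctxhttp.Get(ctx, nil, "https://example.com/")
	if err != nil {
		log.Fatal(err) // context.DeadlineExceeded if the timeout fired first
	}
	defer resp.Body.Close()

	if _, err := io.Copy(os.Stdout, resp.Body); err != nil {
		log.Fatal(err)
	}
}
```
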
/vendor/gopkg.in/yaml.v2/.travis.yml:
--------------------------------------------------------------------------------
1 | language: go
2 |
3 | go:
4 | - "1.4.x"
5 | - "1.5.x"
6 | - "1.6.x"
7 | - "1.7.x"
8 | - "1.8.x"
9 | - "1.9.x"
10 | - "1.10.x"
11 | - "1.11.x"
12 | - "1.12.x"
13 | - "1.13.x"
14 | - "tip"
15 |
16 | go_import_path: gopkg.in/yaml.v2
17 |
--------------------------------------------------------------------------------
/vendor/gopkg.in/yaml.v2/LICENSE:
--------------------------------------------------------------------------------
1 | Apache License
2 | Version 2.0, January 2004
3 | http://www.apache.org/licenses/
4 |
5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
6 |
7 | 1. Definitions.
8 |
9 | "License" shall mean the terms and conditions for use, reproduction,
10 | and distribution as defined by Sections 1 through 9 of this document.
11 |
12 | "Licensor" shall mean the copyright owner or entity authorized by
13 | the copyright owner that is granting the License.
14 |
15 | "Legal Entity" shall mean the union of the acting entity and all
16 | other entities that control, are controlled by, or are under common
17 | control with that entity. For the purposes of this definition,
18 | "control" means (i) the power, direct or indirect, to cause the
19 | direction or management of such entity, whether by contract or
20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the
21 | outstanding shares, or (iii) beneficial ownership of such entity.
22 |
23 | "You" (or "Your") shall mean an individual or Legal Entity
24 | exercising permissions granted by this License.
25 |
26 | "Source" form shall mean the preferred form for making modifications,
27 | including but not limited to software source code, documentation
28 | source, and configuration files.
29 |
30 | "Object" form shall mean any form resulting from mechanical
31 | transformation or translation of a Source form, including but
32 | not limited to compiled object code, generated documentation,
33 | and conversions to other media types.
34 |
35 | "Work" shall mean the work of authorship, whether in Source or
36 | Object form, made available under the License, as indicated by a
37 | copyright notice that is included in or attached to the work
38 | (an example is provided in the Appendix below).
39 |
40 | "Derivative Works" shall mean any work, whether in Source or Object
41 | form, that is based on (or derived from) the Work and for which the
42 | editorial revisions, annotations, elaborations, or other modifications
43 | represent, as a whole, an original work of authorship. For the purposes
44 | of this License, Derivative Works shall not include works that remain
45 | separable from, or merely link (or bind by name) to the interfaces of,
46 | the Work and Derivative Works thereof.
47 |
48 | "Contribution" shall mean any work of authorship, including
49 | the original version of the Work and any modifications or additions
50 | to that Work or Derivative Works thereof, that is intentionally
51 | submitted to Licensor for inclusion in the Work by the copyright owner
52 | or by an individual or Legal Entity authorized to submit on behalf of
53 | the copyright owner. For the purposes of this definition, "submitted"
54 | means any form of electronic, verbal, or written communication sent
55 | to the Licensor or its representatives, including but not limited to
56 | communication on electronic mailing lists, source code control systems,
57 | and issue tracking systems that are managed by, or on behalf of, the
58 | Licensor for the purpose of discussing and improving the Work, but
59 | excluding communication that is conspicuously marked or otherwise
60 | designated in writing by the copyright owner as "Not a Contribution."
61 |
62 | "Contributor" shall mean Licensor and any individual or Legal Entity
63 | on behalf of whom a Contribution has been received by Licensor and
64 | subsequently incorporated within the Work.
65 |
66 | 2. Grant of Copyright License. Subject to the terms and conditions of
67 | this License, each Contributor hereby grants to You a perpetual,
68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable
69 | copyright license to reproduce, prepare Derivative Works of,
70 | publicly display, publicly perform, sublicense, and distribute the
71 | Work and such Derivative Works in Source or Object form.
72 |
73 | 3. Grant of Patent License. Subject to the terms and conditions of
74 | this License, each Contributor hereby grants to You a perpetual,
75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable
76 | (except as stated in this section) patent license to make, have made,
77 | use, offer to sell, sell, import, and otherwise transfer the Work,
78 | where such license applies only to those patent claims licensable
79 | by such Contributor that are necessarily infringed by their
80 | Contribution(s) alone or by combination of their Contribution(s)
81 | with the Work to which such Contribution(s) was submitted. If You
82 | institute patent litigation against any entity (including a
83 | cross-claim or counterclaim in a lawsuit) alleging that the Work
84 | or a Contribution incorporated within the Work constitutes direct
85 | or contributory patent infringement, then any patent licenses
86 | granted to You under this License for that Work shall terminate
87 | as of the date such litigation is filed.
88 |
89 | 4. Redistribution. You may reproduce and distribute copies of the
90 | Work or Derivative Works thereof in any medium, with or without
91 | modifications, and in Source or Object form, provided that You
92 | meet the following conditions:
93 |
94 | (a) You must give any other recipients of the Work or
95 | Derivative Works a copy of this License; and
96 |
97 | (b) You must cause any modified files to carry prominent notices
98 | stating that You changed the files; and
99 |
100 | (c) You must retain, in the Source form of any Derivative Works
101 | that You distribute, all copyright, patent, trademark, and
102 | attribution notices from the Source form of the Work,
103 | excluding those notices that do not pertain to any part of
104 | the Derivative Works; and
105 |
106 | (d) If the Work includes a "NOTICE" text file as part of its
107 | distribution, then any Derivative Works that You distribute must
108 | include a readable copy of the attribution notices contained
109 | within such NOTICE file, excluding those notices that do not
110 | pertain to any part of the Derivative Works, in at least one
111 | of the following places: within a NOTICE text file distributed
112 | as part of the Derivative Works; within the Source form or
113 | documentation, if provided along with the Derivative Works; or,
114 | within a display generated by the Derivative Works, if and
115 | wherever such third-party notices normally appear. The contents
116 | of the NOTICE file are for informational purposes only and
117 | do not modify the License. You may add Your own attribution
118 | notices within Derivative Works that You distribute, alongside
119 | or as an addendum to the NOTICE text from the Work, provided
120 | that such additional attribution notices cannot be construed
121 | as modifying the License.
122 |
123 | You may add Your own copyright statement to Your modifications and
124 | may provide additional or different license terms and conditions
125 | for use, reproduction, or distribution of Your modifications, or
126 | for any such Derivative Works as a whole, provided Your use,
127 | reproduction, and distribution of the Work otherwise complies with
128 | the conditions stated in this License.
129 |
130 | 5. Submission of Contributions. Unless You explicitly state otherwise,
131 | any Contribution intentionally submitted for inclusion in the Work
132 | by You to the Licensor shall be under the terms and conditions of
133 | this License, without any additional terms or conditions.
134 | Notwithstanding the above, nothing herein shall supersede or modify
135 | the terms of any separate license agreement you may have executed
136 | with Licensor regarding such Contributions.
137 |
138 | 6. Trademarks. This License does not grant permission to use the trade
139 | names, trademarks, service marks, or product names of the Licensor,
140 | except as required for reasonable and customary use in describing the
141 | origin of the Work and reproducing the content of the NOTICE file.
142 |
143 | 7. Disclaimer of Warranty. Unless required by applicable law or
144 | agreed to in writing, Licensor provides the Work (and each
145 | Contributor provides its Contributions) on an "AS IS" BASIS,
146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
147 | implied, including, without limitation, any warranties or conditions
148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
149 | PARTICULAR PURPOSE. You are solely responsible for determining the
150 | appropriateness of using or redistributing the Work and assume any
151 | risks associated with Your exercise of permissions under this License.
152 |
153 | 8. Limitation of Liability. In no event and under no legal theory,
154 | whether in tort (including negligence), contract, or otherwise,
155 | unless required by applicable law (such as deliberate and grossly
156 | negligent acts) or agreed to in writing, shall any Contributor be
157 | liable to You for damages, including any direct, indirect, special,
158 | incidental, or consequential damages of any character arising as a
159 | result of this License or out of the use or inability to use the
160 | Work (including but not limited to damages for loss of goodwill,
161 | work stoppage, computer failure or malfunction, or any and all
162 | other commercial damages or losses), even if such Contributor
163 | has been advised of the possibility of such damages.
164 |
165 | 9. Accepting Warranty or Additional Liability. While redistributing
166 | the Work or Derivative Works thereof, You may choose to offer,
167 | and charge a fee for, acceptance of support, warranty, indemnity,
168 | or other liability obligations and/or rights consistent with this
169 | License. However, in accepting such obligations, You may act only
170 | on Your own behalf and on Your sole responsibility, not on behalf
171 | of any other Contributor, and only if You agree to indemnify,
172 | defend, and hold each Contributor harmless for any liability
173 | incurred by, or claims asserted against, such Contributor by reason
174 | of your accepting any such warranty or additional liability.
175 |
176 | END OF TERMS AND CONDITIONS
177 |
178 | APPENDIX: How to apply the Apache License to your work.
179 |
180 | To apply the Apache License to your work, attach the following
181 | boilerplate notice, with the fields enclosed by brackets "{}"
182 | replaced with your own identifying information. (Don't include
183 | the brackets!) The text should be enclosed in the appropriate
184 | comment syntax for the file format. We also recommend that a
185 | file or class name and description of purpose be included on the
186 | same "printed page" as the copyright notice for easier
187 | identification within third-party archives.
188 |
189 | Copyright {yyyy} {name of copyright owner}
190 |
191 | Licensed under the Apache License, Version 2.0 (the "License");
192 | you may not use this file except in compliance with the License.
193 | You may obtain a copy of the License at
194 |
195 | http://www.apache.org/licenses/LICENSE-2.0
196 |
197 | Unless required by applicable law or agreed to in writing, software
198 | distributed under the License is distributed on an "AS IS" BASIS,
199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
200 | See the License for the specific language governing permissions and
201 | limitations under the License.
202 |
--------------------------------------------------------------------------------
/vendor/gopkg.in/yaml.v2/LICENSE.libyaml:
--------------------------------------------------------------------------------
1 | The following files were ported to Go from C files of libyaml, and thus
2 | are still covered by their original copyright and license:
3 |
4 | apic.go
5 | emitterc.go
6 | parserc.go
7 | readerc.go
8 | scannerc.go
9 | writerc.go
10 | yamlh.go
11 | yamlprivateh.go
12 |
13 | Copyright (c) 2006 Kirill Simonov
14 |
15 | Permission is hereby granted, free of charge, to any person obtaining a copy of
16 | this software and associated documentation files (the "Software"), to deal in
17 | the Software without restriction, including without limitation the rights to
18 | use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
19 | of the Software, and to permit persons to whom the Software is furnished to do
20 | so, subject to the following conditions:
21 |
22 | The above copyright notice and this permission notice shall be included in all
23 | copies or substantial portions of the Software.
24 |
25 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
26 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
27 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
28 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
29 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
30 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
31 | SOFTWARE.
32 |
--------------------------------------------------------------------------------
/vendor/gopkg.in/yaml.v2/NOTICE:
--------------------------------------------------------------------------------
1 | Copyright 2011-2016 Canonical Ltd.
2 |
3 | Licensed under the Apache License, Version 2.0 (the "License");
4 | you may not use this file except in compliance with the License.
5 | You may obtain a copy of the License at
6 |
7 | http://www.apache.org/licenses/LICENSE-2.0
8 |
9 | Unless required by applicable law or agreed to in writing, software
10 | distributed under the License is distributed on an "AS IS" BASIS,
11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | See the License for the specific language governing permissions and
13 | limitations under the License.
14 |
--------------------------------------------------------------------------------
/vendor/gopkg.in/yaml.v2/README.md:
--------------------------------------------------------------------------------
1 | # YAML support for the Go language
2 |
3 | Introduction
4 | ------------
5 |
6 | The yaml package enables Go programs to comfortably encode and decode YAML
7 | values. It was developed within [Canonical](https://www.canonical.com) as
8 | part of the [juju](https://juju.ubuntu.com) project, and is based on a
9 | pure Go port of the well-known [libyaml](http://pyyaml.org/wiki/LibYAML)
10 | C library to parse and generate YAML data quickly and reliably.
11 |
12 | Compatibility
13 | -------------
14 |
15 | The yaml package supports most of YAML 1.1 and 1.2, including support for
16 | anchors, tags, map merging, etc. Multi-document unmarshalling is not yet
17 | implemented, and base-60 floats from YAML 1.1 are purposefully not
18 | supported since they're a poor design and are gone in YAML 1.2.
19 |
20 | Installation and usage
21 | ----------------------
22 |
23 | The import path for the package is *gopkg.in/yaml.v2*.
24 |
25 | To install it, run:
26 |
27 | go get gopkg.in/yaml.v2
28 |
29 | API documentation
30 | -----------------
31 |
32 | If opened in a browser, the import path itself leads to the API documentation:
33 |
34 | * [https://gopkg.in/yaml.v2](https://gopkg.in/yaml.v2)
35 |
36 | API stability
37 | -------------
38 |
39 | The package API for yaml v2 will remain stable as described in [gopkg.in](https://gopkg.in).
40 |
41 |
42 | License
43 | -------
44 |
45 | The yaml package is licensed under the Apache License 2.0. Please see the LICENSE file for details.
46 |
47 |
48 | Example
49 | -------
50 |
51 | ```Go
52 | package main
53 |
54 | import (
55 | "fmt"
56 | "log"
57 |
58 | "gopkg.in/yaml.v2"
59 | )
60 |
61 | var data = `
62 | a: Easy!
63 | b:
64 | c: 2
65 | d: [3, 4]
66 | `
67 |
68 | // Note: struct fields must be public in order for unmarshal to
69 | // correctly populate the data.
70 | type T struct {
71 | A string
72 | B struct {
73 | RenamedC int `yaml:"c"`
74 | D []int `yaml:",flow"`
75 | }
76 | }
77 |
78 | func main() {
79 | t := T{}
80 |
81 | err := yaml.Unmarshal([]byte(data), &t)
82 | if err != nil {
83 | log.Fatalf("error: %v", err)
84 | }
85 | fmt.Printf("--- t:\n%v\n\n", t)
86 |
87 | d, err := yaml.Marshal(&t)
88 | if err != nil {
89 | log.Fatalf("error: %v", err)
90 | }
91 | fmt.Printf("--- t dump:\n%s\n\n", string(d))
92 |
93 | m := make(map[interface{}]interface{})
94 |
95 | err = yaml.Unmarshal([]byte(data), &m)
96 | if err != nil {
97 | log.Fatalf("error: %v", err)
98 | }
99 | fmt.Printf("--- m:\n%v\n\n", m)
100 |
101 | d, err = yaml.Marshal(&m)
102 | if err != nil {
103 | log.Fatalf("error: %v", err)
104 | }
105 | fmt.Printf("--- m dump:\n%s\n\n", string(d))
106 | }
107 | ```
108 |
109 | This example will generate the following output:
110 |
111 | ```
112 | --- t:
113 | {Easy! {2 [3 4]}}
114 |
115 | --- t dump:
116 | a: Easy!
117 | b:
118 | c: 2
119 | d: [3, 4]
120 |
121 |
122 | --- m:
123 | map[a:Easy! b:map[c:2 d:[3 4]]]
124 |
125 | --- m dump:
126 | a: Easy!
127 | b:
128 | c: 2
129 | d:
130 | - 3
131 | - 4
132 | ```
133 |
134 |
--------------------------------------------------------------------------------
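
Complementing the README example above, a short sketch of the anchor and map-merging support mentioned in the Compatibility section (the document and key names are made up):

```go
package main

import (
	"fmt"
	"log"

	"gopkg.in/yaml.v2"
)

var src = `
defaults: &defaults
  adapter: postgres
  host: localhost
development:
  <<: *defaults
  database: dev_db
`

func main() {
	var cfg map[string]map[string]interface{}
	if err := yaml.Unmarshal([]byte(src), &cfg); err != nil {
		log.Fatal(err)
	}
	// "development" should contain adapter and host merged in from the
	// &defaults anchor, plus its own database key.
	fmt.Println(cfg["development"])
}
```
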
/vendor/gopkg.in/yaml.v2/encode.go:
--------------------------------------------------------------------------------
1 | package yaml
2 |
3 | import (
4 | "encoding"
5 | "fmt"
6 | "io"
7 | "reflect"
8 | "regexp"
9 | "sort"
10 | "strconv"
11 | "strings"
12 | "time"
13 | "unicode/utf8"
14 | )
15 |
16 | // jsonNumber is the interface of the encoding/json.Number datatype.
17 | // Repeating the interface here avoids a dependency on encoding/json, and also
18 | // supports other libraries like jsoniter, which use a similar datatype with
19 | // the same interface. Detecting this interface is useful when dealing with
20 | // structures containing json.Number, which is a string under the hood. The
21 | // encoder should prefer the use of Int64(), Float64() and string(), in that
22 | // order, when encoding this type.
23 | type jsonNumber interface {
24 | Float64() (float64, error)
25 | Int64() (int64, error)
26 | String() string
27 | }
28 |
29 | type encoder struct {
30 | emitter yaml_emitter_t
31 | event yaml_event_t
32 | out []byte
33 | flow bool
34 | // doneInit holds whether the initial stream_start_event has been
35 | // emitted.
36 | doneInit bool
37 | }
38 |
39 | func newEncoder() *encoder {
40 | e := &encoder{}
41 | yaml_emitter_initialize(&e.emitter)
42 | yaml_emitter_set_output_string(&e.emitter, &e.out)
43 | yaml_emitter_set_unicode(&e.emitter, true)
44 | return e
45 | }
46 |
47 | func newEncoderWithWriter(w io.Writer) *encoder {
48 | e := &encoder{}
49 | yaml_emitter_initialize(&e.emitter)
50 | yaml_emitter_set_output_writer(&e.emitter, w)
51 | yaml_emitter_set_unicode(&e.emitter, true)
52 | return e
53 | }
54 |
55 | func (e *encoder) init() {
56 | if e.doneInit {
57 | return
58 | }
59 | yaml_stream_start_event_initialize(&e.event, yaml_UTF8_ENCODING)
60 | e.emit()
61 | e.doneInit = true
62 | }
63 |
64 | func (e *encoder) finish() {
65 | e.emitter.open_ended = false
66 | yaml_stream_end_event_initialize(&e.event)
67 | e.emit()
68 | }
69 |
70 | func (e *encoder) destroy() {
71 | yaml_emitter_delete(&e.emitter)
72 | }
73 |
74 | func (e *encoder) emit() {
75 | // This will internally delete the e.event value.
76 | e.must(yaml_emitter_emit(&e.emitter, &e.event))
77 | }
78 |
79 | func (e *encoder) must(ok bool) {
80 | if !ok {
81 | msg := e.emitter.problem
82 | if msg == "" {
83 | msg = "unknown problem generating YAML content"
84 | }
85 | failf("%s", msg)
86 | }
87 | }
88 |
89 | func (e *encoder) marshalDoc(tag string, in reflect.Value) {
90 | e.init()
91 | yaml_document_start_event_initialize(&e.event, nil, nil, true)
92 | e.emit()
93 | e.marshal(tag, in)
94 | yaml_document_end_event_initialize(&e.event, true)
95 | e.emit()
96 | }
97 |
98 | func (e *encoder) marshal(tag string, in reflect.Value) {
99 | if !in.IsValid() || in.Kind() == reflect.Ptr && in.IsNil() {
100 | e.nilv()
101 | return
102 | }
103 | iface := in.Interface()
104 | switch m := iface.(type) {
105 | case jsonNumber:
106 | integer, err := m.Int64()
107 | if err == nil {
108 | // In this case the json.Number is a valid int64
109 | in = reflect.ValueOf(integer)
110 | break
111 | }
112 | float, err := m.Float64()
113 | if err == nil {
114 | // In this case the json.Number is a valid float64
115 | in = reflect.ValueOf(float)
116 | break
117 | }
118 | // fallback case - no number could be obtained
119 | in = reflect.ValueOf(m.String())
120 | case time.Time, *time.Time:
121 | // Although time.Time implements TextMarshaler,
122 | // we don't want to treat it as a string for YAML
123 | // purposes because YAML has special support for
124 | // timestamps.
125 | case Marshaler:
126 | v, err := m.MarshalYAML()
127 | if err != nil {
128 | fail(err)
129 | }
130 | if v == nil {
131 | e.nilv()
132 | return
133 | }
134 | in = reflect.ValueOf(v)
135 | case encoding.TextMarshaler:
136 | text, err := m.MarshalText()
137 | if err != nil {
138 | fail(err)
139 | }
140 | in = reflect.ValueOf(string(text))
141 | case nil:
142 | e.nilv()
143 | return
144 | }
145 | switch in.Kind() {
146 | case reflect.Interface:
147 | e.marshal(tag, in.Elem())
148 | case reflect.Map:
149 | e.mapv(tag, in)
150 | case reflect.Ptr:
151 | if in.Type() == ptrTimeType {
152 | e.timev(tag, in.Elem())
153 | } else {
154 | e.marshal(tag, in.Elem())
155 | }
156 | case reflect.Struct:
157 | if in.Type() == timeType {
158 | e.timev(tag, in)
159 | } else {
160 | e.structv(tag, in)
161 | }
162 | case reflect.Slice, reflect.Array:
163 | if in.Type().Elem() == mapItemType {
164 | e.itemsv(tag, in)
165 | } else {
166 | e.slicev(tag, in)
167 | }
168 | case reflect.String:
169 | e.stringv(tag, in)
170 | case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
171 | if in.Type() == durationType {
172 | e.stringv(tag, reflect.ValueOf(iface.(time.Duration).String()))
173 | } else {
174 | e.intv(tag, in)
175 | }
176 | case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
177 | e.uintv(tag, in)
178 | case reflect.Float32, reflect.Float64:
179 | e.floatv(tag, in)
180 | case reflect.Bool:
181 | e.boolv(tag, in)
182 | default:
183 | panic("cannot marshal type: " + in.Type().String())
184 | }
185 | }
186 |
187 | func (e *encoder) mapv(tag string, in reflect.Value) {
188 | e.mappingv(tag, func() {
189 | keys := keyList(in.MapKeys())
190 | sort.Sort(keys)
191 | for _, k := range keys {
192 | e.marshal("", k)
193 | e.marshal("", in.MapIndex(k))
194 | }
195 | })
196 | }
197 |
198 | func (e *encoder) itemsv(tag string, in reflect.Value) {
199 | e.mappingv(tag, func() {
200 | slice := in.Convert(reflect.TypeOf([]MapItem{})).Interface().([]MapItem)
201 | for _, item := range slice {
202 | e.marshal("", reflect.ValueOf(item.Key))
203 | e.marshal("", reflect.ValueOf(item.Value))
204 | }
205 | })
206 | }
207 |
208 | func (e *encoder) structv(tag string, in reflect.Value) {
209 | sinfo, err := getStructInfo(in.Type())
210 | if err != nil {
211 | panic(err)
212 | }
213 | e.mappingv(tag, func() {
214 | for _, info := range sinfo.FieldsList {
215 | var value reflect.Value
216 | if info.Inline == nil {
217 | value = in.Field(info.Num)
218 | } else {
219 | value = in.FieldByIndex(info.Inline)
220 | }
221 | if info.OmitEmpty && isZero(value) {
222 | continue
223 | }
224 | e.marshal("", reflect.ValueOf(info.Key))
225 | e.flow = info.Flow
226 | e.marshal("", value)
227 | }
228 | if sinfo.InlineMap >= 0 {
229 | m := in.Field(sinfo.InlineMap)
230 | if m.Len() > 0 {
231 | e.flow = false
232 | keys := keyList(m.MapKeys())
233 | sort.Sort(keys)
234 | for _, k := range keys {
235 | if _, found := sinfo.FieldsMap[k.String()]; found {
236 | panic(fmt.Sprintf("Can't have key %q in inlined map; conflicts with struct field", k.String()))
237 | }
238 | e.marshal("", k)
239 | e.flow = false
240 | e.marshal("", m.MapIndex(k))
241 | }
242 | }
243 | }
244 | })
245 | }
246 |
247 | func (e *encoder) mappingv(tag string, f func()) {
248 | implicit := tag == ""
249 | style := yaml_BLOCK_MAPPING_STYLE
250 | if e.flow {
251 | e.flow = false
252 | style = yaml_FLOW_MAPPING_STYLE
253 | }
254 | yaml_mapping_start_event_initialize(&e.event, nil, []byte(tag), implicit, style)
255 | e.emit()
256 | f()
257 | yaml_mapping_end_event_initialize(&e.event)
258 | e.emit()
259 | }
260 |
261 | func (e *encoder) slicev(tag string, in reflect.Value) {
262 | implicit := tag == ""
263 | style := yaml_BLOCK_SEQUENCE_STYLE
264 | if e.flow {
265 | e.flow = false
266 | style = yaml_FLOW_SEQUENCE_STYLE
267 | }
268 | e.must(yaml_sequence_start_event_initialize(&e.event, nil, []byte(tag), implicit, style))
269 | e.emit()
270 | n := in.Len()
271 | for i := 0; i < n; i++ {
272 | e.marshal("", in.Index(i))
273 | }
274 | e.must(yaml_sequence_end_event_initialize(&e.event))
275 | e.emit()
276 | }
277 |
278 | // isBase60 returns whether s is in base 60 notation as defined in YAML 1.1.
279 | //
280 | // The base 60 float notation in YAML 1.1 is a terrible idea and is unsupported
281 | // in YAML 1.2 and by this package, but these should be marshalled quoted for
282 | // the time being for compatibility with other parsers.
283 | func isBase60Float(s string) (result bool) {
284 | // Fast path.
285 | if s == "" {
286 | return false
287 | }
288 | c := s[0]
289 | if !(c == '+' || c == '-' || c >= '0' && c <= '9') || strings.IndexByte(s, ':') < 0 {
290 | return false
291 | }
292 | // Do the full match.
293 | return base60float.MatchString(s)
294 | }
295 |
296 | // From http://yaml.org/type/float.html, except the regular expression there
297 | // is bogus. In practice parsers do not enforce the "\.[0-9_]*" suffix.
298 | var base60float = regexp.MustCompile(`^[-+]?[0-9][0-9_]*(?::[0-5]?[0-9])+(?:\.[0-9_]*)?$`)
299 |
300 | func (e *encoder) stringv(tag string, in reflect.Value) {
301 | var style yaml_scalar_style_t
302 | s := in.String()
303 | canUsePlain := true
304 | switch {
305 | case !utf8.ValidString(s):
306 | if tag == yaml_BINARY_TAG {
307 | failf("explicitly tagged !!binary data must be base64-encoded")
308 | }
309 | if tag != "" {
310 | failf("cannot marshal invalid UTF-8 data as %s", shortTag(tag))
311 | }
312 | // It can't be encoded directly as YAML so use a binary tag
313 | // and encode it as base64.
314 | tag = yaml_BINARY_TAG
315 | s = encodeBase64(s)
316 | case tag == "":
317 | // Check to see if it would resolve to a specific
318 | // tag when encoded unquoted. If it doesn't,
319 | // there's no need to quote it.
320 | rtag, _ := resolve("", s)
321 | canUsePlain = rtag == yaml_STR_TAG && !isBase60Float(s)
322 | }
323 | // Note: it's possible for user code to emit invalid YAML
324 | // if they explicitly specify a tag and a string containing
325 | // text that's incompatible with that tag.
326 | switch {
327 | case strings.Contains(s, "\n"):
328 | style = yaml_LITERAL_SCALAR_STYLE
329 | case canUsePlain:
330 | style = yaml_PLAIN_SCALAR_STYLE
331 | default:
332 | style = yaml_DOUBLE_QUOTED_SCALAR_STYLE
333 | }
334 | e.emitScalar(s, "", tag, style)
335 | }
336 |
337 | func (e *encoder) boolv(tag string, in reflect.Value) {
338 | var s string
339 | if in.Bool() {
340 | s = "true"
341 | } else {
342 | s = "false"
343 | }
344 | e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE)
345 | }
346 |
347 | func (e *encoder) intv(tag string, in reflect.Value) {
348 | s := strconv.FormatInt(in.Int(), 10)
349 | e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE)
350 | }
351 |
352 | func (e *encoder) uintv(tag string, in reflect.Value) {
353 | s := strconv.FormatUint(in.Uint(), 10)
354 | e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE)
355 | }
356 |
357 | func (e *encoder) timev(tag string, in reflect.Value) {
358 | t := in.Interface().(time.Time)
359 | s := t.Format(time.RFC3339Nano)
360 | e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE)
361 | }
362 |
363 | func (e *encoder) floatv(tag string, in reflect.Value) {
364 | // Issue #352: When formatting, use the precision of the underlying value
365 | precision := 64
366 | if in.Kind() == reflect.Float32 {
367 | precision = 32
368 | }
369 |
370 | s := strconv.FormatFloat(in.Float(), 'g', -1, precision)
371 | switch s {
372 | case "+Inf":
373 | s = ".inf"
374 | case "-Inf":
375 | s = "-.inf"
376 | case "NaN":
377 | s = ".nan"
378 | }
379 | e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE)
380 | }
381 |
382 | func (e *encoder) nilv() {
383 | e.emitScalar("null", "", "", yaml_PLAIN_SCALAR_STYLE)
384 | }
385 |
386 | func (e *encoder) emitScalar(value, anchor, tag string, style yaml_scalar_style_t) {
387 | implicit := tag == ""
388 | e.must(yaml_scalar_event_initialize(&e.event, []byte(anchor), []byte(tag), []byte(value), implicit, implicit, style))
389 | e.emit()
390 | }
391 |
--------------------------------------------------------------------------------
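
Two behaviours of the encoder above, illustrated briefly (the output shapes described in comments are expectations, not asserted byte-for-byte): json.Number values are handled via the jsonNumber interface, and strings that are not valid UTF-8 fall back to a !!binary tag with base64 content:

```go
package main

import (
	"encoding/json"
	"fmt"
	"log"

	"gopkg.in/yaml.v2"
)

func main() {
	doc := map[string]interface{}{
		// json.Number satisfies the jsonNumber interface, so these should
		// be emitted as plain YAML numbers rather than quoted strings.
		"count": json.Number("42"),
		"ratio": json.Number("3.14"),
		// Not valid UTF-8: stringv switches to the !!binary tag and
		// base64-encodes the bytes.
		"raw": "\xff\xfe",
	}
	out, err := yaml.Marshal(doc)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%s", out)
}
```
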
/vendor/gopkg.in/yaml.v2/go.mod:
--------------------------------------------------------------------------------
1 | module "gopkg.in/yaml.v2"
2 |
3 | require (
4 | "gopkg.in/check.v1" v0.0.0-20161208181325-20d25e280405
5 | )
6 |
--------------------------------------------------------------------------------
/vendor/gopkg.in/yaml.v2/resolve.go:
--------------------------------------------------------------------------------
1 | package yaml
2 |
3 | import (
4 | "encoding/base64"
5 | "math"
6 | "regexp"
7 | "strconv"
8 | "strings"
9 | "time"
10 | )
11 |
12 | type resolveMapItem struct {
13 | value interface{}
14 | tag string
15 | }
16 |
17 | var resolveTable = make([]byte, 256)
18 | var resolveMap = make(map[string]resolveMapItem)
19 |
20 | func init() {
21 | t := resolveTable
22 | t[int('+')] = 'S' // Sign
23 | t[int('-')] = 'S'
24 | for _, c := range "0123456789" {
25 | t[int(c)] = 'D' // Digit
26 | }
27 | for _, c := range "yYnNtTfFoO~" {
28 | t[int(c)] = 'M' // In map
29 | }
30 | t[int('.')] = '.' // Float (potentially in map)
31 |
32 | var resolveMapList = []struct {
33 | v interface{}
34 | tag string
35 | l []string
36 | }{
37 | {true, yaml_BOOL_TAG, []string{"y", "Y", "yes", "Yes", "YES"}},
38 | {true, yaml_BOOL_TAG, []string{"true", "True", "TRUE"}},
39 | {true, yaml_BOOL_TAG, []string{"on", "On", "ON"}},
40 | {false, yaml_BOOL_TAG, []string{"n", "N", "no", "No", "NO"}},
41 | {false, yaml_BOOL_TAG, []string{"false", "False", "FALSE"}},
42 | {false, yaml_BOOL_TAG, []string{"off", "Off", "OFF"}},
43 | {nil, yaml_NULL_TAG, []string{"", "~", "null", "Null", "NULL"}},
44 | {math.NaN(), yaml_FLOAT_TAG, []string{".nan", ".NaN", ".NAN"}},
45 | {math.Inf(+1), yaml_FLOAT_TAG, []string{".inf", ".Inf", ".INF"}},
46 | {math.Inf(+1), yaml_FLOAT_TAG, []string{"+.inf", "+.Inf", "+.INF"}},
47 | {math.Inf(-1), yaml_FLOAT_TAG, []string{"-.inf", "-.Inf", "-.INF"}},
48 | {"<<", yaml_MERGE_TAG, []string{"<<"}},
49 | }
50 |
51 | m := resolveMap
52 | for _, item := range resolveMapList {
53 | for _, s := range item.l {
54 | m[s] = resolveMapItem{item.v, item.tag}
55 | }
56 | }
57 | }
58 |
59 | const longTagPrefix = "tag:yaml.org,2002:"
60 |
61 | func shortTag(tag string) string {
62 | // TODO This can easily be made faster and produce less garbage.
63 | if strings.HasPrefix(tag, longTagPrefix) {
64 | return "!!" + tag[len(longTagPrefix):]
65 | }
66 | return tag
67 | }
68 |
69 | func longTag(tag string) string {
70 | if strings.HasPrefix(tag, "!!") {
71 | return longTagPrefix + tag[2:]
72 | }
73 | return tag
74 | }
75 |
76 | func resolvableTag(tag string) bool {
77 | switch tag {
78 | case "", yaml_STR_TAG, yaml_BOOL_TAG, yaml_INT_TAG, yaml_FLOAT_TAG, yaml_NULL_TAG, yaml_TIMESTAMP_TAG:
79 | return true
80 | }
81 | return false
82 | }
83 |
84 | var yamlStyleFloat = regexp.MustCompile(`^[-+]?(\.[0-9]+|[0-9]+(\.[0-9]*)?)([eE][-+]?[0-9]+)?$`)
85 |
86 | func resolve(tag string, in string) (rtag string, out interface{}) {
87 | if !resolvableTag(tag) {
88 | return tag, in
89 | }
90 |
91 | defer func() {
92 | switch tag {
93 | case "", rtag, yaml_STR_TAG, yaml_BINARY_TAG:
94 | return
95 | case yaml_FLOAT_TAG:
96 | if rtag == yaml_INT_TAG {
97 | switch v := out.(type) {
98 | case int64:
99 | rtag = yaml_FLOAT_TAG
100 | out = float64(v)
101 | return
102 | case int:
103 | rtag = yaml_FLOAT_TAG
104 | out = float64(v)
105 | return
106 | }
107 | }
108 | }
109 | failf("cannot decode %s `%s` as a %s", shortTag(rtag), in, shortTag(tag))
110 | }()
111 |
112 | // Any data is accepted as a !!str or !!binary.
113 | // Otherwise, the prefix is enough of a hint about what it might be.
114 | hint := byte('N')
115 | if in != "" {
116 | hint = resolveTable[in[0]]
117 | }
118 | if hint != 0 && tag != yaml_STR_TAG && tag != yaml_BINARY_TAG {
119 | // Handle things we can lookup in a map.
120 | if item, ok := resolveMap[in]; ok {
121 | return item.tag, item.value
122 | }
123 |
124 | // Base 60 floats are a bad idea, were dropped in YAML 1.2, and
125 | // are purposefully unsupported here. They're still quoted on
126 | 		// the way out for compatibility with other parsers, though.
127 |
128 | switch hint {
129 | case 'M':
130 | // We've already checked the map above.
131 |
132 | case '.':
133 | // Not in the map, so maybe a normal float.
134 | floatv, err := strconv.ParseFloat(in, 64)
135 | if err == nil {
136 | return yaml_FLOAT_TAG, floatv
137 | }
138 |
139 | case 'D', 'S':
140 | // Int, float, or timestamp.
141 | // Only try values as a timestamp if the value is unquoted or there's an explicit
142 | // !!timestamp tag.
143 | if tag == "" || tag == yaml_TIMESTAMP_TAG {
144 | t, ok := parseTimestamp(in)
145 | if ok {
146 | return yaml_TIMESTAMP_TAG, t
147 | }
148 | }
149 |
150 | plain := strings.Replace(in, "_", "", -1)
151 | intv, err := strconv.ParseInt(plain, 0, 64)
152 | if err == nil {
153 | if intv == int64(int(intv)) {
154 | return yaml_INT_TAG, int(intv)
155 | } else {
156 | return yaml_INT_TAG, intv
157 | }
158 | }
159 | uintv, err := strconv.ParseUint(plain, 0, 64)
160 | if err == nil {
161 | return yaml_INT_TAG, uintv
162 | }
163 | if yamlStyleFloat.MatchString(plain) {
164 | floatv, err := strconv.ParseFloat(plain, 64)
165 | if err == nil {
166 | return yaml_FLOAT_TAG, floatv
167 | }
168 | }
169 | if strings.HasPrefix(plain, "0b") {
170 | intv, err := strconv.ParseInt(plain[2:], 2, 64)
171 | if err == nil {
172 | if intv == int64(int(intv)) {
173 | return yaml_INT_TAG, int(intv)
174 | } else {
175 | return yaml_INT_TAG, intv
176 | }
177 | }
178 | uintv, err := strconv.ParseUint(plain[2:], 2, 64)
179 | if err == nil {
180 | return yaml_INT_TAG, uintv
181 | }
182 | } else if strings.HasPrefix(plain, "-0b") {
183 | intv, err := strconv.ParseInt("-" + plain[3:], 2, 64)
184 | if err == nil {
185 | if true || intv == int64(int(intv)) {
186 | return yaml_INT_TAG, int(intv)
187 | } else {
188 | return yaml_INT_TAG, intv
189 | }
190 | }
191 | }
192 | default:
193 | panic("resolveTable item not yet handled: " + string(rune(hint)) + " (with " + in + ")")
194 | }
195 | }
196 | return yaml_STR_TAG, in
197 | }
198 |
199 | // encodeBase64 encodes s as base64 that is broken up into multiple lines
200 | // as appropriate for the resulting length.
201 | func encodeBase64(s string) string {
202 | const lineLen = 70
203 | encLen := base64.StdEncoding.EncodedLen(len(s))
204 | lines := encLen/lineLen + 1
205 | buf := make([]byte, encLen*2+lines)
206 | in := buf[0:encLen]
207 | out := buf[encLen:]
208 | base64.StdEncoding.Encode(in, []byte(s))
209 | k := 0
210 | for i := 0; i < len(in); i += lineLen {
211 | j := i + lineLen
212 | if j > len(in) {
213 | j = len(in)
214 | }
215 | k += copy(out[k:], in[i:j])
216 | if lines > 1 {
217 | out[k] = '\n'
218 | k++
219 | }
220 | }
221 | return string(out[:k])
222 | }
223 |
224 | // This is a subset of the formats allowed by the regular expression
225 | // defined at http://yaml.org/type/timestamp.html.
226 | var allowedTimestampFormats = []string{
227 | 	"2006-1-2T15:4:5.999999999Z07:00", // RFC3339Nano with short date fields.
228 | "2006-1-2t15:4:5.999999999Z07:00", // RFC3339Nano with short date fields and lower-case "t".
229 | "2006-1-2 15:4:5.999999999", // space separated with no time zone
230 | "2006-1-2", // date only
231 | // Notable exception: time.Parse cannot handle: "2001-12-14 21:59:43.10 -5"
232 | // from the set of examples.
233 | }
234 |
235 | // parseTimestamp parses s as a timestamp string and
236 | // returns the timestamp and reports whether it succeeded.
237 | // Timestamp formats are defined at http://yaml.org/type/timestamp.html
238 | func parseTimestamp(s string) (time.Time, bool) {
239 | // TODO write code to check all the formats supported by
240 | // http://yaml.org/type/timestamp.html instead of using time.Parse.
241 |
242 | // Quick check: all date formats start with YYYY-.
243 | i := 0
244 | for ; i < len(s); i++ {
245 | if c := s[i]; c < '0' || c > '9' {
246 | break
247 | }
248 | }
249 | if i != 4 || i == len(s) || s[i] != '-' {
250 | return time.Time{}, false
251 | }
252 | for _, format := range allowedTimestampFormats {
253 | if t, err := time.Parse(format, s); err == nil {
254 | return t, true
255 | }
256 | }
257 | return time.Time{}, false
258 | }
259 |
--------------------------------------------------------------------------------
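
A brief sketch of the resolution rules above as seen from the public API: untagged plain scalars are resolved to ints (including 0x/0b forms), floats, booleans, nulls, and timestamps. The exact results are hedged in the comments:

```go
package main

import (
	"fmt"
	"log"
	"time"

	"gopkg.in/yaml.v2"
)

var src = []byte(`
when: 2015-02-24T18:19:39Z
count: 0x1A
ratio: 3.5
flag: yes
`)

func main() {
	var doc struct {
		When  time.Time   `yaml:"when"`
		Count interface{} `yaml:"count"`
		Ratio interface{} `yaml:"ratio"`
		Flag  interface{} `yaml:"flag"`
	}
	if err := yaml.Unmarshal(src, &doc); err != nil {
		log.Fatal(err)
	}
	// count should resolve to the int 26 (ParseInt with base 0 handles the
	// 0x prefix), ratio to a float64, and flag to true via the resolve map.
	fmt.Println(doc.When, doc.Count, doc.Ratio, doc.Flag)
}
```
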
/vendor/gopkg.in/yaml.v2/sorter.go:
--------------------------------------------------------------------------------
1 | package yaml
2 |
3 | import (
4 | "reflect"
5 | "unicode"
6 | )
7 |
8 | type keyList []reflect.Value
9 |
10 | func (l keyList) Len() int { return len(l) }
11 | func (l keyList) Swap(i, j int) { l[i], l[j] = l[j], l[i] }
12 | func (l keyList) Less(i, j int) bool {
13 | a := l[i]
14 | b := l[j]
15 | ak := a.Kind()
16 | bk := b.Kind()
17 | for (ak == reflect.Interface || ak == reflect.Ptr) && !a.IsNil() {
18 | a = a.Elem()
19 | ak = a.Kind()
20 | }
21 | for (bk == reflect.Interface || bk == reflect.Ptr) && !b.IsNil() {
22 | b = b.Elem()
23 | bk = b.Kind()
24 | }
25 | af, aok := keyFloat(a)
26 | bf, bok := keyFloat(b)
27 | if aok && bok {
28 | if af != bf {
29 | return af < bf
30 | }
31 | if ak != bk {
32 | return ak < bk
33 | }
34 | return numLess(a, b)
35 | }
36 | if ak != reflect.String || bk != reflect.String {
37 | return ak < bk
38 | }
39 | ar, br := []rune(a.String()), []rune(b.String())
40 | for i := 0; i < len(ar) && i < len(br); i++ {
41 | if ar[i] == br[i] {
42 | continue
43 | }
44 | al := unicode.IsLetter(ar[i])
45 | bl := unicode.IsLetter(br[i])
46 | if al && bl {
47 | return ar[i] < br[i]
48 | }
49 | if al || bl {
50 | return bl
51 | }
52 | var ai, bi int
53 | var an, bn int64
54 | if ar[i] == '0' || br[i] == '0' {
55 | for j := i-1; j >= 0 && unicode.IsDigit(ar[j]); j-- {
56 | if ar[j] != '0' {
57 | an = 1
58 | bn = 1
59 | break
60 | }
61 | }
62 | }
63 | for ai = i; ai < len(ar) && unicode.IsDigit(ar[ai]); ai++ {
64 | an = an*10 + int64(ar[ai]-'0')
65 | }
66 | for bi = i; bi < len(br) && unicode.IsDigit(br[bi]); bi++ {
67 | bn = bn*10 + int64(br[bi]-'0')
68 | }
69 | if an != bn {
70 | return an < bn
71 | }
72 | if ai != bi {
73 | return ai < bi
74 | }
75 | return ar[i] < br[i]
76 | }
77 | return len(ar) < len(br)
78 | }
79 |
80 | // keyFloat returns a float value for v if it is a number/bool
81 | // and whether it is a number/bool or not.
82 | func keyFloat(v reflect.Value) (f float64, ok bool) {
83 | switch v.Kind() {
84 | case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
85 | return float64(v.Int()), true
86 | case reflect.Float32, reflect.Float64:
87 | return v.Float(), true
88 | case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
89 | return float64(v.Uint()), true
90 | case reflect.Bool:
91 | if v.Bool() {
92 | return 1, true
93 | }
94 | return 0, true
95 | }
96 | return 0, false
97 | }
98 |
99 | // numLess returns whether a < b.
100 | // a and b must necessarily have the same kind.
101 | func numLess(a, b reflect.Value) bool {
102 | switch a.Kind() {
103 | case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
104 | return a.Int() < b.Int()
105 | case reflect.Float32, reflect.Float64:
106 | return a.Float() < b.Float()
107 | case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
108 | return a.Uint() < b.Uint()
109 | case reflect.Bool:
110 | return !a.Bool() && b.Bool()
111 | }
112 | panic("not a number")
113 | }
114 |
--------------------------------------------------------------------------------
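
The key ordering implemented above surfaces whenever maps are marshalled: numeric keys sort before string keys, and digit runs inside strings are compared numerically. A tiny sketch (the ordering in the comment is the expected behaviour, not an asserted golden output):

```go
package main

import (
	"fmt"
	"log"

	"gopkg.in/yaml.v2"
)

func main() {
	m := map[string]int{"item10": 3, "item2": 2, "item1": 1}
	out, err := yaml.Marshal(m)
	if err != nil {
		log.Fatal(err)
	}
	// Expect item1, item2, item10 -- digits are compared as numbers by
	// keyList.Less rather than character by character.
	fmt.Printf("%s", out)
}
```
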
/vendor/gopkg.in/yaml.v2/writerc.go:
--------------------------------------------------------------------------------
1 | package yaml
2 |
3 | // Set the writer error and return false.
4 | func yaml_emitter_set_writer_error(emitter *yaml_emitter_t, problem string) bool {
5 | emitter.error = yaml_WRITER_ERROR
6 | emitter.problem = problem
7 | return false
8 | }
9 |
10 | // Flush the output buffer.
11 | func yaml_emitter_flush(emitter *yaml_emitter_t) bool {
12 | if emitter.write_handler == nil {
13 | panic("write handler not set")
14 | }
15 |
16 | // Check if the buffer is empty.
17 | if emitter.buffer_pos == 0 {
18 | return true
19 | }
20 |
21 | if err := emitter.write_handler(emitter, emitter.buffer[:emitter.buffer_pos]); err != nil {
22 | return yaml_emitter_set_writer_error(emitter, "write error: "+err.Error())
23 | }
24 | emitter.buffer_pos = 0
25 | return true
26 | }
27 |
--------------------------------------------------------------------------------
/vendor/gopkg.in/yaml.v2/yamlprivateh.go:
--------------------------------------------------------------------------------
1 | package yaml
2 |
3 | const (
4 | // The size of the input raw buffer.
5 | input_raw_buffer_size = 512
6 |
7 | // The size of the input buffer.
8 | // It should be possible to decode the whole raw buffer.
9 | input_buffer_size = input_raw_buffer_size * 3
10 |
11 | // The size of the output buffer.
12 | output_buffer_size = 128
13 |
14 | // The size of the output raw buffer.
15 | // It should be possible to encode the whole output buffer.
16 | output_raw_buffer_size = (output_buffer_size*2 + 2)
17 |
18 | // The size of other stacks and queues.
19 | initial_stack_size = 16
20 | initial_queue_size = 16
21 | initial_string_size = 16
22 | )
23 |
24 | // Check if the character at the specified position is an alphabetical
25 | // character, a digit, '_', or '-'.
26 | func is_alpha(b []byte, i int) bool {
27 | return b[i] >= '0' && b[i] <= '9' || b[i] >= 'A' && b[i] <= 'Z' || b[i] >= 'a' && b[i] <= 'z' || b[i] == '_' || b[i] == '-'
28 | }
29 |
30 | // Check if the character at the specified position is a digit.
31 | func is_digit(b []byte, i int) bool {
32 | return b[i] >= '0' && b[i] <= '9'
33 | }
34 |
35 | // Get the value of a digit.
36 | func as_digit(b []byte, i int) int {
37 | return int(b[i]) - '0'
38 | }
39 |
40 | // Check if the character at the specified position is a hex-digit.
41 | func is_hex(b []byte, i int) bool {
42 | return b[i] >= '0' && b[i] <= '9' || b[i] >= 'A' && b[i] <= 'F' || b[i] >= 'a' && b[i] <= 'f'
43 | }
44 |
45 | // Get the value of a hex-digit.
46 | func as_hex(b []byte, i int) int {
47 | bi := b[i]
48 | if bi >= 'A' && bi <= 'F' {
49 | return int(bi) - 'A' + 10
50 | }
51 | if bi >= 'a' && bi <= 'f' {
52 | return int(bi) - 'a' + 10
53 | }
54 | return int(bi) - '0'
55 | }
56 |
57 | // Check if the character is ASCII.
58 | func is_ascii(b []byte, i int) bool {
59 | return b[i] <= 0x7F
60 | }
61 |
62 | // Check if the character at the start of the buffer can be printed unescaped.
63 | func is_printable(b []byte, i int) bool {
64 | return ((b[i] == 0x0A) || // . == #x0A
65 | (b[i] >= 0x20 && b[i] <= 0x7E) || // #x20 <= . <= #x7E
66 | (b[i] == 0xC2 && b[i+1] >= 0xA0) || // #0xA0 <= . <= #xD7FF
67 | (b[i] > 0xC2 && b[i] < 0xED) ||
68 | (b[i] == 0xED && b[i+1] < 0xA0) ||
69 | (b[i] == 0xEE) ||
70 | (b[i] == 0xEF && // #xE000 <= . <= #xFFFD
71 | !(b[i+1] == 0xBB && b[i+2] == 0xBF) && // && . != #xFEFF
72 | !(b[i+1] == 0xBF && (b[i+2] == 0xBE || b[i+2] == 0xBF))))
73 | }
74 |
75 | // Check if the character at the specified position is NUL.
76 | func is_z(b []byte, i int) bool {
77 | return b[i] == 0x00
78 | }
79 |
80 | // Check if the beginning of the buffer is a BOM.
81 | func is_bom(b []byte, i int) bool {
82 | return b[0] == 0xEF && b[1] == 0xBB && b[2] == 0xBF
83 | }
84 |
85 | // Check if the character at the specified position is space.
86 | func is_space(b []byte, i int) bool {
87 | return b[i] == ' '
88 | }
89 |
90 | // Check if the character at the specified position is tab.
91 | func is_tab(b []byte, i int) bool {
92 | return b[i] == '\t'
93 | }
94 |
95 | // Check if the character at the specified position is blank (space or tab).
96 | func is_blank(b []byte, i int) bool {
97 | //return is_space(b, i) || is_tab(b, i)
98 | return b[i] == ' ' || b[i] == '\t'
99 | }
100 |
101 | // Check if the character at the specified position is a line break.
102 | func is_break(b []byte, i int) bool {
103 | return (b[i] == '\r' || // CR (#xD)
104 | b[i] == '\n' || // LF (#xA)
105 | b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85)
106 | b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028)
107 | b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9) // PS (#x2029)
108 | }
109 |
110 | func is_crlf(b []byte, i int) bool {
111 | return b[i] == '\r' && b[i+1] == '\n'
112 | }
113 |
114 | // Check if the character is a line break or NUL.
115 | func is_breakz(b []byte, i int) bool {
116 | //return is_break(b, i) || is_z(b, i)
117 | return ( // is_break:
118 | b[i] == '\r' || // CR (#xD)
119 | b[i] == '\n' || // LF (#xA)
120 | b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85)
121 | b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028)
122 | b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9 || // PS (#x2029)
123 | // is_z:
124 | b[i] == 0)
125 | }
126 |
127 | // Check if the character is a line break, space, or NUL.
128 | func is_spacez(b []byte, i int) bool {
129 | //return is_space(b, i) || is_breakz(b, i)
130 | return ( // is_space:
131 | b[i] == ' ' ||
132 | // is_breakz:
133 | b[i] == '\r' || // CR (#xD)
134 | b[i] == '\n' || // LF (#xA)
135 | b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85)
136 | b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028)
137 | b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9 || // PS (#x2029)
138 | b[i] == 0)
139 | }
140 |
141 | // Check if the character is a line break, space, tab, or NUL.
142 | func is_blankz(b []byte, i int) bool {
143 | //return is_blank(b, i) || is_breakz(b, i)
144 | return ( // is_blank:
145 | b[i] == ' ' || b[i] == '\t' ||
146 | // is_breakz:
147 | b[i] == '\r' || // CR (#xD)
148 | b[i] == '\n' || // LF (#xA)
149 | b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85)
150 | b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028)
151 | b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9 || // PS (#x2029)
152 | b[i] == 0)
153 | }
154 |
155 | // Determine the width of the character.
156 | func width(b byte) int {
157 | // Don't replace these by a switch without first
158 | // confirming that it is being inlined.
159 | if b&0x80 == 0x00 {
160 | return 1
161 | }
162 | if b&0xE0 == 0xC0 {
163 | return 2
164 | }
165 | if b&0xF0 == 0xE0 {
166 | return 3
167 | }
168 | if b&0xF8 == 0xF0 {
169 | return 4
170 | }
171 | return 0
172 |
173 | }
174 |
--------------------------------------------------------------------------------
/vendor/modules.txt:
--------------------------------------------------------------------------------
1 | # github.com/natefinch/pie v0.0.0-20170715172608-9a0d72014007
2 | github.com/natefinch/pie
3 | # github.com/nfnt/resize v0.0.0-20180221191011-83c6a9932646
4 | github.com/nfnt/resize
5 | # github.com/rivo/duplo v0.0.0-20180323201418-c4ec823d58cd
6 | github.com/rivo/duplo
7 | github.com/rivo/duplo/haar
8 | # github.com/shurcooL/graphql v0.0.0-20181231061246-d48a9a75455f
9 | github.com/shurcooL/graphql
10 | github.com/shurcooL/graphql/ident
11 | github.com/shurcooL/graphql/internal/jsonutil
12 | # golang.org/x/net v0.0.0-20200707034311-ab3426394381
13 | golang.org/x/net/context/ctxhttp
14 | # gopkg.in/yaml.v2 v2.3.0
15 | gopkg.in/yaml.v2
16 |
--------------------------------------------------------------------------------