├── .gitignore ├── .travis.yml ├── Gopkg.lock ├── Gopkg.toml ├── LICENSE.txt ├── Makefile ├── README.md ├── SECURITY.md ├── client.go ├── client_test.go ├── cmd └── hdfs │ ├── bash_completion │ ├── cat.go │ ├── checksum.go │ ├── chmod.go │ ├── chown.go │ ├── complete.go │ ├── df.go │ ├── du.go │ ├── get.go │ ├── ls.go │ ├── main.go │ ├── mkdir.go │ ├── mv.go │ ├── paths.go │ ├── put.go │ ├── rm.go │ ├── test │ ├── cat.bats │ ├── checksum.bats │ ├── df.bats │ ├── du.bats │ ├── glob.bats │ ├── head.bats │ ├── helper.bash │ ├── ls.bats │ ├── mkdir.bats │ ├── mv.bats │ ├── put.bats │ ├── rm.bats │ ├── tail.bats │ └── touch.bats │ ├── touch.go │ └── util.go ├── conf.go ├── conf_test.go ├── content_summary.go ├── content_summary_test.go ├── exceptions.go ├── file_reader.go ├── file_reader_test.go ├── file_writer.go ├── file_writer_test.go ├── hdfs.go ├── mkdir.go ├── mkdir_test.go ├── perms.go ├── perms_test.go ├── protocol ├── hadoop_common │ ├── GenericRefreshProtocol.pb.go │ ├── GenericRefreshProtocol.proto │ ├── GetUserMappingsProtocol.pb.go │ ├── GetUserMappingsProtocol.proto │ ├── HAServiceProtocol.pb.go │ ├── HAServiceProtocol.proto │ ├── IpcConnectionContext.pb.go │ ├── IpcConnectionContext.proto │ ├── ProtobufRpcEngine.pb.go │ ├── ProtobufRpcEngine.proto │ ├── ProtocolInfo.pb.go │ ├── ProtocolInfo.proto │ ├── RefreshAuthorizationPolicyProtocol.pb.go │ ├── RefreshAuthorizationPolicyProtocol.proto │ ├── RefreshCallQueueProtocol.pb.go │ ├── RefreshCallQueueProtocol.proto │ ├── RefreshUserMappingsProtocol.pb.go │ ├── RefreshUserMappingsProtocol.proto │ ├── RpcHeader.pb.go │ ├── RpcHeader.proto │ ├── Security.pb.go │ ├── Security.proto │ ├── TraceAdmin.pb.go │ ├── TraceAdmin.proto │ ├── ZKFCProtocol.pb.go │ └── ZKFCProtocol.proto └── hadoop_hdfs │ ├── ClientDatanodeProtocol.pb.go │ ├── ClientDatanodeProtocol.proto │ ├── ClientNamenodeProtocol.pb.go │ ├── ClientNamenodeProtocol.proto │ ├── ReconfigurationProtocol.pb.go │ ├── ReconfigurationProtocol.proto │ ├── acl.pb.go │ ├── acl.proto │ ├── datatransfer.pb.go │ ├── datatransfer.proto │ ├── encryption.pb.go │ ├── encryption.proto │ ├── erasurecoding.pb.go │ ├── erasurecoding.proto │ ├── hdfs.pb.go │ ├── hdfs.proto │ ├── inotify.pb.go │ ├── inotify.proto │ ├── xattr.pb.go │ └── xattr.proto ├── readdir.go ├── readdir_test.go ├── remove.go ├── remove_test.go ├── rename.go ├── rename_test.go ├── rpc ├── block_read_stream.go ├── block_reader.go ├── block_reader_test.go ├── block_write_stream.go ├── block_writer.go ├── block_writer_test.go ├── checksum_reader.go ├── checksum_reader_test.go ├── datanode_failover.go ├── datanode_failover_test.go ├── namenode.go └── rpc.go ├── setup_test_env.sh ├── stat.go ├── stat_fs.go ├── stat_fs_test.go ├── stat_test.go ├── test ├── conf │ └── hdfs-site.xml ├── conf2 │ └── core-site.xml ├── conf3 │ └── core-site.xml ├── foo.txt └── mobydick.txt └── vendor └── github.com ├── davecgh └── go-spew │ ├── LICENSE │ └── spew │ ├── bypass.go │ ├── bypasssafe.go │ ├── common.go │ ├── config.go │ ├── doc.go │ ├── dump.go │ ├── format.go │ └── spew.go ├── golang └── protobuf │ ├── AUTHORS │ ├── CONTRIBUTORS │ ├── LICENSE │ └── proto │ ├── Makefile │ ├── clone.go │ ├── decode.go │ ├── discard.go │ ├── encode.go │ ├── equal.go │ ├── extensions.go │ ├── lib.go │ ├── message_set.go │ ├── pointer_reflect.go │ ├── pointer_unsafe.go │ ├── properties.go │ ├── text.go │ └── text_parser.go ├── pborman └── getopt │ ├── AUTHORS │ ├── CONTRIBUTING.md │ ├── LICENSE │ ├── README.md │ ├── bool.go │ ├── counter.go │ ├── duration.go │ ├── 
enum.go │ ├── error.go │ ├── getopt.go │ ├── int.go │ ├── int16.go │ ├── int32.go │ ├── int64.go │ ├── list.go │ ├── option.go │ ├── set.go │ ├── signed.go │ ├── string.go │ ├── uint.go │ ├── uint16.go │ ├── uint32.go │ ├── uint64.go │ ├── unsigned.go │ └── var.go ├── pmezard └── go-difflib │ ├── LICENSE │ └── difflib │ └── difflib.go └── stretchr └── testify ├── LICENSE ├── assert ├── assertion_format.go ├── assertion_format.go.tmpl ├── assertion_forward.go ├── assertion_forward.go.tmpl ├── assertions.go ├── doc.go ├── errors.go ├── forward_assertions.go └── http_assertions.go └── require ├── doc.go ├── forward_requirements.go ├── require.go ├── require.go.tmpl ├── require_forward.go ├── require_forward.go.tmpl └── requirements.go /.gitignore: -------------------------------------------------------------------------------- 1 | build 2 | hdfs 3 | !hdfs/ 4 | minicluster.log 5 | -------------------------------------------------------------------------------- /.travis.yml: -------------------------------------------------------------------------------- 1 | os: 2 | - linux 3 | - osx 4 | language: go 5 | go_import_path: github.com/colinmarc/hdfs 6 | go: 1.x 7 | before_install: 8 | - git clone https://github.com/sstephenson/bats $HOME/bats 9 | - mkdir -p $HOME/bats/build 10 | - "$HOME/bats/install.sh $HOME/bats/build" 11 | - export PATH="$PATH:$HOME/bats/build/bin" 12 | env: 13 | - HADOOP_DISTRO=cdh 14 | - HADOOP_DISTRO=hdp 15 | before_script: 16 | - export NN_PORT=9000 17 | - export HADOOP_NAMENODE="localhost:$NN_PORT" 18 | - export HADOOP_HOME="$HOME/hadoop-$HADOOP_DISTRO" 19 | - "./setup_test_env.sh" 20 | before_deploy: make release 21 | script: 22 | - find protocol -name *.pb.go | xargs touch # so make doesn't try to regen protobuf files 23 | - make test 24 | - cat minicluster.log 25 | sudo: false 26 | cache: 27 | - "$HOME/hadoop-$HADOOP_DISTRO" 28 | - "$HOME/bats" 29 | deploy: 30 | skip_cleanup: true 31 | provider: releases 32 | api_key: 33 | secure: HgyYfxoZfsZhDNeeL4Myi85aeyei80hQL29VhQKqkFrcoKL4V4+fJo7uG5XfKLCU0nQrRA98EtQO6w8AD+ULn/Ez8DA/RHey3Ny5GzX2ZaQ35KiuM71jPcvggxh8e2EJ14txxm7TAnqCxP7p5sJggiU0xj2w3vDUUJp5Q+vP3WE= 34 | file: gohdfs-*.tar.gz 35 | file_glob: true 36 | on: 37 | repo: colinmarc/hdfs 38 | tags: true 39 | all_branches: true 40 | condition: $HADOOP_DISTRO = cdh 41 | matrix: 42 | allow_failures: 43 | - os: osx 44 | -------------------------------------------------------------------------------- /Gopkg.lock: -------------------------------------------------------------------------------- 1 | # This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'. 
2 | 3 | 4 | [[projects]] 5 | name = "github.com/davecgh/go-spew" 6 | packages = ["spew"] 7 | revision = "346938d642f2ec3594ed81d874461961cd0faa76" 8 | version = "v1.1.0" 9 | 10 | [[projects]] 11 | branch = "master" 12 | name = "github.com/golang/protobuf" 13 | packages = ["proto"] 14 | revision = "925541529c1fa6821df4e44ce2723319eb2be768" 15 | 16 | [[projects]] 17 | branch = "master" 18 | name = "github.com/pborman/getopt" 19 | packages = ["."] 20 | revision = "7148bc3a4c3008adfcab60cbebfd0576018f330b" 21 | 22 | [[projects]] 23 | name = "github.com/pmezard/go-difflib" 24 | packages = ["difflib"] 25 | revision = "792786c7400a136282c1664665ae0a8db921c6c2" 26 | version = "v1.0.0" 27 | 28 | [[projects]] 29 | name = "github.com/stretchr/testify" 30 | packages = [ 31 | "assert", 32 | "require" 33 | ] 34 | revision = "b91bfb9ebec76498946beb6af7c0230c7cc7ba6c" 35 | version = "v1.2.0" 36 | 37 | [solve-meta] 38 | analyzer-name = "dep" 39 | analyzer-version = 1 40 | inputs-digest = "6a827061ac3d7c045d36ae7765d9c25d37753981315d02cdb6484bebb48375e7" 41 | solver-name = "gps-cdcl" 42 | solver-version = 1 43 | -------------------------------------------------------------------------------- /Gopkg.toml: -------------------------------------------------------------------------------- 1 | # Gopkg.toml example 2 | # 3 | # Refer to https://github.com/golang/dep/blob/master/docs/Gopkg.toml.md 4 | # for detailed Gopkg.toml documentation. 5 | # 6 | # required = ["github.com/user/thing/cmd/thing"] 7 | # ignored = ["github.com/user/project/pkgX", "bitbucket.org/user/project/pkgA/pkgY"] 8 | # 9 | # [[constraint]] 10 | # name = "github.com/user/project" 11 | # version = "1.0.0" 12 | # 13 | # [[constraint]] 14 | # name = "github.com/user/project2" 15 | # branch = "dev" 16 | # source = "github.com/myfork/project2" 17 | # 18 | # [[override]] 19 | # name = "github.com/x/y" 20 | # version = "2.4.0" 21 | # 22 | # [prune] 23 | # non-go = false 24 | # go-tests = true 25 | # unused-packages = true 26 | 27 | 28 | [[constraint]] 29 | branch = "master" 30 | name = "github.com/pborman/getopt" 31 | 32 | [prune] 33 | go-tests = true 34 | unused-packages = true 35 | -------------------------------------------------------------------------------- /LICENSE.txt: -------------------------------------------------------------------------------- 1 | Copyright (c) 2014 Colin Marc (colinmarc@gmail.com) 2 | 3 | MIT License 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining 6 | a copy of this software and associated documentation files (the 7 | "Software"), to deal in the Software without restriction, including 8 | without limitation the rights to use, copy, modify, merge, publish, 9 | distribute, sublicense, and/or sell copies of the Software, and to 10 | permit persons to whom the Software is furnished to do so, subject to 11 | the following conditions: 12 | 13 | The above copyright notice and this permission notice shall be 14 | included in all copies or substantial portions of the Software. 15 | 16 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, 17 | EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF 18 | MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND 19 | NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE 20 | LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION 21 | OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION 22 | WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
23 | -------------------------------------------------------------------------------- /Makefile: -------------------------------------------------------------------------------- 1 | HADOOP_COMMON_PROTOS = $(shell find protocol/hadoop_common -name '*.proto') 2 | HADOOP_HDFS_PROTOS = $(shell find protocol/hadoop_hdfs -name '*.proto') 3 | GENERATED_PROTOS = $(shell echo "$(HADOOP_HDFS_PROTOS) $(HADOOP_COMMON_PROTOS)" | sed 's/\.proto/\.pb\.go/g') 4 | SOURCES = $(shell find . -name '*.go') $(GENERATED_PROTOS) 5 | 6 | # Protobuf needs one of these for every 'import "foo.proto"' in .protoc files. 7 | PROTO_MAPPING = MSecurity.proto=github.com/colinmarc/hdfs/protocol/hadoop_common 8 | 9 | TRAVIS_TAG ?= $(shell git rev-parse HEAD) 10 | ARCH = $(shell go env GOOS)-$(shell go env GOARCH) 11 | RELEASE_NAME = gohdfs-$(TRAVIS_TAG)-$(ARCH) 12 | 13 | all: hdfs 14 | 15 | %.pb.go: $(HADOOP_HDFS_PROTOS) $(HADOOP_COMMON_PROTOS) 16 | protoc --go_out='$(PROTO_MAPPING):protocol/hadoop_common' -Iprotocol/hadoop_common -Iprotocol/hadoop_hdfs $(HADOOP_COMMON_PROTOS) 17 | protoc --go_out='$(PROTO_MAPPING):protocol/hadoop_hdfs' -Iprotocol/hadoop_common -Iprotocol/hadoop_hdfs $(HADOOP_HDFS_PROTOS) 18 | 19 | clean-protos: 20 | find . -name *.pb.go | xargs rm 21 | 22 | hdfs: clean $(SOURCES) 23 | go build -ldflags "-X main.version=$(TRAVIS_TAG)" ./cmd/hdfs 24 | 25 | install: get-deps 26 | go install ./... 27 | 28 | test: hdfs 29 | go test -v -race $(shell go list ./... | grep -v vendor) 30 | bats ./cmd/hdfs/test/*.bats 31 | 32 | clean: 33 | rm -f ./hdfs 34 | rm -rf gohdfs-* 35 | 36 | release: hdfs 37 | mkdir -p $(RELEASE_NAME) 38 | cp hdfs README.md LICENSE.txt cmd/hdfs/bash_completion $(RELEASE_NAME)/ 39 | tar -cvzf $(RELEASE_NAME).tar.gz $(RELEASE_NAME) 40 | 41 | .PHONY: clean clean-protos install test release 42 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | HDFS for Go 2 | =========== 3 | 4 | [![GoDoc](https://godoc.org/github.com/colinmarc/hdfs/web?status.svg)](https://godoc.org/github.com/colinmarc/hdfs) [![build](https://travis-ci.org/colinmarc/hdfs.svg?branch=master)](https://travis-ci.org/colinmarc/hdfs) 5 | 6 | This is a native golang client for hdfs. It connects directly to the namenode using 7 | the protocol buffers API. 8 | 9 | It tries to be idiomatic by aping the stdlib `os` package, where possible, and 10 | implements the interfaces from it, including `os.FileInfo` and `os.PathError`. 11 | 12 | Here's what it looks like in action: 13 | 14 | ```go 15 | client, _ := hdfs.New("namenode:8020") 16 | 17 | file, _ := client.Open("/mobydick.txt") 18 | 19 | buf := make([]byte, 59) 20 | file.ReadAt(buf, 48847) 21 | 22 | fmt.Println(string(buf)) 23 | // => Abominable are the tumblers into which he pours his poison. 24 | ``` 25 | 26 | For complete documentation, check out the [Godoc][1]. 27 | 28 | The `hdfs` Binary 29 | ----------------- 30 | 31 | Along with the library, this repo contains a commandline client for HDFS. Like 32 | the library, its primary aim is to be idiomatic, by enabling your favorite unix 33 | verbs: 34 | 35 | 36 | $ hdfs --help 37 | Usage: hdfs COMMAND 38 | The flags available are a subset of the POSIX ones, but should behave similarly. 39 | 40 | Valid commands: 41 | ls [-lah] [FILE]... 42 | rm [-rf] FILE... 43 | mv [-fT] SOURCE... DEST 44 | mkdir [-p] FILE... 45 | touch [-amc] FILE... 46 | chmod [-R] OCTAL-MODE FILE... 47 | chown [-R] OWNER[:GROUP] FILE... 
48 | cat SOURCE... 49 | head [-n LINES | -c BYTES] SOURCE... 50 | tail [-n LINES | -c BYTES] SOURCE... 51 | du [-sh] FILE... 52 | checksum FILE... 53 | get SOURCE [DEST] 54 | getmerge SOURCE DEST 55 | put SOURCE DEST 56 | 57 | Since it doesn't have to wait for the JVM to start up, it's also a lot faster than 58 | `hadoop fs`: 59 | 60 | $ time hadoop fs -ls / > /dev/null 61 | 62 | real 0m2.218s 63 | user 0m2.500s 64 | sys 0m0.376s 65 | 66 | $ time hdfs ls / > /dev/null 67 | 68 | real 0m0.015s 69 | user 0m0.004s 70 | sys 0m0.004s 71 | 72 | Best of all, it comes with bash tab completion for paths! 73 | 74 | Installing the library 75 | ---------------------- 76 | 77 | To install the library, once you have Go [all set up][2]: 78 | 79 | $ go get -u github.com/colinmarc/hdfs 80 | 81 | Installing the commandline client 82 | --------------------------------- 83 | 84 | Grab a tarball from the [releases page](https://github.com/colinmarc/hdfs/releases) 85 | and unzip it wherever you like. 86 | 87 | You'll want to add the following line to your `.bashrc` or `.profile`: 88 | 89 | export HADOOP_NAMENODE="namenode:8020" 90 | 91 | To install tab completion globally on linux, copy or link the `bash_completion` 92 | file which comes with the tarball into the right place: 93 | 94 | ln -sT bash_completion /etc/bash_completion.d/gohdfs 95 | 96 | By default, the HDFS user is set to the currently-logged-in user. You can 97 | override this in your `.bashrc` or `.profile`: 98 | 99 | export HADOOP_USER_NAME=username 100 | 101 | Compatibility 102 | ------------- 103 | 104 | This library uses "Version 9" of the HDFS protocol, which means it should work 105 | with hadoop distributions based on 2.2.x and above. The tests run against CDH 106 | 5.x and HDP 2.x. 107 | 108 | Acknowledgements 109 | ---------------- 110 | 111 | This library is heavily indebted to [snakebite][3]. 112 | 113 | [1]: https://godoc.org/github.com/colinmarc/hdfs 114 | [2]: https://golang.org/doc/install 115 | [3]: https://github.com/spotify/snakebite 116 | -------------------------------------------------------------------------------- /SECURITY.md: -------------------------------------------------------------------------------- 1 | 2 | 3 | ## Security 4 | 5 | Microsoft takes the security of our software products and services seriously, which includes all source code repositories managed through our GitHub organizations, which include [Microsoft](https://github.com/Microsoft), [Azure](https://github.com/Azure), [DotNet](https://github.com/dotnet), [AspNet](https://github.com/aspnet), [Xamarin](https://github.com/xamarin), and [our GitHub organizations](https://opensource.microsoft.com/). 6 | 7 | If you believe you have found a security vulnerability in any Microsoft-owned repository that meets [Microsoft's definition of a security vulnerability](https://aka.ms/opensource/security/definition), please report it to us as described below. 8 | 9 | ## Reporting Security Issues 10 | 11 | **Please do not report security vulnerabilities through public GitHub issues.** 12 | 13 | Instead, please report them to the Microsoft Security Response Center (MSRC) at [https://msrc.microsoft.com/create-report](https://aka.ms/opensource/security/create-report). 14 | 15 | If you prefer to submit without logging in, send email to [secure@microsoft.com](mailto:secure@microsoft.com). If possible, encrypt your message with our PGP key; please download it from the [Microsoft Security Response Center PGP Key page](https://aka.ms/opensource/security/pgpkey).
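Tying back to the README above: the HDFS user defaults to the currently-logged-in user, and `HADOOP_USER_NAME` overrides it for the CLI. The same override is available programmatically. The following is a minimal sketch, assuming a reachable namenode in `HADOOP_NAMENODE` and an arbitrary file path; it uses only calls that appear elsewhere in this repo (`Username` and `NewForUser` in `client_test.go`, `Stat` throughout), and shows how errors surface as `*os.PathError` values that work with `os.IsNotExist`.

```go
package main

import (
	"fmt"
	"log"
	"os"

	"github.com/colinmarc/hdfs"
)

func main() {
	// The same namenode address the README has you export for the CLI.
	namenode := os.Getenv("HADOOP_NAMENODE") // e.g. "namenode:8020"

	// Username returns the currently-logged-in user; NewForUser connects as an
	// arbitrary one, the programmatic counterpart of HADOOP_USER_NAME.
	user, err := hdfs.Username()
	if err != nil {
		log.Fatal(err)
	}

	client, err := hdfs.NewForUser(namenode, user)
	if err != nil {
		log.Fatal(err)
	}

	// Stat returns an os.FileInfo, and errors are *os.PathError values, so the
	// usual os-package checks apply.
	fi, err := client.Stat("/mobydick.txt")
	if os.IsNotExist(err) {
		fmt.Println("no such file")
		return
	} else if err != nil {
		log.Fatal(err)
	}

	fmt.Println(fi.Name(), fi.Size())
}
```

Because `Stat` returns an `os.FileInfo` and the namenode's exceptions are mapped to `os.ErrNotExist`/`os.ErrPermission` (see `exceptions.go` later in this listing), the usual `os`-package idioms carry over unchanged.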
16 | 17 | You should receive a response within 24 hours. If for some reason you do not, please follow up via email to ensure we received your original message. Additional information can be found at [microsoft.com/msrc](https://aka.ms/opensource/security/msrc). 18 | 19 | Please include the requested information listed below (as much as you can provide) to help us better understand the nature and scope of the possible issue: 20 | 21 | * Type of issue (e.g. buffer overflow, SQL injection, cross-site scripting, etc.) 22 | * Full paths of source file(s) related to the manifestation of the issue 23 | * The location of the affected source code (tag/branch/commit or direct URL) 24 | * Any special configuration required to reproduce the issue 25 | * Step-by-step instructions to reproduce the issue 26 | * Proof-of-concept or exploit code (if possible) 27 | * Impact of the issue, including how an attacker might exploit the issue 28 | 29 | This information will help us triage your report more quickly. 30 | 31 | If you are reporting for a bug bounty, more complete reports can contribute to a higher bounty award. Please visit our [Microsoft Bug Bounty Program](https://aka.ms/opensource/security/bounty) page for more details about our active programs. 32 | 33 | ## Preferred Languages 34 | 35 | We prefer all communications to be in English. 36 | 37 | ## Policy 38 | 39 | Microsoft follows the principle of [Coordinated Vulnerability Disclosure](https://aka.ms/opensource/security/cvd). 40 | 41 | 42 | -------------------------------------------------------------------------------- /client_test.go: -------------------------------------------------------------------------------- 1 | package hdfs 2 | 3 | import ( 4 | "io/ioutil" 5 | "os" 6 | "path/filepath" 7 | "testing" 8 | 9 | "github.com/stretchr/testify/assert" 10 | "github.com/stretchr/testify/require" 11 | ) 12 | 13 | var cachedClients = make(map[string]*Client) 14 | 15 | func getClient(t *testing.T) *Client { 16 | username, err := Username() 17 | if err != nil { 18 | t.Fatal(err) 19 | } 20 | 21 | return getClientForUser(t, username) 22 | } 23 | 24 | func getClientForUser(t *testing.T, user string) *Client { 25 | if c, ok := cachedClients[user]; ok { 26 | return c 27 | } 28 | 29 | nn := os.Getenv("HADOOP_NAMENODE") 30 | if nn == "" { 31 | t.Fatal("HADOOP_NAMENODE not set") 32 | } 33 | 34 | client, err := NewForUser(nn, user) 35 | if err != nil { 36 | t.Fatal(err) 37 | } 38 | 39 | cachedClients[user] = client 40 | return client 41 | } 42 | 43 | func touch(t *testing.T, path string) { 44 | c := getClient(t) 45 | 46 | err := c.CreateEmptyFile(path) 47 | if err != nil && !os.IsExist(err) { 48 | t.Fatal(err) 49 | } 50 | } 51 | 52 | func mkdirp(t *testing.T, path string) { 53 | c := getClient(t) 54 | 55 | err := c.MkdirAll(path, 0644) 56 | if err != nil && !os.IsExist(err) { 57 | t.Fatal(err) 58 | } 59 | } 60 | 61 | func baleet(t *testing.T, path string) { 62 | c := getClient(t) 63 | 64 | err := c.Remove(path) 65 | if err != nil && !os.IsNotExist(err) { 66 | t.Fatal(err) 67 | } 68 | } 69 | 70 | func assertPathError(t *testing.T, err error, op, path string, wrappedErr error) { 71 | require.NotNil(t, err) 72 | 73 | expected := &os.PathError{op, path, wrappedErr} 74 | require.Equal(t, expected.Error(), err.Error()) 75 | require.Equal(t, expected, err) 76 | } 77 | 78 | func TestNewWithMultipleNodes(t *testing.T) { 79 | nn := os.Getenv("HADOOP_NAMENODE") 80 | if nn == "" { 81 | t.Fatal("HADOOP_NAMENODE not set") 82 | } 83 | _, err := NewClient(ClientOptions{ 84 | 
Addresses: []string{"localhost:80", nn}, 85 | }) 86 | assert.Nil(t, err) 87 | } 88 | 89 | func TestNewWithFailingNode(t *testing.T) { 90 | _, err := New("localhost:80") 91 | assert.NotNil(t, err) 92 | } 93 | 94 | func TestReadFile(t *testing.T) { 95 | client := getClient(t) 96 | 97 | bytes, err := client.ReadFile("/_test/foo.txt") 98 | assert.NoError(t, err) 99 | assert.EqualValues(t, "bar\n", string(bytes)) 100 | } 101 | 102 | func TestCopyToLocal(t *testing.T) { 103 | client := getClient(t) 104 | 105 | dir, _ := ioutil.TempDir("", "hdfs-test") 106 | tmpfile := filepath.Join(dir, "foo.txt") 107 | err := client.CopyToLocal("/_test/foo.txt", tmpfile) 108 | require.NoError(t, err) 109 | 110 | f, err := os.Open(tmpfile) 111 | require.NoError(t, err) 112 | 113 | bytes, _ := ioutil.ReadAll(f) 114 | assert.EqualValues(t, "bar\n", string(bytes)) 115 | } 116 | 117 | func TestCopyToRemote(t *testing.T) { 118 | client := getClient(t) 119 | 120 | baleet(t, "/_test/copytoremote.txt") 121 | err := client.CopyToRemote("test/foo.txt", "/_test/copytoremote.txt") 122 | require.NoError(t, err) 123 | 124 | bytes, err := client.ReadFile("/_test/copytoremote.txt") 125 | require.NoError(t, err) 126 | 127 | assert.EqualValues(t, "bar\n", string(bytes)) 128 | } 129 | -------------------------------------------------------------------------------- /cmd/hdfs/bash_completion: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | _hdfs_complete() 4 | { 5 | local cur prev opts 6 | COMPREPLY=() 7 | cur="${COMP_WORDS[COMP_CWORD]}" 8 | words=$(IFS=$' '; echo "${COMP_WORDS[*]}") 9 | opts=$(${COMP_WORDS[0]} complete "$words") 10 | COMPREPLY=( $(compgen -W "${opts}" -- ${cur}) ) 11 | 12 | if [[ $COMPREPLY != "" && $COMPREPLY != */ ]] 13 | then 14 | COMPREPLY="$COMPREPLY " 15 | fi 16 | 17 | return 0 18 | } 19 | 20 | complete -o nospace -F _hdfs_complete hdfs 21 | -------------------------------------------------------------------------------- /cmd/hdfs/cat.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "bufio" 5 | "errors" 6 | "fmt" 7 | "io" 8 | "os" 9 | 10 | "github.com/colinmarc/hdfs" 11 | ) 12 | 13 | const tailSearchSize int64 = 16384 14 | 15 | func cat(paths []string) { 16 | expanded, client, err := getClientAndExpandedPaths(paths) 17 | if err != nil { 18 | fatal(err) 19 | } 20 | 21 | readers := make([]io.Reader, 0, len(expanded)) 22 | for _, p := range expanded { 23 | file, err := client.Open(p) 24 | if err != nil { 25 | fatal(err) 26 | } else if file.Stat().IsDir() { 27 | fatal(&os.PathError{"cat", p, errors.New("file is a directory")}) 28 | } 29 | 30 | readers = append(readers, file) 31 | } 32 | 33 | _, err = io.Copy(os.Stdout, io.MultiReader(readers...)) 34 | if err != nil { 35 | fatal(err) 36 | } 37 | } 38 | 39 | func printSection(paths []string, numLines, numBytes int64, fromEnd bool) { 40 | if numLines != -1 && numBytes != -1 { 41 | fatal("You can't specify both -n and -c.") 42 | } else if numLines == -1 && numBytes == -1 { 43 | numLines = 10 44 | } 45 | 46 | expanded, client, err := getClientAndExpandedPaths(paths) 47 | if err != nil { 48 | fatal(err) 49 | } 50 | 51 | for _, p := range expanded { 52 | file, err := client.Open(p) 53 | if err != nil || file.Stat().IsDir() { 54 | if err == nil && file.Stat().IsDir() { 55 | err = &os.PathError{"open", p, errors.New("file is a directory")} 56 | } 57 | 58 | fmt.Fprintln(os.Stderr, err) 59 | status = 1 60 | continue 61 | } 62 | 63 | if 
len(expanded) > 1 { 64 | fmt.Fprintf(os.Stderr, "%s:\n", file.Name()) 65 | } 66 | 67 | if numLines != -1 { 68 | if fromEnd { 69 | tailLines(file, numLines) 70 | } else { 71 | headLines(file, numLines) 72 | } 73 | } else { 74 | var offset int64 75 | if fromEnd { 76 | offset = file.Stat().Size() - numBytes 77 | } 78 | 79 | reader := io.NewSectionReader(file, offset, numBytes) 80 | io.Copy(os.Stdout, reader) 81 | } 82 | } 83 | } 84 | 85 | func headLines(file *hdfs.FileReader, numLines int64) { 86 | reader := bufio.NewReader(file) 87 | 88 | var newlines, offset int64 89 | for newlines < numLines { 90 | b, err := reader.ReadByte() 91 | if err == io.EOF { 92 | offset = -1 93 | break 94 | } else if err != nil { 95 | fatal(err) 96 | } 97 | 98 | if b == '\n' { 99 | newlines++ 100 | } 101 | 102 | offset++ 103 | } 104 | 105 | _, err := file.Seek(0, 0) 106 | if err != nil { 107 | fatal(err) 108 | } 109 | 110 | if offset < 0 { 111 | io.Copy(os.Stdout, file) 112 | } else { 113 | io.CopyN(os.Stdout, file, offset) 114 | } 115 | } 116 | 117 | func tailLines(file *hdfs.FileReader, numLines int64) { 118 | fileSize := file.Stat().Size() 119 | searchPoint := file.Stat().Size() - tailSearchSize 120 | if searchPoint < 0 { 121 | searchPoint = 0 122 | } 123 | readSize := tailSearchSize 124 | if readSize > fileSize { 125 | readSize = fileSize 126 | } 127 | 128 | var printOffset int64 129 | for searchPoint >= 0 { 130 | section := bufio.NewReader(io.NewSectionReader(file, searchPoint, readSize)) 131 | off := searchPoint 132 | newlines := make([]int64, 0, tailSearchSize/64) 133 | 134 | b, err := section.ReadByte() 135 | for err == nil { 136 | if b == '\n' && (off+1 != fileSize) { 137 | newlines = append(newlines, off) 138 | } 139 | 140 | off++ 141 | b, err = section.ReadByte() 142 | } 143 | 144 | if err != nil && err != io.EOF { 145 | fatal(err) 146 | } 147 | 148 | foundNewlines := int64(len(newlines)) 149 | if foundNewlines >= numLines { 150 | printOffset = newlines[foundNewlines-numLines] + 1 151 | break 152 | } 153 | 154 | numLines -= foundNewlines 155 | searchPoint -= tailSearchSize 156 | } 157 | 158 | _, err := file.Seek(printOffset, 0) 159 | if err != nil { 160 | fatal(err) 161 | } 162 | 163 | io.Copy(os.Stdout, file) 164 | } 165 | -------------------------------------------------------------------------------- /cmd/hdfs/checksum.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "encoding/hex" 5 | "fmt" 6 | ) 7 | 8 | func checksum(paths []string) { 9 | expanded, client, err := getClientAndExpandedPaths(paths) 10 | if err != nil { 11 | fatal(err) 12 | } 13 | 14 | for _, p := range expanded { 15 | reader, err := client.Open(p) 16 | if err != nil { 17 | fatal(err) 18 | } 19 | 20 | checksum, err := reader.Checksum() 21 | if err != nil { 22 | fatal(err) 23 | } 24 | 25 | fmt.Println(hex.EncodeToString(checksum), p) 26 | } 27 | } 28 | -------------------------------------------------------------------------------- /cmd/hdfs/chmod.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "fmt" 5 | "os" 6 | "strconv" 7 | ) 8 | 9 | func chmod(args []string, recursive bool) { 10 | if len(args) < 2 { 11 | printHelp() 12 | } 13 | 14 | mode, err := strconv.ParseUint(args[0], 8, 32) 15 | if err != nil { 16 | fatal("invalid octal mode:", args[0]) 17 | } 18 | 19 | expanded, client, err := getClientAndExpandedPaths(args[1:]) 20 | if err != nil { 21 | fatal(err) 22 | } 23 | 24 | visit := func(p 
string, fi os.FileInfo) { 25 | err := client.Chmod(p, os.FileMode(mode)) 26 | 27 | if err != nil { 28 | fmt.Fprintln(os.Stderr, err) 29 | status = 1 30 | } 31 | } 32 | 33 | for _, p := range expanded { 34 | if recursive { 35 | err = walk(client, p, visit) 36 | if err != nil { 37 | fmt.Fprintln(os.Stderr, err) 38 | status = 1 39 | } 40 | } else { 41 | info, err := client.Stat(p) 42 | if err != nil { 43 | fatal(err) 44 | } 45 | 46 | visit(p, info) 47 | } 48 | } 49 | } 50 | -------------------------------------------------------------------------------- /cmd/hdfs/chown.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "fmt" 5 | "os" 6 | "strings" 7 | ) 8 | 9 | func chown(args []string, recursive bool) { 10 | if len(args) < 2 { 11 | printHelp() 12 | } 13 | 14 | parts := strings.SplitN(args[0], ":", 2) 15 | owner := "" 16 | group := "" 17 | 18 | if len(parts) == 0 { 19 | fatal("invalid owner string:", args[0]) 20 | } else if len(parts) == 1 { 21 | owner = parts[0] 22 | group = owner 23 | } else { 24 | owner = parts[0] 25 | group = parts[1] 26 | } 27 | 28 | expanded, client, err := getClientAndExpandedPaths(args[1:]) 29 | if err != nil { 30 | fatal(err) 31 | } 32 | 33 | visit := func(p string, fi os.FileInfo) { 34 | err := client.Chown(p, owner, group) 35 | 36 | if err != nil { 37 | fmt.Fprintln(os.Stderr, err) 38 | status = 1 39 | } 40 | } 41 | 42 | for _, p := range expanded { 43 | if recursive { 44 | err = walk(client, p, visit) 45 | if err != nil { 46 | fmt.Fprintln(os.Stderr, err) 47 | status = 1 48 | } 49 | } else { 50 | info, err := client.Stat(p) 51 | if err != nil { 52 | fatal(err) 53 | } 54 | 55 | visit(p, info) 56 | } 57 | } 58 | } 59 | -------------------------------------------------------------------------------- /cmd/hdfs/complete.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "fmt" 5 | "io" 6 | "os" 7 | "path" 8 | "strings" 9 | ) 10 | 11 | var knownCommands = []string{ 12 | "ls", 13 | "rm", 14 | "mv", 15 | "mkdir", 16 | "touch", 17 | "chmod", 18 | "chown", 19 | "cat", 20 | "head", 21 | "tail", 22 | "du", 23 | "checksum", 24 | "get", 25 | "getmerge", 26 | } 27 | 28 | func complete(args []string) { 29 | if len(args) == 2 { 30 | words := strings.Split(args[1], " ")[1:] 31 | 32 | if len(words) <= 1 { 33 | fmt.Println(strings.Join(knownCommands, " ")) 34 | } else { 35 | completePath(words[len(words)-1]) 36 | } 37 | } else { 38 | fmt.Println(strings.Join(knownCommands, " ")) 39 | } 40 | } 41 | 42 | func completePath(fragment string) { 43 | paths, namenode, err := normalizePaths([]string{fragment}) 44 | if err != nil { 45 | return 46 | } 47 | 48 | fullPath := paths[0] 49 | if hasGlob(fullPath) { 50 | return 51 | } 52 | 53 | client, err := getClient(namenode) 54 | if err != nil { 55 | return 56 | } 57 | 58 | var dir, prefix string 59 | if strings.HasSuffix(fragment, "/") { 60 | dir = fullPath 61 | prefix = "" 62 | } else { 63 | dir, prefix = path.Split(fullPath) 64 | } 65 | 66 | dirReader, err := client.Open(dir) 67 | if err != nil { 68 | return 69 | } 70 | 71 | var partial []os.FileInfo 72 | for ; err != io.EOF; partial, err = dirReader.Readdir(100) { 73 | if err != nil { 74 | return 75 | } 76 | 77 | for _, fi := range partial { 78 | name := fi.Name() 79 | 80 | if strings.HasPrefix(name, prefix) { 81 | p := path.Join(dir, name) 82 | if fi.IsDir() { 83 | p += "/" 84 | } 85 | 86 | fmt.Print(" " + p) 87 | } 88 | } 89 | } 90 | 91 | 
fmt.Println() 92 | } 93 | -------------------------------------------------------------------------------- /cmd/hdfs/df.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "fmt" 5 | "os" 6 | "text/tabwriter" 7 | 8 | "github.com/colinmarc/hdfs" 9 | ) 10 | 11 | func df(humanReadable bool) { 12 | client, err := getClient("") 13 | if err != nil { 14 | fatal(err) 15 | } 16 | 17 | var fs hdfs.FsInfo 18 | 19 | fs, err = client.StatFs() 20 | if err != nil { 21 | fatal(err) 22 | } 23 | 24 | tw := tabwriter.NewWriter(os.Stdout, 3, 8, 0, ' ', tabwriter.AlignRight) 25 | fmt.Fprintf(tw, "Filesystem \tSize \tUsed \tAvailable \t Use%%\n") 26 | if humanReadable { 27 | fmt.Fprintf(tw, "%v \t%v \t%v \t%v \t%d%%\n", 28 | os.Getenv("HADOOP_NAMENODE"), 29 | formatBytes(fs.Capacity), 30 | formatBytes(fs.Used), 31 | formatBytes(fs.Remaining), 32 | 100 * fs.Used / fs.Capacity) 33 | } else { 34 | fmt.Fprintf(tw, "%v \t%v \t %v \t %v \t%d%%\n", 35 | os.Getenv("HADOOP_NAMENODE"), 36 | fs.Capacity, 37 | fs.Used, 38 | fs.Remaining, 39 | 100 * fs.Used / fs.Capacity) 40 | } 41 | tw.Flush() 42 | } 43 | -------------------------------------------------------------------------------- /cmd/hdfs/du.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "fmt" 5 | "io" 6 | "os" 7 | "path" 8 | "text/tabwriter" 9 | 10 | "github.com/colinmarc/hdfs" 11 | ) 12 | 13 | func du(args []string, summarize, humanReadable bool) { 14 | if len(args) == 0 { 15 | printHelp() 16 | } 17 | 18 | expanded, client, err := getClientAndExpandedPaths(args) 19 | if err != nil { 20 | fatal(err) 21 | } 22 | 23 | tw := tabwriter.NewWriter(os.Stdout, 8, 8, 0, ' ', 0) 24 | defer tw.Flush() 25 | 26 | for _, p := range expanded { 27 | info, err := client.Stat(p) 28 | if err != nil { 29 | fmt.Fprintln(os.Stderr, err) 30 | status = 1 31 | continue 32 | } 33 | 34 | var size int64 35 | if info.IsDir() { 36 | if summarize { 37 | cs, err := client.GetContentSummary(p) 38 | if err != nil { 39 | fmt.Fprintln(os.Stderr, err) 40 | status = 1 41 | continue 42 | } 43 | 44 | size = cs.Size() 45 | } else { 46 | size = duDir(client, tw, p, humanReadable) 47 | } 48 | } else { 49 | size = info.Size() 50 | } 51 | 52 | printSize(tw, size, p, humanReadable) 53 | } 54 | } 55 | 56 | func duDir(client *hdfs.Client, tw *tabwriter.Writer, dir string, humanReadable bool) int64 { 57 | dirReader, err := client.Open(dir) 58 | if err != nil { 59 | fmt.Fprintln(os.Stderr, err) 60 | return 0 61 | } 62 | 63 | var partial []os.FileInfo 64 | var dirSize int64 65 | for ; err != io.EOF; partial, err = dirReader.Readdir(100) { 66 | if err != nil { 67 | fmt.Fprintln(os.Stderr, err) 68 | return dirSize 69 | } 70 | 71 | for _, child := range partial { 72 | childPath := path.Join(dir, child.Name()) 73 | info, err := client.Stat(childPath) 74 | if err != nil { 75 | fmt.Fprintln(os.Stderr, err) 76 | return 0 77 | } 78 | 79 | var size int64 80 | if info.IsDir() { 81 | size = duDir(client, tw, childPath, humanReadable) 82 | } else { 83 | size = info.Size() 84 | } 85 | 86 | printSize(tw, size, childPath, humanReadable) 87 | dirSize += size 88 | } 89 | } 90 | 91 | return dirSize 92 | } 93 | 94 | func printSize(tw *tabwriter.Writer, size int64, name string, humanReadable bool) { 95 | if humanReadable { 96 | formattedSize := formatBytes(uint64(size)) 97 | fmt.Fprintf(tw, "%s \t%s\n", formattedSize, name) 98 | } else { 99 | fmt.Fprintf(tw, "%d \t%s\n", size, name) 100 | } 
101 | } 102 | -------------------------------------------------------------------------------- /cmd/hdfs/get.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "bytes" 5 | "io" 6 | "os" 7 | "path" 8 | "path/filepath" 9 | "strings" 10 | ) 11 | 12 | func get(args []string) { 13 | if len(args) == 0 || len(args) > 2 { 14 | printHelp() 15 | } 16 | 17 | sources, nn, err := normalizePaths(args[0:1]) 18 | if err != nil { 19 | fatal(err) 20 | } 21 | 22 | source := sources[0] 23 | var dest string 24 | if len(args) == 2 { 25 | dest = args[1] 26 | } else { 27 | cwd, err := os.Getwd() 28 | if err != nil { 29 | fatal(err) 30 | } 31 | 32 | _, name := path.Split(source) 33 | dest = filepath.Join(cwd, name) 34 | } 35 | 36 | client, err := getClient(nn) 37 | if err != nil { 38 | fatal(err) 39 | } 40 | 41 | err = walk(client, source, func(p string, fi os.FileInfo) { 42 | fullDest := filepath.Join(dest, strings.TrimPrefix(p, source)) 43 | 44 | if fi.IsDir() { 45 | err = os.Mkdir(fullDest, 0755) 46 | if err != nil { 47 | fatal(err) 48 | } 49 | } else { 50 | err = client.CopyToLocal(p, fullDest) 51 | if pathErr, ok := err.(*os.PathError); ok { 52 | fatal(pathErr) 53 | } else if err != nil { 54 | fatal(err) 55 | } 56 | } 57 | }) 58 | 59 | if err != nil { 60 | fatal(err) 61 | } 62 | } 63 | 64 | func getmerge(args []string, addNewlines bool) { 65 | if len(args) != 2 { 66 | printHelp() 67 | } 68 | 69 | dest := args[1] 70 | sources, nn, err := normalizePaths(args[0:1]) 71 | if err != nil { 72 | fatal(err) 73 | } 74 | 75 | client, err := getClient(nn) 76 | if err != nil { 77 | fatal(err) 78 | } 79 | 80 | local, err := os.Create(dest) 81 | if err != nil { 82 | fatal(err) 83 | } 84 | 85 | source := sources[0] 86 | children, err := client.ReadDir(source) 87 | if err != nil { 88 | fatal(err) 89 | } 90 | 91 | readers := make([]io.Reader, 0, len(children)) 92 | for _, child := range children { 93 | if child.IsDir() { 94 | continue 95 | } 96 | 97 | childPath := path.Join(source, child.Name()) 98 | file, err := client.Open(childPath) 99 | if err != nil { 100 | fatal(err) 101 | } 102 | 103 | readers = append(readers, file) 104 | if addNewlines { 105 | readers = append(readers, bytes.NewBufferString("\n")) 106 | } 107 | } 108 | 109 | _, err = io.Copy(local, io.MultiReader(readers...)) 110 | if err != nil { 111 | fatal(err) 112 | } 113 | } 114 | -------------------------------------------------------------------------------- /cmd/hdfs/ls.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "fmt" 5 | "io" 6 | "os" 7 | "path" 8 | "strconv" 9 | "strings" 10 | "text/tabwriter" 11 | "time" 12 | 13 | "github.com/colinmarc/hdfs" 14 | ) 15 | 16 | func ls(paths []string, long, all, humanReadable bool) { 17 | paths, client, err := getClientAndExpandedPaths(paths) 18 | if err != nil { 19 | fatal(err) 20 | } 21 | 22 | if len(paths) == 0 { 23 | paths = []string{userDir()} 24 | } 25 | 26 | files := make([]string, 0, len(paths)) 27 | fileInfos := make([]os.FileInfo, 0, len(paths)) 28 | dirs := make([]string, 0, len(paths)) 29 | for _, p := range paths { 30 | fi, err := client.Stat(p) 31 | if err != nil { 32 | fatal(err) 33 | } 34 | 35 | if fi.IsDir() { 36 | dirs = append(dirs, p) 37 | } else { 38 | files = append(files, p) 39 | fileInfos = append(fileInfos, fi) 40 | } 41 | } 42 | 43 | if len(files) == 0 && len(dirs) == 1 { 44 | printDir(client, dirs[0], long, all, humanReadable) 45 | } else { 46 | if 
long { 47 | tw := lsTabWriter() 48 | for i, p := range files { 49 | printLong(tw, p, fileInfos[i], humanReadable) 50 | } 51 | 52 | tw.Flush() 53 | } else { 54 | for _, p := range files { 55 | fmt.Println(p) 56 | } 57 | } 58 | 59 | for i, dir := range dirs { 60 | if i > 0 { 61 | fmt.Println() 62 | } 63 | 64 | fmt.Printf("%s/:\n", dir) 65 | printDir(client, dir, long, all, humanReadable) 66 | } 67 | } 68 | } 69 | 70 | func printDir(client *hdfs.Client, dir string, long, all, humanReadable bool) { 71 | dirReader, err := client.Open(dir) 72 | if err != nil { 73 | fatal(err) 74 | } 75 | 76 | var tw *tabwriter.Writer 77 | if long { 78 | tw = lsTabWriter() 79 | defer tw.Flush() 80 | } 81 | 82 | if all { 83 | if long { 84 | dirInfo, err := client.Stat(dir) 85 | if err != nil { 86 | fatal(err) 87 | } 88 | 89 | parentPath := path.Join(dir, "..") 90 | parentInfo, err := client.Stat(parentPath) 91 | if err != nil { 92 | fatal(err) 93 | } 94 | 95 | printLong(tw, ".", dirInfo, humanReadable) 96 | printLong(tw, "..", parentInfo, humanReadable) 97 | } else { 98 | fmt.Println(".") 99 | fmt.Println("..") 100 | } 101 | } 102 | 103 | var partial []os.FileInfo 104 | for ; err != io.EOF; partial, err = dirReader.Readdir(100) { 105 | if err != nil { 106 | fatal(err) 107 | } 108 | 109 | printFiles(tw, partial, long, all, humanReadable) 110 | } 111 | 112 | if long { 113 | tw.Flush() 114 | } 115 | } 116 | 117 | func printFiles(tw *tabwriter.Writer, files []os.FileInfo, long, all, humanReadable bool) { 118 | for _, file := range files { 119 | if !all && strings.HasPrefix(file.Name(), ".") { 120 | continue 121 | } 122 | 123 | if long { 124 | printLong(tw, file.Name(), file, humanReadable) 125 | } else { 126 | fmt.Println(file.Name()) 127 | } 128 | } 129 | } 130 | 131 | func printLong(tw *tabwriter.Writer, name string, info os.FileInfo, humanReadable bool) { 132 | fi := info.(*hdfs.FileInfo) 133 | // mode owner group size date(\w tab) time/year name 134 | mode := fi.Mode().String() 135 | owner := fi.Owner() 136 | group := fi.OwnerGroup() 137 | size := strconv.FormatInt(fi.Size(), 10) 138 | if humanReadable { 139 | size = formatBytes(uint64(fi.Size())) 140 | } 141 | 142 | modtime := fi.ModTime() 143 | date := modtime.Format("Jan _2") 144 | var timeOrYear string 145 | if modtime.Year() == time.Now().Year() { 146 | timeOrYear = modtime.Format("15:04") 147 | } else { 148 | timeOrYear = modtime.Format("2006") 149 | } 150 | 151 | fmt.Fprintf(tw, "%s \t%s \t %s \t %s \t%s \t%s \t%s\n", 152 | mode, owner, group, size, date, timeOrYear, name) 153 | } 154 | 155 | func lsTabWriter() *tabwriter.Writer { 156 | return tabwriter.NewWriter(os.Stdout, 3, 8, 0, ' ', tabwriter.AlignRight|tabwriter.TabIndent) 157 | } 158 | -------------------------------------------------------------------------------- /cmd/hdfs/mkdir.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "os" 5 | ) 6 | 7 | func mkdir(paths []string, all bool) { 8 | paths, nn, err := normalizePaths(paths) 9 | if err != nil { 10 | fatal(err) 11 | } 12 | 13 | if len(paths) == 0 { 14 | printHelp() 15 | } 16 | 17 | client, err := getClient(nn) 18 | if err != nil { 19 | fatal(err) 20 | } 21 | 22 | for _, p := range paths { 23 | if hasGlob(p) { 24 | fatal(&os.PathError{"mkdir", p, os.ErrNotExist}) 25 | } 26 | 27 | var mode = 0755 | os.ModeDir 28 | if all { 29 | err = client.MkdirAll(p, mode) 30 | } else { 31 | err = client.Mkdir(p, mode) 32 | } 33 | 34 | if err != nil && !(all && os.IsExist(err)) { 35 | 
fatal(err) 36 | } 37 | } 38 | } 39 | -------------------------------------------------------------------------------- /cmd/hdfs/mv.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "os" 5 | "path" 6 | 7 | "github.com/colinmarc/hdfs" 8 | ) 9 | 10 | func mv(paths []string, force, treatDestAsFile bool) { 11 | paths, nn, err := normalizePaths(paths) 12 | if err != nil { 13 | fatal(err) 14 | } 15 | 16 | if len(paths) < 2 { 17 | fatalWithUsage("Both a source and destination are required.") 18 | } else if hasGlob(paths[len(paths)-1]) { 19 | fatal("The destination must be a single path.") 20 | } 21 | 22 | client, err := getClient(nn) 23 | if err != nil { 24 | fatal(err) 25 | } 26 | 27 | dest := paths[len(paths)-1] 28 | sources, err := expandPaths(client, paths[:len(paths)-1]) 29 | if err != nil { 30 | fatal(err) 31 | } 32 | 33 | destInfo, err := client.Stat(dest) 34 | if err != nil && !os.IsNotExist(err) { 35 | fatal(err) 36 | } 37 | 38 | exists := !os.IsNotExist(err) 39 | if exists && !treatDestAsFile && destInfo.IsDir() { 40 | moveInto(client, sources, dest, force) 41 | } else { 42 | if len(sources) > 1 { 43 | fatal("Can't move multiple sources into the same place.") 44 | } 45 | 46 | moveTo(client, sources[0], dest, force) 47 | } 48 | } 49 | 50 | func moveInto(client *hdfs.Client, sources []string, dest string, force bool) { 51 | for _, source := range sources { 52 | _, name := path.Split(source) 53 | 54 | fullDest := path.Join(dest, name) 55 | moveTo(client, source, fullDest, force) 56 | } 57 | } 58 | 59 | func moveTo(client *hdfs.Client, source, dest string, force bool) { 60 | sourceInfo, err := client.Stat(source) 61 | if err != nil { 62 | if pathErr, ok := err.(*os.PathError); ok { 63 | pathErr.Op = "rename" 64 | } 65 | 66 | fatal(err) 67 | } 68 | 69 | destInfo, err := client.Stat(dest) 70 | if err == nil { 71 | if destInfo.IsDir() && !sourceInfo.IsDir() { 72 | fatal("Can't replace directory with non-directory.") 73 | } else if !force { 74 | fatal(&os.PathError{"rename", dest, os.ErrExist}) 75 | } 76 | } else if !os.IsNotExist(err) { 77 | fatal(err) 78 | } 79 | 80 | err = client.Rename(source, dest) 81 | if err != nil { 82 | fatal(err) 83 | } 84 | } 85 | -------------------------------------------------------------------------------- /cmd/hdfs/put.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "fmt" 5 | "io" 6 | "os" 7 | "path" 8 | "path/filepath" 9 | 10 | "github.com/colinmarc/hdfs" 11 | ) 12 | 13 | func put(args []string) { 14 | if len(args) != 2 { 15 | printHelp() 16 | } 17 | 18 | dests, nn, err := normalizePaths(args[1:]) 19 | if err != nil { 20 | fatal(err) 21 | } 22 | 23 | dest := dests[0] 24 | source, err := filepath.Abs(args[0]) 25 | if err != nil { 26 | fatal(err) 27 | } 28 | 29 | client, err := getClient(nn) 30 | if err != nil { 31 | fatal(err) 32 | } 33 | 34 | if filepath.Base(source) == "-" { 35 | putFromStdin(client, dest) 36 | } else { 37 | putFromFile(client, source, dest) 38 | } 39 | } 40 | 41 | func putFromStdin(client *hdfs.Client, dest string) { 42 | // If the destination exists, regardless of what it is, bail out. 43 | _, err := client.Stat(dest) 44 | if err == nil { 45 | fatal(&os.PathError{"put", dest, os.ErrExist}) 46 | } else if !os.IsNotExist(err) { 47 | fatal(err) 48 | } 49 | 50 | mode := 0755 | os.ModeDir 51 | parentDir := filepath.Dir(dest) 52 | if parentDir != "." 
&& parentDir != "/" { 53 | if err := client.MkdirAll(parentDir, mode); err != nil { 54 | fatal(err) 55 | } 56 | } 57 | 58 | writer, err := client.Create(dest) 59 | if err != nil { 60 | fatal(err) 61 | } 62 | defer writer.Close() 63 | 64 | io.Copy(writer, os.Stdin) 65 | } 66 | 67 | func putFromFile(client *hdfs.Client, source string, dest string) { 68 | // If the destination is an existing directory, place it inside. Otherwise, 69 | // the destination is really the parent directory, and we need to rename the 70 | // source directory as we copy. 71 | existing, err := client.Stat(dest) 72 | if err == nil { 73 | if existing.IsDir() { 74 | dest = path.Join(dest, filepath.Base(source)) 75 | } else { 76 | fatal(&os.PathError{"mkdir", dest, os.ErrExist}) 77 | } 78 | } else if !os.IsNotExist(err) { 79 | fatal(err) 80 | } 81 | 82 | mode := 0755 | os.ModeDir 83 | err = filepath.Walk(source, func(p string, fi os.FileInfo, err error) error { 84 | if err != nil { 85 | fmt.Fprintln(os.Stderr, err) 86 | return nil 87 | } 88 | 89 | rel, err := filepath.Rel(source, p) 90 | if err != nil { 91 | fmt.Fprintln(os.Stderr, err) 92 | return nil 93 | } 94 | 95 | fullDest := path.Join(dest, rel) 96 | if fi.IsDir() { 97 | client.Mkdir(fullDest, mode) 98 | } else { 99 | writer, err := client.Create(fullDest) 100 | if err != nil { 101 | fmt.Fprintln(os.Stderr, err) 102 | return nil 103 | } 104 | 105 | defer writer.Close() 106 | reader, err := os.Open(p) 107 | if err != nil { 108 | fmt.Fprintln(os.Stderr, err) 109 | return nil 110 | } 111 | 112 | defer reader.Close() 113 | _, err = io.Copy(writer, reader) 114 | if err != nil { 115 | fmt.Fprintln(os.Stderr, err) 116 | } 117 | } 118 | 119 | return nil 120 | }) 121 | } 122 | -------------------------------------------------------------------------------- /cmd/hdfs/rm.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "errors" 5 | "fmt" 6 | "os" 7 | ) 8 | 9 | func rm(paths []string, recursive bool, force bool) { 10 | expanded, client, err := getClientAndExpandedPaths(paths) 11 | if err != nil { 12 | fatal(err) 13 | } 14 | 15 | for _, p := range expanded { 16 | info, err := client.Stat(p) 17 | if err != nil { 18 | if force && os.IsNotExist(err) { 19 | continue 20 | } 21 | 22 | if pathErr, ok := err.(*os.PathError); ok { 23 | pathErr.Op = "remove" 24 | } 25 | 26 | fmt.Fprintln(os.Stderr, err) 27 | status = 1 28 | continue 29 | } 30 | 31 | if !recursive && info.IsDir() { 32 | fmt.Fprintln(os.Stderr, &os.PathError{"remove", p, errors.New("file is a directory")}) 33 | status = 1 34 | continue 35 | } 36 | 37 | err = client.Remove(p) 38 | if err != nil { 39 | fatal(err) 40 | } 41 | } 42 | } 43 | -------------------------------------------------------------------------------- /cmd/hdfs/test/cat.bats: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bats 2 | 3 | load helper 4 | 5 | @test "cat" { 6 | run $HDFS cat /_test/foo.txt 7 | assert_success 8 | assert_output "bar" 9 | } 10 | 11 | @test "cat long" { 12 | run bash -c "$HDFS cat /_test/mobydick.txt > $BATS_TMPDIR/mobydick_test.txt" 13 | assert_success 14 | 15 | SHA=`shasum < $ROOT_TEST_DIR/test/mobydick.txt | awk '{ print $1 }'` 16 | assert_equal $SHA `shasum < $BATS_TMPDIR/mobydick_test.txt | awk '{ print $1 }'` 17 | } 18 | 19 | @test "cat nonexistent" { 20 | run $HDFS cat /_test_cmd/nonexistent 21 | assert_failure 22 | assert_output </dev/null; then 53 | assert_equal "$2" "${lines[$1]}" 54 | else 55 | local 
line 56 | for line in "${lines[@]}"; do 57 | if [ "$line" = "$1" ]; then return 0; fi 58 | done 59 | flunk "expected line \`$1'" 60 | fi 61 | } 62 | 63 | refute_line() { 64 | if [ "$1" -ge 0 ] 2>/dev/null; then 65 | local num_lines="${#lines[@]}" 66 | if [ "$1" -lt "$num_lines" ]; then 67 | flunk "output has $num_lines lines" 68 | fi 69 | else 70 | local line 71 | for line in "${lines[@]}"; do 72 | if [ "$line" = "$1" ]; then 73 | flunk "expected to not find line \`$line'" 74 | fi 75 | done 76 | fi 77 | } 78 | 79 | assert() { 80 | if ! "$@"; then 81 | flunk "failed: $@" 82 | fi 83 | } 84 | -------------------------------------------------------------------------------- /cmd/hdfs/test/ls.bats: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bats 2 | 3 | load helper 4 | 5 | setup() { 6 | $HDFS mkdir -p /_test_cmd/ls/dir1 7 | $HDFS mkdir -p /_test_cmd/ls/dir2 8 | $HDFS mkdir -p /_test_cmd/ls/dir3 9 | $HDFS touch /_test_cmd/ls/dir1/a 10 | $HDFS touch /_test_cmd/ls/dir1/b 11 | $HDFS touch /_test_cmd/ls/dir1/c 12 | $HDFS touch /_test_cmd/ls/dir2/d 13 | } 14 | 15 | @test "ls" { 16 | run $HDFS ls /_test_cmd/ls/dir1 17 | assert_success 18 | assert_output < $BATS_TMPDIR/mobydick_test.txt" 24 | assert_success 25 | 26 | SHA=`shasum < $ROOT_TEST_DIR/test/mobydick.txt | awk '{ print $1 }'` 27 | assert_equal $SHA `shasum < $BATS_TMPDIR/mobydick_test.txt | awk '{ print $1 }'` 28 | } 29 | 30 | @test "put dir" { 31 | run $HDFS put $ROOT_TEST_DIR/test /_test_cmd/put/test2 32 | assert_success 33 | 34 | run $HDFS cat /_test_cmd/put/test2/foo.txt 35 | assert_output "bar" 36 | 37 | run bash -c "$HDFS cat /_test_cmd/put/test2/mobydick.txt > $BATS_TMPDIR/mobydick_test.txt" 38 | assert_success 39 | 40 | SHA=`shasum < $ROOT_TEST_DIR/test/mobydick.txt | awk '{ print $1 }'` 41 | assert_equal $SHA `shasum < $BATS_TMPDIR/mobydick_test.txt | awk '{ print $1 }'` 42 | } 43 | 44 | 45 | @test "put dir into dir" { 46 | run $HDFS put $ROOT_TEST_DIR/test /_test_cmd/put/test 47 | assert_success 48 | 49 | run $HDFS cat /_test_cmd/put/test/test/foo.txt 50 | assert_output "bar" 51 | 52 | run bash -c "$HDFS cat /_test_cmd/put/test/test/mobydick.txt > $BATS_TMPDIR/mobydick_test.txt" 53 | assert_success 54 | 55 | SHA=`shasum < $ROOT_TEST_DIR/test/mobydick.txt | awk '{ print $1 }'` 56 | assert_equal $SHA `shasum < $BATS_TMPDIR/mobydick_test.txt | awk '{ print $1 }'` 57 | } 58 | 59 | @test "put dir into file" { 60 | run $HDFS put $ROOT_TEST_DIR/test /_test_cmd/put/existing.txt 61 | assert_failure 62 | assert_output < $BATS_TMPDIR/mobydick_stdin_test.txt" 80 | assert_success 81 | 82 | SHA=`shasum < $ROOT_TEST_DIR/test/mobydick.txt | awk '{ print $1 }'` 83 | assert_equal $SHA `shasum < $BATS_TMPDIR/mobydick_stdin_test.txt | awk '{ print $1 }'` 84 | } 85 | 86 | @test "put stdin into file" { 87 | run bash -c "echo 'foo bar baz' | $HDFS put - /_test_cmd/put/existing.txt" 88 | assert_failure 89 | assert_output < (1024 * 1024 * 1024 * 1024): 10 | return fmt.Sprintf("%#.1fT", float64(i)/1024/1024/1024/1024) 11 | case i > (1024 * 1024 * 1024): 12 | return fmt.Sprintf("%#.1fG", float64(i)/1024/1024/1024) 13 | case i > (1024 * 1024): 14 | return fmt.Sprintf("%#.1fM", float64(i)/1024/1024) 15 | case i > 1024: 16 | return fmt.Sprintf("%#.1fK", float64(i)/1024) 17 | default: 18 | return fmt.Sprintf("%dB", i) 19 | } 20 | } 21 | -------------------------------------------------------------------------------- /conf.go: -------------------------------------------------------------------------------- 1 | 
package hdfs 2 | 3 | import ( 4 | "encoding/xml" 5 | "errors" 6 | "io/ioutil" 7 | "net/url" 8 | "os" 9 | "path/filepath" 10 | "sort" 11 | "strings" 12 | ) 13 | 14 | // Property is the struct representation of hadoop configuration 15 | // key value pair. 16 | type Property struct { 17 | Name string `xml:"name"` 18 | Value string `xml:"value"` 19 | } 20 | 21 | type propertyList struct { 22 | Property []Property `xml:"property"` 23 | } 24 | 25 | // HadoopConf represents a map of all the key value configutation 26 | // pairs found in a user's hadoop configuration files. 27 | type HadoopConf map[string]string 28 | 29 | var errUnresolvedNamenode = errors.New("no namenode address in configuration") 30 | 31 | // LoadHadoopConf returns a HadoopConf object representing configuration from 32 | // the specified path, or finds the correct path in the environment. If 33 | // path or the env variable HADOOP_CONF_DIR is specified, it should point 34 | // directly to the directory where the xml files are. If neither is specified, 35 | // ${HADOOP_HOME}/conf will be used. 36 | func LoadHadoopConf(path string) HadoopConf { 37 | 38 | if path == "" { 39 | path = os.Getenv("HADOOP_CONF_DIR") 40 | if path == "" { 41 | path = filepath.Join(os.Getenv("HADOOP_HOME"), "conf") 42 | } 43 | } 44 | 45 | hadoopConf := make(HadoopConf) 46 | for _, file := range []string{"core-site.xml", "hdfs-site.xml"} { 47 | pList := propertyList{} 48 | f, err := ioutil.ReadFile(filepath.Join(path, file)) 49 | if err != nil { 50 | continue 51 | } 52 | 53 | err = xml.Unmarshal(f, &pList) 54 | if err != nil { 55 | continue 56 | } 57 | 58 | for _, prop := range pList.Property { 59 | hadoopConf[prop.Name] = prop.Value 60 | } 61 | } 62 | 63 | return hadoopConf 64 | } 65 | 66 | // Namenodes returns the namenode hosts present in the configuration. The 67 | // returned slice will be sorted and deduped. 68 | func (conf HadoopConf) Namenodes() ([]string, error) { 69 | nns := make(map[string]bool) 70 | for key, value := range conf { 71 | if strings.Contains(key, "fs.default") { 72 | nnUrl, _ := url.Parse(value) 73 | nns[nnUrl.Host] = true 74 | } else if strings.HasPrefix(key, "dfs.namenode.rpc-address") { 75 | nns[value] = true 76 | } 77 | } 78 | 79 | if len(nns) == 0 { 80 | return nil, errUnresolvedNamenode 81 | } 82 | 83 | keys := make([]string, 0, len(nns)) 84 | for k, _ := range nns { 85 | keys = append(keys, k) 86 | } 87 | 88 | sort.Strings(keys) 89 | return keys, nil 90 | } 91 | -------------------------------------------------------------------------------- /conf_test.go: -------------------------------------------------------------------------------- 1 | package hdfs 2 | 3 | import ( 4 | "os" 5 | "testing" 6 | 7 | "github.com/stretchr/testify/assert" 8 | ) 9 | 10 | func TestConfFallback(t *testing.T) { 11 | os.Setenv("HADOOP_HOME", "test") // This will resolve to test/conf. 
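The `conf.go` API above (which the surrounding `TestConfFallback` exercises) turns local Hadoop XML configuration into namenode addresses: `LoadHadoopConf("")` falls back from an explicit path to `HADOOP_CONF_DIR` and then `$HADOOP_HOME/conf`, and `Namenodes()` returns the sorted, deduped hosts or an error if none are configured. Below is a minimal sketch of wiring that into a client, assuming a reachable cluster and config files on the local machine; `NewClient` with `ClientOptions.Addresses` and `ReadDir` are taken from `client_test.go` and `cmd/hdfs/get.go` respectively, and listing `/` is just an arbitrary call to show the client working.

```go
package main

import (
	"fmt"
	"log"

	"github.com/colinmarc/hdfs"
)

func main() {
	// An empty path makes LoadHadoopConf fall back to HADOOP_CONF_DIR and then
	// $HADOOP_HOME/conf, the same order TestConfFallback checks.
	conf := hdfs.LoadHadoopConf("")

	// Namenodes returns the sorted, deduped hosts, or an error when neither
	// fs.default* nor dfs.namenode.rpc-address* appears in the config.
	namenodes, err := conf.Namenodes()
	if err != nil {
		log.Fatal(err)
	}

	// Multiple addresses mirror TestNewWithMultipleNodes, which passes a bad
	// address plus a good one and still connects.
	client, err := hdfs.NewClient(hdfs.ClientOptions{Addresses: namenodes})
	if err != nil {
		log.Fatal(err)
	}

	// An arbitrary call to show the client working.
	files, err := client.ReadDir("/")
	if err != nil {
		log.Fatal(err)
	}

	for _, fi := range files {
		fmt.Println(fi.Name())
	}
}
```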
12 | os.Setenv("HADOOP_CONF_DIR", "test/conf2") 13 | 14 | confNamenodes := []string{"namenode1:8020", "namenode2:8020"} 15 | conf2Namenodes := []string{"namenode3:8020"} 16 | conf3Namenodes := []string{"namenode4:8020"} 17 | 18 | conf := LoadHadoopConf("test/conf3") 19 | nns, err := conf.Namenodes() 20 | assert.Nil(t, err) 21 | assert.EqualValues(t, conf3Namenodes, nns, "loading via specified path (test/conf3)") 22 | 23 | conf = LoadHadoopConf("") 24 | nns, err = conf.Namenodes() 25 | assert.Nil(t, err) 26 | assert.EqualValues(t, conf2Namenodes, nns, "loading via HADOOP_CONF_DIR (test/conf2)") 27 | 28 | os.Unsetenv("HADOOP_CONF_DIR") 29 | 30 | conf = LoadHadoopConf("") 31 | nns, err = conf.Namenodes() 32 | assert.Nil(t, err) 33 | assert.EqualValues(t, confNamenodes, nns, "loading via HADOOP_HOME (test/conf)") 34 | 35 | os.Unsetenv("HADOOP_HOME") 36 | } 37 | -------------------------------------------------------------------------------- /content_summary.go: -------------------------------------------------------------------------------- 1 | package hdfs 2 | 3 | import ( 4 | "os" 5 | 6 | hdfs "github.com/colinmarc/hdfs/protocol/hadoop_hdfs" 7 | "github.com/colinmarc/hdfs/rpc" 8 | "github.com/golang/protobuf/proto" 9 | ) 10 | 11 | // ContentSummary represents a set of information about a file or directory in 12 | // HDFS. It's provided directly by the namenode, and has no unix filesystem 13 | // analogue. 14 | type ContentSummary struct { 15 | name string 16 | contentSummary *hdfs.ContentSummaryProto 17 | } 18 | 19 | // GetContentSummary returns a ContentSummary representing the named file or 20 | // directory. The summary contains information about the entire tree rooted 21 | // in the named file; for instance, it can return the total size of all 22 | func (c *Client) GetContentSummary(name string) (*ContentSummary, error) { 23 | cs, err := c.getContentSummary(name) 24 | if err != nil { 25 | err = &os.PathError{"content summary", name, err} 26 | } 27 | 28 | return cs, err 29 | } 30 | 31 | func (c *Client) getContentSummary(name string) (*ContentSummary, error) { 32 | req := &hdfs.GetContentSummaryRequestProto{Path: proto.String(name)} 33 | resp := &hdfs.GetContentSummaryResponseProto{} 34 | 35 | err := c.namenode.Execute("getContentSummary", req, resp) 36 | if err != nil { 37 | if nnErr, ok := err.(*rpc.NamenodeError); ok { 38 | err = interpretException(nnErr.Exception, err) 39 | } 40 | 41 | return nil, err 42 | } 43 | 44 | return &ContentSummary{name, resp.GetSummary()}, nil 45 | } 46 | 47 | // Size returns the total size of the named path, including any subdirectories. 48 | func (cs *ContentSummary) Size() int64 { 49 | return int64(cs.contentSummary.GetLength()) 50 | } 51 | 52 | // SizeAfterReplication returns the total size of the named path, including any 53 | // subdirectories. Unlike Size, it counts the total replicated size of each 54 | // file, and represents the total on-disk footprint for a tree in HDFS. 55 | func (cs *ContentSummary) SizeAfterReplication() int64 { 56 | return int64(cs.contentSummary.GetSpaceConsumed()) 57 | } 58 | 59 | // FileCount returns the number of files under the named path, including any 60 | // subdirectories. If the named path is a file, FileCount returns 1. 61 | func (cs *ContentSummary) FileCount() int { 62 | return int(cs.contentSummary.GetFileCount()) 63 | } 64 | 65 | // DirectoryCount returns the number of directories under the named one, 66 | // including any subdirectories, and including the root directory itself. 
-------------------------------------------------------------------------------- /content_summary_test.go: -------------------------------------------------------------------------------- 1 | package hdfs 2 | 3 | import ( 4 | "os" 5 | "testing" 6 | 7 | "github.com/stretchr/testify/assert" 8 | "github.com/stretchr/testify/require" 9 | ) 10 | 11 | func TestContentSummaryDir(t *testing.T) { 12 | client := getClient(t) 13 | 14 | mkdirp(t, "/_test/dirforcs/1") 15 | mkdirp(t, "/_test/dirforcs/2") 16 | touch(t, "/_test/dirforcs/foo") 17 | touch(t, "/_test/dirforcs/1/bar") 18 | 19 | resp, err := client.GetContentSummary("/_test/dirforcs") 20 | require.NoError(t, err) 21 | 22 | assert.EqualValues(t, 2, resp.FileCount()) 23 | assert.EqualValues(t, 3, resp.DirectoryCount()) 24 | } 25 | 26 | func TestContentSummaryFile(t *testing.T) { 27 | client := getClient(t) 28 | 29 | resp, err := client.GetContentSummary("/_test/foo.txt") 30 | require.NoError(t, err) 31 | 32 | assert.EqualValues(t, 4, resp.Size()) 33 | assert.True(t, resp.SizeAfterReplication() >= 4) 34 | assert.EqualValues(t, 1, resp.FileCount()) 35 | assert.EqualValues(t, 0, resp.DirectoryCount()) 36 | } 37 | 38 | func TestContentSummaryNonExistent(t *testing.T) { 39 | client := getClient(t) 40 | 41 | resp, err := client.GetContentSummary("/_test/nonexistent") 42 | assertPathError(t, err, "content summary", "/_test/nonexistent", os.ErrNotExist) 43 | assert.Nil(t, resp) 44 | } 45 | 46 | func TestContentSummaryDirWithoutPermission(t *testing.T) { 47 | otherClient := getClientForUser(t, "other") 48 | 49 | mkdirp(t, "/_test/accessdenied") 50 | touch(t, "/_test/accessdenied/foo") 51 | 52 | _, err := otherClient.GetContentSummary("/_test/accessdenied/foo") 53 | assertPathError(t, err, "content summary", "/_test/accessdenied/foo", os.ErrPermission) 54 | } 55 | -------------------------------------------------------------------------------- /exceptions.go: -------------------------------------------------------------------------------- 1 | package hdfs 2 | 3 | import "os" 4 | 5 | const ( 6 | fileNotFoundException = "java.io.FileNotFoundException" 7 | permissionDeniedException = "org.apache.hadoop.security.AccessControlException" 8 | ) 9 | 10 | func interpretException(exception string, err error) error { 11 | switch exception { 12 | case fileNotFoundException: 13 | return os.ErrNotExist 14 | case permissionDeniedException: 15 | return os.ErrPermission 16 | default: 17 | return err 18 | } 19 | } 20 | -------------------------------------------------------------------------------- /hdfs.go: -------------------------------------------------------------------------------- 1 | /* 2 | Package hdfs provides a
native, idiomatic interface to HDFS. Where possible, 3 | it mimics the functionality and signatures of the standard `os` package. 4 | 5 | Example: 6 | 7 | client, _ := hdfs.New("namenode:8020") 8 | 9 | file, _ := client.Open("/mobydick.txt") 10 | 11 | buf := make([]byte, 59) 12 | file.ReadAt(buf, 48847) 13 | 14 | fmt.Println(string(buf)) 15 | // => Abominable are the tumblers into which he pours his poison. 16 | */ 17 | package hdfs 18 | -------------------------------------------------------------------------------- /mkdir.go: -------------------------------------------------------------------------------- 1 | package hdfs 2 | 3 | import ( 4 | "os" 5 | "path" 6 | 7 | hdfs "github.com/colinmarc/hdfs/protocol/hadoop_hdfs" 8 | "github.com/colinmarc/hdfs/rpc" 9 | "github.com/golang/protobuf/proto" 10 | ) 11 | 12 | // Mkdir creates a new directory with the specified name and permission bits. 13 | func (c *Client) Mkdir(dirname string, perm os.FileMode) error { 14 | return c.mkdir(dirname, perm, false) 15 | } 16 | 17 | // MkdirAll creates a directory for dirname, along with any necessary parents, 18 | // and returns nil, or else returns an error. The permission bits perm are used 19 | // for all directories that MkdirAll creates. If dirname is already a directory, 20 | // MkdirAll does nothing and returns nil. 21 | func (c *Client) MkdirAll(dirname string, perm os.FileMode) error { 22 | return c.mkdir(dirname, perm, true) 23 | } 24 | 25 | func (c *Client) mkdir(dirname string, perm os.FileMode, createParent bool) error { 26 | dirname = path.Clean(dirname) 27 | 28 | info, err := c.getFileInfo(dirname) 29 | if err == nil { 30 | if createParent && info.IsDir() { 31 | return nil 32 | } 33 | return &os.PathError{"mkdir", dirname, os.ErrExist} 34 | } else if !os.IsNotExist(err) { 35 | return &os.PathError{"mkdir", dirname, err} 36 | } 37 | 38 | req := &hdfs.MkdirsRequestProto{ 39 | Src: proto.String(dirname), 40 | Masked: &hdfs.FsPermissionProto{Perm: proto.Uint32(uint32(perm))}, 41 | CreateParent: proto.Bool(createParent), 42 | } 43 | resp := &hdfs.MkdirsResponseProto{} 44 | 45 | err = c.namenode.Execute("mkdirs", req, resp) 46 | if err != nil { 47 | if nnErr, ok := err.(*rpc.NamenodeError); ok { 48 | err = interpretException(nnErr.Exception, err) 49 | } 50 | 51 | return &os.PathError{"mkdir", dirname, err} 52 | } 53 | 54 | return nil 55 | } 56 | -------------------------------------------------------------------------------- /mkdir_test.go: -------------------------------------------------------------------------------- 1 | package hdfs 2 | 3 | import ( 4 | "os" 5 | "testing" 6 | 7 | "github.com/stretchr/testify/assert" 8 | "github.com/stretchr/testify/require" 9 | ) 10 | 11 | var mode = 0777 | os.ModeDir 12 | 13 | func TestMkdir(t *testing.T) { 14 | client := getClient(t) 15 | 16 | baleet(t, "/_test/dir2") 17 | 18 | err := client.Mkdir("/_test/dir2", mode) 19 | require.NoError(t, err) 20 | 21 | fi, err := client.Stat("/_test/dir2") 22 | require.NoError(t, err) 23 | assert.True(t, fi.IsDir()) 24 | } 25 | 26 | func TestMkdirExists(t *testing.T) { 27 | client := getClient(t) 28 | 29 | mkdirp(t, "/_test/existingdir") 30 | 31 | err := client.Mkdir("/_test/existingdir", mode) 32 | assertPathError(t, err, "mkdir", "/_test/existingdir", os.ErrExist) 33 | } 34 | 35 | func TestMkdirWithoutParent(t *testing.T) { 36 | client := getClient(t) 37 | 38 | baleet(t, "/_test/nonexistent") 39 | 40 | err := client.Mkdir("/_test/nonexistent/foo", mode) 41 | assertPathError(t, err, "mkdir", "/_test/nonexistent/foo", 
os.ErrNotExist) 42 | 43 | _, err = client.Stat("/_test/nonexistent/foo") 44 | assertPathError(t, err, "stat", "/_test/nonexistent/foo", os.ErrNotExist) 45 | 46 | _, err = client.Stat("/_test/nonexistent") 47 | assertPathError(t, err, "stat", "/_test/nonexistent", os.ErrNotExist) 48 | } 49 | 50 | func TestMkdirAll(t *testing.T) { 51 | client := getClient(t) 52 | 53 | baleet(t, "/_test/dir3") 54 | 55 | err := client.MkdirAll("/_test/dir3/foo", mode) 56 | require.NoError(t, err) 57 | 58 | fi, err := client.Stat("/_test/dir3/foo") 59 | require.NoError(t, err) 60 | assert.True(t, fi.IsDir()) 61 | 62 | fi, err = client.Stat("/_test/dir3") 63 | require.NoError(t, err) 64 | assert.True(t, fi.IsDir()) 65 | assert.EqualValues(t, 0, fi.Size()) 66 | } 67 | 68 | func TestMkdirAllExists(t *testing.T) { 69 | client := getClient(t) 70 | 71 | baleet(t, "/_test/dir4") 72 | 73 | err := client.MkdirAll("/_test/dir4/foo", mode) 74 | require.NoError(t, err) 75 | 76 | err = client.MkdirAll("/_test/dir4/foo", mode) 77 | require.NoError(t, err) 78 | } 79 | 80 | func TestMkdirWIthoutPermission(t *testing.T) { 81 | client := getClient(t) 82 | otherClient := getClientForUser(t, "other") 83 | 84 | mkdirp(t, "/_test/accessdenied") 85 | 86 | err := otherClient.Mkdir("/_test/accessdenied/dir", mode) 87 | assertPathError(t, err, "mkdir", "/_test/accessdenied/dir", os.ErrPermission) 88 | 89 | _, err = client.Stat("/_test/accessdenied/dir") 90 | assertPathError(t, err, "stat", "/_test/accessdenied/dir", os.ErrNotExist) 91 | } 92 | 93 | func TestMkdirAllWIthoutPermission(t *testing.T) { 94 | client := getClient(t) 95 | otherClient := getClientForUser(t, "other") 96 | 97 | mkdirp(t, "/_test/accessdenied") 98 | 99 | err := otherClient.Mkdir("/_test/accessdenied/dir2/foo", mode) 100 | assertPathError(t, err, "mkdir", "/_test/accessdenied/dir2/foo", os.ErrPermission) 101 | 102 | _, err = client.Stat("/_test/accessdenied/dir2/foo") 103 | assertPathError(t, err, "stat", "/_test/accessdenied/dir2/foo", os.ErrNotExist) 104 | 105 | _, err = client.Stat("/_test/accessdenied/dir2") 106 | assertPathError(t, err, "stat", "/_test/accessdenied/dir2", os.ErrNotExist) 107 | } 108 | -------------------------------------------------------------------------------- /perms.go: -------------------------------------------------------------------------------- 1 | package hdfs 2 | 3 | import ( 4 | "os" 5 | "time" 6 | 7 | hdfs "github.com/colinmarc/hdfs/protocol/hadoop_hdfs" 8 | "github.com/colinmarc/hdfs/rpc" 9 | "github.com/golang/protobuf/proto" 10 | ) 11 | 12 | // Chmod changes the mode of the named file to mode. 13 | func (c *Client) Chmod(name string, perm os.FileMode) error { 14 | req := &hdfs.SetPermissionRequestProto{ 15 | Src: proto.String(name), 16 | Permission: &hdfs.FsPermissionProto{Perm: proto.Uint32(uint32(perm))}, 17 | } 18 | resp := &hdfs.SetPermissionResponseProto{} 19 | 20 | err := c.namenode.Execute("setPermission", req, resp) 21 | if err != nil { 22 | if nnErr, ok := err.(*rpc.NamenodeError); ok { 23 | err = interpretException(nnErr.Exception, err) 24 | } 25 | 26 | return &os.PathError{"chmod", name, err} 27 | } 28 | 29 | return nil 30 | } 31 | 32 | // Chown changes the user and group of the file. Unlike os.Chown, this takes 33 | // a string username and group (since that's what HDFS uses.) 34 | // 35 | // If an empty string is passed for user or group, that field will not be 36 | // changed remotely. 
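// For illustration (an editorial sketch, not part of the original source): the
// empty-string behavior described above means the group can be changed on its
// own, e.g.
//
//	err := client.Chown("/logs/2018-05-04.log", "", "analytics")
//
// which updates the group while leaving the current owner untouched; the path
// and group name here are placeholder values.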
37 | func (c *Client) Chown(name string, user, group string) error { 38 | req := &hdfs.SetOwnerRequestProto{ 39 | Src: proto.String(name), 40 | Username: proto.String(user), 41 | Groupname: proto.String(group), 42 | } 43 | resp := &hdfs.SetOwnerResponseProto{} 44 | 45 | err := c.namenode.Execute("setOwner", req, resp) 46 | if err != nil { 47 | if nnErr, ok := err.(*rpc.NamenodeError); ok { 48 | err = interpretException(nnErr.Exception, err) 49 | } 50 | 51 | return &os.PathError{"chown", name, err} 52 | } 53 | 54 | return nil 55 | } 56 | 57 | // Chtimes changes the access and modification times of the named file. 58 | func (c *Client) Chtimes(name string, atime time.Time, mtime time.Time) error { 59 | req := &hdfs.SetTimesRequestProto{ 60 | Src: proto.String(name), 61 | Mtime: proto.Uint64(uint64(mtime.Unix()) * 1000), 62 | Atime: proto.Uint64(uint64(atime.Unix()) * 1000), 63 | } 64 | resp := &hdfs.SetTimesResponseProto{} 65 | 66 | err := c.namenode.Execute("setTimes", req, resp) 67 | if err != nil { 68 | if nnErr, ok := err.(*rpc.NamenodeError); ok { 69 | err = interpretException(nnErr.Exception, err) 70 | } 71 | 72 | return &os.PathError{"chtimes", name, err} 73 | } 74 | 75 | return nil 76 | } 77 | -------------------------------------------------------------------------------- /perms_test.go: -------------------------------------------------------------------------------- 1 | package hdfs 2 | 3 | import ( 4 | "os" 5 | "testing" 6 | "time" 7 | 8 | "github.com/stretchr/testify/assert" 9 | "github.com/stretchr/testify/require" 10 | ) 11 | 12 | func TestChmod(t *testing.T) { 13 | client := getClient(t) 14 | 15 | touch(t, "/_test/tochmod") 16 | 17 | err := client.Chmod("/_test/tochmod", 0777) 18 | require.NoError(t, err) 19 | 20 | fi, err := client.Stat("/_test/tochmod") 21 | assert.NoError(t, err) 22 | assert.EqualValues(t, 0777, fi.Mode()) 23 | } 24 | 25 | func TestChmodDir(t *testing.T) { 26 | client := getClient(t) 27 | 28 | mkdirp(t, "/_test/dirtochmod") 29 | 30 | err := client.Chmod("/_test/dirtochmod", 0777) 31 | require.NoError(t, err) 32 | 33 | fi, err := client.Stat("/_test/dirtochmod") 34 | assert.NoError(t, err) 35 | assert.EqualValues(t, 0777|os.ModeDir, fi.Mode()) 36 | } 37 | 38 | func TestChmodNonexistent(t *testing.T) { 39 | client := getClient(t) 40 | 41 | baleet(t, "/_test/nonexistent") 42 | 43 | err := client.Chmod("/_test/nonexistent", 0777) 44 | assertPathError(t, err, "chmod", "/_test/nonexistent", os.ErrNotExist) 45 | } 46 | 47 | func TestChmodWithoutPermission(t *testing.T) { 48 | otherClient := getClientForUser(t, "other") 49 | 50 | mkdirp(t, "/_test/accessdenied") 51 | 52 | err := otherClient.Chmod("/_test/accessdenied", 0777) 53 | assertPathError(t, err, "chmod", "/_test/accessdenied", os.ErrPermission) 54 | } 55 | 56 | func TestChown(t *testing.T) { 57 | client := getClient(t) 58 | 59 | baleet(t, "/_test/tochown") 60 | touch(t, "/_test/tochown") 61 | 62 | err := client.Chown("/_test/tochown", "other", "") 63 | require.NoError(t, err) 64 | 65 | fi, err := client.Stat("/_test/tochown") 66 | assert.NoError(t, err) 67 | assert.EqualValues(t, fi.(*FileInfo).Owner(), "other") 68 | } 69 | 70 | func TestChownDir(t *testing.T) { 71 | client := getClient(t) 72 | 73 | baleet(t, "/_test/tochowndir") 74 | mkdirp(t, "/_test/tochowndir") 75 | 76 | err := client.Chown("/_test/tochowndir", "other", "") 77 | require.NoError(t, err) 78 | 79 | fi, err := client.Stat("/_test/tochowndir") 80 | assert.NoError(t, err) 81 | assert.EqualValues(t, fi.(*FileInfo).Owner(), "other") 82 | } 83 | 
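// A hedged end-to-end sketch (editorial addition, not part of perms_test.go):
// how Chmod, Chown, and Chtimes from perms.go compose with the os-style errors
// produced by interpretException in exceptions.go. The namenode address and
// paths are placeholder values.
package main

import (
	"log"
	"os"
	"time"

	"github.com/colinmarc/hdfs"
)

func main() {
	client, err := hdfs.New("namenode:8020")
	if err != nil {
		log.Fatal(err)
	}

	// Mode changes mirror os.Chmod, but are applied by the namenode.
	if err := client.Chmod("/tmp/report.csv", 0644); err != nil {
		log.Fatal(err)
	}

	// An empty group string leaves the remote group unchanged.
	err = client.Chown("/tmp/report.csv", "hdfs", "")
	switch {
	case os.IsNotExist(err):
		log.Fatal("no such file") // namenode raised java.io.FileNotFoundException
	case os.IsPermission(err):
		log.Fatal("permission denied") // org.apache.hadoop.security.AccessControlException
	case err != nil:
		log.Fatal(err)
	}

	// Chtimes sends the times as milliseconds since the epoch (see perms.go).
	now := time.Now()
	if err := client.Chtimes("/tmp/report.csv", now, now); err != nil {
		log.Fatal(err)
	}
}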
84 | func TestChownNonexistent(t *testing.T) { 85 | client := getClient(t) 86 | 87 | baleet(t, "/_test/nonexistent") 88 | 89 | err := client.Chown("/_test/nonexistent", "other", "") 90 | assertPathError(t, err, "chown", "/_test/nonexistent", os.ErrNotExist) 91 | } 92 | 93 | func TestChownWithoutPermission(t *testing.T) { 94 | otherClient := getClientForUser(t, "other") 95 | 96 | mkdirp(t, "/_test/accessdenied") 97 | 98 | err := otherClient.Chown("/_test/accessdenied", "owner", "") 99 | assertPathError(t, err, "chown", "/_test/accessdenied", os.ErrPermission) 100 | } 101 | 102 | func TestChtimes(t *testing.T) { 103 | client := getClient(t) 104 | 105 | baleet(t, "/_test/tochtime") 106 | touch(t, "/_test/tochtime") 107 | 108 | birthday := time.Date(1990, 1, 22, 14, 33, 35, 0, time.UTC) 109 | client.Chtimes("/_test/tochtime", birthday, birthday) 110 | 111 | fi, err := client.Stat("/_test/tochtime") 112 | assert.NoError(t, err) 113 | assert.EqualValues(t, birthday, fi.ModTime().UTC(), birthday) 114 | assert.EqualValues(t, birthday, fi.(*FileInfo).AccessTime().UTC(), birthday) 115 | } 116 | -------------------------------------------------------------------------------- /protocol/hadoop_common/GenericRefreshProtocol.proto: -------------------------------------------------------------------------------- 1 | /** 2 | * Licensed to the Apache Software Foundation (ASF) under one 3 | * or more contributor license agreements. See the NOTICE file 4 | * distributed with this work for additional information 5 | * regarding copyright ownership. The ASF licenses this file 6 | * to you under the Apache License, Version 2.0 (the 7 | * "License"); you may not use this file except in compliance 8 | * with the License. You may obtain a copy of the License at 9 | * 10 | * http://www.apache.org/licenses/LICENSE-2.0 11 | * 12 | * Unless required by applicable law or agreed to in writing, software 13 | * distributed under the License is distributed on an "AS IS" BASIS, 14 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 15 | * See the License for the specific language governing permissions and 16 | * limitations under the License. 17 | */ 18 | 19 | /** 20 | * These .proto interfaces are private and stable. 21 | * Please see http://wiki.apache.org/hadoop/Compatibility 22 | * for what changes are allowed for a *stable* .proto interface. 23 | */ 24 | 25 | option java_package = "org.apache.hadoop.ipc.proto"; 26 | option java_outer_classname = "GenericRefreshProtocolProtos"; 27 | option java_generic_services = true; 28 | option java_generate_equals_and_hash = true; 29 | package hadoop.common; 30 | 31 | /** 32 | * Refresh request. 33 | */ 34 | message GenericRefreshRequestProto { 35 | optional string identifier = 1; 36 | repeated string args = 2; 37 | } 38 | 39 | /** 40 | * A single response from a refresh handler. 41 | */ 42 | message GenericRefreshResponseProto { 43 | optional int32 exitStatus = 1; // unix exit status to return 44 | optional string userMessage = 2; // to be displayed to the user 45 | optional string senderName = 3; // which handler sent this message 46 | } 47 | 48 | /** 49 | * Collection of responses from zero or more handlers. 50 | */ 51 | message GenericRefreshResponseCollectionProto { 52 | repeated GenericRefreshResponseProto responses = 1; 53 | } 54 | 55 | /** 56 | * Protocol which is used to refresh a user-specified feature. 
57 | */ 58 | service GenericRefreshProtocolService { 59 | rpc refresh(GenericRefreshRequestProto) 60 | returns(GenericRefreshResponseCollectionProto); 61 | } 62 | -------------------------------------------------------------------------------- /protocol/hadoop_common/GetUserMappingsProtocol.pb.go: -------------------------------------------------------------------------------- 1 | // Code generated by protoc-gen-go. DO NOT EDIT. 2 | // source: GetUserMappingsProtocol.proto 3 | 4 | package hadoop_common 5 | 6 | import proto "github.com/golang/protobuf/proto" 7 | import fmt "fmt" 8 | import math "math" 9 | 10 | // Reference proto, json, and math imports to suppress error if they are not otherwise used. 11 | var _ = proto.Marshal 12 | var _ = fmt.Errorf 13 | var _ = math.Inf 14 | 15 | // * 16 | // Get groups for user request. 17 | type GetGroupsForUserRequestProto struct { 18 | User *string `protobuf:"bytes,1,req,name=user" json:"user,omitempty"` 19 | XXX_unrecognized []byte `json:"-"` 20 | } 21 | 22 | func (m *GetGroupsForUserRequestProto) Reset() { *m = GetGroupsForUserRequestProto{} } 23 | func (m *GetGroupsForUserRequestProto) String() string { return proto.CompactTextString(m) } 24 | func (*GetGroupsForUserRequestProto) ProtoMessage() {} 25 | func (*GetGroupsForUserRequestProto) Descriptor() ([]byte, []int) { return fileDescriptor10, []int{0} } 26 | 27 | func (m *GetGroupsForUserRequestProto) GetUser() string { 28 | if m != nil && m.User != nil { 29 | return *m.User 30 | } 31 | return "" 32 | } 33 | 34 | // * 35 | // Response for get groups. 36 | type GetGroupsForUserResponseProto struct { 37 | Groups []string `protobuf:"bytes,1,rep,name=groups" json:"groups,omitempty"` 38 | XXX_unrecognized []byte `json:"-"` 39 | } 40 | 41 | func (m *GetGroupsForUserResponseProto) Reset() { *m = GetGroupsForUserResponseProto{} } 42 | func (m *GetGroupsForUserResponseProto) String() string { return proto.CompactTextString(m) } 43 | func (*GetGroupsForUserResponseProto) ProtoMessage() {} 44 | func (*GetGroupsForUserResponseProto) Descriptor() ([]byte, []int) { return fileDescriptor10, []int{1} } 45 | 46 | func (m *GetGroupsForUserResponseProto) GetGroups() []string { 47 | if m != nil { 48 | return m.Groups 49 | } 50 | return nil 51 | } 52 | 53 | func init() { 54 | proto.RegisterType((*GetGroupsForUserRequestProto)(nil), "hadoop.common.GetGroupsForUserRequestProto") 55 | proto.RegisterType((*GetGroupsForUserResponseProto)(nil), "hadoop.common.GetGroupsForUserResponseProto") 56 | } 57 | 58 | func init() { proto.RegisterFile("GetUserMappingsProtocol.proto", fileDescriptor10) } 59 | 60 | var fileDescriptor10 = []byte{ 61 | // 213 bytes of a gzipped FileDescriptorProto 62 | 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x75, 0x4f, 0x2d, 0x09, 63 | 0x2d, 0x4e, 0x2d, 0xf2, 0x4d, 0x2c, 0x28, 0xc8, 0xcc, 0x4b, 0x2f, 0x0e, 0x28, 0xca, 0x2f, 0xc9, 64 | 0x4f, 0xce, 0xcf, 0xd1, 0x2b, 0x00, 0x31, 0x84, 0x78, 0x33, 0x12, 0x53, 0xf2, 0xf3, 0x0b, 0xf4, 65 | 0x92, 0xf3, 0x73, 0x73, 0xf3, 0xf3, 0x94, 0x8c, 0xb8, 0x64, 0xdc, 0x53, 0x4b, 0xdc, 0x8b, 0xf2, 66 | 0x4b, 0x0b, 0x8a, 0xdd, 0xf2, 0x8b, 0x40, 0x1a, 0x83, 0x52, 0x0b, 0x4b, 0x53, 0x8b, 0x4b, 0xc0, 67 | 0xfa, 0x84, 0x84, 0xb8, 0x58, 0x4a, 0x8b, 0x53, 0x8b, 0x24, 0x18, 0x15, 0x98, 0x34, 0x38, 0x83, 68 | 0xc0, 0x6c, 0x25, 0x73, 0xb0, 0x1d, 0x68, 0x7a, 0x8a, 0x0b, 0xf2, 0xf3, 0x8a, 0x53, 0x21, 0x9a, 69 | 0xc4, 0xb8, 0xd8, 0xd2, 0xc1, 0xb2, 0x12, 0x8c, 0x0a, 0xcc, 0x1a, 0x9c, 0x41, 0x50, 0x9e, 0x51, 70 | 0x3f, 0x23, 0x97, 0x1c, 0x0e, 0xd7, 0x05, 
0xa7, 0x16, 0x95, 0x65, 0x26, 0xa7, 0x0a, 0xe5, 0x72, 71 | 0x09, 0xa4, 0xa3, 0x99, 0x2d, 0xa4, 0xad, 0x87, 0xe2, 0x66, 0x3d, 0x7c, 0x0e, 0x96, 0xd2, 0x21, 72 | 0xa8, 0x18, 0xc9, 0xa5, 0x4e, 0x2e, 0x5c, 0xb2, 0xf9, 0x45, 0xe9, 0x7a, 0x89, 0x05, 0x89, 0xc9, 73 | 0x19, 0xa9, 0x30, 0x9d, 0x25, 0xf9, 0xf9, 0x39, 0xc5, 0x90, 0xe0, 0x72, 0xc2, 0x15, 0x9a, 0x60, 74 | 0xba, 0xb8, 0x83, 0x91, 0x71, 0x01, 0x23, 0x23, 0x20, 0x00, 0x00, 0xff, 0xff, 0x7c, 0x38, 0xcc, 75 | 0x6f, 0x73, 0x01, 0x00, 0x00, 76 | } 77 | -------------------------------------------------------------------------------- /protocol/hadoop_common/GetUserMappingsProtocol.proto: -------------------------------------------------------------------------------- 1 | /** 2 | * Licensed to the Apache Software Foundation (ASF) under one 3 | * or more contributor license agreements. See the NOTICE file 4 | * distributed with this work for additional information 5 | * regarding copyright ownership. The ASF licenses this file 6 | * to you under the Apache License, Version 2.0 (the 7 | * "License"); you may not use this file except in compliance 8 | * with the License. You may obtain a copy of the License at 9 | * 10 | * http://www.apache.org/licenses/LICENSE-2.0 11 | * 12 | * Unless required by applicable law or agreed to in writing, software 13 | * distributed under the License is distributed on an "AS IS" BASIS, 14 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 15 | * See the License for the specific language governing permissions and 16 | * limitations under the License. 17 | */ 18 | 19 | /** 20 | * These .proto interfaces are private and stable. 21 | * Please see http://wiki.apache.org/hadoop/Compatibility 22 | * for what changes are allowed for a *stable* .proto interface. 23 | */ 24 | 25 | option java_package = "org.apache.hadoop.tools.proto"; 26 | option java_outer_classname = "GetUserMappingsProtocolProtos"; 27 | option java_generic_services = true; 28 | option java_generate_equals_and_hash = true; 29 | package hadoop.common; 30 | 31 | /** 32 | * Get groups for user request. 33 | */ 34 | message GetGroupsForUserRequestProto { 35 | required string user = 1; 36 | } 37 | 38 | /** 39 | * Response for get groups. 40 | */ 41 | message GetGroupsForUserResponseProto { 42 | repeated string groups = 1; 43 | } 44 | 45 | 46 | /** 47 | * Protocol which maps users to groups. 48 | */ 49 | service GetUserMappingsProtocolService { 50 | /** 51 | * Get the groups which are mapped to the given user. 52 | */ 53 | rpc getGroupsForUser(GetGroupsForUserRequestProto) 54 | returns(GetGroupsForUserResponseProto); 55 | } 56 | -------------------------------------------------------------------------------- /protocol/hadoop_common/HAServiceProtocol.proto: -------------------------------------------------------------------------------- 1 | /** 2 | * Licensed to the Apache Software Foundation (ASF) under one 3 | * or more contributor license agreements. See the NOTICE file 4 | * distributed with this work for additional information 5 | * regarding copyright ownership. The ASF licenses this file 6 | * to you under the Apache License, Version 2.0 (the 7 | * "License"); you may not use this file except in compliance 8 | * with the License. 
You may obtain a copy of the License at 9 | * 10 | * http://www.apache.org/licenses/LICENSE-2.0 11 | * 12 | * Unless required by applicable law or agreed to in writing, software 13 | * distributed under the License is distributed on an "AS IS" BASIS, 14 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 15 | * See the License for the specific language governing permissions and 16 | * limitations under the License. 17 | */ 18 | 19 | /** 20 | * These .proto interfaces are private and stable. 21 | * Please see http://wiki.apache.org/hadoop/Compatibility 22 | * for what changes are allowed for a *stable* .proto interface. 23 | */ 24 | 25 | option java_package = "org.apache.hadoop.ha.proto"; 26 | option java_outer_classname = "HAServiceProtocolProtos"; 27 | option java_generic_services = true; 28 | option java_generate_equals_and_hash = true; 29 | package hadoop.common; 30 | 31 | enum HAServiceStateProto { 32 | INITIALIZING = 0; 33 | ACTIVE = 1; 34 | STANDBY = 2; 35 | } 36 | 37 | enum HARequestSource { 38 | REQUEST_BY_USER = 0; 39 | REQUEST_BY_USER_FORCED = 1; 40 | REQUEST_BY_ZKFC = 2; 41 | } 42 | 43 | message HAStateChangeRequestInfoProto { 44 | required HARequestSource reqSource = 1; 45 | } 46 | 47 | /** 48 | * void request 49 | */ 50 | message MonitorHealthRequestProto { 51 | } 52 | 53 | /** 54 | * void response 55 | */ 56 | message MonitorHealthResponseProto { 57 | } 58 | 59 | /** 60 | * void request 61 | */ 62 | message TransitionToActiveRequestProto { 63 | required HAStateChangeRequestInfoProto reqInfo = 1; 64 | } 65 | 66 | /** 67 | * void response 68 | */ 69 | message TransitionToActiveResponseProto { 70 | } 71 | 72 | /** 73 | * void request 74 | */ 75 | message TransitionToStandbyRequestProto { 76 | required HAStateChangeRequestInfoProto reqInfo = 1; 77 | } 78 | 79 | /** 80 | * void response 81 | */ 82 | message TransitionToStandbyResponseProto { 83 | } 84 | 85 | /** 86 | * void request 87 | */ 88 | message GetServiceStatusRequestProto { 89 | } 90 | 91 | /** 92 | * Returns the state of the service 93 | */ 94 | message GetServiceStatusResponseProto { 95 | required HAServiceStateProto state = 1; 96 | 97 | // If state is STANDBY, indicate whether it is 98 | // ready to become active. 99 | optional bool readyToBecomeActive = 2; 100 | // If not ready to become active, a textual explanation of why not 101 | optional string notReadyReason = 3; 102 | } 103 | 104 | /** 105 | * Protocol interface provides High availability related 106 | * primitives to monitor and failover a service. 107 | * 108 | * For details see o.a.h.ha.HAServiceProtocol. 109 | */ 110 | service HAServiceProtocolService { 111 | /** 112 | * Monitor the health of a service. 113 | */ 114 | rpc monitorHealth(MonitorHealthRequestProto) 115 | returns(MonitorHealthResponseProto); 116 | 117 | /** 118 | * Request service to tranisition to active state. 119 | */ 120 | rpc transitionToActive(TransitionToActiveRequestProto) 121 | returns(TransitionToActiveResponseProto); 122 | 123 | /** 124 | * Request service to transition to standby state. 125 | */ 126 | rpc transitionToStandby(TransitionToStandbyRequestProto) 127 | returns(TransitionToStandbyResponseProto); 128 | 129 | /** 130 | * Get the current status of the service. 
131 | */ 132 | rpc getServiceStatus(GetServiceStatusRequestProto) 133 | returns(GetServiceStatusResponseProto); 134 | } 135 | -------------------------------------------------------------------------------- /protocol/hadoop_common/IpcConnectionContext.proto: -------------------------------------------------------------------------------- 1 | /** 2 | * Licensed to the Apache Software Foundation (ASF) under one 3 | * or more contributor license agreements. See the NOTICE file 4 | * distributed with this work for additional information 5 | * regarding copyright ownership. The ASF licenses this file 6 | * to you under the Apache License, Version 2.0 (the 7 | * "License"); you may not use this file except in compliance 8 | * with the License. You may obtain a copy of the License at 9 | * 10 | * http://www.apache.org/licenses/LICENSE-2.0 11 | * 12 | * Unless required by applicable law or agreed to in writing, software 13 | * distributed under the License is distributed on an "AS IS" BASIS, 14 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 15 | * See the License for the specific language governing permissions and 16 | * limitations under the License. 17 | */ 18 | 19 | /** 20 | * These .proto interfaces are private and stable. 21 | * Please see http://wiki.apache.org/hadoop/Compatibility 22 | * for what changes are allowed for a *stable* .proto interface. 23 | */ 24 | 25 | option java_package = "org.apache.hadoop.ipc.protobuf"; 26 | option java_outer_classname = "IpcConnectionContextProtos"; 27 | option java_generate_equals_and_hash = true; 28 | package hadoop.common; 29 | 30 | /** 31 | * Spec for UserInformationProto is specified in ProtoUtil#makeIpcConnectionContext 32 | */ 33 | message UserInformationProto { 34 | optional string effectiveUser = 1; 35 | optional string realUser = 2; 36 | } 37 | 38 | /** 39 | * The connection context is sent as part of the connection establishment. 40 | * It establishes the context for ALL Rpc calls within the connection. 41 | */ 42 | message IpcConnectionContextProto { 43 | // UserInfo beyond what is determined as part of security handshake 44 | // at connection time (kerberos, tokens etc). 45 | optional UserInformationProto userInfo = 2; 46 | 47 | // Protocol name for next rpc layer. 48 | // The client created a proxy with this protocol name 49 | optional string protocol = 3; 50 | } 51 | -------------------------------------------------------------------------------- /protocol/hadoop_common/ProtobufRpcEngine.proto: -------------------------------------------------------------------------------- 1 | /** 2 | * Licensed to the Apache Software Foundation (ASF) under one 3 | * or more contributor license agreements. See the NOTICE file 4 | * distributed with this work for additional information 5 | * regarding copyright ownership. The ASF licenses this file 6 | * to you under the Apache License, Version 2.0 (the 7 | * "License"); you may not use this file except in compliance 8 | * with the License. You may obtain a copy of the License at 9 | * 10 | * http://www.apache.org/licenses/LICENSE-2.0 11 | * 12 | * Unless required by applicable law or agreed to in writing, software 13 | * distributed under the License is distributed on an "AS IS" BASIS, 14 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 15 | * See the License for the specific language governing permissions and 16 | * limitations under the License. 17 | */ 18 | 19 | /** 20 | * These .proto interfaces are private and stable. 
21 | * Please see http://wiki.apache.org/hadoop/Compatibility 22 | * for what changes are allowed for a *stable* .proto interface. 23 | */ 24 | 25 | /** 26 | * These are the messages used by Hadoop RPC for the Rpc Engine Protocol Buffer 27 | * to marshal the request and response in the RPC layer. 28 | * The messages are sent in addition to the normal RPC header as 29 | * defined in RpcHeader.proto 30 | */ 31 | option java_package = "org.apache.hadoop.ipc.protobuf"; 32 | option java_outer_classname = "ProtobufRpcEngineProtos"; 33 | option java_generate_equals_and_hash = true; 34 | package hadoop.common; 35 | 36 | /** 37 | * This message is the header for the Protobuf Rpc Engine 38 | * when sending a RPC request from RPC client to the RPC server. 39 | * The actual request (serialized as protobuf) follows this request. 40 | * 41 | * No special header is needed for the Rpc Response for Protobuf Rpc Engine. 42 | * The normal RPC response header (see RpcHeader.proto) are sufficient. 43 | */ 44 | message RequestHeaderProto { 45 | /** Name of the RPC method */ 46 | required string methodName = 1; 47 | 48 | /** 49 | * RPCs for a particular interface (ie protocol) are done using a 50 | * IPC connection that is setup using rpcProxy. 51 | * The rpcProxy's has a declared protocol name that is 52 | * sent form client to server at connection time. 53 | * 54 | * Each Rpc call also sends a protocol name 55 | * (called declaringClassprotocolName). This name is usually the same 56 | * as the connection protocol name except in some cases. 57 | * For example metaProtocols such ProtocolInfoProto which get metainfo 58 | * about the protocol reuse the connection but need to indicate that 59 | * the actual protocol is different (i.e. the protocol is 60 | * ProtocolInfoProto) since they reuse the connection; in this case 61 | * the declaringClassProtocolName field is set to the ProtocolInfoProto 62 | */ 63 | required string declaringClassProtocolName = 2; 64 | 65 | /** protocol version of class declaring the called method */ 66 | required uint64 clientProtocolVersion = 3; 67 | } 68 | -------------------------------------------------------------------------------- /protocol/hadoop_common/ProtocolInfo.proto: -------------------------------------------------------------------------------- 1 | /** 2 | * Licensed to the Apache Software Foundation (ASF) under one 3 | * or more contributor license agreements. See the NOTICE file 4 | * distributed with this work for additional information 5 | * regarding copyright ownership. The ASF licenses this file 6 | * to you under the Apache License, Version 2.0 (the 7 | * "License"); you may not use this file except in compliance 8 | * with the License. You may obtain a copy of the License at 9 | * 10 | * http://www.apache.org/licenses/LICENSE-2.0 11 | * 12 | * Unless required by applicable law or agreed to in writing, software 13 | * distributed under the License is distributed on an "AS IS" BASIS, 14 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 15 | * See the License for the specific language governing permissions and 16 | * limitations under the License. 17 | */ 18 | 19 | /** 20 | * These .proto interfaces are private and stable. 21 | * Please see http://wiki.apache.org/hadoop/Compatibility 22 | * for what changes are allowed for a *stable* .proto interface. 
23 | */ 24 | 25 | option java_package = "org.apache.hadoop.ipc.protobuf"; 26 | option java_outer_classname = "ProtocolInfoProtos"; 27 | option java_generic_services = true; 28 | option java_generate_equals_and_hash = true; 29 | package hadoop.common; 30 | 31 | /** 32 | * Request to get protocol versions for all supported rpc kinds. 33 | */ 34 | message GetProtocolVersionsRequestProto { 35 | required string protocol = 1; // Protocol name 36 | } 37 | 38 | /** 39 | * Protocol version with corresponding rpc kind. 40 | */ 41 | message ProtocolVersionProto { 42 | required string rpcKind = 1; //RPC kind 43 | repeated uint64 versions = 2; //Protocol version corresponding to the rpc kind. 44 | } 45 | 46 | /** 47 | * Get protocol version response. 48 | */ 49 | message GetProtocolVersionsResponseProto { 50 | repeated ProtocolVersionProto protocolVersions = 1; 51 | } 52 | 53 | /** 54 | * Get protocol signature request. 55 | */ 56 | message GetProtocolSignatureRequestProto { 57 | required string protocol = 1; // Protocol name 58 | required string rpcKind = 2; // RPC kind 59 | } 60 | 61 | /** 62 | * Get protocol signature response. 63 | */ 64 | message GetProtocolSignatureResponseProto { 65 | repeated ProtocolSignatureProto protocolSignature = 1; 66 | } 67 | 68 | message ProtocolSignatureProto { 69 | required uint64 version = 1; 70 | repeated uint32 methods = 2; 71 | } 72 | 73 | /** 74 | * Protocol to get information about protocols. 75 | */ 76 | service ProtocolInfoService { 77 | /** 78 | * Return protocol version corresponding to protocol interface for each 79 | * supported rpc kind. 80 | */ 81 | rpc getProtocolVersions(GetProtocolVersionsRequestProto) 82 | returns (GetProtocolVersionsResponseProto); 83 | 84 | /** 85 | * Return protocol version corresponding to protocol interface. 86 | */ 87 | rpc getProtocolSignature(GetProtocolSignatureRequestProto) 88 | returns (GetProtocolSignatureResponseProto); 89 | } 90 | -------------------------------------------------------------------------------- /protocol/hadoop_common/RefreshAuthorizationPolicyProtocol.pb.go: -------------------------------------------------------------------------------- 1 | // Code generated by protoc-gen-go. DO NOT EDIT. 2 | // source: RefreshAuthorizationPolicyProtocol.proto 3 | 4 | package hadoop_common 5 | 6 | import proto "github.com/golang/protobuf/proto" 7 | import fmt "fmt" 8 | import math "math" 9 | 10 | // Reference proto, json, and math imports to suppress error if they are not otherwise used. 11 | var _ = proto.Marshal 12 | var _ = fmt.Errorf 13 | var _ = math.Inf 14 | 15 | // * 16 | // Refresh service acl request. 
17 | type RefreshServiceAclRequestProto struct { 18 | XXX_unrecognized []byte `json:"-"` 19 | } 20 | 21 | func (m *RefreshServiceAclRequestProto) Reset() { *m = RefreshServiceAclRequestProto{} } 22 | func (m *RefreshServiceAclRequestProto) String() string { return proto.CompactTextString(m) } 23 | func (*RefreshServiceAclRequestProto) ProtoMessage() {} 24 | func (*RefreshServiceAclRequestProto) Descriptor() ([]byte, []int) { return fileDescriptor2, []int{0} } 25 | 26 | // * 27 | // void response 28 | type RefreshServiceAclResponseProto struct { 29 | XXX_unrecognized []byte `json:"-"` 30 | } 31 | 32 | func (m *RefreshServiceAclResponseProto) Reset() { *m = RefreshServiceAclResponseProto{} } 33 | func (m *RefreshServiceAclResponseProto) String() string { return proto.CompactTextString(m) } 34 | func (*RefreshServiceAclResponseProto) ProtoMessage() {} 35 | func (*RefreshServiceAclResponseProto) Descriptor() ([]byte, []int) { return fileDescriptor2, []int{1} } 36 | 37 | func init() { 38 | proto.RegisterType((*RefreshServiceAclRequestProto)(nil), "hadoop.common.RefreshServiceAclRequestProto") 39 | proto.RegisterType((*RefreshServiceAclResponseProto)(nil), "hadoop.common.RefreshServiceAclResponseProto") 40 | } 41 | 42 | func init() { proto.RegisterFile("RefreshAuthorizationPolicyProtocol.proto", fileDescriptor2) } 43 | 44 | var fileDescriptor2 = []byte{ 45 | // 189 bytes of a gzipped FileDescriptorProto 46 | 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xd2, 0x08, 0x4a, 0x4d, 0x2b, 47 | 0x4a, 0x2d, 0xce, 0x70, 0x2c, 0x2d, 0xc9, 0xc8, 0x2f, 0xca, 0xac, 0x4a, 0x2c, 0xc9, 0xcc, 0xcf, 48 | 0x0b, 0xc8, 0xcf, 0xc9, 0x4c, 0xae, 0x0c, 0x28, 0xca, 0x2f, 0xc9, 0x4f, 0xce, 0xcf, 0xd1, 0x2b, 49 | 0x00, 0x31, 0x84, 0x78, 0x33, 0x12, 0x53, 0xf2, 0xf3, 0x0b, 0xf4, 0x92, 0xf3, 0x73, 0x73, 0xf3, 50 | 0xf3, 0x94, 0xe4, 0xb9, 0x64, 0xa1, 0x5a, 0x83, 0x53, 0x8b, 0xca, 0x32, 0x93, 0x53, 0x1d, 0x93, 51 | 0x73, 0x82, 0x52, 0x0b, 0x4b, 0x53, 0x8b, 0x4b, 0xc0, 0x1a, 0x95, 0x14, 0xb8, 0xe4, 0xb0, 0x28, 52 | 0x28, 0x2e, 0xc8, 0xcf, 0x2b, 0x4e, 0x05, 0xab, 0x30, 0x9a, 0xcb, 0xc8, 0xa5, 0x49, 0xd8, 0x7a, 53 | 0xa8, 0x6e, 0xa1, 0x02, 0x2e, 0xc1, 0x22, 0x74, 0xf3, 0x84, 0x74, 0xf4, 0x50, 0x5c, 0xa5, 0x87, 54 | 0xd7, 0x49, 0x52, 0xba, 0x84, 0x55, 0x23, 0xb9, 0xcf, 0x29, 0x88, 0x4b, 0x21, 0xbf, 0x28, 0x5d, 55 | 0x2f, 0xb1, 0x20, 0x31, 0x39, 0x23, 0x15, 0xa6, 0xb5, 0x38, 0x35, 0xb9, 0xb4, 0x28, 0xb3, 0xa4, 56 | 0x12, 0x12, 0x2a, 0x4e, 0x44, 0x84, 0x1f, 0x98, 0x2e, 0xee, 0x60, 0x64, 0x5c, 0xc0, 0xc8, 0x08, 57 | 0x08, 0x00, 0x00, 0xff, 0xff, 0x4a, 0xf2, 0xf6, 0x15, 0x70, 0x01, 0x00, 0x00, 58 | } 59 | -------------------------------------------------------------------------------- /protocol/hadoop_common/RefreshAuthorizationPolicyProtocol.proto: -------------------------------------------------------------------------------- 1 | /** 2 | * Licensed to the Apache Software Foundation (ASF) under one 3 | * or more contributor license agreements. See the NOTICE file 4 | * distributed with this work for additional information 5 | * regarding copyright ownership. The ASF licenses this file 6 | * to you under the Apache License, Version 2.0 (the 7 | * "License"); you may not use this file except in compliance 8 | * with the License. 
You may obtain a copy of the License at 9 | * 10 | * http://www.apache.org/licenses/LICENSE-2.0 11 | * 12 | * Unless required by applicable law or agreed to in writing, software 13 | * distributed under the License is distributed on an "AS IS" BASIS, 14 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 15 | * See the License for the specific language governing permissions and 16 | * limitations under the License. 17 | */ 18 | 19 | /** 20 | * These .proto interfaces are private and stable. 21 | * Please see http://wiki.apache.org/hadoop/Compatibility 22 | * for what changes are allowed for a *stable* .proto interface. 23 | */ 24 | 25 | option java_package = "org.apache.hadoop.security.proto"; 26 | option java_outer_classname = "RefreshAuthorizationPolicyProtocolProtos"; 27 | option java_generic_services = true; 28 | option java_generate_equals_and_hash = true; 29 | package hadoop.common; 30 | 31 | /** 32 | * Refresh service acl request. 33 | */ 34 | message RefreshServiceAclRequestProto { 35 | } 36 | 37 | /** 38 | * void response 39 | */ 40 | message RefreshServiceAclResponseProto { 41 | } 42 | 43 | /** 44 | * Protocol which is used to refresh the authorization policy in use currently. 45 | */ 46 | service RefreshAuthorizationPolicyProtocolService { 47 | /** 48 | * Refresh the service-level authorization policy in-effect. 49 | */ 50 | rpc refreshServiceAcl(RefreshServiceAclRequestProto) 51 | returns(RefreshServiceAclResponseProto); 52 | } 53 | -------------------------------------------------------------------------------- /protocol/hadoop_common/RefreshCallQueueProtocol.pb.go: -------------------------------------------------------------------------------- 1 | // Code generated by protoc-gen-go. DO NOT EDIT. 2 | // source: RefreshCallQueueProtocol.proto 3 | 4 | package hadoop_common 5 | 6 | import proto "github.com/golang/protobuf/proto" 7 | import fmt "fmt" 8 | import math "math" 9 | 10 | // Reference proto, json, and math imports to suppress error if they are not otherwise used. 11 | var _ = proto.Marshal 12 | var _ = fmt.Errorf 13 | var _ = math.Inf 14 | 15 | // * 16 | // Refresh callqueue request. 17 | type RefreshCallQueueRequestProto struct { 18 | XXX_unrecognized []byte `json:"-"` 19 | } 20 | 21 | func (m *RefreshCallQueueRequestProto) Reset() { *m = RefreshCallQueueRequestProto{} } 22 | func (m *RefreshCallQueueRequestProto) String() string { return proto.CompactTextString(m) } 23 | func (*RefreshCallQueueRequestProto) ProtoMessage() {} 24 | func (*RefreshCallQueueRequestProto) Descriptor() ([]byte, []int) { return fileDescriptor6, []int{0} } 25 | 26 | // * 27 | // void response. 
28 | type RefreshCallQueueResponseProto struct { 29 | XXX_unrecognized []byte `json:"-"` 30 | } 31 | 32 | func (m *RefreshCallQueueResponseProto) Reset() { *m = RefreshCallQueueResponseProto{} } 33 | func (m *RefreshCallQueueResponseProto) String() string { return proto.CompactTextString(m) } 34 | func (*RefreshCallQueueResponseProto) ProtoMessage() {} 35 | func (*RefreshCallQueueResponseProto) Descriptor() ([]byte, []int) { return fileDescriptor6, []int{1} } 36 | 37 | func init() { 38 | proto.RegisterType((*RefreshCallQueueRequestProto)(nil), "hadoop.common.RefreshCallQueueRequestProto") 39 | proto.RegisterType((*RefreshCallQueueResponseProto)(nil), "hadoop.common.RefreshCallQueueResponseProto") 40 | } 41 | 42 | func init() { proto.RegisterFile("RefreshCallQueueProtocol.proto", fileDescriptor6) } 43 | 44 | var fileDescriptor6 = []byte{ 45 | // 168 bytes of a gzipped FileDescriptorProto 46 | 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x0b, 0x4a, 0x4d, 0x2b, 47 | 0x4a, 0x2d, 0xce, 0x70, 0x4e, 0xcc, 0xc9, 0x09, 0x2c, 0x4d, 0x2d, 0x4d, 0x0d, 0x28, 0xca, 0x2f, 48 | 0xc9, 0x4f, 0xce, 0xcf, 0xd1, 0x2b, 0x00, 0x31, 0x84, 0x78, 0x33, 0x12, 0x53, 0xf2, 0xf3, 0x0b, 49 | 0xf4, 0x92, 0xf3, 0x73, 0x73, 0xf3, 0xf3, 0x94, 0xe4, 0xb8, 0x64, 0xd0, 0x35, 0x04, 0xa5, 0x16, 50 | 0x96, 0xa6, 0x16, 0x97, 0x80, 0xf5, 0x29, 0xc9, 0x73, 0xc9, 0x62, 0xca, 0x17, 0x17, 0xe4, 0xe7, 51 | 0x15, 0x43, 0x0c, 0x36, 0x9a, 0xc0, 0xc8, 0x25, 0x8f, 0xcb, 0xca, 0xe0, 0xd4, 0xa2, 0xb2, 0xcc, 52 | 0xe4, 0x54, 0xa1, 0x5c, 0x2e, 0x81, 0x22, 0x34, 0x25, 0x42, 0xda, 0x7a, 0x28, 0x0e, 0xd1, 0xc3, 53 | 0xe7, 0x0a, 0x29, 0x1d, 0x82, 0x8a, 0x91, 0x9c, 0xe4, 0xe4, 0xcc, 0x25, 0x9d, 0x5f, 0x94, 0xae, 54 | 0x97, 0x58, 0x90, 0x98, 0x9c, 0x91, 0x0a, 0xd3, 0x99, 0x59, 0x90, 0x0c, 0x09, 0x01, 0x27, 0x9c, 55 | 0x21, 0x04, 0xa6, 0x8b, 0x3b, 0x18, 0x19, 0x17, 0x30, 0x32, 0x02, 0x02, 0x00, 0x00, 0xff, 0xff, 56 | 0x5d, 0x22, 0xcd, 0xe0, 0x48, 0x01, 0x00, 0x00, 57 | } 58 | -------------------------------------------------------------------------------- /protocol/hadoop_common/RefreshCallQueueProtocol.proto: -------------------------------------------------------------------------------- 1 | /** 2 | * Licensed to the Apache Software Foundation (ASF) under one 3 | * or more contributor license agreements. See the NOTICE file 4 | * distributed with this work for additional information 5 | * regarding copyright ownership. The ASF licenses this file 6 | * to you under the Apache License, Version 2.0 (the 7 | * "License"); you may not use this file except in compliance 8 | * with the License. You may obtain a copy of the License at 9 | * 10 | * http://www.apache.org/licenses/LICENSE-2.0 11 | * 12 | * Unless required by applicable law or agreed to in writing, software 13 | * distributed under the License is distributed on an "AS IS" BASIS, 14 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 15 | * See the License for the specific language governing permissions and 16 | * limitations under the License. 17 | */ 18 | 19 | /** 20 | * These .proto interfaces are private and stable. 21 | * Please see http://wiki.apache.org/hadoop/Compatibility 22 | * for what changes are allowed for a *stable* .proto interface. 
23 | */ 24 | 25 | option java_package = "org.apache.hadoop.ipc.proto"; 26 | option java_outer_classname = "RefreshCallQueueProtocolProtos"; 27 | option java_generic_services = true; 28 | option java_generate_equals_and_hash = true; 29 | package hadoop.common; 30 | 31 | /** 32 | * Refresh callqueue request. 33 | */ 34 | message RefreshCallQueueRequestProto { 35 | } 36 | 37 | /** 38 | * void response. 39 | */ 40 | message RefreshCallQueueResponseProto { 41 | } 42 | 43 | /** 44 | * Protocol which is used to refresh the callqueue. 45 | */ 46 | service RefreshCallQueueProtocolService { 47 | /** 48 | * Refresh the callqueue. 49 | */ 50 | rpc refreshCallQueue(RefreshCallQueueRequestProto) 51 | returns(RefreshCallQueueResponseProto); 52 | } 53 | -------------------------------------------------------------------------------- /protocol/hadoop_common/RefreshUserMappingsProtocol.proto: -------------------------------------------------------------------------------- 1 | /** 2 | * Licensed to the Apache Software Foundation (ASF) under one 3 | * or more contributor license agreements. See the NOTICE file 4 | * distributed with this work for additional information 5 | * regarding copyright ownership. The ASF licenses this file 6 | * to you under the Apache License, Version 2.0 (the 7 | * "License"); you may not use this file except in compliance 8 | * with the License. You may obtain a copy of the License at 9 | * 10 | * http://www.apache.org/licenses/LICENSE-2.0 11 | * 12 | * Unless required by applicable law or agreed to in writing, software 13 | * distributed under the License is distributed on an "AS IS" BASIS, 14 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 15 | * See the License for the specific language governing permissions and 16 | * limitations under the License. 17 | */ 18 | 19 | /** 20 | * These .proto interfaces are private and stable. 21 | * Please see http://wiki.apache.org/hadoop/Compatibility 22 | * for what changes are allowed for a *stable* .proto interface. 23 | */ 24 | 25 | option java_package = "org.apache.hadoop.security.proto"; 26 | option java_outer_classname = "RefreshUserMappingsProtocolProtos"; 27 | option java_generic_services = true; 28 | option java_generate_equals_and_hash = true; 29 | package hadoop.common; 30 | 31 | /** 32 | * Refresh user to group mappings request. 33 | */ 34 | message RefreshUserToGroupsMappingsRequestProto { 35 | } 36 | 37 | /** 38 | * void response 39 | */ 40 | message RefreshUserToGroupsMappingsResponseProto { 41 | } 42 | 43 | /** 44 | * Refresh superuser configuration request. 45 | */ 46 | message RefreshSuperUserGroupsConfigurationRequestProto { 47 | } 48 | 49 | /** 50 | * void response 51 | */ 52 | message RefreshSuperUserGroupsConfigurationResponseProto { 53 | } 54 | 55 | /** 56 | * Protocol to refresh the user mappings. 57 | */ 58 | service RefreshUserMappingsProtocolService { 59 | /** 60 | * Refresh user to group mappings. 61 | */ 62 | rpc refreshUserToGroupsMappings(RefreshUserToGroupsMappingsRequestProto) 63 | returns(RefreshUserToGroupsMappingsResponseProto); 64 | 65 | /** 66 | * Refresh superuser proxy group list. 
67 | */ 68 | rpc refreshSuperUserGroupsConfiguration(RefreshSuperUserGroupsConfigurationRequestProto) 69 | returns(RefreshSuperUserGroupsConfigurationResponseProto); 70 | } 71 | -------------------------------------------------------------------------------- /protocol/hadoop_common/Security.proto: -------------------------------------------------------------------------------- 1 | /** 2 | * Licensed to the Apache Software Foundation (ASF) under one 3 | * or more contributor license agreements. See the NOTICE file 4 | * distributed with this work for additional information 5 | * regarding copyright ownership. The ASF licenses this file 6 | * to you under the Apache License, Version 2.0 (the 7 | * "License"); you may not use this file except in compliance 8 | * with the License. You may obtain a copy of the License at 9 | * 10 | * http://www.apache.org/licenses/LICENSE-2.0 11 | * 12 | * Unless required by applicable law or agreed to in writing, software 13 | * distributed under the License is distributed on an "AS IS" BASIS, 14 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 15 | * See the License for the specific language governing permissions and 16 | * limitations under the License. 17 | */ 18 | 19 | /** 20 | * These .proto interfaces are private and stable. 21 | * Please see http://wiki.apache.org/hadoop/Compatibility 22 | * for what changes are allowed for a *stable* .proto interface. 23 | */ 24 | 25 | option java_package = "org.apache.hadoop.security.proto"; 26 | option java_outer_classname = "SecurityProtos"; 27 | option java_generic_services = true; 28 | option java_generate_equals_and_hash = true; 29 | package hadoop.common; 30 | 31 | /** 32 | * Security token identifier 33 | */ 34 | message TokenProto { 35 | required bytes identifier = 1; 36 | required bytes password = 2; 37 | required string kind = 3; 38 | required string service = 4; 39 | } 40 | 41 | message GetDelegationTokenRequestProto { 42 | required string renewer = 1; 43 | } 44 | 45 | message GetDelegationTokenResponseProto { 46 | optional hadoop.common.TokenProto token = 1; 47 | } 48 | 49 | message RenewDelegationTokenRequestProto { 50 | required hadoop.common.TokenProto token = 1; 51 | } 52 | 53 | message RenewDelegationTokenResponseProto { 54 | required uint64 newExpiryTime = 1; 55 | } 56 | 57 | message CancelDelegationTokenRequestProto { 58 | required hadoop.common.TokenProto token = 1; 59 | } 60 | 61 | message CancelDelegationTokenResponseProto { // void response 62 | } 63 | 64 | -------------------------------------------------------------------------------- /protocol/hadoop_common/TraceAdmin.proto: -------------------------------------------------------------------------------- 1 | /** 2 | * Licensed to the Apache Software Foundation (ASF) under one 3 | * or more contributor license agreements. See the NOTICE file 4 | * distributed with this work for additional information 5 | * regarding copyright ownership. The ASF licenses this file 6 | * to you under the Apache License, Version 2.0 (the 7 | * "License"); you may not use this file except in compliance 8 | * with the License. You may obtain a copy of the License at 9 | * 10 | * http://www.apache.org/licenses/LICENSE-2.0 11 | * 12 | * Unless required by applicable law or agreed to in writing, software 13 | * distributed under the License is distributed on an "AS IS" BASIS, 14 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
15 | * See the License for the specific language governing permissions and 16 | * limitations under the License. 17 | */ 18 | 19 | /** 20 | * These .proto interfaces are private and stable. 21 | * Please see http://wiki.apache.org/hadoop/Compatibility 22 | * for what changes are allowed for a *stable* .proto interface. 23 | */ 24 | 25 | option java_package = "org.apache.hadoop.tracing"; 26 | option java_outer_classname = "TraceAdminPB"; 27 | option java_generic_services = true; 28 | option java_generate_equals_and_hash = true; 29 | package hadoop.common; 30 | 31 | message ListSpanReceiversRequestProto { 32 | } 33 | 34 | message SpanReceiverListInfo { 35 | required int64 id = 1; 36 | required string className = 2; 37 | } 38 | 39 | message ListSpanReceiversResponseProto { 40 | repeated SpanReceiverListInfo descriptions = 1; 41 | } 42 | 43 | message ConfigPair { 44 | required string key = 1; 45 | required string value = 2; 46 | } 47 | 48 | message AddSpanReceiverRequestProto { 49 | required string className = 1; 50 | repeated ConfigPair config = 2; 51 | } 52 | 53 | message AddSpanReceiverResponseProto { 54 | required int64 id = 1; 55 | } 56 | 57 | message RemoveSpanReceiverRequestProto { 58 | required int64 id = 1; 59 | } 60 | 61 | message RemoveSpanReceiverResponseProto { 62 | } 63 | 64 | service TraceAdminService { 65 | rpc listSpanReceivers(ListSpanReceiversRequestProto) 66 | returns(ListSpanReceiversResponseProto); 67 | 68 | rpc addSpanReceiver(AddSpanReceiverRequestProto) 69 | returns(AddSpanReceiverResponseProto); 70 | 71 | rpc removeSpanReceiver(RemoveSpanReceiverRequestProto) 72 | returns(RemoveSpanReceiverResponseProto); 73 | } 74 | -------------------------------------------------------------------------------- /protocol/hadoop_common/ZKFCProtocol.proto: -------------------------------------------------------------------------------- 1 | /** 2 | * Licensed to the Apache Software Foundation (ASF) under one 3 | * or more contributor license agreements. See the NOTICE file 4 | * distributed with this work for additional information 5 | * regarding copyright ownership. The ASF licenses this file 6 | * to you under the Apache License, Version 2.0 (the 7 | * "License"); you may not use this file except in compliance 8 | * with the License. You may obtain a copy of the License at 9 | * 10 | * http://www.apache.org/licenses/LICENSE-2.0 11 | * 12 | * Unless required by applicable law or agreed to in writing, software 13 | * distributed under the License is distributed on an "AS IS" BASIS, 14 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 15 | * See the License for the specific language governing permissions and 16 | * limitations under the License. 17 | */ 18 | 19 | /** 20 | * These .proto interfaces are private and stable. 21 | * Please see http://wiki.apache.org/hadoop/Compatibility 22 | * for what changes are allowed for a *stable* .proto interface. 
23 | */ 24 | 25 | option java_package = "org.apache.hadoop.ha.proto"; 26 | option java_outer_classname = "ZKFCProtocolProtos"; 27 | option java_generic_services = true; 28 | option java_generate_equals_and_hash = true; 29 | package hadoop.common; 30 | 31 | message CedeActiveRequestProto { 32 | required uint32 millisToCede = 1; 33 | } 34 | 35 | message CedeActiveResponseProto { 36 | } 37 | 38 | message GracefulFailoverRequestProto { 39 | } 40 | 41 | message GracefulFailoverResponseProto { 42 | } 43 | 44 | 45 | /** 46 | * Protocol provides manual control of the ZK Failover Controllers 47 | */ 48 | service ZKFCProtocolService { 49 | /** 50 | * Request that the service cede its active state, and quit the election 51 | * for some amount of time 52 | */ 53 | rpc cedeActive(CedeActiveRequestProto) 54 | returns(CedeActiveResponseProto); 55 | 56 | 57 | rpc gracefulFailover(GracefulFailoverRequestProto) 58 | returns(GracefulFailoverResponseProto); 59 | } 60 | -------------------------------------------------------------------------------- /protocol/hadoop_hdfs/ReconfigurationProtocol.proto: -------------------------------------------------------------------------------- 1 | /** 2 | * Licensed to the Apache Software Foundation (ASF) under one 3 | * or more contributor license agreements. See the NOTICE file 4 | * distributed with this work for additional information 5 | * regarding copyright ownership. The ASF licenses this file 6 | * to you under the Apache License, Version 2.0 (the 7 | * "License"); you may not use this file except in compliance 8 | * with the License. You may obtain a copy of the License at 9 | * 10 | * http://www.apache.org/licenses/LICENSE-2.0 11 | * 12 | * Unless required by applicable law or agreed to in writing, software 13 | * distributed under the License is distributed on an "AS IS" BASIS, 14 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 15 | * See the License for the specific language governing permissions and 16 | * limitations under the License. 17 | */ 18 | 19 | // This file contains protocol buffers that are used to reconfigure NameNode 20 | // and DataNode by HDFS admin. 21 | 22 | option java_package = "org.apache.hadoop.hdfs.protocol.proto"; 23 | option java_outer_classname = "ReconfigurationProtocolProtos"; 24 | option java_generic_services = true; 25 | option java_generate_equals_and_hash = true; 26 | package hadoop.hdfs; 27 | 28 | /** Asks NN/DN to reload configuration file. */ 29 | message StartReconfigurationRequestProto { 30 | } 31 | 32 | message StartReconfigurationResponseProto { 33 | } 34 | 35 | /** Query the running status of reconfiguration process */ 36 | message GetReconfigurationStatusRequestProto { 37 | } 38 | 39 | message GetReconfigurationStatusConfigChangeProto { 40 | required string name = 1; 41 | required string oldValue = 2; 42 | optional string newValue = 3; 43 | optional string errorMessage = 4; // It is empty if success. 44 | } 45 | 46 | message GetReconfigurationStatusResponseProto { 47 | required int64 startTime = 1; 48 | optional int64 endTime = 2; 49 | repeated GetReconfigurationStatusConfigChangeProto changes = 3; 50 | } 51 | 52 | /** Query the reconfigurable properties on NN/DN. */ 53 | message ListReconfigurablePropertiesRequestProto { 54 | } 55 | 56 | message ListReconfigurablePropertiesResponseProto { 57 | repeated string name = 1; 58 | } 59 | 60 | /** 61 | * Protocol used from client to the NN/DN. 62 | * See the request and response for details of rpc call. 
63 | */ 64 | service ReconfigurationProtocolService { 65 | rpc getReconfigurationStatus(GetReconfigurationStatusRequestProto) 66 | returns(GetReconfigurationStatusResponseProto); 67 | 68 | rpc startReconfiguration(StartReconfigurationRequestProto) 69 | returns(StartReconfigurationResponseProto); 70 | 71 | rpc listReconfigurableProperties( 72 | ListReconfigurablePropertiesRequestProto) 73 | returns(ListReconfigurablePropertiesResponseProto); 74 | } -------------------------------------------------------------------------------- /protocol/hadoop_hdfs/acl.proto: -------------------------------------------------------------------------------- 1 | /** 2 | * Licensed to the Apache Software Foundation (ASF) under one 3 | * or more contributor license agreements. See the NOTICE file 4 | * distributed with this work for additional information 5 | * regarding copyright ownership. The ASF licenses this file 6 | * to you under the Apache License, Version 2.0 (the 7 | * "License"); you may not use this file except in compliance 8 | * with the License. You may obtain a copy of the License at 9 | * 10 | * http://www.apache.org/licenses/LICENSE-2.0 11 | * 12 | * Unless required by applicable law or agreed to in writing, software 13 | * distributed under the License is distributed on an "AS IS" BASIS, 14 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 15 | * See the License for the specific language governing permissions and 16 | * limitations under the License. 17 | */ 18 | 19 | option java_package = "org.apache.hadoop.hdfs.protocol.proto"; 20 | option java_outer_classname = "AclProtos"; 21 | option java_generate_equals_and_hash = true; 22 | package hadoop.hdfs; 23 | 24 | import "hdfs.proto"; 25 | 26 | message AclEntryProto { 27 | enum AclEntryScopeProto { 28 | ACCESS = 0x0; 29 | DEFAULT = 0x1; 30 | } 31 | 32 | enum AclEntryTypeProto { 33 | USER = 0x0; 34 | GROUP = 0x1; 35 | MASK = 0x2; 36 | OTHER = 0x3; 37 | } 38 | 39 | enum FsActionProto { 40 | NONE = 0x0; 41 | EXECUTE = 0x1; 42 | WRITE = 0x2; 43 | WRITE_EXECUTE = 0x3; 44 | READ = 0x4; 45 | READ_EXECUTE = 0x5; 46 | READ_WRITE = 0x6; 47 | PERM_ALL = 0x7; 48 | } 49 | 50 | required AclEntryTypeProto type = 1; 51 | required AclEntryScopeProto scope = 2; 52 | required FsActionProto permissions = 3; 53 | optional string name = 4; 54 | } 55 | 56 | message AclStatusProto { 57 | required string owner = 1; 58 | required string group = 2; 59 | required bool sticky = 3; 60 | repeated AclEntryProto entries = 4; 61 | optional FsPermissionProto permission = 5; 62 | } 63 | 64 | message ModifyAclEntriesRequestProto { 65 | required string src = 1; 66 | repeated AclEntryProto aclSpec = 2; 67 | } 68 | 69 | message ModifyAclEntriesResponseProto { 70 | } 71 | 72 | message RemoveAclRequestProto { 73 | required string src = 1; 74 | } 75 | 76 | message RemoveAclResponseProto { 77 | } 78 | 79 | message RemoveAclEntriesRequestProto { 80 | required string src = 1; 81 | repeated AclEntryProto aclSpec = 2; 82 | } 83 | 84 | message RemoveAclEntriesResponseProto { 85 | } 86 | 87 | message RemoveDefaultAclRequestProto { 88 | required string src = 1; 89 | } 90 | 91 | message RemoveDefaultAclResponseProto { 92 | } 93 | 94 | message SetAclRequestProto { 95 | required string src = 1; 96 | repeated AclEntryProto aclSpec = 2; 97 | } 98 | 99 | message SetAclResponseProto { 100 | } 101 | 102 | message GetAclStatusRequestProto { 103 | required string src = 1; 104 | } 105 | 106 | message GetAclStatusResponseProto { 107 | required AclStatusProto result = 1; 108 | } 
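The ACL request/response pairs above are carried over the same namenode RPC mechanism used everywhere else in this repository (see stat.go and readdir.go later in this dump). The client code included here does not expose an ACL API, so the following is only an illustrative sketch of how the generated Go types for these messages could be wired up; the type names and the "getAclStatus" method string are assumed from the project's naming conventions rather than taken from code shown in this dump.

package hdfs

import (
	"os"

	hdfs "github.com/colinmarc/hdfs/protocol/hadoop_hdfs"
	"github.com/colinmarc/hdfs/rpc"
	"github.com/golang/protobuf/proto"
)

// getAclStatus is a hypothetical helper that mirrors getFileInfo in stat.go:
// build the request message, execute the named RPC on the namenode, and
// translate any namenode exception into a friendlier error.
func (c *Client) getAclStatus(name string) (*hdfs.AclStatusProto, error) {
	req := &hdfs.GetAclStatusRequestProto{Src: proto.String(name)}
	resp := &hdfs.GetAclStatusResponseProto{}

	err := c.namenode.Execute("getAclStatus", req, resp)
	if err != nil {
		if nnErr, ok := err.(*rpc.NamenodeError); ok {
			err = interpretException(nnErr.Exception, err)
		}
		return nil, &os.PathError{"getacl", name, err}
	}

	return resp.GetResult(), nil
}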
109 | -------------------------------------------------------------------------------- /protocol/hadoop_hdfs/encryption.proto: -------------------------------------------------------------------------------- 1 | /** 2 | * Licensed to the Apache Software Foundation (ASF) under one 3 | * or more contributor license agreements. See the NOTICE file 4 | * distributed with this work for additional information 5 | * regarding copyright ownership. The ASF licenses this file 6 | * to you under the Apache License, Version 2.0 (the 7 | * "License"); you may not use this file except in compliance 8 | * with the License. You may obtain a copy of the License at 9 | * 10 | * http://www.apache.org/licenses/LICENSE-2.0 11 | * 12 | * Unless required by applicable law or agreed to in writing, software 13 | * distributed under the License is distributed on an "AS IS" BASIS, 14 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 15 | * See the License for the specific language governing permissions and 16 | * limitations under the License. 17 | */ 18 | 19 | /** 20 | * These .proto interfaces are private and stable. 21 | * Please see http://wiki.apache.org/hadoop/Compatibility 22 | * for what changes are allowed for a *stable* .proto interface. 23 | */ 24 | 25 | // This file contains protocol buffers that are used throughout HDFS -- i.e. 26 | // by the client, server, and data transfer protocols. 27 | 28 | 29 | option java_package = "org.apache.hadoop.hdfs.protocol.proto"; 30 | option java_outer_classname = "EncryptionZonesProtos"; 31 | option java_generate_equals_and_hash = true; 32 | package hadoop.hdfs; 33 | 34 | import "hdfs.proto"; 35 | 36 | message CreateEncryptionZoneRequestProto { 37 | required string src = 1; 38 | optional string keyName = 2; 39 | } 40 | 41 | message CreateEncryptionZoneResponseProto { 42 | } 43 | 44 | message ListEncryptionZonesRequestProto { 45 | required int64 id = 1; 46 | } 47 | 48 | message EncryptionZoneProto { 49 | required int64 id = 1; 50 | required string path = 2; 51 | required CipherSuiteProto suite = 3; 52 | required CryptoProtocolVersionProto cryptoProtocolVersion = 4; 53 | required string keyName = 5; 54 | } 55 | 56 | message ListEncryptionZonesResponseProto { 57 | repeated EncryptionZoneProto zones = 1; 58 | required bool hasMore = 2; 59 | } 60 | 61 | message GetEZForPathRequestProto { 62 | required string src = 1; 63 | } 64 | 65 | message GetEZForPathResponseProto { 66 | optional EncryptionZoneProto zone = 1; 67 | } 68 | -------------------------------------------------------------------------------- /protocol/hadoop_hdfs/erasurecoding.proto: -------------------------------------------------------------------------------- 1 | /** 2 | * Licensed to the Apache Software Foundation (ASF) under one 3 | * or more contributor license agreements. See the NOTICE file 4 | * distributed with this work for additional information 5 | * regarding copyright ownership. The ASF licenses this file 6 | * to you under the Apache License, Version 2.0 (the 7 | * "License"); you may not use this file except in compliance 8 | * with the License. You may obtain a copy of the License at 9 | * 10 | * http://www.apache.org/licenses/LICENSE-2.0 11 | * 12 | * Unless required by applicable law or agreed to in writing, software 13 | * distributed under the License is distributed on an "AS IS" BASIS, 14 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
15 | * See the License for the specific language governing permissions and 16 | * limitations under the License. 17 | */ 18 | 19 | option java_package = "org.apache.hadoop.hdfs.protocol.proto"; 20 | option java_outer_classname = "ErasureCodingProtos"; 21 | option java_generate_equals_and_hash = true; 22 | package hadoop.hdfs; 23 | 24 | import "hdfs.proto"; 25 | 26 | message SetErasureCodingPolicyRequestProto { 27 | required string src = 1; 28 | optional ErasureCodingPolicyProto ecPolicy = 2; 29 | } 30 | 31 | message SetErasureCodingPolicyResponseProto { 32 | } 33 | 34 | message GetErasureCodingPoliciesRequestProto { // void request 35 | } 36 | 37 | message GetErasureCodingPoliciesResponseProto { 38 | repeated ErasureCodingPolicyProto ecPolicies = 1; 39 | } 40 | 41 | message GetErasureCodingPolicyRequestProto { 42 | required string src = 1; // path to get the policy info 43 | } 44 | 45 | message GetErasureCodingPolicyResponseProto { 46 | optional ErasureCodingPolicyProto ecPolicy = 1; 47 | } 48 | 49 | /** 50 | * Block erasure coding reconstruction info 51 | */ 52 | message BlockECReconstructionInfoProto { 53 | required ExtendedBlockProto block = 1; 54 | required DatanodeInfosProto sourceDnInfos = 2; 55 | required DatanodeInfosProto targetDnInfos = 3; 56 | required StorageUuidsProto targetStorageUuids = 4; 57 | required StorageTypesProto targetStorageTypes = 5; 58 | required bytes liveBlockIndices = 6; 59 | required ErasureCodingPolicyProto ecPolicy = 7; 60 | } 61 | -------------------------------------------------------------------------------- /protocol/hadoop_hdfs/xattr.proto: -------------------------------------------------------------------------------- 1 | /** 2 | * Licensed to the Apache Software Foundation (ASF) under one 3 | * or more contributor license agreements. See the NOTICE file 4 | * distributed with this work for additional information 5 | * regarding copyright ownership. The ASF licenses this file 6 | * to you under the Apache License, Version 2.0 (the 7 | * "License"); you may not use this file except in compliance 8 | * with the License. You may obtain a copy of the License at 9 | * 10 | * http://www.apache.org/licenses/LICENSE-2.0 11 | * 12 | * Unless required by applicable law or agreed to in writing, software 13 | * distributed under the License is distributed on an "AS IS" BASIS, 14 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 15 | * See the License for the specific language governing permissions and 16 | * limitations under the License. 
17 | */ 18 | 19 | option java_package = "org.apache.hadoop.hdfs.protocol.proto"; 20 | option java_outer_classname = "XAttrProtos"; 21 | option java_generate_equals_and_hash = true; 22 | package hadoop.hdfs; 23 | 24 | message XAttrProto { 25 | enum XAttrNamespaceProto { 26 | USER = 0; 27 | TRUSTED = 1; 28 | SECURITY = 2; 29 | SYSTEM = 3; 30 | RAW = 4; 31 | } 32 | 33 | required XAttrNamespaceProto namespace = 1; 34 | required string name = 2; 35 | optional bytes value = 3; 36 | } 37 | 38 | enum XAttrSetFlagProto { 39 | XATTR_CREATE = 0x01; 40 | XATTR_REPLACE = 0x02; 41 | } 42 | 43 | message SetXAttrRequestProto { 44 | required string src = 1; 45 | optional XAttrProto xAttr = 2; 46 | optional uint32 flag = 3; //bits set using XAttrSetFlagProto 47 | } 48 | 49 | message SetXAttrResponseProto { 50 | } 51 | 52 | message GetXAttrsRequestProto { 53 | required string src = 1; 54 | repeated XAttrProto xAttrs = 2; 55 | } 56 | 57 | message GetXAttrsResponseProto { 58 | repeated XAttrProto xAttrs = 1; 59 | } 60 | 61 | message ListXAttrsRequestProto { 62 | required string src = 1; 63 | } 64 | 65 | message ListXAttrsResponseProto { 66 | repeated XAttrProto xAttrs = 1; 67 | } 68 | 69 | message RemoveXAttrRequestProto { 70 | required string src = 1; 71 | optional XAttrProto xAttr = 2; 72 | } 73 | 74 | message RemoveXAttrResponseProto { 75 | } 76 | -------------------------------------------------------------------------------- /readdir.go: -------------------------------------------------------------------------------- 1 | package hdfs 2 | 3 | import ( 4 | "os" 5 | "path" 6 | 7 | hdfs "github.com/colinmarc/hdfs/protocol/hadoop_hdfs" 8 | "github.com/colinmarc/hdfs/rpc" 9 | "github.com/golang/protobuf/proto" 10 | ) 11 | 12 | // ReadDir reads the directory named by dirname and returns a list of sorted 13 | // directory entries. 14 | func (c *Client) ReadDir(dirname string) ([]os.FileInfo, error) { 15 | return c.getDirList(dirname, "", 0) 16 | } 17 | 18 | func (c *Client) getDirList(dirname string, after string, max int) ([]os.FileInfo, error) { 19 | var res []os.FileInfo 20 | last := after 21 | for max <= 0 || len(res) < max { 22 | partial, remaining, err := c.getPartialDirList(dirname, last) 23 | if err != nil { 24 | return nil, err 25 | } 26 | 27 | res = append(res, partial...) 
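// The namenode caps how many entries a single getListing response can carry.
// GetRemainingEntries (checked just below) reports how many entries are still
// outstanding, so the loop keeps paging, restarting each request from the name
// of the last entry received so far.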
28 | if remaining == 0 { 29 | break 30 | } else if len(partial) > 0 { 31 | last = partial[len(partial)-1].Name() 32 | } 33 | } 34 | 35 | if max > 0 && len(res) > max { 36 | res = res[:max] 37 | } 38 | 39 | return res, nil 40 | } 41 | 42 | func (c *Client) getPartialDirList(dirname string, after string) ([]os.FileInfo, int, error) { 43 | dirname = path.Clean(dirname) 44 | 45 | req := &hdfs.GetListingRequestProto{ 46 | Src: proto.String(dirname), 47 | StartAfter: []byte(after), 48 | NeedLocation: proto.Bool(true), 49 | } 50 | resp := &hdfs.GetListingResponseProto{} 51 | 52 | err := c.namenode.Execute("getListing", req, resp) 53 | if err != nil { 54 | if nnErr, ok := err.(*rpc.NamenodeError); ok { 55 | err = interpretException(nnErr.Exception, err) 56 | } 57 | 58 | return nil, 0, &os.PathError{"readdir", dirname, err} 59 | } else if resp.GetDirList() == nil { 60 | return nil, 0, &os.PathError{"readdir", dirname, os.ErrNotExist} 61 | } 62 | 63 | list := resp.GetDirList().GetPartialListing() 64 | res := make([]os.FileInfo, 0, len(list)) 65 | for _, status := range list { 66 | res = append(res, newFileInfo(status, "")) 67 | } 68 | 69 | remaining := int(resp.GetDirList().GetRemainingEntries()) 70 | return res, remaining, nil 71 | } 72 | -------------------------------------------------------------------------------- /readdir_test.go: -------------------------------------------------------------------------------- 1 | package hdfs 2 | 3 | import ( 4 | "fmt" 5 | "os" 6 | "testing" 7 | 8 | "github.com/stretchr/testify/assert" 9 | "github.com/stretchr/testify/require" 10 | ) 11 | 12 | func TestReadDir(t *testing.T) { 13 | client := getClient(t) 14 | 15 | mkdirp(t, "/_test/fulldir") 16 | mkdirp(t, "/_test/fulldir/dir") 17 | touch(t, "/_test/fulldir/1") 18 | touch(t, "/_test/fulldir/2") 19 | touch(t, "/_test/fulldir/3") 20 | 21 | res, err := client.ReadDir("/_test/fulldir") 22 | require.NoError(t, err) 23 | require.Equal(t, len(res), 4) 24 | 25 | assert.EqualValues(t, "1", res[0].Name()) 26 | assert.False(t, res[0].IsDir()) 27 | 28 | assert.EqualValues(t, "2", res[1].Name()) 29 | assert.False(t, res[1].IsDir()) 30 | 31 | assert.EqualValues(t, "3", res[2].Name()) 32 | assert.False(t, res[2].IsDir()) 33 | 34 | assert.EqualValues(t, "dir", res[3].Name()) 35 | assert.True(t, res[3].IsDir()) 36 | } 37 | 38 | func TestReadDirMany(t *testing.T) { 39 | client := getClient(t) 40 | 41 | mkdirp(t, "/_test/hugedir") 42 | for i := 1; i <= 1000; i++ { 43 | touch(t, fmt.Sprintf("/_test/hugedir/%d", i)) 44 | } 45 | 46 | res, err := client.ReadDir("/_test/hugedir") 47 | require.NoError(t, err) 48 | require.Equal(t, len(res), 1000) 49 | } 50 | 51 | func TestReadDirTrailingSlash(t *testing.T) { 52 | client := getClient(t) 53 | 54 | mkdirp(t, "/_test/fulldir2") 55 | mkdirp(t, "/_test/fulldir2/dir") 56 | touch(t, "/_test/fulldir2/1") 57 | touch(t, "/_test/fulldir2/2") 58 | touch(t, "/_test/fulldir2/3") 59 | 60 | res, err := client.ReadDir("/_test/fulldir2/") 61 | require.NoError(t, err) 62 | require.Equal(t, len(res), 4) 63 | 64 | assert.EqualValues(t, "1", res[0].Name()) 65 | assert.False(t, res[0].IsDir()) 66 | 67 | assert.EqualValues(t, "2", res[1].Name()) 68 | assert.False(t, res[1].IsDir()) 69 | 70 | assert.EqualValues(t, "3", res[2].Name()) 71 | assert.False(t, res[2].IsDir()) 72 | 73 | assert.EqualValues(t, "dir", res[3].Name()) 74 | assert.True(t, res[3].IsDir()) 75 | } 76 | 77 | func TestReadEmptyDir(t *testing.T) { 78 | client := getClient(t) 79 | 80 | baleet(t, "/_test/emptydir") 81 | mkdirp(t, 
"/_test/emptydir") 82 | 83 | res, err := client.ReadDir("/_test/emptydir") 84 | assert.NoError(t, err) 85 | assert.EqualValues(t, 0, len(res)) 86 | } 87 | 88 | func TestReadDirNonexistent(t *testing.T) { 89 | client := getClient(t) 90 | 91 | baleet(t, "/_test/nonexistent") 92 | 93 | res, err := client.ReadDir("/_test/nonexistent") 94 | assertPathError(t, err, "readdir", "/_test/nonexistent", os.ErrNotExist) 95 | assert.Nil(t, res) 96 | } 97 | 98 | func TestReadDirWithoutPermission(t *testing.T) { 99 | mkdirp(t, "/_test/accessdenied") 100 | touch(t, "/_test/accessdenied/foo") 101 | 102 | client := getClientForUser(t, "other") 103 | 104 | res, err := client.ReadDir("/_test/accessdenied") 105 | assertPathError(t, err, "readdir", "/_test/accessdenied", os.ErrPermission) 106 | assert.Nil(t, res) 107 | } 108 | -------------------------------------------------------------------------------- /remove.go: -------------------------------------------------------------------------------- 1 | package hdfs 2 | 3 | import ( 4 | "errors" 5 | "os" 6 | 7 | hdfs "github.com/colinmarc/hdfs/protocol/hadoop_hdfs" 8 | "github.com/colinmarc/hdfs/rpc" 9 | "github.com/golang/protobuf/proto" 10 | ) 11 | 12 | // Remove removes the named file or directory. 13 | func (c *Client) Remove(name string) error { 14 | _, err := c.getFileInfo(name) 15 | if err != nil { 16 | return &os.PathError{"remove", name, err} 17 | } 18 | 19 | req := &hdfs.DeleteRequestProto{ 20 | Src: proto.String(name), 21 | Recursive: proto.Bool(true), 22 | } 23 | resp := &hdfs.DeleteResponseProto{} 24 | 25 | err = c.namenode.Execute("delete", req, resp) 26 | if err != nil { 27 | if nnErr, ok := err.(*rpc.NamenodeError); ok { 28 | err = interpretException(nnErr.Exception, err) 29 | } 30 | 31 | return &os.PathError{"remove", name, err} 32 | } else if resp.Result == nil { 33 | return &os.PathError{ 34 | "remove", 35 | name, 36 | errors.New("Unexpected empty response to 'delete' rpc call"), 37 | } 38 | } 39 | 40 | return nil 41 | } 42 | -------------------------------------------------------------------------------- /remove_test.go: -------------------------------------------------------------------------------- 1 | package hdfs 2 | 3 | import ( 4 | "os" 5 | "testing" 6 | 7 | "github.com/stretchr/testify/assert" 8 | "github.com/stretchr/testify/require" 9 | ) 10 | 11 | func TestRemove(t *testing.T) { 12 | client := getClient(t) 13 | 14 | baleet(t, "/_test/todelete") 15 | mkdirp(t, "/_test/todelete") 16 | 17 | err := client.Remove("/_test/todelete") 18 | require.NoError(t, err) 19 | 20 | fi, err := client.Stat("/_test/todelete") 21 | assert.Nil(t, fi) 22 | assertPathError(t, err, "stat", "/_test/todelete", os.ErrNotExist) 23 | } 24 | 25 | func TestRemoveNotExistent(t *testing.T) { 26 | client := getClient(t) 27 | 28 | baleet(t, "/_test/nonexistent") 29 | 30 | err := client.Remove("/_test/nonexistent") 31 | assertPathError(t, err, "remove", "/_test/nonexistent", os.ErrNotExist) 32 | } 33 | 34 | func TestRemoveWithoutPermission(t *testing.T) { 35 | otherClient := getClientForUser(t, "other") 36 | 37 | mkdirp(t, "/_test/accessdenied") 38 | touch(t, "/_test/accessdenied/foo") 39 | 40 | err := otherClient.Remove("/_test/accessdenied/foo") 41 | assertPathError(t, err, "remove", "/_test/accessdenied/foo", os.ErrPermission) 42 | } 43 | -------------------------------------------------------------------------------- /rename.go: -------------------------------------------------------------------------------- 1 | package hdfs 2 | 3 | import ( 4 | "os" 5 | 6 | hdfs 
"github.com/colinmarc/hdfs/protocol/hadoop_hdfs" 7 | "github.com/colinmarc/hdfs/rpc" 8 | "github.com/golang/protobuf/proto" 9 | ) 10 | 11 | // Rename renames (moves) a file. 12 | func (c *Client) Rename(oldpath, newpath string) error { 13 | _, err := c.getFileInfo(newpath) 14 | if err != nil && !os.IsNotExist(err) { 15 | return &os.PathError{"rename", newpath, err} 16 | } 17 | 18 | req := &hdfs.Rename2RequestProto{ 19 | Src: proto.String(oldpath), 20 | Dst: proto.String(newpath), 21 | OverwriteDest: proto.Bool(true), 22 | } 23 | resp := &hdfs.Rename2ResponseProto{} 24 | 25 | err = c.namenode.Execute("rename2", req, resp) 26 | if err != nil { 27 | if nnErr, ok := err.(*rpc.NamenodeError); ok { 28 | err = interpretException(nnErr.Exception, err) 29 | } 30 | 31 | return &os.PathError{"rename", oldpath, err} 32 | } 33 | 34 | return nil 35 | } 36 | -------------------------------------------------------------------------------- /rename_test.go: -------------------------------------------------------------------------------- 1 | package hdfs 2 | 3 | import ( 4 | "os" 5 | "testing" 6 | 7 | "github.com/stretchr/testify/assert" 8 | "github.com/stretchr/testify/require" 9 | ) 10 | 11 | func TestRename(t *testing.T) { 12 | client := getClient(t) 13 | 14 | touch(t, "/_test/tomove") 15 | baleet(t, "/_test/tomovedest") 16 | 17 | err := client.Rename("/_test/tomove", "/_test/tomovedest") 18 | require.NoError(t, err) 19 | 20 | fi, err := client.Stat("/_test/tomove") 21 | assert.Nil(t, fi) 22 | assertPathError(t, err, "stat", "/_test/tomove", os.ErrNotExist) 23 | 24 | fi, err = client.Stat("/_test/tomovedest") 25 | require.NoError(t, err) 26 | } 27 | 28 | func TestRenameSrcNotExistent(t *testing.T) { 29 | client := getClient(t) 30 | 31 | baleet(t, "/_test/nonexistent") 32 | baleet(t, "/_test/nonexistent2") 33 | 34 | err := client.Rename("/_test/nonexistent", "/_test/nonexistent2") 35 | assertPathError(t, err, "rename", "/_test/nonexistent", os.ErrNotExist) 36 | } 37 | 38 | func TestRenameDestExists(t *testing.T) { 39 | client := getClient(t) 40 | 41 | touch(t, "/_test/tomove2") 42 | touch(t, "/_test/tomovedest2") 43 | 44 | err := client.Rename("/_test/tomove2", "/_test/tomovedest2") 45 | require.NoError(t, err) 46 | } 47 | 48 | func TestRenameWithoutPermissionForSrc(t *testing.T) { 49 | otherClient := getClientForUser(t, "other") 50 | 51 | mkdirp(t, "/_test/accessdenied") 52 | touch(t, "/_test/accessdenied/foo") 53 | 54 | err := otherClient.Rename("/_test/accessdenied/foo", "/_test/tomovedest3") 55 | assertPathError(t, err, "rename", "/_test/accessdenied/foo", os.ErrPermission) 56 | } 57 | 58 | func TestRenameWithoutPermissionForDest(t *testing.T) { 59 | otherClient := getClientForUser(t, "other") 60 | 61 | baleet(t, "/_test/ownedbyother2") 62 | 63 | err := otherClient.CreateEmptyFile("/_test/ownedbyother2") 64 | require.NoError(t, err) 65 | 66 | err = otherClient.Rename("/_test/ownedbyother2", "/_test/accessdenied/tomovedest4") 67 | assertPathError(t, err, "rename", "/_test/accessdenied/tomovedest4", os.ErrPermission) 68 | } 69 | -------------------------------------------------------------------------------- /rpc/block_writer_test.go: -------------------------------------------------------------------------------- 1 | package rpc 2 | 3 | import ( 4 | "hash/crc32" 5 | "io" 6 | "os" 7 | "testing" 8 | 9 | hdfs "github.com/colinmarc/hdfs/protocol/hadoop_hdfs" 10 | "github.com/golang/protobuf/proto" 11 | "github.com/stretchr/testify/assert" 12 | "github.com/stretchr/testify/require" 13 | ) 14 | 15 | func 
createBlock(t *testing.T, name string) *BlockWriter { 16 | namenode := getNamenode(t) 17 | blockSize := int64(1048576) 18 | 19 | createReq := &hdfs.CreateRequestProto{ 20 | Src: proto.String(name), 21 | Masked: &hdfs.FsPermissionProto{Perm: proto.Uint32(uint32(0644))}, 22 | ClientName: proto.String(namenode.ClientName()), 23 | CreateFlag: proto.Uint32(1), 24 | CreateParent: proto.Bool(false), 25 | Replication: proto.Uint32(uint32(3)), 26 | BlockSize: proto.Uint64(uint64(blockSize)), 27 | } 28 | createResp := &hdfs.CreateResponseProto{} 29 | 30 | err := namenode.Execute("create", createReq, createResp) 31 | require.NoError(t, err) 32 | 33 | addBlockReq := &hdfs.AddBlockRequestProto{ 34 | Src: proto.String(name), 35 | ClientName: proto.String(namenode.ClientName()), 36 | Previous: nil, 37 | } 38 | addBlockResp := &hdfs.AddBlockResponseProto{} 39 | 40 | err = namenode.Execute("addBlock", addBlockReq, addBlockResp) 41 | require.NoError(t, err) 42 | 43 | block := addBlockResp.GetBlock() 44 | return NewBlockWriter(block, namenode, blockSize) 45 | } 46 | 47 | func finishBlock(t *testing.T, name string, bw *BlockWriter) { 48 | namenode := getNamenode(t) 49 | 50 | err := bw.Close() 51 | require.NoError(t, err) 52 | 53 | completeReq := &hdfs.CompleteRequestProto{ 54 | Src: proto.String(name), 55 | ClientName: proto.String(namenode.ClientName()), 56 | Last: bw.block.GetB(), 57 | } 58 | completeResp := &hdfs.CompleteResponseProto{} 59 | 60 | err = namenode.Execute("complete", completeReq, completeResp) 61 | require.NoError(t, err) 62 | } 63 | 64 | func baleet(t *testing.T, name string) { 65 | namenode := getNamenode(t) 66 | 67 | req := &hdfs.DeleteRequestProto{ 68 | Src: proto.String(name), 69 | Recursive: proto.Bool(true), 70 | } 71 | resp := &hdfs.DeleteResponseProto{} 72 | 73 | err := namenode.Execute("delete", req, resp) 74 | require.NoError(t, err) 75 | require.NotNil(t, resp.Result) 76 | } 77 | 78 | func TestWriteFailsOver(t *testing.T) { 79 | t.Skip("Write failover isn't implemented") 80 | 81 | name := "/_test/create/6.txt" 82 | baleet(t, name) 83 | 84 | mobydick, err := os.Open("../test/mobydick.txt") 85 | require.NoError(t, err) 86 | 87 | bw := createBlock(t, name) 88 | bw.connectNext() 89 | bw.stream.ackError = ackError{0, 0, hdfs.Status_ERROR} 90 | 91 | _, err = io.CopyN(bw, mobydick, 1048576) 92 | require.NoError(t, err) 93 | finishBlock(t, name, bw) 94 | 95 | br, _ := getBlockReader(t, name) 96 | hash := crc32.NewIEEE() 97 | n, err := io.Copy(hash, br) 98 | require.NoError(t, err) 99 | assert.EqualValues(t, 1048576, n) 100 | assert.EqualValues(t, 0xb35a6a0e, hash.Sum32()) 101 | } 102 | 103 | func TestPacketSize(t *testing.T) { 104 | bws := &blockWriteStream{} 105 | bws.buf.Write(make([]byte, outboundPacketSize*3)) 106 | packet := bws.makePacket() 107 | 108 | assert.EqualValues(t, outboundPacketSize, len(packet.data)) 109 | } 110 | 111 | func TestPacketSizeUndersize(t *testing.T) { 112 | bws := &blockWriteStream{} 113 | bws.buf.Write(make([]byte, outboundPacketSize-5)) 114 | packet := bws.makePacket() 115 | 116 | assert.EqualValues(t, outboundPacketSize-5, len(packet.data)) 117 | } 118 | 119 | func TestPacketSizeAlignment(t *testing.T) { 120 | bws := &blockWriteStream{} 121 | bws.buf.Write(make([]byte, outboundPacketSize*3)) 122 | 123 | bws.offset = 5 124 | packet := bws.makePacket() 125 | 126 | assert.EqualValues(t, outboundChunkSize-5, len(packet.data)) 127 | } 128 | -------------------------------------------------------------------------------- /rpc/checksum_reader_test.go: 
-------------------------------------------------------------------------------- 1 | package rpc 2 | 3 | import ( 4 | "encoding/hex" 5 | "testing" 6 | 7 | "github.com/stretchr/testify/assert" 8 | "github.com/stretchr/testify/require" 9 | ) 10 | 11 | const testChecksum = "b8d258c1ae6b31ce38b833f7e3bb5cb0" 12 | 13 | func TestReadChecksum(t *testing.T) { 14 | block := getBlocks(t, "/_test/mobydick.txt")[0] 15 | cr := NewChecksumReader(block) 16 | 17 | checksum, err := cr.ReadChecksum() 18 | require.NoError(t, err) 19 | assert.EqualValues(t, testChecksum, hex.EncodeToString(checksum)) 20 | } 21 | -------------------------------------------------------------------------------- /rpc/datanode_failover.go: -------------------------------------------------------------------------------- 1 | package rpc 2 | 3 | import ( 4 | "sync" 5 | "time" 6 | ) 7 | 8 | // datanodeFailures is a global map of address to the last recorded failure 9 | var datanodeFailures = make(map[string]time.Time) 10 | var datanodeFailuresLock sync.Mutex 11 | 12 | // a datanodeFailover provides some common code for trying multiple datanodes 13 | // in the context of a single operation on a single block. 14 | type datanodeFailover struct { 15 | datanodes []string 16 | currentDatanode string 17 | err error 18 | } 19 | 20 | func newDatanodeFailover(datanodes []string) *datanodeFailover { 21 | return &datanodeFailover{ 22 | datanodes: datanodes, 23 | currentDatanode: "", 24 | err: nil, 25 | } 26 | } 27 | 28 | func (df *datanodeFailover) recordFailure(err error) { 29 | datanodeFailuresLock.Lock() 30 | defer datanodeFailuresLock.Unlock() 31 | 32 | datanodeFailures[df.currentDatanode] = time.Now() 33 | df.err = err 34 | } 35 | 36 | func (df *datanodeFailover) next() string { 37 | if df.numRemaining() == 0 { 38 | return "" 39 | } 40 | 41 | var picked = -1 42 | var oldestFailure time.Time 43 | 44 | for i, address := range df.datanodes { 45 | datanodeFailuresLock.Lock() 46 | failedAt, hasFailed := datanodeFailures[address] 47 | datanodeFailuresLock.Unlock() 48 | 49 | if !hasFailed { 50 | picked = i 51 | break 52 | } else if oldestFailure.IsZero() || failedAt.Before(oldestFailure) { 53 | picked = i 54 | oldestFailure = failedAt 55 | } 56 | } 57 | 58 | address := df.datanodes[picked] 59 | df.datanodes = append(df.datanodes[:picked], df.datanodes[picked+1:]...) 
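// Drop the picked address from this operation's candidate list so next() never
// returns it twice for the same block; any failure recorded for it in the
// global datanodeFailures map still deprioritizes it for later operations.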
60 | 61 | df.currentDatanode = address 62 | return address 63 | } 64 | 65 | func (df *datanodeFailover) numRemaining() int { 66 | return len(df.datanodes) 67 | } 68 | 69 | func (df *datanodeFailover) lastError() error { 70 | return df.err 71 | } 72 | -------------------------------------------------------------------------------- /rpc/datanode_failover_test.go: -------------------------------------------------------------------------------- 1 | package rpc 2 | 3 | import ( 4 | "testing" 5 | "time" 6 | 7 | "github.com/stretchr/testify/assert" 8 | ) 9 | 10 | func TestPicksFirstDatanode(t *testing.T) { 11 | df := newDatanodeFailover([]string{"foo:6000", "bar:6000"}) 12 | assert.EqualValues(t, df.next(), "foo:6000") 13 | } 14 | 15 | func TestPicksDatanodesWithoutFailures(t *testing.T) { 16 | df := newDatanodeFailover([]string{"foo:6000", "foo:7000", "bar:6000"}) 17 | datanodeFailures["foo:6000"] = time.Now() 18 | 19 | assert.EqualValues(t, df.next(), "foo:7000") 20 | } 21 | 22 | func TestPicksDatanodesWithOldestFailures(t *testing.T) { 23 | df := newDatanodeFailover([]string{"foo:6000", "bar:6000"}) 24 | datanodeFailures["foo:6000"] = time.Now().Add(-10 * time.Minute) 25 | datanodeFailures["bar:6000"] = time.Now() 26 | 27 | assert.EqualValues(t, df.next(), "foo:6000") 28 | } 29 | -------------------------------------------------------------------------------- /setup_test_env.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | HADOOP_DISTRO=${HADOOP_DISTRO-"cdh"} 4 | HADOOP_HOME=${HADOOP_HOME-"/tmp/hadoop-$HADOOP_DISTRO"} 5 | NN_PORT=${NN_PORT-"9000"} 6 | HADOOP_NAMENODE="localhost:$NN_PORT" 7 | 8 | if [ ! -d "$HADOOP_HOME" ]; then 9 | mkdir -p $HADOOP_HOME 10 | 11 | if [ $HADOOP_DISTRO = "cdh" ]; then 12 | HADOOP_URL="http://archive.cloudera.com/cdh5/cdh/5/hadoop-latest.tar.gz" 13 | elif [ $HADOOP_DISTRO = "hdp" ]; then 14 | HADOOP_URL="http://public-repo-1.hortonworks.com/HDP/centos6/2.x/updates/2.4.0.0/tars/hadoop-2.7.1.2.4.0.0-169.tar.gz" 15 | else 16 | echo "No/bad HADOOP_DISTRO='${HADOOP_DISTRO}' specified" 17 | exit 1 18 | fi 19 | 20 | echo "Downloading Hadoop from $HADOOP_URL to ${HADOOP_HOME}/hadoop.tar.gz" 21 | curl -o ${HADOOP_HOME}/hadoop.tar.gz -L $HADOOP_URL 22 | 23 | echo "Extracting ${HADOOP_HOME}/hadoop.tar.gz into $HADOOP_HOME" 24 | tar zxf ${HADOOP_HOME}/hadoop.tar.gz --strip-components 1 -C $HADOOP_HOME 25 | fi 26 | 27 | MINICLUSTER_JAR=$(find $HADOOP_HOME -name "hadoop-mapreduce-client-jobclient*.jar" | grep -v tests | grep -v sources | head -1) 28 | if [ ! -f "$MINICLUSTER_JAR" ]; then 29 | echo "Couldn't find minicluster jar" 30 | exit 1 31 | fi 32 | echo "minicluster jar found at $MINICLUSTER_JAR" 33 | 34 | 35 | # start the namenode in the background 36 | echo "Starting hadoop namenode..." 
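# Boot an in-process minicluster: a namenode on $NN_PORT plus three datanodes,
# with MapReduce disabled (-nomr) and a freshly formatted namespace (-format);
# all output is captured in minicluster.log.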
37 | $HADOOP_HOME/bin/hadoop jar $MINICLUSTER_JAR minicluster -nnport $NN_PORT -datanodes 3 -nomr -format "$@" > minicluster.log 2>&1 & 38 | sleep 30 39 | 40 | HADOOP_FS="$HADOOP_HOME/bin/hadoop fs -Ddfs.block.size=1048576" 41 | $HADOOP_FS -mkdir -p "hdfs://$HADOOP_NAMENODE/_test" 42 | $HADOOP_FS -chmod 777 "hdfs://$HADOOP_NAMENODE/_test" 43 | 44 | $HADOOP_FS -put ./test/foo.txt "hdfs://$HADOOP_NAMENODE/_test/foo.txt" 45 | $HADOOP_FS -put ./test/mobydick.txt "hdfs://$HADOOP_NAMENODE/_test/mobydick.txt" 46 | 47 | echo "Please run the following command:" 48 | echo "export HADOOP_NAMENODE='$HADOOP_NAMENODE'" 49 | -------------------------------------------------------------------------------- /stat.go: -------------------------------------------------------------------------------- 1 | package hdfs 2 | 3 | import ( 4 | "os" 5 | "path" 6 | "time" 7 | 8 | hdfs "github.com/colinmarc/hdfs/protocol/hadoop_hdfs" 9 | "github.com/colinmarc/hdfs/rpc" 10 | "github.com/golang/protobuf/proto" 11 | ) 12 | 13 | // FileInfo implements os.FileInfo, and provides information about a file or 14 | // directory in HDFS. 15 | type FileInfo struct { 16 | name string 17 | status *hdfs.HdfsFileStatusProto 18 | } 19 | 20 | // Stat returns an os.FileInfo describing the named file or directory. 21 | func (c *Client) Stat(name string) (os.FileInfo, error) { 22 | fi, err := c.getFileInfo(name) 23 | if err != nil { 24 | err = &os.PathError{"stat", name, err} 25 | } 26 | 27 | return fi, err 28 | } 29 | 30 | func (c *Client) getFileInfo(name string) (os.FileInfo, error) { 31 | req := &hdfs.GetFileInfoRequestProto{Src: proto.String(name)} 32 | resp := &hdfs.GetFileInfoResponseProto{} 33 | 34 | err := c.namenode.Execute("getFileInfo", req, resp) 35 | if err != nil { 36 | if nnErr, ok := err.(*rpc.NamenodeError); ok { 37 | err = interpretException(nnErr.Exception, err) 38 | } 39 | 40 | return nil, err 41 | } 42 | 43 | if resp.GetFs() == nil { 44 | return nil, os.ErrNotExist 45 | } 46 | 47 | return newFileInfo(resp.GetFs(), name), nil 48 | } 49 | 50 | func newFileInfo(status *hdfs.HdfsFileStatusProto, name string) *FileInfo { 51 | fi := &FileInfo{status: status} 52 | 53 | var fullName string 54 | if string(status.GetPath()) != "" { 55 | fullName = string(status.GetPath()) 56 | } else { 57 | fullName = name 58 | } 59 | 60 | fi.name = path.Base(fullName) 61 | return fi 62 | } 63 | 64 | func (fi *FileInfo) Name() string { 65 | return fi.name 66 | } 67 | 68 | func (fi *FileInfo) Size() int64 { 69 | return int64(fi.status.GetLength()) 70 | } 71 | 72 | func (fi *FileInfo) Mode() os.FileMode { 73 | mode := os.FileMode(fi.status.GetPermission().GetPerm()) 74 | if fi.IsDir() { 75 | mode |= os.ModeDir 76 | } 77 | 78 | return mode 79 | } 80 | 81 | func (fi *FileInfo) ModTime() time.Time { 82 | return time.Unix(int64(fi.status.GetModificationTime())/1000, 0) 83 | } 84 | 85 | func (fi *FileInfo) IsDir() bool { 86 | return fi.status.GetFileType() == hdfs.HdfsFileStatusProto_IS_DIR 87 | } 88 | 89 | // Sys returns the raw *hadoop_hdfs.HdfsFileStatusProto message from the 90 | // namenode. 91 | func (fi *FileInfo) Sys() interface{} { 92 | return fi.status 93 | } 94 | 95 | // Owner returns the name of the user that owns the file or directory. It's not 96 | // part of the os.FileInfo interface. 97 | func (fi *FileInfo) Owner() string { 98 | return fi.status.GetOwner() 99 | } 100 | 101 | // OwnerGroup returns the name of the group that owns the file or directory. 102 | // It's not part of the os.FileInfo interface. 
103 | func (fi *FileInfo) OwnerGroup() string { 104 | return fi.status.GetGroup() 105 | } 106 | 107 | // AccessTime returns the last time the file was accessed. It's not part of the 108 | // os.FileInfo interface. 109 | func (fi *FileInfo) AccessTime() time.Time { 110 | return time.Unix(int64(fi.status.GetAccessTime())/1000, 0) 111 | } 112 | -------------------------------------------------------------------------------- /stat_fs.go: -------------------------------------------------------------------------------- 1 | package hdfs 2 | 3 | import ( 4 | "errors" 5 | 6 | hdfs "github.com/colinmarc/hdfs/protocol/hadoop_hdfs" 7 | "github.com/colinmarc/hdfs/rpc" 8 | ) 9 | 10 | var StatFsError = errors.New("Failed to get HDFS usage") 11 | 12 | // FsInfo provides information about HDFS 13 | type FsInfo struct { 14 | Capacity uint64 15 | Used uint64 16 | Remaining uint64 17 | UnderReplicated uint64 18 | CorruptBlocks uint64 19 | MissingBlocks uint64 20 | MissingReplOneBlocks uint64 21 | BlocksInFuture uint64 22 | PendingDeletionBlocks uint64 23 | } 24 | 25 | func (c *Client) StatFs() (FsInfo, error) { 26 | req := &hdfs.GetFsStatusRequestProto{} 27 | resp := &hdfs.GetFsStatsResponseProto{} 28 | 29 | err := c.namenode.Execute("getFsStats", req, resp) 30 | if err != nil { 31 | if nnErr, ok := err.(*rpc.NamenodeError); ok { 32 | err = interpretException(nnErr.Exception, err) 33 | } 34 | return FsInfo{}, err 35 | } 36 | 37 | var fs FsInfo 38 | fs.Capacity = resp.GetCapacity() 39 | fs.Used = resp.GetUsed() 40 | fs.Remaining = resp.GetRemaining() 41 | fs.UnderReplicated = resp.GetUnderReplicated() 42 | fs.CorruptBlocks = resp.GetCorruptBlocks() 43 | fs.MissingBlocks = resp.GetMissingBlocks() 44 | fs.MissingReplOneBlocks = resp.GetMissingReplOneBlocks() 45 | fs.BlocksInFuture = resp.GetBlocksInFuture() 46 | fs.PendingDeletionBlocks = resp.GetPendingDeletionBlocks() 47 | 48 | return fs, nil 49 | } 50 | -------------------------------------------------------------------------------- /stat_fs_test.go: -------------------------------------------------------------------------------- 1 | package hdfs 2 | 3 | import ( 4 | "testing" 5 | 6 | "github.com/stretchr/testify/require" 7 | ) 8 | 9 | func TestStatFs(t *testing.T) { 10 | client := getClient(t) 11 | 12 | _, err := client.StatFs() 13 | require.NoError(t, err) 14 | } 15 | -------------------------------------------------------------------------------- /stat_test.go: -------------------------------------------------------------------------------- 1 | package hdfs 2 | 3 | import ( 4 | "os" 5 | "testing" 6 | "time" 7 | 8 | "github.com/stretchr/testify/assert" 9 | "github.com/stretchr/testify/require" 10 | ) 11 | 12 | func TestStat(t *testing.T) { 13 | client := getClient(t) 14 | 15 | resp, err := client.Stat("/_test/foo.txt") 16 | require.NoError(t, err) 17 | 18 | assert.EqualValues(t, "foo.txt", resp.Name()) 19 | assert.False(t, resp.IsDir()) 20 | assert.EqualValues(t, 4, resp.Size()) 21 | assert.EqualValues(t, time.Now().Year(), resp.ModTime().Year()) 22 | assert.EqualValues(t, time.Now().Month(), resp.ModTime().Month()) 23 | } 24 | 25 | func TestStatEmptyFile(t *testing.T) { 26 | client := getClient(t) 27 | 28 | touch(t, "/_test/emptyfile2") 29 | 30 | resp, err := client.Stat("/_test/emptyfile2") 31 | require.NoError(t, err) 32 | 33 | assert.EqualValues(t, "emptyfile2", resp.Name()) 34 | assert.False(t, resp.IsDir()) 35 | assert.EqualValues(t, 0, resp.Size()) 36 | assert.EqualValues(t, time.Now().Year(), resp.ModTime().Year()) 37 | assert.EqualValues(t, 
time.Now().Month(), resp.ModTime().Month()) 38 | } 39 | 40 | func TestStatNotExistent(t *testing.T) { 41 | client := getClient(t) 42 | 43 | resp, err := client.Stat("/_test/nonexistent") 44 | assertPathError(t, err, "stat", "/_test/nonexistent", os.ErrNotExist) 45 | assert.Nil(t, resp) 46 | } 47 | 48 | func TestStatDir(t *testing.T) { 49 | client := getClient(t) 50 | 51 | mkdirp(t, "/_test/dir") 52 | 53 | resp, err := client.Stat("/_test/dir") 54 | require.NoError(t, err) 55 | 56 | assert.EqualValues(t, "dir", resp.Name()) 57 | assert.True(t, resp.IsDir()) 58 | assert.EqualValues(t, 0, resp.Size(), 0) 59 | assert.EqualValues(t, time.Now().Year(), resp.ModTime().Year()) 60 | assert.EqualValues(t, time.Now().Month(), resp.ModTime().Month()) 61 | } 62 | 63 | func TestStatDirWithoutPermission(t *testing.T) { 64 | otherClient := getClientForUser(t, "other") 65 | 66 | mkdirp(t, "/_test/accessdenied") 67 | touch(t, "/_test/accessdenied/foo") 68 | 69 | resp, err := otherClient.Stat("/_test/accessdenied") 70 | assert.NoError(t, err) 71 | assert.NotEqual(t, "", resp.(*FileInfo).Owner()) 72 | 73 | _, err = otherClient.Stat("/_test/accessdenied/foo") 74 | assertPathError(t, err, "stat", "/_test/accessdenied/foo", os.ErrPermission) 75 | } 76 | -------------------------------------------------------------------------------- /test/conf/hdfs-site.xml: -------------------------------------------------------------------------------- 1 | 2 | 3 | 16 | 17 | <configuration> 18 | <property> 19 | <name>dfs.nameservices</name> 20 | <value>tests</value> 21 | </property> 22 | <property> 23 | <name>dfs.ha.automatic-failover.enabled</name> 24 | <value>true</value> 25 | </property> 26 | <property> 27 | <name>dfs.ha.namenodes.tests</name> 28 | <value>nn1,nn2</value> 29 | </property> 30 | <property> 31 | <name>dfs.namenode.rpc-address.tests.nn1</name> 32 | <value>namenode1:8020</value> 33 | </property> 34 | <property> 35 | <name>dfs.namenode.rpc-address.tests.nn2</name> 36 | <value>namenode2:8020</value> 37 | </property> 38 | </configuration> 39 | -------------------------------------------------------------------------------- /test/conf2/core-site.xml: -------------------------------------------------------------------------------- 1 | 2 | 3 | 16 | 17 | 18 | 19 | <configuration> 20 | <property> 21 | <name>fs.defaultFS</name> 22 | <value>hdfs://namenode3:8020</value> 23 | </property> 24 | </configuration> 25 | -------------------------------------------------------------------------------- /test/conf3/core-site.xml: -------------------------------------------------------------------------------- 1 | 2 | 3 | 16 | 17 | 18 | 19 | <configuration> 20 | <property> 21 | <name>fs.default.name</name> 22 | <value>hdfs://namenode4:8020</value> 23 | </property> 24 | </configuration> 25 | -------------------------------------------------------------------------------- /test/foo.txt: -------------------------------------------------------------------------------- 1 | bar 2 | -------------------------------------------------------------------------------- /vendor/github.com/davecgh/go-spew/LICENSE: -------------------------------------------------------------------------------- 1 | ISC License 2 | 3 | Copyright (c) 2012-2016 Dave Collins 4 | 5 | Permission to use, copy, modify, and distribute this software for any 6 | purpose with or without fee is hereby granted, provided that the above 7 | copyright notice and this permission notice appear in all copies. 8 | 9 | THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES 10 | WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF 11 | MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR 12 | ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES 13 | WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN 14 | ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF 15 | OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
16 | -------------------------------------------------------------------------------- /vendor/github.com/davecgh/go-spew/spew/bypasssafe.go: -------------------------------------------------------------------------------- 1 | // Copyright (c) 2015-2016 Dave Collins 2 | // 3 | // Permission to use, copy, modify, and distribute this software for any 4 | // purpose with or without fee is hereby granted, provided that the above 5 | // copyright notice and this permission notice appear in all copies. 6 | // 7 | // THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES 8 | // WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF 9 | // MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR 10 | // ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES 11 | // WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN 12 | // ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF 13 | // OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 14 | 15 | // NOTE: Due to the following build constraints, this file will only be compiled 16 | // when the code is running on Google App Engine, compiled by GopherJS, or 17 | // "-tags safe" is added to the go build command line. The "disableunsafe" 18 | // tag is deprecated and thus should not be used. 19 | // +build js appengine safe disableunsafe 20 | 21 | package spew 22 | 23 | import "reflect" 24 | 25 | const ( 26 | // UnsafeDisabled is a build-time constant which specifies whether or 27 | // not access to the unsafe package is available. 28 | UnsafeDisabled = true 29 | ) 30 | 31 | // unsafeReflectValue typically converts the passed reflect.Value into a one 32 | // that bypasses the typical safety restrictions preventing access to 33 | // unaddressable and unexported data. However, doing this relies on access to 34 | // the unsafe package. This is a stub version which simply returns the passed 35 | // reflect.Value when the unsafe package is not available. 36 | func unsafeReflectValue(v reflect.Value) reflect.Value { 37 | return v 38 | } 39 | -------------------------------------------------------------------------------- /vendor/github.com/golang/protobuf/AUTHORS: -------------------------------------------------------------------------------- 1 | # This source code refers to The Go Authors for copyright purposes. 2 | # The master list of authors is in the main Go distribution, 3 | # visible at http://tip.golang.org/AUTHORS. 4 | -------------------------------------------------------------------------------- /vendor/github.com/golang/protobuf/CONTRIBUTORS: -------------------------------------------------------------------------------- 1 | # This source code was written by the Go contributors. 2 | # The master list of contributors is in the main Go distribution, 3 | # visible at http://tip.golang.org/CONTRIBUTORS. 4 | -------------------------------------------------------------------------------- /vendor/github.com/golang/protobuf/LICENSE: -------------------------------------------------------------------------------- 1 | Go support for Protocol Buffers - Google's data interchange format 2 | 3 | Copyright 2010 The Go Authors. All rights reserved. 
4 | https://github.com/golang/protobuf 5 | 6 | Redistribution and use in source and binary forms, with or without 7 | modification, are permitted provided that the following conditions are 8 | met: 9 | 10 | * Redistributions of source code must retain the above copyright 11 | notice, this list of conditions and the following disclaimer. 12 | * Redistributions in binary form must reproduce the above 13 | copyright notice, this list of conditions and the following disclaimer 14 | in the documentation and/or other materials provided with the 15 | distribution. 16 | * Neither the name of Google Inc. nor the names of its 17 | contributors may be used to endorse or promote products derived from 18 | this software without specific prior written permission. 19 | 20 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 21 | "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 22 | LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 23 | A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 24 | OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 25 | SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 26 | LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 27 | DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 28 | THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 29 | (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 30 | OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 31 | 32 | -------------------------------------------------------------------------------- /vendor/github.com/golang/protobuf/proto/Makefile: -------------------------------------------------------------------------------- 1 | # Go support for Protocol Buffers - Google's data interchange format 2 | # 3 | # Copyright 2010 The Go Authors. All rights reserved. 4 | # https://github.com/golang/protobuf 5 | # 6 | # Redistribution and use in source and binary forms, with or without 7 | # modification, are permitted provided that the following conditions are 8 | # met: 9 | # 10 | # * Redistributions of source code must retain the above copyright 11 | # notice, this list of conditions and the following disclaimer. 12 | # * Redistributions in binary form must reproduce the above 13 | # copyright notice, this list of conditions and the following disclaimer 14 | # in the documentation and/or other materials provided with the 15 | # distribution. 16 | # * Neither the name of Google Inc. nor the names of its 17 | # contributors may be used to endorse or promote products derived from 18 | # this software without specific prior written permission. 19 | # 20 | # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 21 | # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 22 | # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 23 | # A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT 24 | # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 25 | # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 26 | # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 27 | # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 28 | # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 29 | # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 30 | # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 31 | 32 | install: 33 | go install 34 | 35 | test: install generate-test-pbs 36 | go test 37 | 38 | 39 | generate-test-pbs: 40 | make install 41 | make -C testdata 42 | protoc --go_out=Mtestdata/test.proto=github.com/golang/protobuf/proto/testdata,Mgoogle/protobuf/any.proto=github.com/golang/protobuf/ptypes/any:. proto3_proto/proto3.proto 43 | make 44 | -------------------------------------------------------------------------------- /vendor/github.com/pborman/getopt/AUTHORS: -------------------------------------------------------------------------------- 1 | Paul Borman 2 | -------------------------------------------------------------------------------- /vendor/github.com/pborman/getopt/CONTRIBUTING.md: -------------------------------------------------------------------------------- 1 | # How to contribute 2 | 3 | We definitely welcome patches and contribution to this project! 4 | 5 | ### Legal requirements 6 | 7 | In order to protect both you and ourselves, you will need to sign the 8 | [Contributor License Agreement](https://cla.developers.google.com/clas). 9 | 10 | You may have already signed it for other Google projects. 11 | -------------------------------------------------------------------------------- /vendor/github.com/pborman/getopt/LICENSE: -------------------------------------------------------------------------------- 1 | Copyright (c) 2017 Google Inc. All rights reserved. 2 | 3 | Redistribution and use in source and binary forms, with or without 4 | modification, are permitted provided that the following conditions are 5 | met: 6 | 7 | * Redistributions of source code must retain the above copyright 8 | notice, this list of conditions and the following disclaimer. 9 | * Redistributions in binary form must reproduce the above 10 | copyright notice, this list of conditions and the following disclaimer 11 | in the documentation and/or other materials provided with the 12 | distribution. 13 | * Neither the name of Google, nor the names of other 14 | contributors may be used to endorse or promote products derived from 15 | this software without specific prior written permission. 16 | 17 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 18 | "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 19 | LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 20 | A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 21 | OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 22 | SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 23 | LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 24 | DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 25 | THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 26 | (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 27 | OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
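Each of the getopt source files vendored below (bool.go, counter.go, duration.go, enum.go, and so on) registers one flag type against a Set. Here is a rough usage sketch combining the helpers defined in those files; it assumes getopt.Parse from getopt.go, which is not included in this part of the dump.

package main

import (
	"fmt"

	"github.com/pborman/getopt"
)

func main() {
	// Each helper returns a pointer that is filled in when the command line is
	// parsed; the signatures match the vendored files that follow.
	verbose := getopt.BoolLong("verbose", 'v', "print more output")
	count := getopt.Counter('c', "may be repeated to increase verbosity")
	mode := getopt.EnumLong("mode", 'm', []string{"fast", "safe"}, "operating mode")

	// Parse is defined in getopt.go (not shown in this part of the dump) and
	// reads os.Args.
	getopt.Parse()

	fmt.Println(*verbose, *count, *mode)
}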
28 | -------------------------------------------------------------------------------- /vendor/github.com/pborman/getopt/bool.go: -------------------------------------------------------------------------------- 1 | // Copyright 2013 Google Inc. All rights reserved. 2 | // Use of this source code is governed by a BSD-style 3 | // license that can be found in the LICENSE file. 4 | 5 | package getopt 6 | 7 | import ( 8 | "fmt" 9 | "strings" 10 | ) 11 | 12 | type boolValue bool 13 | 14 | func (b *boolValue) Set(value string, opt Option) error { 15 | switch strings.ToLower(value) { 16 | case "", "1", "true", "on", "t": 17 | *b = true 18 | case "0", "false", "off", "f": 19 | *b = false 20 | default: 21 | return fmt.Errorf("invalid value for bool %s: %q", opt.Name(), value) 22 | } 23 | return nil 24 | } 25 | 26 | func (b *boolValue) String() string { 27 | if *b { 28 | return "true" 29 | } 30 | return "false" 31 | } 32 | 33 | // Bool creates a flag option that is a bool. Bools normally do not take a 34 | // value; however, one can be assigned by using the long form of the option: 35 | // 36 | // --option=true 37 | // --o=false 38 | // 39 | // Its value is case insensitive and one of true, false, t, f, on, off, 1 and 0. 40 | func Bool(name rune, helpvalue ...string) *bool { 41 | return CommandLine.Bool(name, helpvalue...) 42 | } 43 | 44 | func (s *Set) Bool(name rune, helpvalue ...string) *bool { 45 | var p bool 46 | s.BoolVarLong(&p, "", name, helpvalue...) 47 | return &p 48 | } 49 | 50 | func BoolLong(name string, short rune, helpvalue ...string) *bool { 51 | return CommandLine.BoolLong(name, short, helpvalue...) 52 | } 53 | 54 | func (s *Set) BoolLong(name string, short rune, helpvalue ...string) *bool { 55 | var p bool 56 | s.BoolVarLong(&p, name, short, helpvalue...) 57 | return &p 58 | } 59 | 60 | func BoolVar(p *bool, name rune, helpvalue ...string) Option { 61 | return CommandLine.BoolVar(p, name, helpvalue...) 62 | } 63 | 64 | func (s *Set) BoolVar(p *bool, name rune, helpvalue ...string) Option { 65 | return s.BoolVarLong(p, "", name, helpvalue...) 66 | } 67 | 68 | func BoolVarLong(p *bool, name string, short rune, helpvalue ...string) Option { 69 | return CommandLine.BoolVarLong(p, name, short, helpvalue...) 70 | } 71 | 72 | func (s *Set) BoolVarLong(p *bool, name string, short rune, helpvalue ...string) Option { 73 | return s.VarLong((*boolValue)(p), name, short, helpvalue...).SetFlag() 74 | } 75 | -------------------------------------------------------------------------------- /vendor/github.com/pborman/getopt/counter.go: -------------------------------------------------------------------------------- 1 | // Copyright 2013 Google Inc. All rights reserved. 2 | // Use of this source code is governed by a BSD-style 3 | // license that can be found in the LICENSE file.
4 | 5 | package getopt 6 | 7 | import ( 8 | "fmt" 9 | "strconv" 10 | ) 11 | 12 | type counterValue int 13 | 14 | func (b *counterValue) Set(value string, opt Option) error { 15 | if value == "" { 16 | *b++ 17 | } else { 18 | v, err := strconv.ParseInt(value, 0, strconv.IntSize) 19 | if err != nil { 20 | if e, ok := err.(*strconv.NumError); ok { 21 | switch e.Err { 22 | case strconv.ErrRange: 23 | err = fmt.Errorf("value out of range: %s", value) 24 | case strconv.ErrSyntax: 25 | err = fmt.Errorf("not a valid number: %s", value) 26 | } 27 | } 28 | return err 29 | } 30 | *b = counterValue(v) 31 | } 32 | return nil 33 | } 34 | 35 | func (b *counterValue) String() string { 36 | return strconv.Itoa(int(*b)) 37 | } 38 | 39 | // Counter creates a counting flag stored as an int. Each time the option 40 | // is seen while parsing the value is incremented. The value of the counter 41 | // may be explicitly set by using the long form: 42 | // 43 | // --counter=5 44 | // --c=5 45 | // 46 | // Further instances of the option will increment from the set value. 47 | func Counter(name rune, helpvalue ...string) *int { 48 | return CommandLine.Counter(name, helpvalue...) 49 | } 50 | 51 | func (s *Set) Counter(name rune, helpvalue ...string) *int { 52 | var p int 53 | s.CounterVarLong(&p, "", name, helpvalue...) 54 | return &p 55 | } 56 | 57 | func CounterLong(name string, short rune, helpvalue ...string) *int { 58 | return CommandLine.CounterLong(name, short, helpvalue...) 59 | } 60 | 61 | func (s *Set) CounterLong(name string, short rune, helpvalue ...string) *int { 62 | var p int 63 | s.CounterVarLong(&p, name, short, helpvalue...) 64 | return &p 65 | } 66 | 67 | func CounterVar(p *int, name rune, helpvalue ...string) Option { 68 | return CommandLine.CounterVar(p, name, helpvalue...) 69 | } 70 | 71 | func (s *Set) CounterVar(p *int, name rune, helpvalue ...string) Option { 72 | return s.CounterVarLong(p, "", name, helpvalue...) 73 | } 74 | 75 | func CounterVarLong(p *int, name string, short rune, helpvalue ...string) Option { 76 | return CommandLine.CounterVarLong(p, name, short, helpvalue...) 77 | } 78 | 79 | func (s *Set) CounterVarLong(p *int, name string, short rune, helpvalue ...string) Option { 80 | return s.VarLong((*counterValue)(p), name, short, helpvalue...).SetFlag() 81 | } 82 | -------------------------------------------------------------------------------- /vendor/github.com/pborman/getopt/duration.go: -------------------------------------------------------------------------------- 1 | // Copyright 2015 Google Inc. All rights reserved. 2 | // Use of this source code is governed by a BSD-style 3 | // license that can be found in the LICENSE file. 4 | 5 | package getopt 6 | 7 | import "time" 8 | 9 | type durationValue time.Duration 10 | 11 | func (d *durationValue) Set(value string, opt Option) error { 12 | v, err := time.ParseDuration(value) 13 | if err != nil { 14 | return err 15 | } 16 | *d = durationValue(v) 17 | return nil 18 | } 19 | 20 | func (d *durationValue) String() string { 21 | return time.Duration(*d).String() 22 | } 23 | 24 | // Duration creates an option that parses its value as a time.Duration. 25 | func Duration(name rune, value time.Duration, helpvalue ...string) *time.Duration { 26 | return CommandLine.Duration(name, value, helpvalue...) 27 | } 28 | 29 | func (s *Set) Duration(name rune, value time.Duration, helpvalue ...string) *time.Duration { 30 | return s.DurationLong("", name, value, helpvalue...) 
31 | } 32 | 33 | func DurationLong(name string, short rune, value time.Duration, helpvalue ...string) *time.Duration { 34 | return CommandLine.DurationLong(name, short, value, helpvalue...) 35 | } 36 | 37 | func (s *Set) DurationLong(name string, short rune, value time.Duration, helpvalue ...string) *time.Duration { 38 | s.DurationVarLong(&value, name, short, helpvalue...) 39 | return &value 40 | } 41 | 42 | func DurationVar(p *time.Duration, name rune, helpvalue ...string) Option { 43 | return CommandLine.DurationVar(p, name, helpvalue...) 44 | } 45 | 46 | func (s *Set) DurationVar(p *time.Duration, name rune, helpvalue ...string) Option { 47 | return s.DurationVarLong(p, "", name, helpvalue...) 48 | } 49 | 50 | func DurationVarLong(p *time.Duration, name string, short rune, helpvalue ...string) Option { 51 | return CommandLine.DurationVarLong(p, name, short, helpvalue...) 52 | } 53 | 54 | func (s *Set) DurationVarLong(p *time.Duration, name string, short rune, helpvalue ...string) Option { 55 | return s.VarLong((*durationValue)(p), name, short, helpvalue...) 56 | } 57 | -------------------------------------------------------------------------------- /vendor/github.com/pborman/getopt/enum.go: -------------------------------------------------------------------------------- 1 | // Copyright 2013 Google Inc. All rights reserved. 2 | // Use of this source code is governed by a BSD-style 3 | // license that can be found in the LICENSE file. 4 | 5 | package getopt 6 | 7 | import "errors" 8 | 9 | type enumValue string 10 | 11 | var enumValues = make(map[*enumValue]map[string]struct{}) 12 | 13 | func (s *enumValue) Set(value string, opt Option) error { 14 | es, ok := enumValues[s] 15 | if !ok || es == nil { 16 | return errors.New("this option has no values") 17 | } 18 | if _, ok := es[value]; !ok { 19 | return errors.New("invalid value: " + value) 20 | } 21 | *s = enumValue(value) 22 | return nil 23 | } 24 | 25 | func (s *enumValue) String() string { 26 | return string(*s) 27 | } 28 | 29 | // Enum creates an option that can only be set to one of the enumerated strings 30 | // passed in values. Passing nil or an empty slice results in an option that 31 | // will always fail. 32 | func Enum(name rune, values []string, helpvalue ...string) *string { 33 | return CommandLine.Enum(name, values, helpvalue...) 34 | } 35 | 36 | func (s *Set) Enum(name rune, values []string, helpvalue ...string) *string { 37 | var p string 38 | s.EnumVarLong(&p, "", name, values, helpvalue...) 39 | return &p 40 | } 41 | 42 | func EnumLong(name string, short rune, values []string, helpvalue ...string) *string { 43 | return CommandLine.EnumLong(name, short, values, helpvalue...) 44 | } 45 | 46 | func (s *Set) EnumLong(name string, short rune, values []string, helpvalue ...string) *string { 47 | var p string 48 | s.EnumVarLong(&p, name, short, values, helpvalue...) 49 | return &p 50 | } 51 | 52 | // EnumVar creates an enum option that defaults to the starting value of *p. 53 | // If *p is not found in values then a reset of this option will fail. 54 | func EnumVar(p *string, name rune, values []string, helpvalue ...string) Option { 55 | return CommandLine.EnumVar(p, name, values, helpvalue...) 56 | } 57 | 58 | func (s *Set) EnumVar(p *string, name rune, values []string, helpvalue ...string) Option { 59 | return s.EnumVarLong(p, "", name, values, helpvalue...) 
60 | } 61 | 62 | func EnumVarLong(p *string, name string, short rune, values []string, helpvalue ...string) Option { 63 | return CommandLine.EnumVarLong(p, name, short, values, helpvalue...) 64 | } 65 | 66 | func (s *Set) EnumVarLong(p *string, name string, short rune, values []string, helpvalue ...string) Option { 67 | m := make(map[string]struct{}) 68 | for _, v := range values { 69 | m[v] = struct{}{} 70 | } 71 | enumValues[(*enumValue)(p)] = m 72 | return s.VarLong((*enumValue)(p), name, short, helpvalue...) 73 | } 74 | -------------------------------------------------------------------------------- /vendor/github.com/pborman/getopt/error.go: -------------------------------------------------------------------------------- 1 | // Copyright 2013 Google Inc. All rights reserved. 2 | // Use of this source code is governed by a BSD-style 3 | // license that can be found in the LICENSE file. 4 | 5 | package getopt 6 | 7 | import "fmt" 8 | 9 | // An Error is returned by Getopt when it encounters an error. 10 | type Error struct { 11 | ErrorCode // General reason of failure. 12 | Err error // The actual error. 13 | Parameter string // Parameter passed to option, if any 14 | Name string // Option that caused the error, if any 15 | } 16 | 17 | // Error returns the error message, implementing the error interface. 18 | func (i *Error) Error() string { return i.Err.Error() } 19 | 20 | // An ErrorCode indicates what sort of error was encountered. 21 | type ErrorCode int 22 | 23 | const ( 24 | NoError = ErrorCode(iota) 25 | UnknownOption // an invalid option was encountered 26 | MissingParameter // the option's parameter is missing 27 | ExtraParameter // a value was set to a long flag 28 | Invalid // attempt to set an invalid value 29 | ) 30 | 31 | func (e ErrorCode) String() string { 32 | switch e { 33 | case UnknownOption: 34 | return "unknown option" 35 | case MissingParameter: 36 | return "missing argument" 37 | case ExtraParameter: 38 | return "unexpected value" 39 | case Invalid: 40 | return "error setting value" 41 | } 42 | return "unknown error" 43 | } 44 | 45 | // unknownOption returns an Error indicating an unknown option was 46 | // encountered. 47 | func unknownOption(name interface{}) *Error { 48 | i := &Error{ErrorCode: UnknownOption} 49 | switch n := name.(type) { 50 | case rune: 51 | if n == '-' { 52 | i.Name = "-" 53 | } else { 54 | i.Name = "-" + string(n) 55 | } 56 | case string: 57 | i.Name = "--" + n 58 | } 59 | i.Err = fmt.Errorf("unknown option: %s", i.Name) 60 | return i 61 | } 62 | 63 | // missingArg returns an Error indicating option o was not passed 64 | // a required parameter. 65 | func missingArg(o Option) *Error { 66 | return &Error{ 67 | ErrorCode: MissingParameter, 68 | Name: o.Name(), 69 | Err: fmt.Errorf("missing parameter for %s", o.Name()), 70 | } 71 | } 72 | 73 | // extraArg returns an Error indicating option o was passed the 74 | // unexpected parameter value. 75 | func extraArg(o Option, value string) *Error { 76 | return &Error{ 77 | ErrorCode: ExtraParameter, 78 | Name: o.Name(), 79 | Parameter: value, 80 | Err: fmt.Errorf("unexpected parameter passed to %s: %q", o.Name(), value), 81 | } 82 | } 83 | 84 | // setError returns an Error indicating the specified error was 85 | // encountered while setting option o to value.
86 | func setError(o Option, value string, err error) *Error { 87 | return &Error{ 88 | ErrorCode: Invalid, 89 | Name: o.Name(), 90 | Parameter: value, 91 | Err: err, 92 | } 93 | } 94 | -------------------------------------------------------------------------------- /vendor/github.com/pborman/getopt/int.go: -------------------------------------------------------------------------------- 1 | // Copyright 2013 Google Inc. All rights reserved. 2 | // Use of this source code is governed by a BSD-style 3 | // license that can be found in the LICENSE file. 4 | 5 | package getopt 6 | 7 | import ( 8 | "fmt" 9 | "strconv" 10 | ) 11 | 12 | type intValue int 13 | 14 | func (i *intValue) Set(value string, opt Option) error { 15 | v, err := strconv.ParseInt(value, 0, strconv.IntSize) 16 | if err != nil { 17 | if e, ok := err.(*strconv.NumError); ok { 18 | switch e.Err { 19 | case strconv.ErrRange: 20 | err = fmt.Errorf("value out of range: %s", value) 21 | case strconv.ErrSyntax: 22 | err = fmt.Errorf("not a valid number: %s", value) 23 | } 24 | } 25 | return err 26 | } 27 | *i = intValue(v) 28 | return nil 29 | } 30 | 31 | func (i *intValue) String() string { 32 | return strconv.FormatInt(int64(*i), 10) 33 | } 34 | 35 | // Int creates an option that parses its value as an integer. 36 | func Int(name rune, value int, helpvalue ...string) *int { 37 | return CommandLine.Int(name, value, helpvalue...) 38 | } 39 | 40 | func (s *Set) Int(name rune, value int, helpvalue ...string) *int { 41 | return s.IntLong("", name, value, helpvalue...) 42 | } 43 | 44 | func IntLong(name string, short rune, value int, helpvalue ...string) *int { 45 | return CommandLine.IntLong(name, short, value, helpvalue...) 46 | } 47 | 48 | func (s *Set) IntLong(name string, short rune, value int, helpvalue ...string) *int { 49 | s.IntVarLong(&value, name, short, helpvalue...) 50 | return &value 51 | } 52 | 53 | func IntVar(p *int, name rune, helpvalue ...string) Option { 54 | return CommandLine.IntVar(p, name, helpvalue...) 55 | } 56 | 57 | func (s *Set) IntVar(p *int, name rune, helpvalue ...string) Option { 58 | return s.IntVarLong(p, "", name, helpvalue...) 59 | } 60 | 61 | func IntVarLong(p *int, name string, short rune, helpvalue ...string) Option { 62 | return CommandLine.IntVarLong(p, name, short, helpvalue...) 63 | } 64 | 65 | func (s *Set) IntVarLong(p *int, name string, short rune, helpvalue ...string) Option { 66 | return s.VarLong((*intValue)(p), name, short, helpvalue...) 67 | } 68 | -------------------------------------------------------------------------------- /vendor/github.com/pborman/getopt/int16.go: -------------------------------------------------------------------------------- 1 | // Copyright 2013 Google Inc. All rights reserved. 2 | // Use of this source code is governed by a BSD-style 3 | // license that can be found in the LICENSE file. 
4 | 5 | package getopt 6 | 7 | import ( 8 | "fmt" 9 | "strconv" 10 | ) 11 | 12 | type int16Value int16 13 | 14 | func (i *int16Value) Set(value string, opt Option) error { 15 | v, err := strconv.ParseInt(value, 0, 16) 16 | if err != nil { 17 | if e, ok := err.(*strconv.NumError); ok { 18 | switch e.Err { 19 | case strconv.ErrRange: 20 | err = fmt.Errorf("value out of range: %s", value) 21 | case strconv.ErrSyntax: 22 | err = fmt.Errorf("not a valid number: %s", value) 23 | } 24 | } 25 | return err 26 | } 27 | *i = int16Value(v) 28 | return nil 29 | } 30 | 31 | func (i *int16Value) String() string { 32 | return strconv.FormatInt(int64(*i), 10) 33 | } 34 | 35 | // Int16 creates an option that parses its value as an int16. 36 | func Int16(name rune, value int16, helpvalue ...string) *int16 { 37 | return CommandLine.Int16(name, value, helpvalue...) 38 | } 39 | 40 | func (s *Set) Int16(name rune, value int16, helpvalue ...string) *int16 { 41 | return s.Int16Long("", name, value, helpvalue...) 42 | } 43 | 44 | func Int16Long(name string, short rune, value int16, helpvalue ...string) *int16 { 45 | return CommandLine.Int16Long(name, short, value, helpvalue...) 46 | } 47 | 48 | func (s *Set) Int16Long(name string, short rune, value int16, helpvalue ...string) *int16 { 49 | s.Int16VarLong(&value, name, short, helpvalue...) 50 | return &value 51 | } 52 | 53 | func Int16Var(p *int16, name rune, helpvalue ...string) Option { 54 | return CommandLine.Int16Var(p, name, helpvalue...) 55 | } 56 | 57 | func (s *Set) Int16Var(p *int16, name rune, helpvalue ...string) Option { 58 | return s.Int16VarLong(p, "", name, helpvalue...) 59 | } 60 | 61 | func Int16VarLong(p *int16, name string, short rune, helpvalue ...string) Option { 62 | return CommandLine.Int16VarLong(p, name, short, helpvalue...) 63 | } 64 | 65 | func (s *Set) Int16VarLong(p *int16, name string, short rune, helpvalue ...string) Option { 66 | return s.VarLong((*int16Value)(p), name, short, helpvalue...) 67 | } 68 | -------------------------------------------------------------------------------- /vendor/github.com/pborman/getopt/int32.go: -------------------------------------------------------------------------------- 1 | // Copyright 2013 Google Inc. All rights reserved. 2 | // Use of this source code is governed by a BSD-style 3 | // license that can be found in the LICENSE file. 4 | 5 | package getopt 6 | 7 | import ( 8 | "fmt" 9 | "strconv" 10 | ) 11 | 12 | type int32Value int32 13 | 14 | func (i *int32Value) Set(value string, opt Option) error { 15 | v, err := strconv.ParseInt(value, 0, 32) 16 | if err != nil { 17 | if e, ok := err.(*strconv.NumError); ok { 18 | switch e.Err { 19 | case strconv.ErrRange: 20 | err = fmt.Errorf("value out of range: %s", value) 21 | case strconv.ErrSyntax: 22 | err = fmt.Errorf("not a valid number: %s", value) 23 | } 24 | } 25 | return err 26 | } 27 | *i = int32Value(v) 28 | return nil 29 | } 30 | 31 | func (i *int32Value) String() string { 32 | return strconv.FormatInt(int64(*i), 10) 33 | } 34 | 35 | // Int32 creates an option that parses its value as an int32. 36 | func Int32(name rune, value int32, helpvalue ...string) *int32 { 37 | return CommandLine.Int32(name, value, helpvalue...) 38 | } 39 | 40 | func (s *Set) Int32(name rune, value int32, helpvalue ...string) *int32 { 41 | return s.Int32Long("", name, value, helpvalue...) 42 | } 43 | 44 | func Int32Long(name string, short rune, value int32, helpvalue ...string) *int32 { 45 | return CommandLine.Int32Long(name, short, value, helpvalue...) 
46 | } 47 | 48 | func (s *Set) Int32Long(name string, short rune, value int32, helpvalue ...string) *int32 { 49 | s.Int32VarLong(&value, name, short, helpvalue...) 50 | return &value 51 | } 52 | 53 | func Int32Var(p *int32, name rune, helpvalue ...string) Option { 54 | return CommandLine.Int32Var(p, name, helpvalue...) 55 | } 56 | 57 | func (s *Set) Int32Var(p *int32, name rune, helpvalue ...string) Option { 58 | return s.Int32VarLong(p, "", name, helpvalue...) 59 | } 60 | 61 | func Int32VarLong(p *int32, name string, short rune, helpvalue ...string) Option { 62 | return CommandLine.Int32VarLong(p, name, short, helpvalue...) 63 | } 64 | 65 | func (s *Set) Int32VarLong(p *int32, name string, short rune, helpvalue ...string) Option { 66 | return s.VarLong((*int32Value)(p), name, short, helpvalue...) 67 | } 68 | -------------------------------------------------------------------------------- /vendor/github.com/pborman/getopt/int64.go: -------------------------------------------------------------------------------- 1 | // Copyright 2013 Google Inc. All rights reserved. 2 | // Use of this source code is governed by a BSD-style 3 | // license that can be found in the LICENSE file. 4 | 5 | package getopt 6 | 7 | import ( 8 | "fmt" 9 | "strconv" 10 | ) 11 | 12 | type int64Value int64 13 | 14 | func (i *int64Value) Set(value string, opt Option) error { 15 | v, err := strconv.ParseInt(value, 0, 64) 16 | if err != nil { 17 | if e, ok := err.(*strconv.NumError); ok { 18 | switch e.Err { 19 | case strconv.ErrRange: 20 | err = fmt.Errorf("value out of range: %s", value) 21 | case strconv.ErrSyntax: 22 | err = fmt.Errorf("not a valid number: %s", value) 23 | } 24 | } 25 | return err 26 | } 27 | *i = int64Value(v) 28 | return nil 29 | } 30 | 31 | func (i *int64Value) String() string { 32 | return strconv.FormatInt(int64(*i), 10) 33 | } 34 | 35 | // Int64 creates an option that parses its value as an int64. 36 | func Int64(name rune, value int64, helpvalue ...string) *int64 { 37 | return CommandLine.Int64(name, value, helpvalue...) 38 | } 39 | 40 | func (s *Set) Int64(name rune, value int64, helpvalue ...string) *int64 { 41 | return s.Int64Long("", name, value, helpvalue...) 42 | } 43 | 44 | func Int64Long(name string, short rune, value int64, helpvalue ...string) *int64 { 45 | return CommandLine.Int64Long(name, short, value, helpvalue...) 46 | } 47 | 48 | func (s *Set) Int64Long(name string, short rune, value int64, helpvalue ...string) *int64 { 49 | s.Int64VarLong(&value, name, short, helpvalue...) 50 | return &value 51 | } 52 | 53 | func Int64Var(p *int64, name rune, helpvalue ...string) Option { 54 | return CommandLine.Int64Var(p, name, helpvalue...) 55 | } 56 | 57 | func (s *Set) Int64Var(p *int64, name rune, helpvalue ...string) Option { 58 | return s.Int64VarLong(p, "", name, helpvalue...) 59 | } 60 | 61 | func Int64VarLong(p *int64, name string, short rune, helpvalue ...string) Option { 62 | return CommandLine.Int64VarLong(p, name, short, helpvalue...) 63 | } 64 | 65 | func (s *Set) Int64VarLong(p *int64, name string, short rune, helpvalue ...string) Option { 66 | return s.VarLong((*int64Value)(p), name, short, helpvalue...) 67 | } 68 | -------------------------------------------------------------------------------- /vendor/github.com/pborman/getopt/list.go: -------------------------------------------------------------------------------- 1 | // Copyright 2013 Google Inc. All rights reserved. 
2 | // Use of this source code is governed by a BSD-style 3 | // license that can be found in the LICENSE file. 4 | 5 | package getopt 6 | 7 | import "strings" 8 | 9 | type listValue []string 10 | 11 | func (s *listValue) Set(value string, opt Option) error { 12 | a := strings.Split(value, ",") 13 | // If this is the first time we are seen then nil out the 14 | // default value. 15 | if opt.Count() <= 1 { 16 | *s = nil 17 | } 18 | *s = append(*s, a...) 19 | return nil 20 | } 21 | 22 | func (s *listValue) String() string { 23 | return strings.Join([]string(*s), ",") 24 | } 25 | 26 | // List creates an option that returns a slice of strings. The parameters 27 | // passed are converted from a comma seperated value list into a slice. 28 | // Subsequent occurrences append to the list. 29 | func List(name rune, helpvalue ...string) *[]string { 30 | return CommandLine.List(name, helpvalue...) 31 | } 32 | 33 | func (s *Set) List(name rune, helpvalue ...string) *[]string { 34 | p := []string{} 35 | s.ListVar(&p, name, helpvalue...) 36 | return &p 37 | } 38 | 39 | func ListLong(name string, short rune, helpvalue ...string) *[]string { 40 | return CommandLine.ListLong(name, short, helpvalue...) 41 | } 42 | 43 | func (s *Set) ListLong(name string, short rune, helpvalue ...string) *[]string { 44 | p := []string{} 45 | s.ListVarLong(&p, name, short, helpvalue...) 46 | return &p 47 | } 48 | 49 | // ListVar creats a list option and places the values in p. If p is pointing 50 | // to a list of values then those are considered the default values. The first 51 | // time name is seen in the options the list will be set to list specified by 52 | // the parameter to the option. Subsequent instances of the option will append 53 | // to the list. 54 | func ListVar(p *[]string, name rune, helpvalue ...string) Option { 55 | return CommandLine.ListVar(p, name, helpvalue...) 56 | } 57 | 58 | func (s *Set) ListVar(p *[]string, name rune, helpvalue ...string) Option { 59 | return s.ListVarLong(p, "", name, helpvalue...) 60 | } 61 | 62 | func ListVarLong(p *[]string, name string, short rune, helpvalue ...string) Option { 63 | return CommandLine.ListVarLong(p, name, short, helpvalue...) 64 | } 65 | 66 | func (s *Set) ListVarLong(p *[]string, name string, short rune, helpvalue ...string) Option { 67 | opt := s.VarLong((*listValue)(p), name, short, helpvalue...) 68 | return opt 69 | } 70 | -------------------------------------------------------------------------------- /vendor/github.com/pborman/getopt/string.go: -------------------------------------------------------------------------------- 1 | // Copyright 2013 Google Inc. All rights reserved. 2 | // Use of this source code is governed by a BSD-style 3 | // license that can be found in the LICENSE file. 4 | 5 | package getopt 6 | 7 | type stringValue string 8 | 9 | func (s *stringValue) Set(value string, opt Option) error { 10 | *s = stringValue(value) 11 | return nil 12 | } 13 | 14 | func (s *stringValue) String() string { 15 | return string(*s) 16 | } 17 | 18 | // String returns a value option that stores is value as a string. The 19 | // initial value of the string is passed in value. 20 | func String(name rune, value string, helpvalue ...string) *string { 21 | return CommandLine.String(name, value, helpvalue...) 22 | } 23 | 24 | func (s *Set) String(name rune, value string, helpvalue ...string) *string { 25 | p := value 26 | s.StringVarLong(&p, "", name, helpvalue...) 
27 | return &p 28 | } 29 | 30 | func StringLong(name string, short rune, value string, helpvalue ...string) *string { 31 | return CommandLine.StringLong(name, short, value, helpvalue...) 32 | } 33 | 34 | func (s *Set) StringLong(name string, short rune, value string, helpvalue ...string) *string { 35 | s.StringVarLong(&value, name, short, helpvalue...) 36 | return &value 37 | } 38 | 39 | func StringVar(p *string, name rune, helpvalue ...string) Option { 40 | return CommandLine.StringVar(p, name, helpvalue...) 41 | } 42 | 43 | func (s *Set) StringVar(p *string, name rune, helpvalue ...string) Option { 44 | return s.VarLong((*stringValue)(p), "", name, helpvalue...) 45 | } 46 | 47 | func StringVarLong(p *string, name string, short rune, helpvalue ...string) Option { 48 | return CommandLine.StringVarLong(p, name, short, helpvalue...) 49 | } 50 | 51 | func (s *Set) StringVarLong(p *string, name string, short rune, helpvalue ...string) Option { 52 | return s.VarLong((*stringValue)(p), name, short, helpvalue...) 53 | } 54 | -------------------------------------------------------------------------------- /vendor/github.com/pborman/getopt/uint.go: -------------------------------------------------------------------------------- 1 | // Copyright 2013 Google Inc. All rights reserved. 2 | // Use of this source code is governed by a BSD-style 3 | // license that can be found in the LICENSE file. 4 | 5 | package getopt 6 | 7 | import ( 8 | "fmt" 9 | "strconv" 10 | ) 11 | 12 | type uintValue uint 13 | 14 | func (i *uintValue) Set(value string, opt Option) error { 15 | v, err := strconv.ParseUint(value, 0, strconv.IntSize) 16 | if err != nil { 17 | if e, ok := err.(*strconv.NumError); ok { 18 | switch e.Err { 19 | case strconv.ErrRange: 20 | err = fmt.Errorf("value out of range: %s", value) 21 | case strconv.ErrSyntax: 22 | err = fmt.Errorf("not a valid number: %s", value) 23 | } 24 | } 25 | return err 26 | } 27 | *i = uintValue(v) 28 | return nil 29 | } 30 | 31 | func (i *uintValue) String() string { 32 | return strconv.FormatUint(uint64(*i), 10) 33 | } 34 | 35 | // Uint creates an option that parses its value as an unsigned integer. 36 | func Uint(name rune, value uint, helpvalue ...string) *uint { 37 | return CommandLine.Uint(name, value, helpvalue...) 38 | } 39 | 40 | func (s *Set) Uint(name rune, value uint, helpvalue ...string) *uint { 41 | return s.UintLong("", name, value, helpvalue...) 42 | } 43 | 44 | func UintLong(name string, short rune, value uint, helpvalue ...string) *uint { 45 | return CommandLine.UintLong(name, short, value, helpvalue...) 46 | } 47 | 48 | func (s *Set) UintLong(name string, short rune, value uint, helpvalue ...string) *uint { 49 | s.UintVarLong(&value, name, short, helpvalue...) 50 | return &value 51 | } 52 | 53 | func UintVar(p *uint, name rune, helpvalue ...string) Option { 54 | return CommandLine.UintVar(p, name, helpvalue...) 55 | } 56 | 57 | func (s *Set) UintVar(p *uint, name rune, helpvalue ...string) Option { 58 | return s.UintVarLong(p, "", name, helpvalue...) 59 | } 60 | 61 | func UintVarLong(p *uint, name string, short rune, helpvalue ...string) Option { 62 | return CommandLine.UintVarLong(p, name, short, helpvalue...) 63 | } 64 | 65 | func (s *Set) UintVarLong(p *uint, name string, short rune, helpvalue ...string) Option { 66 | return s.VarLong((*uintValue)(p), name, short, helpvalue...) 
67 | } 68 | -------------------------------------------------------------------------------- /vendor/github.com/pborman/getopt/uint16.go: -------------------------------------------------------------------------------- 1 | // Copyright 2013 Google Inc. All rights reserved. 2 | // Use of this source code is governed by a BSD-style 3 | // license that can be found in the LICENSE file. 4 | 5 | package getopt 6 | 7 | import ( 8 | "fmt" 9 | "strconv" 10 | ) 11 | 12 | type uint16Value uint16 13 | 14 | func (i *uint16Value) Set(value string, opt Option) error { 15 | v, err := strconv.ParseUint(value, 0, 16) 16 | if err != nil { 17 | if e, ok := err.(*strconv.NumError); ok { 18 | switch e.Err { 19 | case strconv.ErrRange: 20 | err = fmt.Errorf("value out of range: %s", value) 21 | case strconv.ErrSyntax: 22 | err = fmt.Errorf("not a valid number: %s", value) 23 | } 24 | } 25 | return err 26 | } 27 | *i = uint16Value(v) 28 | return nil 29 | } 30 | 31 | func (i *uint16Value) String() string { 32 | return strconv.FormatUint(uint64(*i), 10) 33 | } 34 | 35 | // Uint16 creates an option that parses its value as an uint16. 36 | func Uint16(name rune, value uint16, helpvalue ...string) *uint16 { 37 | return CommandLine.Uint16(name, value, helpvalue...) 38 | } 39 | 40 | func (s *Set) Uint16(name rune, value uint16, helpvalue ...string) *uint16 { 41 | return s.Uint16Long("", name, value, helpvalue...) 42 | } 43 | 44 | func Uint16Long(name string, short rune, value uint16, helpvalue ...string) *uint16 { 45 | return CommandLine.Uint16Long(name, short, value, helpvalue...) 46 | } 47 | 48 | func (s *Set) Uint16Long(name string, short rune, value uint16, helpvalue ...string) *uint16 { 49 | s.Uint16VarLong(&value, name, short, helpvalue...) 50 | return &value 51 | } 52 | 53 | func Uint16Var(p *uint16, name rune, helpvalue ...string) Option { 54 | return CommandLine.Uint16Var(p, name, helpvalue...) 55 | } 56 | 57 | func (s *Set) Uint16Var(p *uint16, name rune, helpvalue ...string) Option { 58 | return s.Uint16VarLong(p, "", name, helpvalue...) 59 | } 60 | 61 | func Uint16VarLong(p *uint16, name string, short rune, helpvalue ...string) Option { 62 | return CommandLine.Uint16VarLong(p, name, short, helpvalue...) 63 | } 64 | 65 | func (s *Set) Uint16VarLong(p *uint16, name string, short rune, helpvalue ...string) Option { 66 | return s.VarLong((*uint16Value)(p), name, short, helpvalue...) 67 | } 68 | -------------------------------------------------------------------------------- /vendor/github.com/pborman/getopt/uint32.go: -------------------------------------------------------------------------------- 1 | // Copyright 2013 Google Inc. All rights reserved. 2 | // Use of this source code is governed by a BSD-style 3 | // license that can be found in the LICENSE file. 4 | 5 | package getopt 6 | 7 | import ( 8 | "fmt" 9 | "strconv" 10 | ) 11 | 12 | type uint32Value uint32 13 | 14 | func (i *uint32Value) Set(value string, opt Option) error { 15 | v, err := strconv.ParseUint(value, 0, 32) 16 | if err != nil { 17 | if e, ok := err.(*strconv.NumError); ok { 18 | switch e.Err { 19 | case strconv.ErrRange: 20 | err = fmt.Errorf("value out of range: %s", value) 21 | case strconv.ErrSyntax: 22 | err = fmt.Errorf("not a valid number: %s", value) 23 | } 24 | } 25 | return err 26 | } 27 | *i = uint32Value(v) 28 | return nil 29 | } 30 | 31 | func (i *uint32Value) String() string { 32 | return strconv.FormatUint(uint64(*i), 10) 33 | } 34 | 35 | // Uint32 creates an option that parses its value as an uint32. 
36 | func Uint32(name rune, value uint32, helpvalue ...string) *uint32 { 37 | return CommandLine.Uint32(name, value, helpvalue...) 38 | } 39 | 40 | func (s *Set) Uint32(name rune, value uint32, helpvalue ...string) *uint32 { 41 | return s.Uint32Long("", name, value, helpvalue...) 42 | } 43 | 44 | func Uint32Long(name string, short rune, value uint32, helpvalue ...string) *uint32 { 45 | return CommandLine.Uint32Long(name, short, value, helpvalue...) 46 | } 47 | 48 | func (s *Set) Uint32Long(name string, short rune, value uint32, helpvalue ...string) *uint32 { 49 | s.Uint32VarLong(&value, name, short, helpvalue...) 50 | return &value 51 | } 52 | 53 | func Uint32Var(p *uint32, name rune, helpvalue ...string) Option { 54 | return CommandLine.Uint32Var(p, name, helpvalue...) 55 | } 56 | 57 | func (s *Set) Uint32Var(p *uint32, name rune, helpvalue ...string) Option { 58 | return s.Uint32VarLong(p, "", name, helpvalue...) 59 | } 60 | 61 | func Uint32VarLong(p *uint32, name string, short rune, helpvalue ...string) Option { 62 | return CommandLine.Uint32VarLong(p, name, short, helpvalue...) 63 | } 64 | 65 | func (s *Set) Uint32VarLong(p *uint32, name string, short rune, helpvalue ...string) Option { 66 | return s.VarLong((*uint32Value)(p), name, short, helpvalue...) 67 | } 68 | -------------------------------------------------------------------------------- /vendor/github.com/pborman/getopt/uint64.go: -------------------------------------------------------------------------------- 1 | // Copyright 2013 Google Inc. All rights reserved. 2 | // Use of this source code is governed by a BSD-style 3 | // license that can be found in the LICENSE file. 4 | 5 | package getopt 6 | 7 | import ( 8 | "fmt" 9 | "strconv" 10 | ) 11 | 12 | type uint64Value uint64 13 | 14 | func (i *uint64Value) Set(value string, opt Option) error { 15 | v, err := strconv.ParseUint(value, 0, 64) 16 | if err != nil { 17 | if e, ok := err.(*strconv.NumError); ok { 18 | switch e.Err { 19 | case strconv.ErrRange: 20 | err = fmt.Errorf("value out of range: %s", value) 21 | case strconv.ErrSyntax: 22 | err = fmt.Errorf("not a valid number: %s", value) 23 | } 24 | } 25 | return err 26 | } 27 | *i = uint64Value(v) 28 | return nil 29 | } 30 | 31 | func (i *uint64Value) String() string { 32 | return strconv.FormatUint(uint64(*i), 10) 33 | } 34 | 35 | // Uint64 creates an option that parses its value as a uint64. 36 | func Uint64(name rune, value uint64, helpvalue ...string) *uint64 { 37 | return CommandLine.Uint64(name, value, helpvalue...) 38 | } 39 | 40 | func (s *Set) Uint64(name rune, value uint64, helpvalue ...string) *uint64 { 41 | return s.Uint64Long("", name, value, helpvalue...) 42 | } 43 | 44 | func Uint64Long(name string, short rune, value uint64, helpvalue ...string) *uint64 { 45 | return CommandLine.Uint64Long(name, short, value, helpvalue...) 46 | } 47 | 48 | func (s *Set) Uint64Long(name string, short rune, value uint64, helpvalue ...string) *uint64 { 49 | s.Uint64VarLong(&value, name, short, helpvalue...) 50 | return &value 51 | } 52 | 53 | func Uint64Var(p *uint64, name rune, helpvalue ...string) Option { 54 | return CommandLine.Uint64Var(p, name, helpvalue...) 55 | } 56 | 57 | func (s *Set) Uint64Var(p *uint64, name rune, helpvalue ...string) Option { 58 | return s.Uint64VarLong(p, "", name, helpvalue...) 59 | } 60 | 61 | func Uint64VarLong(p *uint64, name string, short rune, helpvalue ...string) Option { 62 | return CommandLine.Uint64VarLong(p, name, short, helpvalue...) 
63 | } 64 | 65 | func (s *Set) Uint64VarLong(p *uint64, name string, short rune, helpvalue ...string) Option { 66 | return s.VarLong((*uint64Value)(p), name, short, helpvalue...) 67 | } 68 | -------------------------------------------------------------------------------- /vendor/github.com/pborman/getopt/var.go: -------------------------------------------------------------------------------- 1 | // Copyright 2013 Google Inc. All rights reserved. 2 | // Use of this source code is governed by a BSD-style 3 | // license that can be found in the LICENSE file. 4 | 5 | package getopt 6 | 7 | import ( 8 | "fmt" 9 | "runtime" 10 | ) 11 | 12 | // Value is the interface to the dynamic value stored in a flag. (The default 13 | // value is represented as a string.) Set is passed the string to set the 14 | // value to as well as the Option that is being processed. 15 | type Value interface { 16 | Set(string, Option) error 17 | String() string 18 | } 19 | 20 | // Var creates an option of the specified name. The type and value of the option 21 | // are represented by the first argument, of type Value, which typically holds a 22 | // user-defined implementation of Value. All options are ultimately created 23 | // as a Var. 24 | func Var(p Value, name rune, helpvalue ...string) Option { 25 | return CommandLine.VarLong(p, "", name, helpvalue...) 26 | } 27 | 28 | func VarLong(p Value, name string, short rune, helpvalue ...string) Option { 29 | return CommandLine.VarLong(p, name, short, helpvalue...) 30 | } 31 | 32 | func (s *Set) Var(p Value, name rune, helpvalue ...string) Option { 33 | return s.VarLong(p, "", name, helpvalue...) 34 | } 35 | 36 | func (s *Set) VarLong(p Value, name string, short rune, helpvalue ...string) Option { 37 | opt := &option{ 38 | short: short, 39 | long: name, 40 | value: p, 41 | defval: p.String(), 42 | } 43 | 44 | switch len(helpvalue) { 45 | case 2: 46 | opt.name = helpvalue[1] 47 | fallthrough 48 | case 1: 49 | opt.help = helpvalue[0] 50 | case 0: 51 | default: 52 | panic("Too many strings for String helpvalue") 53 | } 54 | if _, file, line, ok := runtime.Caller(1); ok { 55 | opt.where = fmt.Sprintf("%s:%d", file, line) 56 | } 57 | if opt.short == 0 && opt.long == "" { 58 | fmt.Fprintf(stderr, opt.where+": no short or long option given") 59 | exit(1) 60 | } 61 | s.AddOption(opt) 62 | return opt 63 | } 64 | -------------------------------------------------------------------------------- /vendor/github.com/pmezard/go-difflib/LICENSE: -------------------------------------------------------------------------------- 1 | Copyright (c) 2013, Patrick Mezard 2 | All rights reserved. 3 | 4 | Redistribution and use in source and binary forms, with or without 5 | modification, are permitted provided that the following conditions are 6 | met: 7 | 8 | Redistributions of source code must retain the above copyright 9 | notice, this list of conditions and the following disclaimer. 10 | Redistributions in binary form must reproduce the above copyright 11 | notice, this list of conditions and the following disclaimer in the 12 | documentation and/or other materials provided with the distribution. 13 | The names of its contributors may not be used to endorse or promote 14 | products derived from this software without specific prior written 15 | permission. 
16 | 17 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS 18 | IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED 19 | TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A 20 | PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 21 | HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 22 | SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED 23 | TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR 24 | PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF 25 | LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING 26 | NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS 27 | SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 28 | -------------------------------------------------------------------------------- /vendor/github.com/stretchr/testify/LICENSE: -------------------------------------------------------------------------------- 1 | Copyright (c) 2012 - 2013 Mat Ryer and Tyler Bunnell 2 | 3 | Please consider promoting this project if you find it useful. 4 | 5 | Permission is hereby granted, free of charge, to any person 6 | obtaining a copy of this software and associated documentation 7 | files (the "Software"), to deal in the Software without restriction, 8 | including without limitation the rights to use, copy, modify, merge, 9 | publish, distribute, sublicense, and/or sell copies of the Software, 10 | and to permit persons to whom the Software is furnished to do so, 11 | subject to the following conditions: 12 | 13 | The above copyright notice and this permission notice shall be included 14 | in all copies or substantial portions of the Software. 15 | 16 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, 17 | EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES 18 | OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 19 | IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, 20 | DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT 21 | OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE 22 | OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 23 | -------------------------------------------------------------------------------- /vendor/github.com/stretchr/testify/assert/assertion_format.go.tmpl: -------------------------------------------------------------------------------- 1 | {{.CommentFormat}} 2 | func {{.DocInfo.Name}}f(t TestingT, {{.ParamsFormat}}) bool { 3 | return {{.DocInfo.Name}}(t, {{.ForwardedParamsFormat}}) 4 | } 5 | -------------------------------------------------------------------------------- /vendor/github.com/stretchr/testify/assert/assertion_forward.go.tmpl: -------------------------------------------------------------------------------- 1 | {{.CommentWithoutT "a"}} 2 | func (a *Assertions) {{.DocInfo.Name}}({{.Params}}) bool { 3 | return {{.DocInfo.Name}}(a.t, {{.ForwardedParams}}) 4 | } 5 | -------------------------------------------------------------------------------- /vendor/github.com/stretchr/testify/assert/doc.go: -------------------------------------------------------------------------------- 1 | // Package assert provides a set of comprehensive testing tools for use with the normal Go testing system. 
2 | // 3 | // Example Usage 4 | // 5 | // The following is a complete example using assert in a standard test function: 6 | // import ( 7 | // "testing" 8 | // "github.com/stretchr/testify/assert" 9 | // ) 10 | // 11 | // func TestSomething(t *testing.T) { 12 | // 13 | // var a string = "Hello" 14 | // var b string = "Hello" 15 | // 16 | // assert.Equal(t, a, b, "The two words should be the same.") 17 | // 18 | // } 19 | // 20 | // if you assert many times, use the format below: 21 | // 22 | // import ( 23 | // "testing" 24 | // "github.com/stretchr/testify/assert" 25 | // ) 26 | // 27 | // func TestSomething(t *testing.T) { 28 | // assert := assert.New(t) 29 | // 30 | // var a string = "Hello" 31 | // var b string = "Hello" 32 | // 33 | // assert.Equal(a, b, "The two words should be the same.") 34 | // } 35 | // 36 | // Assertions 37 | // 38 | // Assertions allow you to easily write test code, and are global funcs in the `assert` package. 39 | // All assertion functions take, as the first argument, the `*testing.T` object provided by the 40 | // testing framework. This allows the assertion funcs to write the failings and other details to 41 | // the correct place. 42 | // 43 | // Every assertion function also takes an optional string message as the final argument, 44 | // allowing custom error messages to be appended to the message the assertion method outputs. 45 | package assert 46 | -------------------------------------------------------------------------------- /vendor/github.com/stretchr/testify/assert/errors.go: -------------------------------------------------------------------------------- 1 | package assert 2 | 3 | import ( 4 | "errors" 5 | ) 6 | 7 | // AnError is an error instance useful for testing. If the code does not care 8 | // about error specifics, and only needs to return the error for example, this 9 | // error should be used to make the test code more readable. 10 | var AnError = errors.New("assert.AnError general error for testing") 11 | -------------------------------------------------------------------------------- /vendor/github.com/stretchr/testify/assert/forward_assertions.go: -------------------------------------------------------------------------------- 1 | package assert 2 | 3 | // Assertions provides assertion methods around the 4 | // TestingT interface. 5 | type Assertions struct { 6 | t TestingT 7 | } 8 | 9 | // New makes a new Assertions object for the specified TestingT. 10 | func New(t TestingT) *Assertions { 11 | return &Assertions{ 12 | t: t, 13 | } 14 | } 15 | 16 | //go:generate go run ../_codegen/main.go -output-package=assert -template=assertion_forward.go.tmpl -include-format-funcs 17 | -------------------------------------------------------------------------------- /vendor/github.com/stretchr/testify/require/doc.go: -------------------------------------------------------------------------------- 1 | // Package require implements the same assertions as the `assert` package but 2 | // stops test execution when a test fails. 
3 | // 4 | // Example Usage 5 | // 6 | // The following is a complete example using require in a standard test function: 7 | // import ( 8 | // "testing" 9 | // "github.com/stretchr/testify/require" 10 | // ) 11 | // 12 | // func TestSomething(t *testing.T) { 13 | // 14 | // var a string = "Hello" 15 | // var b string = "Hello" 16 | // 17 | // require.Equal(t, a, b, "The two words should be the same.") 18 | // 19 | // } 20 | // 21 | // Assertions 22 | // 23 | // The `require` package has the same global functions as the `assert` package, 24 | // but instead of returning a boolean result they call `t.FailNow()`. 25 | // 26 | // Every assertion function also takes an optional string message as the final argument, 27 | // allowing custom error messages to be appended to the message the assertion method outputs. 28 | package require 29 | -------------------------------------------------------------------------------- /vendor/github.com/stretchr/testify/require/forward_requirements.go: -------------------------------------------------------------------------------- 1 | package require 2 | 3 | // Assertions provides assertion methods around the 4 | // TestingT interface. 5 | type Assertions struct { 6 | t TestingT 7 | } 8 | 9 | // New makes a new Assertions object for the specified TestingT. 10 | func New(t TestingT) *Assertions { 11 | return &Assertions{ 12 | t: t, 13 | } 14 | } 15 | 16 | //go:generate go run ../_codegen/main.go -output-package=require -template=require_forward.go.tmpl -include-format-funcs 17 | -------------------------------------------------------------------------------- /vendor/github.com/stretchr/testify/require/require.go.tmpl: -------------------------------------------------------------------------------- 1 | {{.Comment}} 2 | func {{.DocInfo.Name}}(t TestingT, {{.Params}}) { 3 | if !assert.{{.DocInfo.Name}}(t, {{.ForwardedParams}}) { 4 | t.FailNow() 5 | } 6 | } 7 | -------------------------------------------------------------------------------- /vendor/github.com/stretchr/testify/require/require_forward.go.tmpl: -------------------------------------------------------------------------------- 1 | {{.CommentWithoutT "a"}} 2 | func (a *Assertions) {{.DocInfo.Name}}({{.Params}}) { 3 | {{.DocInfo.Name}}(a.t, {{.ForwardedParams}}) 4 | } 5 | -------------------------------------------------------------------------------- /vendor/github.com/stretchr/testify/require/requirements.go: -------------------------------------------------------------------------------- 1 | package require 2 | 3 | // TestingT is an interface wrapper around *testing.T 4 | type TestingT interface { 5 | Errorf(format string, args ...interface{}) 6 | FailNow() 7 | } 8 | 9 | //go:generate go run ../_codegen/main.go -output-package=require -template=require.go.tmpl -include-format-funcs 10 | --------------------------------------------------------------------------------
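The vendored getopt files above only define the individual option constructors (Bool, Counter, Enum, List, String and the sized integer variants). For orientation, here is a minimal, illustrative sketch of how a program might wire them together. It is not part of the vendored sources, and it assumes the package-level Parse and Args helpers from getopt.go, which fall outside this excerpt; the option names used are made up for the example.

// Hypothetical usage sketch of the vendored getopt package (not a vendored file).
package main

import (
	"fmt"
	"os"

	"github.com/pborman/getopt"
)

func main() {
	// Flag options: a bool normally takes no value, though the long form
	// accepts one (--verbose=false); a counter increments each time it is
	// seen (-c -c) and can be set explicitly with --count=5.
	verbose := getopt.BoolLong("verbose", 'v', "enable verbose output")
	count := getopt.CounterLong("count", 'c', "repeatable counter")

	// Value options: an enum only accepts one of the listed strings, and a
	// list splits comma-separated values, appending on repeated use.
	format := getopt.EnumLong("format", 'f', []string{"text", "json"}, "output format")
	tags := getopt.ListLong("tags", 't', "comma-separated list of tags")
	name := getopt.StringLong("name", 'n', "world", "name to greet")

	// Parse and Args are assumed from getopt.go, which is not shown above.
	getopt.Parse()

	if *verbose {
		fmt.Fprintf(os.Stderr, "count=%d format=%q tags=%v\n", *count, *format, *tags)
	}
	fmt.Printf("hello %s; remaining arguments: %v\n", *name, getopt.Args())
}

Each constructor returns a pointer that is only populated once Parse runs, which is why the sketch dereferences the options after parsing.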
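Similarly, a short sketch of the assert/require split described in the two doc.go files above: assert records a failure and returns a bool so the test keeps running, while require calls t.FailNow() and stops immediately. This is illustrative only; the specific helpers used (NotEmpty, Len) are standard testify assertions that do not appear in this excerpt.

// Hypothetical test sketch contrasting assert and require (not a vendored file).
package example_test

import (
	"testing"

	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
)

func TestGreeting(t *testing.T) {
	greeting := "Hello"

	// require stops the test immediately if the check fails, so later
	// assertions can rely on this precondition holding.
	require.NotEmpty(t, greeting, "greeting must not be empty")

	// assert records any failure but lets the remaining checks execute.
	a := assert.New(t)
	a.Equal("Hello", greeting, "the two words should be the same")
	a.Len(greeting, 5)
}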