├── .gitignore ├── Cargo.lock ├── Cargo.toml ├── LICENSE ├── README.md ├── protocolpb ├── Cargo.toml ├── build.rs ├── proto │ ├── hadoop │ │ ├── GenericRefreshProtocol.proto │ │ ├── GetUserMappingsProtocol.proto │ │ ├── HAServiceProtocol.proto │ │ ├── IpcConnectionContext.proto │ │ ├── ProtobufRpcEngine.proto │ │ ├── ProtocolInfo.proto │ │ ├── RefreshAuthorizationPolicyProtocol.proto │ │ ├── RefreshCallQueueProtocol.proto │ │ ├── RefreshUserMappingsProtocol.proto │ │ ├── RpcHeader.proto │ │ ├── Security.proto │ │ ├── TraceAdmin.proto │ │ └── ZKFCProtocol.proto │ └── hdfs │ │ ├── ClientDatanodeProtocol.proto │ │ ├── ClientNamenodeProtocol.proto │ │ ├── DatanodeProtocol.proto │ │ ├── HAZKInfo.proto │ │ ├── InterDatanodeProtocol.proto │ │ ├── JournalProtocol.proto │ │ ├── NamenodeProtocol.proto │ │ ├── QJournalProtocol.proto │ │ ├── acl.proto │ │ ├── datatransfer.proto │ │ ├── encryption.proto │ │ ├── fsimage.proto │ │ ├── hdfs.proto │ │ ├── inotify.proto │ │ └── xattr.proto └── src │ ├── lib.rs │ └── proto │ ├── hadoop │ ├── GenericRefreshProtocol.rs │ ├── GetUserMappingsProtocol.rs │ ├── HAServiceProtocol.rs │ ├── IpcConnectionContext.rs │ ├── ProtobufRpcEngine.rs │ ├── ProtocolInfo.rs │ ├── RefreshAuthorizationPolicyProtocol.rs │ ├── RefreshCallQueueProtocol.rs │ ├── RefreshUserMappingsProtocol.rs │ ├── RpcHeader.rs │ ├── Security.rs │ ├── TraceAdmin.rs │ └── ZKFCProtocol.rs │ ├── hdfs │ ├── ClientDatanodeProtocol.rs │ ├── ClientNamenodeProtocol.rs │ ├── DatanodeProtocol.rs │ ├── HAZKInfo.rs │ ├── InterDatanodeProtocol.rs │ ├── JournalProtocol.rs │ ├── NamenodeProtocol.rs │ ├── QJournalProtocol.rs │ ├── acl.rs │ ├── datatransfer.rs │ ├── encryption.rs │ ├── fsimage.rs │ ├── hdfs.rs │ ├── inotify.rs │ └── xattr.rs │ └── mod.rs ├── rhdfs-cmd ├── Cargo.toml └── src │ ├── main.rs │ └── util.rs └── rhdfs ├── Cargo.toml └── src ├── cmdx.rs ├── codec_tools.rs ├── config.rs ├── dt ├── checksum.rs ├── codec.rs ├── mod.rs ├── packet.rs ├── proto.rs ├── read_streamer.rs └── write_streamer.rs ├── error.rs ├── hdfs.rs ├── lib.rs ├── nn ├── codec.rs ├── mod.rs └── proto.rs ├── op ├── mod.rs └── read_listing.rs ├── proto_tools.rs ├── protobuf_api.rs ├── result.rs ├── types.rs └── util.rs /.gitignore: -------------------------------------------------------------------------------- 1 | .idea 2 | .vscode 3 | /target/ 4 | **/*.rs.bk 5 | -------------------------------------------------------------------------------- /Cargo.toml: -------------------------------------------------------------------------------- 1 | [workspace] 2 | members=["protocolpb","rhdfs","rhdfs-cmd"] 3 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Apache License 2 | Version 2.0, January 2004 3 | http://www.apache.org/licenses/ 4 | 5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 6 | 7 | 1. Definitions. 8 | 9 | "License" shall mean the terms and conditions for use, reproduction, 10 | and distribution as defined by Sections 1 through 9 of this document. 11 | 12 | "Licensor" shall mean the copyright owner or entity authorized by 13 | the copyright owner that is granting the License. 14 | 15 | "Legal Entity" shall mean the union of the acting entity and all 16 | other entities that control, are controlled by, or are under common 17 | control with that entity. 
For the purposes of this definition, 18 | "control" means (i) the power, direct or indirect, to cause the 19 | direction or management of such entity, whether by contract or 20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 21 | outstanding shares, or (iii) beneficial ownership of such entity. 22 | 23 | "You" (or "Your") shall mean an individual or Legal Entity 24 | exercising permissions granted by this License. 25 | 26 | "Source" form shall mean the preferred form for making modifications, 27 | including but not limited to software source code, documentation 28 | source, and configuration files. 29 | 30 | "Object" form shall mean any form resulting from mechanical 31 | transformation or translation of a Source form, including but 32 | not limited to compiled object code, generated documentation, 33 | and conversions to other media types. 34 | 35 | "Work" shall mean the work of authorship, whether in Source or 36 | Object form, made available under the License, as indicated by a 37 | copyright notice that is included in or attached to the work 38 | (an example is provided in the Appendix below). 39 | 40 | "Derivative Works" shall mean any work, whether in Source or Object 41 | form, that is based on (or derived from) the Work and for which the 42 | editorial revisions, annotations, elaborations, or other modifications 43 | represent, as a whole, an original work of authorship. For the purposes 44 | of this License, Derivative Works shall not include works that remain 45 | separable from, or merely link (or bind by name) to the interfaces of, 46 | the Work and Derivative Works thereof. 47 | 48 | "Contribution" shall mean any work of authorship, including 49 | the original version of the Work and any modifications or additions 50 | to that Work or Derivative Works thereof, that is intentionally 51 | submitted to Licensor for inclusion in the Work by the copyright owner 52 | or by an individual or Legal Entity authorized to submit on behalf of 53 | the copyright owner. For the purposes of this definition, "submitted" 54 | means any form of electronic, verbal, or written communication sent 55 | to the Licensor or its representatives, including but not limited to 56 | communication on electronic mailing lists, source code control systems, 57 | and issue tracking systems that are managed by, or on behalf of, the 58 | Licensor for the purpose of discussing and improving the Work, but 59 | excluding communication that is conspicuously marked or otherwise 60 | designated in writing by the copyright owner as "Not a Contribution." 61 | 62 | "Contributor" shall mean Licensor and any individual or Legal Entity 63 | on behalf of whom a Contribution has been received by Licensor and 64 | subsequently incorporated within the Work. 65 | 66 | 2. Grant of Copyright License. Subject to the terms and conditions of 67 | this License, each Contributor hereby grants to You a perpetual, 68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 69 | copyright license to reproduce, prepare Derivative Works of, 70 | publicly display, publicly perform, sublicense, and distribute the 71 | Work and such Derivative Works in Source or Object form. 72 | 73 | 3. Grant of Patent License. 
Subject to the terms and conditions of 74 | this License, each Contributor hereby grants to You a perpetual, 75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 76 | (except as stated in this section) patent license to make, have made, 77 | use, offer to sell, sell, import, and otherwise transfer the Work, 78 | where such license applies only to those patent claims licensable 79 | by such Contributor that are necessarily infringed by their 80 | Contribution(s) alone or by combination of their Contribution(s) 81 | with the Work to which such Contribution(s) was submitted. If You 82 | institute patent litigation against any entity (including a 83 | cross-claim or counterclaim in a lawsuit) alleging that the Work 84 | or a Contribution incorporated within the Work constitutes direct 85 | or contributory patent infringement, then any patent licenses 86 | granted to You under this License for that Work shall terminate 87 | as of the date such litigation is filed. 88 | 89 | 4. Redistribution. You may reproduce and distribute copies of the 90 | Work or Derivative Works thereof in any medium, with or without 91 | modifications, and in Source or Object form, provided that You 92 | meet the following conditions: 93 | 94 | (a) You must give any other recipients of the Work or 95 | Derivative Works a copy of this License; and 96 | 97 | (b) You must cause any modified files to carry prominent notices 98 | stating that You changed the files; and 99 | 100 | (c) You must retain, in the Source form of any Derivative Works 101 | that You distribute, all copyright, patent, trademark, and 102 | attribution notices from the Source form of the Work, 103 | excluding those notices that do not pertain to any part of 104 | the Derivative Works; and 105 | 106 | (d) If the Work includes a "NOTICE" text file as part of its 107 | distribution, then any Derivative Works that You distribute must 108 | include a readable copy of the attribution notices contained 109 | within such NOTICE file, excluding those notices that do not 110 | pertain to any part of the Derivative Works, in at least one 111 | of the following places: within a NOTICE text file distributed 112 | as part of the Derivative Works; within the Source form or 113 | documentation, if provided along with the Derivative Works; or, 114 | within a display generated by the Derivative Works, if and 115 | wherever such third-party notices normally appear. The contents 116 | of the NOTICE file are for informational purposes only and 117 | do not modify the License. You may add Your own attribution 118 | notices within Derivative Works that You distribute, alongside 119 | or as an addendum to the NOTICE text from the Work, provided 120 | that such additional attribution notices cannot be construed 121 | as modifying the License. 122 | 123 | You may add Your own copyright statement to Your modifications and 124 | may provide additional or different license terms and conditions 125 | for use, reproduction, or distribution of Your modifications, or 126 | for any such Derivative Works as a whole, provided Your use, 127 | reproduction, and distribution of the Work otherwise complies with 128 | the conditions stated in this License. 129 | 130 | 5. Submission of Contributions. Unless You explicitly state otherwise, 131 | any Contribution intentionally submitted for inclusion in the Work 132 | by You to the Licensor shall be under the terms and conditions of 133 | this License, without any additional terms or conditions. 
134 | Notwithstanding the above, nothing herein shall supersede or modify 135 | the terms of any separate license agreement you may have executed 136 | with Licensor regarding such Contributions. 137 | 138 | 6. Trademarks. This License does not grant permission to use the trade 139 | names, trademarks, service marks, or product names of the Licensor, 140 | except as required for reasonable and customary use in describing the 141 | origin of the Work and reproducing the content of the NOTICE file. 142 | 143 | 7. Disclaimer of Warranty. Unless required by applicable law or 144 | agreed to in writing, Licensor provides the Work (and each 145 | Contributor provides its Contributions) on an "AS IS" BASIS, 146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 147 | implied, including, without limitation, any warranties or conditions 148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 149 | PARTICULAR PURPOSE. You are solely responsible for determining the 150 | appropriateness of using or redistributing the Work and assume any 151 | risks associated with Your exercise of permissions under this License. 152 | 153 | 8. Limitation of Liability. In no event and under no legal theory, 154 | whether in tort (including negligence), contract, or otherwise, 155 | unless required by applicable law (such as deliberate and grossly 156 | negligent acts) or agreed to in writing, shall any Contributor be 157 | liable to You for damages, including any direct, indirect, special, 158 | incidental, or consequential damages of any character arising as a 159 | result of this License or out of the use or inability to use the 160 | Work (including but not limited to damages for loss of goodwill, 161 | work stoppage, computer failure or malfunction, or any and all 162 | other commercial damages or losses), even if such Contributor 163 | has been advised of the possibility of such damages. 164 | 165 | 9. Accepting Warranty or Additional Liability. While redistributing 166 | the Work or Derivative Works thereof, You may choose to offer, 167 | and charge a fee for, acceptance of support, warranty, indemnity, 168 | or other liability obligations and/or rights consistent with this 169 | License. However, in accepting such obligations, You may act only 170 | on Your own behalf and on Your sole responsibility, not on behalf 171 | of any other Contributor, and only if You agree to indemnify, 172 | defend, and hold each Contributor harmless for any liability 173 | incurred by, or claims asserted against, such Contributor by reason 174 | of your accepting any such warranty or additional liability. 175 | 176 | END OF TERMS AND CONDITIONS 177 | 178 | APPENDIX: How to apply the Apache License to your work. 179 | 180 | To apply the Apache License to your work, attach the following 181 | boilerplate notice, with the fields enclosed by brackets "[]" 182 | replaced with your own identifying information. (Don't include 183 | the brackets!) The text should be enclosed in the appropriate 184 | comment syntax for the file format. We also recommend that a 185 | file or class name and description of purpose be included on the 186 | same "printed page" as the copyright notice for easier 187 | identification within third-party archives. 188 | 189 | Copyright [yyyy] [name of copyright owner] 190 | 191 | Licensed under the Apache License, Version 2.0 (the "License"); 192 | you may not use this file except in compliance with the License. 
193 | You may obtain a copy of the License at 194 | 195 | http://www.apache.org/licenses/LICENSE-2.0 196 | 197 | Unless required by applicable law or agreed to in writing, software 198 | distributed under the License is distributed on an "AS IS" BASIS, 199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 200 | See the License for the specific language governing permissions and 201 | limitations under the License. 202 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # rust-hdfs-native 2 | 3 | Native Rust HDFS client library. Talks directly to Hadoop NameNodes and DataNodes (NN/DNs) over Hadoop RPC. 4 | 5 | ## Building with the `protobuf` crate 6 | 7 | Prerequisite: download the protoc compiler v2.5.0, e.g. from `https://repo1.maven.org/maven2/com/google/protobuf/protoc/2.5.0/`. For 64-bit Windows 10, `protoc-2.5.0-windows-x86_64.exe` works. Make the executable available on PATH as `protoc.exe` (Windows) or `protoc` (elsewhere). 8 | 9 | NOTE: this is a work in progress. -------------------------------------------------------------------------------- /protocolpb/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "protocolpb" 3 | version = "0.1.0" 4 | authors = ["Valery Vybornov "] 5 | build = "build.rs" 6 | 7 | [dependencies] 8 | protobuf = "1.4" 9 | 10 | #grpc = "0.2" 11 | #futures = "0.1" 12 | #futures-cpupool = "0.1" 13 | #tls-api = "0.1" 14 | #httpbis = "0.4" 15 | 16 | [build-dependencies] 17 | protoc-rust = "1.4" 18 | #protoc-rust-grpc = "0.2" -------------------------------------------------------------------------------- /protocolpb/build.rs: -------------------------------------------------------------------------------- 1 | /// In order to compile the protobuf sources, define the env var `HDFS_PROTOC_PATH` to point at a directory 2 | /// where the `protoc` executable (`protoc.exe` under Windows) can be found.
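/// For example (hypothetical install locations — adjust to wherever protoc was unpacked), the generated sources can be rebuilt with `HDFS_PROTOC_PATH=/opt/protoc-2.5.0/bin cargo build -p protocolpb` on Unix, or after `set HDFS_PROTOC_PATH=C:\protoc-2.5.0` on Windows. /// When the variable is not set, the code generation below is skipped entirely and the pre-generated sources under `src/proto` are used.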
3 | 4 | fn main() { 5 | extern crate protoc_rust; 6 | 7 | use std::env; 8 | use std::path::PathBuf; 9 | use std::fs; 10 | 11 | if let (Some(path), Some(protoc_path)) = (env::var_os("PATH"), env::var_os("HDFS_PROTOC_PATH")) { 12 | let mut paths = env::split_paths(&path).collect::<Vec<_>>(); 13 | paths.push(PathBuf::from(protoc_path)); 14 | let new_path = env::join_paths(paths).unwrap(); 15 | env::set_var("PATH", &new_path); 16 | 17 | 18 | let hadoop_target_dir = "src/proto/hadoop"; 19 | let hadoop_source_dir = "proto/hadoop"; 20 | fs::create_dir_all(PathBuf::from(hadoop_target_dir)).expect("mkdir hadoop_target_dir"); 21 | protoc_rust::run(protoc_rust::Args { 22 | out_dir: hadoop_target_dir, 23 | input: &[ 24 | "proto/hadoop/GenericRefreshProtocol.proto", 25 | "proto/hadoop/GetUserMappingsProtocol.proto", 26 | "proto/hadoop/HAServiceProtocol.proto", 27 | "proto/hadoop/IpcConnectionContext.proto", 28 | "proto/hadoop/ProtobufRpcEngine.proto", 29 | "proto/hadoop/ProtocolInfo.proto", 30 | "proto/hadoop/RefreshAuthorizationPolicyProtocol.proto", 31 | "proto/hadoop/RefreshCallQueueProtocol.proto", 32 | "proto/hadoop/RefreshUserMappingsProtocol.proto", 33 | "proto/hadoop/RpcHeader.proto", 34 | "proto/hadoop/Security.proto", 35 | "proto/hadoop/TraceAdmin.proto", 36 | "proto/hadoop/ZKFCProtocol.proto", 37 | ], 38 | includes: &[hadoop_source_dir], 39 | }).expect("protoc (hadoop)"); 40 | 41 | let hdfs_target_dir = "src/proto/hdfs"; 42 | let hdfs_source_dir = "proto/hdfs"; 43 | fs::create_dir_all(PathBuf::from(hdfs_target_dir)).expect("mkdir hdfs_target_dir"); 44 | protoc_rust::run(protoc_rust::Args { 45 | out_dir: hdfs_target_dir, 46 | input: &[ 47 | "proto/hdfs/acl.proto", 48 | "proto/hdfs/ClientDatanodeProtocol.proto", 49 | "proto/hdfs/ClientNamenodeProtocol.proto", 50 | "proto/hdfs/DatanodeProtocol.proto", 51 | "proto/hdfs/datatransfer.proto", 52 | "proto/hdfs/encryption.proto", 53 | "proto/hdfs/fsimage.proto", 54 | "proto/hdfs/HAZKInfo.proto", 55 | "proto/hdfs/hdfs.proto", 56 | "proto/hdfs/inotify.proto", 57 | "proto/hdfs/InterDatanodeProtocol.proto", 58 | "proto/hdfs/JournalProtocol.proto", 59 | "proto/hdfs/NamenodeProtocol.proto", 60 | "proto/hdfs/QJournalProtocol.proto", 61 | "proto/hdfs/xattr.proto", 62 | ], 63 | includes: &[hadoop_source_dir, hdfs_source_dir], 64 | }).expect("protoc (hdfs)"); 65 | 66 | } 67 | } 68 | -------------------------------------------------------------------------------- /protocolpb/proto/hadoop/GenericRefreshProtocol.proto: -------------------------------------------------------------------------------- 1 | /** 2 | * Licensed to the Apache Software Foundation (ASF) under one 3 | * or more contributor license agreements. See the NOTICE file 4 | * distributed with this work for additional information 5 | * regarding copyright ownership. The ASF licenses this file 6 | * to you under the Apache License, Version 2.0 (the 7 | * "License"); you may not use this file except in compliance 8 | * with the License. You may obtain a copy of the License at 9 | * 10 | * http://www.apache.org/licenses/LICENSE-2.0 11 | * 12 | * Unless required by applicable law or agreed to in writing, software 13 | * distributed under the License is distributed on an "AS IS" BASIS, 14 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 15 | * See the License for the specific language governing permissions and 16 | * limitations under the License. 17 | */ 18 | 19 | /** 20 | * These .proto interfaces are private and stable.
21 | * Please see http://wiki.apache.org/hadoop/Compatibility 22 | * for what changes are allowed for a *stable* .proto interface. 23 | */ 24 | 25 | option java_package = "org.apache.hadoop.ipc.proto"; 26 | option java_outer_classname = "GenericRefreshProtocolProtos"; 27 | option java_generic_services = true; 28 | option java_generate_equals_and_hash = true; 29 | package hadoop.common; 30 | 31 | /** 32 | * Refresh request. 33 | */ 34 | message GenericRefreshRequestProto { 35 | optional string identifier = 1; 36 | repeated string args = 2; 37 | } 38 | 39 | /** 40 | * A single response from a refresh handler. 41 | */ 42 | message GenericRefreshResponseProto { 43 | optional int32 exitStatus = 1; // unix exit status to return 44 | optional string userMessage = 2; // to be displayed to the user 45 | optional string senderName = 3; // which handler sent this message 46 | } 47 | 48 | /** 49 | * Collection of responses from zero or more handlers. 50 | */ 51 | message GenericRefreshResponseCollectionProto { 52 | repeated GenericRefreshResponseProto responses = 1; 53 | } 54 | 55 | /** 56 | * Protocol which is used to refresh a user-specified feature. 57 | */ 58 | service GenericRefreshProtocolService { 59 | rpc refresh(GenericRefreshRequestProto) 60 | returns(GenericRefreshResponseCollectionProto); 61 | } 62 | -------------------------------------------------------------------------------- /protocolpb/proto/hadoop/GetUserMappingsProtocol.proto: -------------------------------------------------------------------------------- 1 | /** 2 | * Licensed to the Apache Software Foundation (ASF) under one 3 | * or more contributor license agreements. See the NOTICE file 4 | * distributed with this work for additional information 5 | * regarding copyright ownership. The ASF licenses this file 6 | * to you under the Apache License, Version 2.0 (the 7 | * "License"); you may not use this file except in compliance 8 | * with the License. You may obtain a copy of the License at 9 | * 10 | * http://www.apache.org/licenses/LICENSE-2.0 11 | * 12 | * Unless required by applicable law or agreed to in writing, software 13 | * distributed under the License is distributed on an "AS IS" BASIS, 14 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 15 | * See the License for the specific language governing permissions and 16 | * limitations under the License. 17 | */ 18 | 19 | /** 20 | * These .proto interfaces are private and stable. 21 | * Please see http://wiki.apache.org/hadoop/Compatibility 22 | * for what changes are allowed for a *stable* .proto interface. 23 | */ 24 | 25 | option java_package = "org.apache.hadoop.tools.proto"; 26 | option java_outer_classname = "GetUserMappingsProtocolProtos"; 27 | option java_generic_services = true; 28 | option java_generate_equals_and_hash = true; 29 | package hadoop.common; 30 | 31 | /** 32 | * Get groups for user request. 33 | */ 34 | message GetGroupsForUserRequestProto { 35 | required string user = 1; 36 | } 37 | 38 | /** 39 | * Response for get groups. 40 | */ 41 | message GetGroupsForUserResponseProto { 42 | repeated string groups = 1; 43 | } 44 | 45 | 46 | /** 47 | * Protocol which maps users to groups. 48 | */ 49 | service GetUserMappingsProtocolService { 50 | /** 51 | * Get the groups which are mapped to the given user. 
52 | */ 53 | rpc getGroupsForUser(GetGroupsForUserRequestProto) 54 | returns(GetGroupsForUserResponseProto); 55 | } 56 | -------------------------------------------------------------------------------- /protocolpb/proto/hadoop/HAServiceProtocol.proto: -------------------------------------------------------------------------------- 1 | /** 2 | * Licensed to the Apache Software Foundation (ASF) under one 3 | * or more contributor license agreements. See the NOTICE file 4 | * distributed with this work for additional information 5 | * regarding copyright ownership. The ASF licenses this file 6 | * to you under the Apache License, Version 2.0 (the 7 | * "License"); you may not use this file except in compliance 8 | * with the License. You may obtain a copy of the License at 9 | * 10 | * http://www.apache.org/licenses/LICENSE-2.0 11 | * 12 | * Unless required by applicable law or agreed to in writing, software 13 | * distributed under the License is distributed on an "AS IS" BASIS, 14 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 15 | * See the License for the specific language governing permissions and 16 | * limitations under the License. 17 | */ 18 | 19 | /** 20 | * These .proto interfaces are private and stable. 21 | * Please see http://wiki.apache.org/hadoop/Compatibility 22 | * for what changes are allowed for a *stable* .proto interface. 23 | */ 24 | 25 | option java_package = "org.apache.hadoop.ha.proto"; 26 | option java_outer_classname = "HAServiceProtocolProtos"; 27 | option java_generic_services = true; 28 | option java_generate_equals_and_hash = true; 29 | package hadoop.common; 30 | 31 | enum HAServiceStateProto { 32 | INITIALIZING = 0; 33 | ACTIVE = 1; 34 | STANDBY = 2; 35 | } 36 | 37 | enum HARequestSource { 38 | REQUEST_BY_USER = 0; 39 | REQUEST_BY_USER_FORCED = 1; 40 | REQUEST_BY_ZKFC = 2; 41 | } 42 | 43 | message HAStateChangeRequestInfoProto { 44 | required HARequestSource reqSource = 1; 45 | } 46 | 47 | /** 48 | * void request 49 | */ 50 | message MonitorHealthRequestProto { 51 | } 52 | 53 | /** 54 | * void response 55 | */ 56 | message MonitorHealthResponseProto { 57 | } 58 | 59 | /** 60 | * void request 61 | */ 62 | message TransitionToActiveRequestProto { 63 | required HAStateChangeRequestInfoProto reqInfo = 1; 64 | } 65 | 66 | /** 67 | * void response 68 | */ 69 | message TransitionToActiveResponseProto { 70 | } 71 | 72 | /** 73 | * void request 74 | */ 75 | message TransitionToStandbyRequestProto { 76 | required HAStateChangeRequestInfoProto reqInfo = 1; 77 | } 78 | 79 | /** 80 | * void response 81 | */ 82 | message TransitionToStandbyResponseProto { 83 | } 84 | 85 | /** 86 | * void request 87 | */ 88 | message GetServiceStatusRequestProto { 89 | } 90 | 91 | /** 92 | * Returns the state of the service 93 | */ 94 | message GetServiceStatusResponseProto { 95 | required HAServiceStateProto state = 1; 96 | 97 | // If state is STANDBY, indicate whether it is 98 | // ready to become active. 99 | optional bool readyToBecomeActive = 2; 100 | // If not ready to become active, a textual explanation of why not 101 | optional string notReadyReason = 3; 102 | } 103 | 104 | /** 105 | * Protocol interface provides High availability related 106 | * primitives to monitor and failover a service. 107 | * 108 | * For details see o.a.h.ha.HAServiceProtocol. 109 | */ 110 | service HAServiceProtocolService { 111 | /** 112 | * Monitor the health of a service. 
113 | */ 114 | rpc monitorHealth(MonitorHealthRequestProto) 115 | returns(MonitorHealthResponseProto); 116 | 117 | /** 118 | * Request service to transition to active state. 119 | */ 120 | rpc transitionToActive(TransitionToActiveRequestProto) 121 | returns(TransitionToActiveResponseProto); 122 | 123 | /** 124 | * Request service to transition to standby state. 125 | */ 126 | rpc transitionToStandby(TransitionToStandbyRequestProto) 127 | returns(TransitionToStandbyResponseProto); 128 | 129 | /** 130 | * Get the current status of the service. 131 | */ 132 | rpc getServiceStatus(GetServiceStatusRequestProto) 133 | returns(GetServiceStatusResponseProto); 134 | } 135 | -------------------------------------------------------------------------------- /protocolpb/proto/hadoop/IpcConnectionContext.proto: -------------------------------------------------------------------------------- 1 | /** 2 | * Licensed to the Apache Software Foundation (ASF) under one 3 | * or more contributor license agreements. See the NOTICE file 4 | * distributed with this work for additional information 5 | * regarding copyright ownership. The ASF licenses this file 6 | * to you under the Apache License, Version 2.0 (the 7 | * "License"); you may not use this file except in compliance 8 | * with the License. You may obtain a copy of the License at 9 | * 10 | * http://www.apache.org/licenses/LICENSE-2.0 11 | * 12 | * Unless required by applicable law or agreed to in writing, software 13 | * distributed under the License is distributed on an "AS IS" BASIS, 14 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 15 | * See the License for the specific language governing permissions and 16 | * limitations under the License. 17 | */ 18 | 19 | /** 20 | * These .proto interfaces are private and stable. 21 | * Please see http://wiki.apache.org/hadoop/Compatibility 22 | * for what changes are allowed for a *stable* .proto interface. 23 | */ 24 | 25 | option java_package = "org.apache.hadoop.ipc.protobuf"; 26 | option java_outer_classname = "IpcConnectionContextProtos"; 27 | option java_generate_equals_and_hash = true; 28 | package hadoop.common; 29 | 30 | /** 31 | * Spec for UserInformationProto is specified in ProtoUtil#makeIpcConnectionContext 32 | */ 33 | message UserInformationProto { 34 | optional string effectiveUser = 1; 35 | optional string realUser = 2; 36 | } 37 | 38 | /** 39 | * The connection context is sent as part of the connection establishment. 40 | * It establishes the context for ALL Rpc calls within the connection. 41 | */ 42 | message IpcConnectionContextProto { 43 | // UserInfo beyond what is determined as part of security handshake 44 | // at connection time (kerberos, tokens etc). 45 | optional UserInformationProto userInfo = 2; 46 | 47 | // Protocol name for next rpc layer. 48 | // The client created a proxy with this protocol name 49 | optional string protocol = 3; 50 | } 51 | -------------------------------------------------------------------------------- /protocolpb/proto/hadoop/ProtobufRpcEngine.proto: -------------------------------------------------------------------------------- 1 | /** 2 | * Licensed to the Apache Software Foundation (ASF) under one 3 | * or more contributor license agreements. See the NOTICE file 4 | * distributed with this work for additional information 5 | * regarding copyright ownership.
The ASF licenses this file 6 | * to you under the Apache License, Version 2.0 (the 7 | * "License"); you may not use this file except in compliance 8 | * with the License. You may obtain a copy of the License at 9 | * 10 | * http://www.apache.org/licenses/LICENSE-2.0 11 | * 12 | * Unless required by applicable law or agreed to in writing, software 13 | * distributed under the License is distributed on an "AS IS" BASIS, 14 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 15 | * See the License for the specific language governing permissions and 16 | * limitations under the License. 17 | */ 18 | 19 | /** 20 | * These .proto interfaces are private and stable. 21 | * Please see http://wiki.apache.org/hadoop/Compatibility 22 | * for what changes are allowed for a *stable* .proto interface. 23 | */ 24 | 25 | /** 26 | * These are the messages used by Hadoop RPC for the Rpc Engine Protocol Buffer 27 | * to marshal the request and response in the RPC layer. 28 | * The messages are sent in addition to the normal RPC header as 29 | * defined in RpcHeader.proto 30 | */ 31 | option java_package = "org.apache.hadoop.ipc.protobuf"; 32 | option java_outer_classname = "ProtobufRpcEngineProtos"; 33 | option java_generate_equals_and_hash = true; 34 | package hadoop.common; 35 | 36 | /** 37 | * This message is the header for the Protobuf Rpc Engine 38 | * when sending an RPC request from the RPC client to the RPC server. 39 | * The actual request (serialized as protobuf) follows this request. 40 | * 41 | * No special header is needed for the Rpc Response for Protobuf Rpc Engine. 42 | * The normal RPC response header (see RpcHeader.proto) is sufficient. 43 | */ 44 | message RequestHeaderProto { 45 | /** Name of the RPC method */ 46 | required string methodName = 1; 47 | 48 | /** 49 | * RPCs for a particular interface (i.e. protocol) are done using an 50 | * IPC connection that is set up using rpcProxy. 51 | * The rpcProxy has a declared protocol name that is 52 | * sent from client to server at connection time. 53 | * 54 | * Each Rpc call also sends a protocol name 55 | * (called declaringClassProtocolName). This name is usually the same 56 | * as the connection protocol name except in some cases. 57 | * For example, metaProtocols such as ProtocolInfoProto, which get metainfo 58 | * about the protocol, reuse the connection but need to indicate that 59 | * the actual protocol is different (i.e. the protocol is 60 | * ProtocolInfoProto) since they reuse the connection; in this case 61 | * the declaringClassProtocolName field is set to the ProtocolInfoProto 62 | */ 63 | required string declaringClassProtocolName = 2; 64 | 65 | /** protocol version of class declaring the called method */ 66 | required uint64 clientProtocolVersion = 3; 67 | } 68 | -------------------------------------------------------------------------------- /protocolpb/proto/hadoop/ProtocolInfo.proto: -------------------------------------------------------------------------------- 1 | /** 2 | * Licensed to the Apache Software Foundation (ASF) under one 3 | * or more contributor license agreements. See the NOTICE file 4 | * distributed with this work for additional information 5 | * regarding copyright ownership. The ASF licenses this file 6 | * to you under the Apache License, Version 2.0 (the 7 | * "License"); you may not use this file except in compliance 8 | * with the License.
You may obtain a copy of the License at 9 | * 10 | * http://www.apache.org/licenses/LICENSE-2.0 11 | * 12 | * Unless required by applicable law or agreed to in writing, software 13 | * distributed under the License is distributed on an "AS IS" BASIS, 14 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 15 | * See the License for the specific language governing permissions and 16 | * limitations under the License. 17 | */ 18 | 19 | /** 20 | * These .proto interfaces are private and stable. 21 | * Please see http://wiki.apache.org/hadoop/Compatibility 22 | * for what changes are allowed for a *stable* .proto interface. 23 | */ 24 | 25 | option java_package = "org.apache.hadoop.ipc.protobuf"; 26 | option java_outer_classname = "ProtocolInfoProtos"; 27 | option java_generic_services = true; 28 | option java_generate_equals_and_hash = true; 29 | package hadoop.common; 30 | 31 | /** 32 | * Request to get protocol versions for all supported rpc kinds. 33 | */ 34 | message GetProtocolVersionsRequestProto { 35 | required string protocol = 1; // Protocol name 36 | } 37 | 38 | /** 39 | * Protocol version with corresponding rpc kind. 40 | */ 41 | message ProtocolVersionProto { 42 | required string rpcKind = 1; //RPC kind 43 | repeated uint64 versions = 2; //Protocol version corresponding to the rpc kind. 44 | } 45 | 46 | /** 47 | * Get protocol version response. 48 | */ 49 | message GetProtocolVersionsResponseProto { 50 | repeated ProtocolVersionProto protocolVersions = 1; 51 | } 52 | 53 | /** 54 | * Get protocol signature request. 55 | */ 56 | message GetProtocolSignatureRequestProto { 57 | required string protocol = 1; // Protocol name 58 | required string rpcKind = 2; // RPC kind 59 | } 60 | 61 | /** 62 | * Get protocol signature response. 63 | */ 64 | message GetProtocolSignatureResponseProto { 65 | repeated ProtocolSignatureProto protocolSignature = 1; 66 | } 67 | 68 | message ProtocolSignatureProto { 69 | required uint64 version = 1; 70 | repeated uint32 methods = 2; 71 | } 72 | 73 | /** 74 | * Protocol to get information about protocols. 75 | */ 76 | service ProtocolInfoService { 77 | /** 78 | * Return protocol version corresponding to protocol interface for each 79 | * supported rpc kind. 80 | */ 81 | rpc getProtocolVersions(GetProtocolVersionsRequestProto) 82 | returns (GetProtocolVersionsResponseProto); 83 | 84 | /** 85 | * Return protocol version corresponding to protocol interface. 86 | */ 87 | rpc getProtocolSignature(GetProtocolSignatureRequestProto) 88 | returns (GetProtocolSignatureResponseProto); 89 | } 90 | -------------------------------------------------------------------------------- /protocolpb/proto/hadoop/RefreshAuthorizationPolicyProtocol.proto: -------------------------------------------------------------------------------- 1 | /** 2 | * Licensed to the Apache Software Foundation (ASF) under one 3 | * or more contributor license agreements. See the NOTICE file 4 | * distributed with this work for additional information 5 | * regarding copyright ownership. The ASF licenses this file 6 | * to you under the Apache License, Version 2.0 (the 7 | * "License"); you may not use this file except in compliance 8 | * with the License. You may obtain a copy of the License at 9 | * 10 | * http://www.apache.org/licenses/LICENSE-2.0 11 | * 12 | * Unless required by applicable law or agreed to in writing, software 13 | * distributed under the License is distributed on an "AS IS" BASIS, 14 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
15 | * See the License for the specific language governing permissions and 16 | * limitations under the License. 17 | */ 18 | 19 | /** 20 | * These .proto interfaces are private and stable. 21 | * Please see http://wiki.apache.org/hadoop/Compatibility 22 | * for what changes are allowed for a *stable* .proto interface. 23 | */ 24 | 25 | option java_package = "org.apache.hadoop.security.proto"; 26 | option java_outer_classname = "RefreshAuthorizationPolicyProtocolProtos"; 27 | option java_generic_services = true; 28 | option java_generate_equals_and_hash = true; 29 | package hadoop.common; 30 | 31 | /** 32 | * Refresh service acl request. 33 | */ 34 | message RefreshServiceAclRequestProto { 35 | } 36 | 37 | /** 38 | * void response 39 | */ 40 | message RefreshServiceAclResponseProto { 41 | } 42 | 43 | /** 44 | * Protocol which is used to refresh the authorization policy in use currently. 45 | */ 46 | service RefreshAuthorizationPolicyProtocolService { 47 | /** 48 | * Refresh the service-level authorization policy in-effect. 49 | */ 50 | rpc refreshServiceAcl(RefreshServiceAclRequestProto) 51 | returns(RefreshServiceAclResponseProto); 52 | } 53 | -------------------------------------------------------------------------------- /protocolpb/proto/hadoop/RefreshCallQueueProtocol.proto: -------------------------------------------------------------------------------- 1 | /** 2 | * Licensed to the Apache Software Foundation (ASF) under one 3 | * or more contributor license agreements. See the NOTICE file 4 | * distributed with this work for additional information 5 | * regarding copyright ownership. The ASF licenses this file 6 | * to you under the Apache License, Version 2.0 (the 7 | * "License"); you may not use this file except in compliance 8 | * with the License. You may obtain a copy of the License at 9 | * 10 | * http://www.apache.org/licenses/LICENSE-2.0 11 | * 12 | * Unless required by applicable law or agreed to in writing, software 13 | * distributed under the License is distributed on an "AS IS" BASIS, 14 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 15 | * See the License for the specific language governing permissions and 16 | * limitations under the License. 17 | */ 18 | 19 | /** 20 | * These .proto interfaces are private and stable. 21 | * Please see http://wiki.apache.org/hadoop/Compatibility 22 | * for what changes are allowed for a *stable* .proto interface. 23 | */ 24 | 25 | option java_package = "org.apache.hadoop.ipc.proto"; 26 | option java_outer_classname = "RefreshCallQueueProtocolProtos"; 27 | option java_generic_services = true; 28 | option java_generate_equals_and_hash = true; 29 | package hadoop.common; 30 | 31 | /** 32 | * Refresh callqueue request. 33 | */ 34 | message RefreshCallQueueRequestProto { 35 | } 36 | 37 | /** 38 | * void response. 39 | */ 40 | message RefreshCallQueueResponseProto { 41 | } 42 | 43 | /** 44 | * Protocol which is used to refresh the callqueue. 45 | */ 46 | service RefreshCallQueueProtocolService { 47 | /** 48 | * Refresh the callqueue. 49 | */ 50 | rpc refreshCallQueue(RefreshCallQueueRequestProto) 51 | returns(RefreshCallQueueResponseProto); 52 | } 53 | -------------------------------------------------------------------------------- /protocolpb/proto/hadoop/RefreshUserMappingsProtocol.proto: -------------------------------------------------------------------------------- 1 | /** 2 | * Licensed to the Apache Software Foundation (ASF) under one 3 | * or more contributor license agreements. 
See the NOTICE file 4 | * distributed with this work for additional information 5 | * regarding copyright ownership. The ASF licenses this file 6 | * to you under the Apache License, Version 2.0 (the 7 | * "License"); you may not use this file except in compliance 8 | * with the License. You may obtain a copy of the License at 9 | * 10 | * http://www.apache.org/licenses/LICENSE-2.0 11 | * 12 | * Unless required by applicable law or agreed to in writing, software 13 | * distributed under the License is distributed on an "AS IS" BASIS, 14 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 15 | * See the License for the specific language governing permissions and 16 | * limitations under the License. 17 | */ 18 | 19 | /** 20 | * These .proto interfaces are private and stable. 21 | * Please see http://wiki.apache.org/hadoop/Compatibility 22 | * for what changes are allowed for a *stable* .proto interface. 23 | */ 24 | 25 | option java_package = "org.apache.hadoop.security.proto"; 26 | option java_outer_classname = "RefreshUserMappingsProtocolProtos"; 27 | option java_generic_services = true; 28 | option java_generate_equals_and_hash = true; 29 | package hadoop.common; 30 | 31 | /** 32 | * Refresh user to group mappings request. 33 | */ 34 | message RefreshUserToGroupsMappingsRequestProto { 35 | } 36 | 37 | /** 38 | * void response 39 | */ 40 | message RefreshUserToGroupsMappingsResponseProto { 41 | } 42 | 43 | /** 44 | * Refresh superuser configuration request. 45 | */ 46 | message RefreshSuperUserGroupsConfigurationRequestProto { 47 | } 48 | 49 | /** 50 | * void response 51 | */ 52 | message RefreshSuperUserGroupsConfigurationResponseProto { 53 | } 54 | 55 | /** 56 | * Protocol to refresh the user mappings. 57 | */ 58 | service RefreshUserMappingsProtocolService { 59 | /** 60 | * Refresh user to group mappings. 61 | */ 62 | rpc refreshUserToGroupsMappings(RefreshUserToGroupsMappingsRequestProto) 63 | returns(RefreshUserToGroupsMappingsResponseProto); 64 | 65 | /** 66 | * Refresh superuser proxy group list. 67 | */ 68 | rpc refreshSuperUserGroupsConfiguration(RefreshSuperUserGroupsConfigurationRequestProto) 69 | returns(RefreshSuperUserGroupsConfigurationResponseProto); 70 | } 71 | -------------------------------------------------------------------------------- /protocolpb/proto/hadoop/RpcHeader.proto: -------------------------------------------------------------------------------- 1 | /** 2 | * Licensed to the Apache Software Foundation (ASF) under one 3 | * or more contributor license agreements. See the NOTICE file 4 | * distributed with this work for additional information 5 | * regarding copyright ownership. The ASF licenses this file 6 | * to you under the Apache License, Version 2.0 (the 7 | * "License"); you may not use this file except in compliance 8 | * with the License. You may obtain a copy of the License at 9 | * 10 | * http://www.apache.org/licenses/LICENSE-2.0 11 | * 12 | * Unless required by applicable law or agreed to in writing, software 13 | * distributed under the License is distributed on an "AS IS" BASIS, 14 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 15 | * See the License for the specific language governing permissions and 16 | * limitations under the License. 17 | */ 18 | 19 | /** 20 | * These .proto interfaces are private and stable. 21 | * Please see http://wiki.apache.org/hadoop/Compatibility 22 | * for what changes are allowed for a *stable* .proto interface. 
23 | */ 24 | 25 | option java_package = "org.apache.hadoop.ipc.protobuf"; 26 | option java_outer_classname = "RpcHeaderProtos"; 27 | option java_generate_equals_and_hash = true; 28 | package hadoop.common; 29 | 30 | /** 31 | * This is the rpc request header. It is sent with every rpc call. 32 | * 33 | * The format of RPC call is as follows: 34 | * +--------------------------------------------------------------+ 35 | * | Rpc length in bytes (4 bytes int) sum of next two parts | 36 | * +--------------------------------------------------------------+ 37 | * | RpcRequestHeaderProto - serialized delimited ie has len | 38 | * +--------------------------------------------------------------+ 39 | * | RpcRequest The actual rpc request | 40 | * | This request is serialized based on RpcKindProto | 41 | * +--------------------------------------------------------------+ 42 | * 43 | */ 44 | 45 | /** 46 | * RpcKind determines the rpcEngine and the serialization of the rpc request 47 | */ 48 | enum RpcKindProto { 49 | RPC_BUILTIN = 0; // Used for built in calls by tests 50 | RPC_WRITABLE = 1; // Use WritableRpcEngine 51 | RPC_PROTOCOL_BUFFER = 2; // Use ProtobufRpcEngine 52 | } 53 | 54 | 55 | 56 | /** 57 | * Used to pass through the information necessary to continue 58 | * a trace after an RPC is made. All we need is the traceid 59 | * (so we know the overarching trace this message is a part of), and 60 | * the id of the current span when this message was sent, so we know 61 | * what span caused the new span we will create when this message is received. 62 | */ 63 | message RPCTraceInfoProto { 64 | optional int64 traceId = 1; 65 | optional int64 parentId = 2; 66 | } 67 | 68 | message RpcRequestHeaderProto { // the header for the RpcRequest 69 | enum OperationProto { 70 | RPC_FINAL_PACKET = 0; // The final RPC Packet 71 | RPC_CONTINUATION_PACKET = 1; // not implemented yet 72 | RPC_CLOSE_CONNECTION = 2; // close the rpc connection 73 | } 74 | 75 | optional RpcKindProto rpcKind = 1; 76 | optional OperationProto rpcOp = 2; 77 | required sint32 callId = 3; // a sequence number that is sent back in response 78 | required bytes clientId = 4; // Globally unique client ID 79 | // clientId + callId uniquely identifies a request 80 | // retry count, 1 means this is the first retry 81 | optional sint32 retryCount = 5 [default = -1]; 82 | optional RPCTraceInfoProto traceInfo = 6; // tracing info 83 | } 84 | 85 | 86 | 87 | /** 88 | * Rpc Response Header 89 | * +------------------------------------------------------------------+ 90 | * | Rpc total response length in bytes (4 bytes int) | 91 | * | (sum of next two parts) | 92 | * +------------------------------------------------------------------+ 93 | * | RpcResponseHeaderProto - serialized delimited ie has len | 94 | * +------------------------------------------------------------------+ 95 | * | if request is successful: | 96 | * | - RpcResponse - The actual rpc response bytes follow | 97 | * | the response header | 98 | * | This response is serialized based on RpcKindProto | 99 | * | if request fails : | 100 | * | The rpc response header contains the necessary info | 101 | * +------------------------------------------------------------------+ 102 | * 103 | * Note that the rpc response header is also used when connection setup fails. 104 | * I.e. the response looks like an rpc response with a fake callId.
105 | */ 106 | message RpcResponseHeaderProto { 107 | /** 108 | * 109 | * RpcStatus - success or failure 110 | * The responseHeader's errDetail, exceptionClassName and errMsg contain 111 | * further details on the error 112 | **/ 113 | 114 | enum RpcStatusProto { 115 | SUCCESS = 0; // RPC succeeded 116 | ERROR = 1; // RPC error - connection left open for future calls 117 | FATAL = 2; // Fatal error - connection closed 118 | } 119 | 120 | enum RpcErrorCodeProto { 121 | 122 | // Non-fatal Rpc error - connection left open for future rpc calls 123 | ERROR_APPLICATION = 1; // RPC Failed - rpc app threw exception 124 | ERROR_NO_SUCH_METHOD = 2; // Rpc error - no such method 125 | ERROR_NO_SUCH_PROTOCOL = 3; // Rpc error - no such protocol 126 | ERROR_RPC_SERVER = 4; // Rpc error on server side 127 | ERROR_SERIALIZING_RESPONSE = 5; // error serializing response 128 | ERROR_RPC_VERSION_MISMATCH = 6; // Rpc protocol version mismatch 129 | 130 | 131 | // Fatal Server side Rpc error - connection closed 132 | FATAL_UNKNOWN = 10; // unknown Fatal error 133 | FATAL_UNSUPPORTED_SERIALIZATION = 11; // IPC layer serialization type invalid 134 | FATAL_INVALID_RPC_HEADER = 12; // fields of RpcHeader are invalid 135 | FATAL_DESERIALIZING_REQUEST = 13; // could not deserialize rpc request 136 | FATAL_VERSION_MISMATCH = 14; // Ipc Layer version mismatch 137 | FATAL_UNAUTHORIZED = 15; // Auth failed 138 | } 139 | 140 | required uint32 callId = 1; // callId used in Request 141 | required RpcStatusProto status = 2; 142 | optional uint32 serverIpcVersionNum = 3; // Sent if success or fail 143 | optional string exceptionClassName = 4; // if request fails 144 | optional string errorMsg = 5; // if request fails, often contains stack trace 145 | optional RpcErrorCodeProto errorDetail = 6; // in case of error 146 | optional bytes clientId = 7; // Globally unique client ID 147 | optional sint32 retryCount = 8 [default = -1]; 148 | } 149 | 150 | message RpcSaslProto { 151 | enum SaslState { 152 | SUCCESS = 0; 153 | NEGOTIATE = 1; 154 | INITIATE = 2; 155 | CHALLENGE = 3; 156 | RESPONSE = 4; 157 | WRAP = 5; 158 | } 159 | 160 | message SaslAuth { 161 | required string method = 1; 162 | required string mechanism = 2; 163 | optional string protocol = 3; 164 | optional string serverId = 4; 165 | optional bytes challenge = 5; 166 | } 167 | 168 | optional uint32 version = 1; 169 | required SaslState state = 2; 170 | optional bytes token = 3; 171 | repeated SaslAuth auths = 4; 172 | } 173 | -------------------------------------------------------------------------------- /protocolpb/proto/hadoop/Security.proto: -------------------------------------------------------------------------------- 1 | /** 2 | * Licensed to the Apache Software Foundation (ASF) under one 3 | * or more contributor license agreements. See the NOTICE file 4 | * distributed with this work for additional information 5 | * regarding copyright ownership. The ASF licenses this file 6 | * to you under the Apache License, Version 2.0 (the 7 | * "License"); you may not use this file except in compliance 8 | * with the License. You may obtain a copy of the License at 9 | * 10 | * http://www.apache.org/licenses/LICENSE-2.0 11 | * 12 | * Unless required by applicable law or agreed to in writing, software 13 | * distributed under the License is distributed on an "AS IS" BASIS, 14 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 15 | * See the License for the specific language governing permissions and 16 | * limitations under the License.
17 | */ 18 | 19 | /** 20 | * These .proto interfaces are private and stable. 21 | * Please see http://wiki.apache.org/hadoop/Compatibility 22 | * for what changes are allowed for a *stable* .proto interface. 23 | */ 24 | 25 | option java_package = "org.apache.hadoop.security.proto"; 26 | option java_outer_classname = "SecurityProtos"; 27 | option java_generic_services = true; 28 | option java_generate_equals_and_hash = true; 29 | package hadoop.common; 30 | 31 | /** 32 | * Security token identifier 33 | */ 34 | message TokenProto { 35 | required bytes identifier = 1; 36 | required bytes password = 2; 37 | required string kind = 3; 38 | required string service = 4; 39 | } 40 | 41 | message GetDelegationTokenRequestProto { 42 | required string renewer = 1; 43 | } 44 | 45 | message GetDelegationTokenResponseProto { 46 | optional hadoop.common.TokenProto token = 1; 47 | } 48 | 49 | message RenewDelegationTokenRequestProto { 50 | required hadoop.common.TokenProto token = 1; 51 | } 52 | 53 | message RenewDelegationTokenResponseProto { 54 | required uint64 newExpiryTime = 1; 55 | } 56 | 57 | message CancelDelegationTokenRequestProto { 58 | required hadoop.common.TokenProto token = 1; 59 | } 60 | 61 | message CancelDelegationTokenResponseProto { // void response 62 | } 63 | 64 | -------------------------------------------------------------------------------- /protocolpb/proto/hadoop/TraceAdmin.proto: -------------------------------------------------------------------------------- 1 | /** 2 | * Licensed to the Apache Software Foundation (ASF) under one 3 | * or more contributor license agreements. See the NOTICE file 4 | * distributed with this work for additional information 5 | * regarding copyright ownership. The ASF licenses this file 6 | * to you under the Apache License, Version 2.0 (the 7 | * "License"); you may not use this file except in compliance 8 | * with the License. You may obtain a copy of the License at 9 | * 10 | * http://www.apache.org/licenses/LICENSE-2.0 11 | * 12 | * Unless required by applicable law or agreed to in writing, software 13 | * distributed under the License is distributed on an "AS IS" BASIS, 14 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 15 | * See the License for the specific language governing permissions and 16 | * limitations under the License. 17 | */ 18 | 19 | /** 20 | * These .proto interfaces are private and stable. 21 | * Please see http://wiki.apache.org/hadoop/Compatibility 22 | * for what changes are allowed for a *stable* .proto interface. 
23 | */ 24 | 25 | option java_package = "org.apache.hadoop.tracing"; 26 | option java_outer_classname = "TraceAdminPB"; 27 | option java_generic_services = true; 28 | option java_generate_equals_and_hash = true; 29 | package hadoop.common; 30 | 31 | message ListSpanReceiversRequestProto { 32 | } 33 | 34 | message SpanReceiverListInfo { 35 | required int64 id = 1; 36 | required string className = 2; 37 | } 38 | 39 | message ListSpanReceiversResponseProto { 40 | repeated SpanReceiverListInfo descriptions = 1; 41 | } 42 | 43 | message ConfigPair { 44 | required string key = 1; 45 | required string value = 2; 46 | } 47 | 48 | message AddSpanReceiverRequestProto { 49 | required string className = 1; 50 | repeated ConfigPair config = 2; 51 | } 52 | 53 | message AddSpanReceiverResponseProto { 54 | required int64 id = 1; 55 | } 56 | 57 | message RemoveSpanReceiverRequestProto { 58 | required int64 id = 1; 59 | } 60 | 61 | message RemoveSpanReceiverResponseProto { 62 | } 63 | 64 | service TraceAdminService { 65 | rpc listSpanReceivers(ListSpanReceiversRequestProto) 66 | returns(ListSpanReceiversResponseProto); 67 | 68 | rpc addSpanReceiver(AddSpanReceiverRequestProto) 69 | returns(AddSpanReceiverResponseProto); 70 | 71 | rpc removeSpanReceiver(RemoveSpanReceiverRequestProto) 72 | returns(RemoveSpanReceiverResponseProto); 73 | } 74 | -------------------------------------------------------------------------------- /protocolpb/proto/hadoop/ZKFCProtocol.proto: -------------------------------------------------------------------------------- 1 | /** 2 | * Licensed to the Apache Software Foundation (ASF) under one 3 | * or more contributor license agreements. See the NOTICE file 4 | * distributed with this work for additional information 5 | * regarding copyright ownership. The ASF licenses this file 6 | * to you under the Apache License, Version 2.0 (the 7 | * "License"); you may not use this file except in compliance 8 | * with the License. You may obtain a copy of the License at 9 | * 10 | * http://www.apache.org/licenses/LICENSE-2.0 11 | * 12 | * Unless required by applicable law or agreed to in writing, software 13 | * distributed under the License is distributed on an "AS IS" BASIS, 14 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 15 | * See the License for the specific language governing permissions and 16 | * limitations under the License. 17 | */ 18 | 19 | /** 20 | * These .proto interfaces are private and stable. 21 | * Please see http://wiki.apache.org/hadoop/Compatibility 22 | * for what changes are allowed for a *stable* .proto interface. 
23 | */ 24 | 25 | option java_package = "org.apache.hadoop.ha.proto"; 26 | option java_outer_classname = "ZKFCProtocolProtos"; 27 | option java_generic_services = true; 28 | option java_generate_equals_and_hash = true; 29 | package hadoop.common; 30 | 31 | message CedeActiveRequestProto { 32 | required uint32 millisToCede = 1; 33 | } 34 | 35 | message CedeActiveResponseProto { 36 | } 37 | 38 | message GracefulFailoverRequestProto { 39 | } 40 | 41 | message GracefulFailoverResponseProto { 42 | } 43 | 44 | 45 | /** 46 | * Protocol provides manual control of the ZK Failover Controllers 47 | */ 48 | service ZKFCProtocolService { 49 | /** 50 | * Request that the service cede its active state, and quit the election 51 | * for some amount of time 52 | */ 53 | rpc cedeActive(CedeActiveRequestProto) 54 | returns(CedeActiveResponseProto); 55 | 56 | 57 | rpc gracefulFailover(GracefulFailoverRequestProto) 58 | returns(GracefulFailoverResponseProto); 59 | } 60 | -------------------------------------------------------------------------------- /protocolpb/proto/hdfs/ClientDatanodeProtocol.proto: -------------------------------------------------------------------------------- 1 | /** 2 | * Licensed to the Apache Software Foundation (ASF) under one 3 | * or more contributor license agreements. See the NOTICE file 4 | * distributed with this work for additional information 5 | * regarding copyright ownership. The ASF licenses this file 6 | * to you under the Apache License, Version 2.0 (the 7 | * "License"); you may not use this file except in compliance 8 | * with the License. You may obtain a copy of the License at 9 | * 10 | * http://www.apache.org/licenses/LICENSE-2.0 11 | * 12 | * Unless required by applicable law or agreed to in writing, software 13 | * distributed under the License is distributed on an "AS IS" BASIS, 14 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 15 | * See the License for the specific language governing permissions and 16 | * limitations under the License. 17 | */ 18 | 19 | /** 20 | * These .proto interfaces are private and stable. 21 | * Please see http://wiki.apache.org/hadoop/Compatibility 22 | * for what changes are allowed for a *stable* .proto interface. 23 | */ 24 | 25 | // This file contains protocol buffers that are used throughout HDFS -- i.e. 26 | // by the client, server, and data transfer protocols. 27 | 28 | option java_package = "org.apache.hadoop.hdfs.protocol.proto"; 29 | option java_outer_classname = "ClientDatanodeProtocolProtos"; 30 | option java_generic_services = true; 31 | option java_generate_equals_and_hash = true; 32 | package hadoop.hdfs; 33 | 34 | import "Security.proto"; 35 | import "hdfs.proto"; 36 | 37 | /** 38 | * block - block for which visible length is requested 39 | */ 40 | message GetReplicaVisibleLengthRequestProto { 41 | required ExtendedBlockProto block = 1; 42 | } 43 | 44 | /** 45 | * length - visible length of the block 46 | */ 47 | message GetReplicaVisibleLengthResponseProto { 48 | required uint64 length = 1; 49 | } 50 | 51 | /** 52 | * void request 53 | */ 54 | message RefreshNamenodesRequestProto { 55 | } 56 | 57 | /** 58 | * void response 59 | */ 60 | message RefreshNamenodesResponseProto { 61 | } 62 | 63 | /** 64 | * blockPool - block pool to be deleted 65 | * force - if false, delete the block pool only if it is empty. 66 | * if true, delete the block pool even if it has blocks. 
67 | */ 68 | message DeleteBlockPoolRequestProto { 69 | required string blockPool = 1; 70 | required bool force = 2; 71 | } 72 | 73 | /** 74 | * void response 75 | */ 76 | message DeleteBlockPoolResponseProto { 77 | } 78 | 79 | /** 80 | * Gets the file information where the block and its metadata are stored 81 | * block - block for which path information is being requested 82 | * token - block token 83 | * 84 | * This message is deprecated in favor of file descriptor passing. 85 | */ 86 | message GetBlockLocalPathInfoRequestProto { 87 | required ExtendedBlockProto block = 1; 88 | required hadoop.common.TokenProto token = 2; 89 | } 90 | 91 | /** 92 | * block - block for which file path information is being returned 93 | * localPath - file path where the block data is stored 94 | * localMetaPath - file path where the block meta data is stored 95 | * 96 | * This message is deprecated in favor of file descriptor passing. 97 | */ 98 | message GetBlockLocalPathInfoResponseProto { 99 | required ExtendedBlockProto block = 1; 100 | required string localPath = 2; 101 | required string localMetaPath = 3; 102 | } 103 | 104 | /** 105 | * Query for the disk locations of a number of blocks on this DN. 106 | * blockPoolId - the pool to query 107 | * blockIds - list of block IDs to query 108 | * tokens - list of access tokens corresponding to list of block IDs 109 | */ 110 | message GetHdfsBlockLocationsRequestProto { 111 | // Removed: HDFS-3969 112 | // repeated ExtendedBlockProto blocks = 1; 113 | repeated hadoop.common.TokenProto tokens = 2; 114 | 115 | required string blockPoolId = 3; 116 | repeated sfixed64 blockIds = 4 [ packed = true ]; 117 | } 118 | 119 | /** 120 | * volumeIds - id of each volume, potentially multiple bytes 121 | * volumeIndexes - for each block, an index into volumeIds specifying the volume 122 | * on which it is located. If a block is not present on any volume, 123 | * the index is set to MAX_INT. 124 | */ 125 | message GetHdfsBlockLocationsResponseProto { 126 | repeated bytes volumeIds = 1; 127 | repeated uint32 volumeIndexes = 2 [ packed = true ]; 128 | } 129 | 130 | /** 131 | * forUpgrade - if true, clients are advised to wait for the restart, and a 132 | * quick restart for upgrade is performed. Otherwise, the 133 | * datanode does a regular shutdown. 134 | */ 135 | message ShutdownDatanodeRequestProto { 136 | required bool forUpgrade = 1; 137 | } 138 | 139 | message ShutdownDatanodeResponseProto { 140 | } 141 | 142 | /** 143 | * Ping the datanode for liveness and quick info 144 | */ 145 | message GetDatanodeInfoRequestProto { 146 | } 147 | 148 | message GetDatanodeInfoResponseProto { 149 | required DatanodeLocalInfoProto localInfo = 1; 150 | } 151 | 152 | /** Asks the DataNode to reload its configuration file. */ 153 | message StartReconfigurationRequestProto { 154 | } 155 | 156 | message StartReconfigurationResponseProto { 157 | } 158 | 159 | /** Query the running status of the reconfiguration process */ 160 | message GetReconfigurationStatusRequestProto { 161 | } 162 | 163 | message GetReconfigurationStatusConfigChangeProto { 164 | required string name = 1; 165 | required string oldValue = 2; 166 | optional string newValue = 3; 167 | optional string errorMessage = 4; // Empty if the change succeeded. 168 | } 169 | 170 | message GetReconfigurationStatusResponseProto { 171 | required int64 startTime = 1; 172 | optional int64 endTime = 2; 173 | repeated GetReconfigurationStatusConfigChangeProto changes = 3; 174 | } 175 | 176 | /** 177 | * Protocol used from the client to the Datanode.
178 | * See the request and response for details of rpc call. 179 | */ 180 | service ClientDatanodeProtocolService { 181 | /** 182 | * Returns the visible length of the replica 183 | */ 184 | rpc getReplicaVisibleLength(GetReplicaVisibleLengthRequestProto) 185 | returns(GetReplicaVisibleLengthResponseProto); 186 | 187 | /** 188 | * Refresh the list of federated namenodes from updated configuration. 189 | * Adds new namenodes and stops the deleted namenodes. 190 | */ 191 | rpc refreshNamenodes(RefreshNamenodesRequestProto) 192 | returns(RefreshNamenodesResponseProto); 193 | 194 | /** 195 | * Delete the block pool from the datanode. 196 | */ 197 | rpc deleteBlockPool(DeleteBlockPoolRequestProto) 198 | returns(DeleteBlockPoolResponseProto); 199 | 200 | /** 201 | * Retrieves the path names of the block file and metadata file stored on the 202 | * local file system. 203 | */ 204 | rpc getBlockLocalPathInfo(GetBlockLocalPathInfoRequestProto) 205 | returns(GetBlockLocalPathInfoResponseProto); 206 | 207 | /** 208 | * Retrieve additional HDFS-specific metadata about a set of blocks stored 209 | * on the local file system. 210 | */ 211 | rpc getHdfsBlockLocations(GetHdfsBlockLocationsRequestProto) 212 | returns(GetHdfsBlockLocationsResponseProto); 213 | 214 | rpc shutdownDatanode(ShutdownDatanodeRequestProto) 215 | returns(ShutdownDatanodeResponseProto); 216 | 217 | rpc getDatanodeInfo(GetDatanodeInfoRequestProto) 218 | returns(GetDatanodeInfoResponseProto); 219 | 220 | rpc getReconfigurationStatus(GetReconfigurationStatusRequestProto) 221 | returns(GetReconfigurationStatusResponseProto); 222 | 223 | rpc startReconfiguration(StartReconfigurationRequestProto) 224 | returns(StartReconfigurationResponseProto); 225 | } 226 | -------------------------------------------------------------------------------- /protocolpb/proto/hdfs/DatanodeProtocol.proto: -------------------------------------------------------------------------------- 1 | /** 2 | * Licensed to the Apache Software Foundation (ASF) under one 3 | * or more contributor license agreements. See the NOTICE file 4 | * distributed with this work for additional information 5 | * regarding copyright ownership. The ASF licenses this file 6 | * to you under the Apache License, Version 2.0 (the 7 | * "License"); you may not use this file except in compliance 8 | * with the License. You may obtain a copy of the License at 9 | * 10 | * http://www.apache.org/licenses/LICENSE-2.0 11 | * 12 | * Unless required by applicable law or agreed to in writing, software 13 | * distributed under the License is distributed on an "AS IS" BASIS, 14 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 15 | * See the License for the specific language governing permissions and 16 | * limitations under the License. 17 | */ 18 | 19 | /** 20 | * These .proto interfaces are private and stable. 21 | * Please see http://wiki.apache.org/hadoop/Compatibility 22 | * for what changes are allowed for a *stable* .proto interface. 23 | */ 24 | 25 | // This file contains protocol buffers that are used throughout HDFS -- i.e. 26 | // by the client, server, and data transfer protocols. 
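// Illustrative sketch (values are hypothetical, not part of the upstream file): a namenode reply to a datanode heartbeat carries zero or more commands; the cmdType discriminant in DatanodeCommandProto below names which optional command field is populated. In protobuf text format such a HeartbeatResponseProto looks like: //   cmds { cmdType: BalancerBandwidthCommand balancerCmd { bandwidth: 10485760 } } //   haStatus { state: ACTIVE txid: 42 }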
27 | 28 | option java_package = "org.apache.hadoop.hdfs.protocol.proto"; 29 | option java_outer_classname = "DatanodeProtocolProtos"; 30 | option java_generic_services = true; 31 | option java_generate_equals_and_hash = true; 32 | package hadoop.hdfs.datanode; 33 | 34 | import "hdfs.proto"; 35 | 36 | /** 37 | * Information to identify a datanode to a namenode 38 | */ 39 | message DatanodeRegistrationProto { 40 | required DatanodeIDProto datanodeID = 1; // Datanode information 41 | required StorageInfoProto storageInfo = 2; // Node information 42 | required ExportedBlockKeysProto keys = 3; // Block keys 43 | required string softwareVersion = 4; // Software version of the DN, e.g. "2.0.0" 44 | } 45 | 46 | /** 47 | * Commands sent from namenode to the datanodes 48 | */ 49 | message DatanodeCommandProto { 50 | enum Type { 51 | BalancerBandwidthCommand = 0; 52 | BlockCommand = 1; 53 | BlockRecoveryCommand = 2; 54 | FinalizeCommand = 3; 55 | KeyUpdateCommand = 4; 56 | RegisterCommand = 5; 57 | UnusedUpgradeCommand = 6; 58 | NullDatanodeCommand = 7; 59 | BlockIdCommand = 8; 60 | } 61 | 62 | required Type cmdType = 1; // Type of the command 63 | 64 | // One of the following commands is available when the corresponding 65 | // cmdType is set 66 | optional BalancerBandwidthCommandProto balancerCmd = 2; 67 | optional BlockCommandProto blkCmd = 3; 68 | optional BlockRecoveryCommandProto recoveryCmd = 4; 69 | optional FinalizeCommandProto finalizeCmd = 5; 70 | optional KeyUpdateCommandProto keyUpdateCmd = 6; 71 | optional RegisterCommandProto registerCmd = 7; 72 | optional BlockIdCommandProto blkIdCmd = 8; 73 | } 74 | 75 | /** 76 | * Command sent from namenode to datanode to set the 77 | * maximum bandwidth to be used for balancing. 78 | */ 79 | message BalancerBandwidthCommandProto { 80 | 81 | // Maximum bandwidth to be used by datanode for balancing 82 | required uint64 bandwidth = 1; 83 | } 84 | 85 | /** 86 | * Command to instruct datanodes to perform a certain action 87 | * on the given set of blocks. 88 | */ 89 | message BlockCommandProto { 90 | enum Action { 91 | TRANSFER = 1; // Transfer blocks to another datanode 92 | INVALIDATE = 2; // Invalidate blocks 93 | SHUTDOWN = 3; // Shutdown the datanode 94 | } 95 | 96 | required Action action = 1; 97 | required string blockPoolId = 2; 98 | repeated BlockProto blocks = 3; 99 | repeated DatanodeInfosProto targets = 4; 100 | repeated StorageUuidsProto targetStorageUuids = 5; 101 | repeated StorageTypesProto targetStorageTypes = 6; 102 | } 103 | 104 | /** 105 | * Command to instruct datanodes to perform a certain action 106 | * on the given set of block IDs.
107 | */ 108 | message BlockIdCommandProto { 109 | enum Action { 110 | CACHE = 1; 111 | UNCACHE = 2; 112 | } 113 | required Action action = 1; 114 | required string blockPoolId = 2; 115 | repeated uint64 blockIds = 3 [packed=true]; 116 | } 117 | 118 | /** 119 | * List of blocks to be recovered by the datanode 120 | */ 121 | message BlockRecoveryCommandProto { 122 | repeated RecoveringBlockProto blocks = 1; 123 | } 124 | 125 | /** 126 | * Finalize the upgrade at the datanode 127 | */ 128 | message FinalizeCommandProto { 129 | required string blockPoolId = 1; // Block pool to be finalized 130 | } 131 | 132 | /** 133 | * Update the block keys at the datanode 134 | */ 135 | message KeyUpdateCommandProto { 136 | required ExportedBlockKeysProto keys = 1; 137 | } 138 | 139 | /** 140 | * Instruct datanode to register with the namenode 141 | */ 142 | message RegisterCommandProto { 143 | // void 144 | } 145 | 146 | /** 147 | * registration - Information of the datanode registering with the namenode 148 | */ 149 | message RegisterDatanodeRequestProto { 150 | required DatanodeRegistrationProto registration = 1; // Datanode info 151 | } 152 | 153 | /** 154 | * registration - Update registration of the datanode that successfully 155 | * registered. StorageInfo will be updated to include new 156 | * storage ID if the datanode did not have one in the request. 157 | */ 158 | message RegisterDatanodeResponseProto { 159 | required DatanodeRegistrationProto registration = 1; // Datanode info 160 | } 161 | 162 | /** 163 | * registration - datanode registration information 164 | * capacity - total storage capacity available at the datanode 165 | * dfsUsed - storage used by HDFS 166 | * remaining - remaining storage available for HDFS 167 | * blockPoolUsed - storage used by the block pool 168 | * xmitsInProgress - number of transfers from this datanode to others 169 | * xceiverCount - number of active transceiver threads 170 | * failedVolumes - number of failed volumes 171 | * cacheCapacity - total cache capacity available at the datanode 172 | * cacheUsed - amount of cache used 173 | */ 174 | message HeartbeatRequestProto { 175 | required DatanodeRegistrationProto registration = 1; // Datanode info 176 | repeated StorageReportProto reports = 2; 177 | optional uint32 xmitsInProgress = 3 [ default = 0 ]; 178 | optional uint32 xceiverCount = 4 [ default = 0 ]; 179 | optional uint32 failedVolumes = 5 [ default = 0 ]; 180 | optional uint64 cacheCapacity = 6 [ default = 0 ]; 181 | optional uint64 cacheUsed = 7 [default = 0 ]; 182 | } 183 | 184 | /** 185 | * state - State the NN is in when returning response to the DN 186 | * txid - Highest transaction ID this NN has seen 187 | */ 188 | message NNHAStatusHeartbeatProto { 189 | enum State { 190 | ACTIVE = 0; 191 | STANDBY = 1; 192 | } 193 | required State state = 1; 194 | required uint64 txid = 2; 195 | } 196 | 197 | /** 198 | * cmds - Commands from namenode to datanode. 199 | * haStatus - Status (from an HA perspective) of the NN sending this response 200 | */ 201 | message HeartbeatResponseProto { 202 | repeated DatanodeCommandProto cmds = 1; // Returned commands can be null 203 | required NNHAStatusHeartbeatProto haStatus = 2; 204 | optional RollingUpgradeStatusProto rollingUpgradeStatus = 3; 205 | } 206 | 207 | /** 208 | * registration - datanode registration information 209 | * blockPoolID - block pool ID of the reported blocks 210 | * blocks - each block is represented as multiple longs in the array. 
211 | * first long represents block ID 212 | * second long represents length 213 | * third long represents gen stamp 214 | * fourth long (if under construction) represents replica state 215 | */ 216 | message BlockReportRequestProto { 217 | required DatanodeRegistrationProto registration = 1; 218 | required string blockPoolId = 2; 219 | repeated StorageBlockReportProto reports = 3; 220 | } 221 | 222 | /** 223 | * Report of blocks in a storage 224 | */ 225 | message StorageBlockReportProto { 226 | required DatanodeStorageProto storage = 1; // Storage 227 | repeated uint64 blocks = 2 [packed=true]; 228 | } 229 | 230 | /** 231 | * cmd - Command from namenode to the datanode 232 | */ 233 | message BlockReportResponseProto { 234 | optional DatanodeCommandProto cmd = 1; 235 | } 236 | 237 | /** 238 | * registration - datanode registration information 239 | * blockPoolId - block pool ID of the reported blocks 240 | * blocks - representation of blocks as longs for efficiency reasons 241 | */ 242 | message CacheReportRequestProto { 243 | required DatanodeRegistrationProto registration = 1; 244 | required string blockPoolId = 2; 245 | repeated uint64 blocks = 3 [packed=true]; 246 | } 247 | 248 | message CacheReportResponseProto { 249 | optional DatanodeCommandProto cmd = 1; 250 | } 251 | 252 | /** 253 | * Data structure to send received or deleted block information 254 | * from datanode to namenode. 255 | */ 256 | message ReceivedDeletedBlockInfoProto { 257 | enum BlockStatus { 258 | RECEIVING = 1; // block being created 259 | RECEIVED = 2; // block creation complete 260 | DELETED = 3; 261 | } 262 | 263 | required BlockProto block = 1; 264 | required BlockStatus status = 3; 265 | optional string deleteHint = 2; 266 | } 267 | 268 | /** 269 | * List of blocks received and deleted for a storage. 270 | */ 271 | message StorageReceivedDeletedBlocksProto { 272 | required string storageUuid = 1 [ deprecated = true ]; 273 | repeated ReceivedDeletedBlockInfoProto blocks = 2; 274 | optional DatanodeStorageProto storage = 3; // supersedes storageUuid. 
275 | } 276 | 277 | /** 278 | * registration - datanode registration information 279 | * blockPoolID - block pool ID of the reported blocks 280 | * blocks - Received/deleted block list 281 | */ 282 | message BlockReceivedAndDeletedRequestProto { 283 | required DatanodeRegistrationProto registration = 1; 284 | required string blockPoolId = 2; 285 | repeated StorageReceivedDeletedBlocksProto blocks = 3; 286 | } 287 | 288 | /** 289 | * void response 290 | */ 291 | message BlockReceivedAndDeletedResponseProto { 292 | } 293 | 294 | /** 295 | * registartion - Datanode reporting the error 296 | * errorCode - error code indicating the error 297 | * msg - Free text description of the error 298 | */ 299 | message ErrorReportRequestProto { 300 | enum ErrorCode { 301 | NOTIFY = 0; // Error report to be logged at the namenode 302 | DISK_ERROR = 1; // DN has disk errors but still has valid volumes 303 | INVALID_BLOCK = 2; // Command from namenode has invalid block ID 304 | FATAL_DISK_ERROR = 3; // No valid volumes left on datanode 305 | } 306 | required DatanodeRegistrationProto registartion = 1; // Registartion info 307 | required uint32 errorCode = 2; // Error code 308 | required string msg = 3; // Error message 309 | } 310 | 311 | /** 312 | * void response 313 | */ 314 | message ErrorReportResponseProto { 315 | } 316 | 317 | /** 318 | * blocks - list of blocks that are reported as corrupt 319 | */ 320 | message ReportBadBlocksRequestProto { 321 | repeated LocatedBlockProto blocks = 1; 322 | } 323 | 324 | /** 325 | * void response 326 | */ 327 | message ReportBadBlocksResponseProto { 328 | } 329 | 330 | /** 331 | * Commit block synchronization request during lease recovery 332 | */ 333 | message CommitBlockSynchronizationRequestProto { 334 | required ExtendedBlockProto block = 1; 335 | required uint64 newGenStamp = 2; 336 | required uint64 newLength = 3; 337 | required bool closeFile = 4; 338 | required bool deleteBlock = 5; 339 | repeated DatanodeIDProto newTaragets = 6; 340 | repeated string newTargetStorages = 7; 341 | } 342 | 343 | /** 344 | * void response 345 | */ 346 | message CommitBlockSynchronizationResponseProto { 347 | } 348 | 349 | /** 350 | * Protocol used from datanode to the namenode 351 | * See the request and response for details of rpc call. 352 | */ 353 | service DatanodeProtocolService { 354 | /** 355 | * Register a datanode at a namenode 356 | */ 357 | rpc registerDatanode(RegisterDatanodeRequestProto) 358 | returns(RegisterDatanodeResponseProto); 359 | 360 | /** 361 | * Send heartbeat from datanode to namenode 362 | */ 363 | rpc sendHeartbeat(HeartbeatRequestProto) returns(HeartbeatResponseProto); 364 | 365 | /** 366 | * Report blocks at a given datanode to the namenode 367 | */ 368 | rpc blockReport(BlockReportRequestProto) returns(BlockReportResponseProto); 369 | 370 | /** 371 | * Report cached blocks at a datanode to the namenode 372 | */ 373 | rpc cacheReport(CacheReportRequestProto) returns(CacheReportResponseProto); 374 | 375 | /** 376 | * Incremental block report from the DN. This contains info about recently 377 | * received and deleted blocks, as well as when blocks start being 378 | * received. 379 | */ 380 | rpc blockReceivedAndDeleted(BlockReceivedAndDeletedRequestProto) 381 | returns(BlockReceivedAndDeletedResponseProto); 382 | 383 | /** 384 | * Report from a datanode of an error to the active namenode. 385 | * Used for debugging. 
386 | */ 387 | rpc errorReport(ErrorReportRequestProto) returns(ErrorReportResponseProto); 388 | 389 | /** 390 | * Request the version 391 | */ 392 | rpc versionRequest(VersionRequestProto) returns(VersionResponseProto); 393 | 394 | /** 395 | * Report corrupt blocks at the specified location 396 | */ 397 | rpc reportBadBlocks(ReportBadBlocksRequestProto) returns(ReportBadBlocksResponseProto); 398 | 399 | /** 400 | * Commit block synchronization during lease recovery. 401 | */ 402 | rpc commitBlockSynchronization(CommitBlockSynchronizationRequestProto) 403 | returns(CommitBlockSynchronizationResponseProto); 404 | } 405 | -------------------------------------------------------------------------------- /protocolpb/proto/hdfs/HAZKInfo.proto: -------------------------------------------------------------------------------- 1 | /** 2 | * Licensed to the Apache Software Foundation (ASF) under one 3 | * or more contributor license agreements. See the NOTICE file 4 | * distributed with this work for additional information 5 | * regarding copyright ownership. The ASF licenses this file 6 | * to you under the Apache License, Version 2.0 (the 7 | * "License"); you may not use this file except in compliance 8 | * with the License. You may obtain a copy of the License at 9 | * 10 | * http://www.apache.org/licenses/LICENSE-2.0 11 | * 12 | * Unless required by applicable law or agreed to in writing, software 13 | * distributed under the License is distributed on an "AS IS" BASIS, 14 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 15 | * See the License for the specific language governing permissions and 16 | * limitations under the License. 17 | */ 18 | 19 | /** 20 | * These .proto interfaces are private and stable. 21 | * Please see http://wiki.apache.org/hadoop/Compatibility 22 | * for what changes are allowed for a *stable* .proto interface. 23 | */ 24 | 25 | option java_package = "org.apache.hadoop.hdfs.server.namenode.ha.proto"; 26 | option java_outer_classname = "HAZKInfoProtos"; 27 | package hadoop.hdfs; 28 | 29 | message ActiveNodeInfo { 30 | required string nameserviceId = 1; 31 | required string namenodeId = 2; 32 | 33 | required string hostname = 3; 34 | required int32 port = 4; 35 | required int32 zkfcPort = 5; 36 | } 37 | -------------------------------------------------------------------------------- /protocolpb/proto/hdfs/InterDatanodeProtocol.proto: -------------------------------------------------------------------------------- 1 | /** 2 | * Licensed to the Apache Software Foundation (ASF) under one 3 | * or more contributor license agreements. See the NOTICE file 4 | * distributed with this work for additional information 5 | * regarding copyright ownership. The ASF licenses this file 6 | * to you under the Apache License, Version 2.0 (the 7 | * "License"); you may not use this file except in compliance 8 | * with the License. You may obtain a copy of the License at 9 | * 10 | * http://www.apache.org/licenses/LICENSE-2.0 11 | * 12 | * Unless required by applicable law or agreed to in writing, software 13 | * distributed under the License is distributed on an "AS IS" BASIS, 14 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 15 | * See the License for the specific language governing permissions and 16 | * limitations under the License. 17 | */ 18 | 19 | /** 20 | * These .proto interfaces are private and stable. 
21 | * Please see http://wiki.apache.org/hadoop/Compatibility 22 | * for what changes are allowed for a *stable* .proto interface. 23 | */ 24 | 25 | // This file contains protocol buffers that are used throughout HDFS -- i.e. 26 | // by the client, server, and data transfer protocols. 27 | 28 | option java_package = "org.apache.hadoop.hdfs.protocol.proto"; 29 | option java_outer_classname = "InterDatanodeProtocolProtos"; 30 | option java_generic_services = true; 31 | option java_generate_equals_and_hash = true; 32 | package hadoop.hdfs; 33 | 34 | import "hdfs.proto"; 35 | 36 | /** 37 | * Block with location information and new generation stamp 38 | * to be used for recovery. 39 | */ 40 | message InitReplicaRecoveryRequestProto { 41 | required RecoveringBlockProto block = 1; 42 | } 43 | 44 | /** 45 | * Replica recovery information 46 | */ 47 | message InitReplicaRecoveryResponseProto { 48 | required bool replicaFound = 1; 49 | 50 | // The following entries are not set if there was no replica found. 51 | optional ReplicaStateProto state = 2; // State of the replica 52 | optional BlockProto block = 3; // block information 53 | } 54 | 55 | /** 56 | * Update replica with new generation stamp and length 57 | */ 58 | message UpdateReplicaUnderRecoveryRequestProto { 59 | required ExtendedBlockProto block = 1; // Block identifier 60 | required uint64 recoveryId = 2; // New genstamp of the replica 61 | required uint64 newLength = 3; // New length of the replica 62 | } 63 | 64 | /** 65 | * Response returns updated block information 66 | */ 67 | message UpdateReplicaUnderRecoveryResponseProto { 68 | optional string storageUuid = 1; // ID of the storage that stores the replica 69 | } 70 | 71 | /** 72 | * Protocol used between datanodes for block recovery. 73 | * 74 | * See the request and response for details of rpc call. 75 | */ 76 | service InterDatanodeProtocolService { 77 | /** 78 | * Initialize recovery of a replica 79 | */ 80 | rpc initReplicaRecovery(InitReplicaRecoveryRequestProto) 81 | returns(InitReplicaRecoveryResponseProto); 82 | 83 | /** 84 | * Update a replica with new generation stamp and length 85 | */ 86 | rpc updateReplicaUnderRecovery(UpdateReplicaUnderRecoveryRequestProto) 87 | returns(UpdateReplicaUnderRecoveryResponseProto); 88 | } 89 | -------------------------------------------------------------------------------- /protocolpb/proto/hdfs/JournalProtocol.proto: -------------------------------------------------------------------------------- 1 | /** 2 | * Licensed to the Apache Software Foundation (ASF) under one 3 | * or more contributor license agreements. See the NOTICE file 4 | * distributed with this work for additional information 5 | * regarding copyright ownership. The ASF licenses this file 6 | * to you under the Apache License, Version 2.0 (the 7 | * "License"); you may not use this file except in compliance 8 | * with the License. You may obtain a copy of the License at 9 | * 10 | * http://www.apache.org/licenses/LICENSE-2.0 11 | * 12 | * Unless required by applicable law or agreed to in writing, software 13 | * distributed under the License is distributed on an "AS IS" BASIS, 14 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 15 | * See the License for the specific language governing permissions and 16 | * limitations under the License. 17 | */ 18 | 19 | /** 20 | * These .proto interfaces are private and stable. 21 | * Please see http://wiki.apache.org/hadoop/Compatibility 22 | * for what changes are allowed for a *stable* .proto interface.
23 | */ 24 | 25 | // This file contains protocol buffers that are used throughout HDFS -- i.e. 26 | // by the client, server, and data transfer protocols. 27 | 28 | option java_package = "org.apache.hadoop.hdfs.protocol.proto"; 29 | option java_outer_classname = "JournalProtocolProtos"; 30 | option java_generic_services = true; 31 | option java_generate_equals_and_hash = true; 32 | package hadoop.hdfs; 33 | 34 | import "hdfs.proto"; 35 | 36 | /** 37 | * Journal information used by the journal receiver to identify a journal. 38 | */ 39 | message JournalInfoProto { 40 | required string clusterID = 1; // ID of the cluster 41 | optional uint32 layoutVersion = 2; // Layout version 42 | optional uint32 namespaceID = 3; // Namespace ID 43 | } 44 | 45 | /** 46 | * journalInfo - the information about the journal 47 | * firstTxnId - the first txid in the journal records 48 | * numTxns - Number of transactions in editlog 49 | * records - bytes containing serialized journal records 50 | * epoch - a change to this represents a change of the journal writer 51 | */ 52 | message JournalRequestProto { 53 | required JournalInfoProto journalInfo = 1; 54 | required uint64 firstTxnId = 2; 55 | required uint32 numTxns = 3; 56 | required bytes records = 4; 57 | required uint64 epoch = 5; 58 | } 59 | 60 | /** 61 | * void response 62 | */ 63 | message JournalResponseProto { 64 | } 65 | 66 | /** 67 | * journalInfo - the information about the journal 68 | * txid - first txid in the new log 69 | */ 70 | message StartLogSegmentRequestProto { 71 | required JournalInfoProto journalInfo = 1; // Info about the journal 72 | required uint64 txid = 2; // Transaction ID 73 | required uint64 epoch = 3; 74 | } 75 | 76 | /** 77 | * void response 78 | */ 79 | message StartLogSegmentResponseProto { 80 | } 81 | 82 | /** 83 | * journalInfo - the information about the journal 84 | * epoch - the writer's epoch; a change indicates a change of writer 85 | */ 86 | message FenceRequestProto { 87 | required JournalInfoProto journalInfo = 1; // Info about the journal 88 | required uint64 epoch = 2; // Epoch - change indicates change in writer 89 | optional string fencerInfo = 3; // Info about fencer for debugging 90 | } 91 | 92 | /** 93 | * previousEpoch - previous epoch if any or zero 94 | * lastTransactionId - last valid transaction Id in the journal 95 | * inSync - if all journal segments are available and in sync 96 | */ 97 | message FenceResponseProto { 98 | optional uint64 previousEpoch = 1; 99 | optional uint64 lastTransactionId = 2; 100 | optional bool inSync = 3; 101 | } 102 | 103 | /** 104 | * Protocol used to journal edits to a remote node. Currently, 105 | * this is used to publish edits from the NameNode to a BackupNode. 106 | * 107 | * See the request and response for details of rpc call. 108 | */ 109 | service JournalProtocolService { 110 | /** 111 | * Request sent by active namenode to backup node via 112 | * EditLogBackupOutputStream to stream editlog records. 113 | */ 114 | rpc journal(JournalRequestProto) returns (JournalResponseProto); 115 | 116 | /** 117 | * Request sent by active namenode to backup node to notify 118 | * that the NameNode has rolled its edit logs and is now writing a 119 | * new log segment. 120 | */ 121 | rpc startLogSegment(StartLogSegmentRequestProto) 122 | returns (StartLogSegmentResponseProto); 123 | 124 | /** 125 | * Request to fence a journal receiver.
126 | */ 127 | rpc fence(FenceRequestProto) 128 | returns (FenceResponseProto); 129 | } 130 | -------------------------------------------------------------------------------- /protocolpb/proto/hdfs/NamenodeProtocol.proto: -------------------------------------------------------------------------------- 1 | /** 2 | * Licensed to the Apache Software Foundation (ASF) under one 3 | * or more contributor license agreements. See the NOTICE file 4 | * distributed with this work for additional information 5 | * regarding copyright ownership. The ASF licenses this file 6 | * to you under the Apache License, Version 2.0 (the 7 | * "License"); you may not use this file except in compliance 8 | * with the License. You may obtain a copy of the License at 9 | * 10 | * http://www.apache.org/licenses/LICENSE-2.0 11 | * 12 | * Unless required by applicable law or agreed to in writing, software 13 | * distributed under the License is distributed on an "AS IS" BASIS, 14 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 15 | * See the License for the specific language governing permissions and 16 | * limitations under the License. 17 | */ 18 | 19 | /** 20 | * These .proto interfaces are private and stable. 21 | * Please see http://wiki.apache.org/hadoop/Compatibility 22 | * for what changes are allowed for a *stable* .proto interface. 23 | */ 24 | 25 | // This file contains protocol buffers that are used throughout HDFS -- i.e. 26 | // by the client, server, and data transfer protocols. 27 | 28 | option java_package = "org.apache.hadoop.hdfs.protocol.proto"; 29 | option java_outer_classname = "NamenodeProtocolProtos"; 30 | option java_generic_services = true; 31 | option java_generate_equals_and_hash = true; 32 | package hadoop.hdfs.namenode; 33 | 34 | import "hdfs.proto"; 35 | 36 | /** 37 | * Get a list of blocks for a given datanode whose total length 38 | * adds up to the given size 39 | * datanode - Datanode ID to get the list of blocks from 40 | * size - size to which the block lengths must add up 41 | */ 42 | message GetBlocksRequestProto { 43 | required DatanodeIDProto datanode = 1; // Datanode ID 44 | required uint64 size = 2; // Size in bytes 45 | } 46 | 47 | 48 | /** 49 | * blocks - List of returned blocks 50 | */ 51 | message GetBlocksResponseProto { 52 | required BlocksWithLocationsProto blocks = 1; // List of blocks 53 | } 54 | 55 | /** 56 | * void request 57 | */ 58 | message GetBlockKeysRequestProto { 59 | } 60 | 61 | /** 62 | * keys - Information about block keys at the active namenode 63 | */ 64 | message GetBlockKeysResponseProto { 65 | optional ExportedBlockKeysProto keys = 1; 66 | } 67 | 68 | /** 69 | * void request 70 | */ 71 | message GetTransactionIdRequestProto { 72 | } 73 | 74 | /** 75 | * txId - Transaction ID of the most recently persisted edit log record 76 | */ 77 | message GetTransactionIdResponseProto { 78 | required uint64 txId = 1; // Transaction ID 79 | } 80 | 81 | /** 82 | * void request 83 | */ 84 | message RollEditLogRequestProto { 85 | } 86 | 87 | /** 88 | * signature - A unique token to identify checkpoint transaction 89 | */ 90 | message RollEditLogResponseProto { 91 | required CheckpointSignatureProto signature = 1; 92 | } 93 | 94 | /** 95 | * void request 96 | */ 97 | message GetMostRecentCheckpointTxIdRequestProto { 98 | } 99 | 100 | message GetMostRecentCheckpointTxIdResponseProto { 101 | required uint64 txId = 1; 102 | } 103 | 104 | /** 105 | * registration - Namenode reporting the error 106 | * errorCode - error code indicating the error
107 | * msg - Free text description of the error 108 | */ 109 | message ErrorReportRequestProto { 110 | required NamenodeRegistrationProto registration = 1; // Registration info 111 | required uint32 errorCode = 2; // Error code 112 | required string msg = 3; // Error message 113 | } 114 | 115 | /** 116 | * void response 117 | */ 118 | message ErrorReportResponseProto { 119 | } 120 | 121 | /** 122 | * registration - Information of the namenode registering with the primary namenode 123 | */ 124 | message RegisterRequestProto { 125 | required NamenodeRegistrationProto registration = 1; // Registration info 126 | } 127 | 128 | /** 129 | * registration - Updated registration information of the newly registered 130 | * namenode. 131 | */ 132 | message RegisterResponseProto { 133 | required NamenodeRegistrationProto registration = 1; // Registration info 134 | } 135 | 136 | /** 137 | * Start checkpoint request 138 | * registration - Namenode that is starting the checkpoint 139 | */ 140 | message StartCheckpointRequestProto { 141 | required NamenodeRegistrationProto registration = 1; // Registration info 142 | } 143 | 144 | /** 145 | * command - Command returned by the active namenode to be 146 | * handled by the caller. 147 | */ 148 | message StartCheckpointResponseProto { 149 | required NamenodeCommandProto command = 1; 150 | } 151 | 152 | /** 153 | * End or finalize the previously started checkpoint 154 | * registration - Namenode that is ending the checkpoint 155 | * signature - unique token to identify checkpoint transaction, 156 | * that was received when checkpoint was started. 157 | */ 158 | message EndCheckpointRequestProto { 159 | required NamenodeRegistrationProto registration = 1; // Registration info 160 | required CheckpointSignatureProto signature = 2; 161 | } 162 | 163 | /** 164 | * void response 165 | */ 166 | message EndCheckpointResponseProto { 167 | } 168 | 169 | /** 170 | * sinceTxId - return the editlog information for transactions >= sinceTxId 171 | */ 172 | message GetEditLogManifestRequestProto { 173 | required uint64 sinceTxId = 1; // Transaction ID 174 | } 175 | 176 | /** 177 | * manifest - Enumeration of editlogs from namenode for 178 | * logs >= sinceTxId in the request 179 | */ 180 | message GetEditLogManifestResponseProto { 181 | required RemoteEditLogManifestProto manifest = 1; 182 | } 183 | 184 | /** 185 | * Protocol used by the sub-ordinate namenode to send requests to 186 | * the active/primary namenode. 187 | * 188 | * See the request and response for details of rpc call. 189 | */ 190 | service NamenodeProtocolService { 191 | /** 192 | * Get a list of blocks for a given datanode, with the block lengths 193 | * adding up to the given size.
194 | */ 195 | rpc getBlocks(GetBlocksRequestProto) returns(GetBlocksResponseProto); 196 | 197 | /** 198 | * Get the current block keys 199 | */ 200 | rpc getBlockKeys(GetBlockKeysRequestProto) returns(GetBlockKeysResponseProto); 201 | 202 | /** 203 | * Get the transaction ID of the most recently persisted editlog record 204 | */ 205 | rpc getTransactionId(GetTransactionIdRequestProto) 206 | returns(GetTransactionIdResponseProto); 207 | 208 | /** 209 | * Get the transaction ID of the most recent checkpoint 210 | */ 211 | rpc getMostRecentCheckpointTxId(GetMostRecentCheckpointTxIdRequestProto) 212 | returns(GetMostRecentCheckpointTxIdResponseProto); 213 | 214 | /** 215 | * Close the current editlog and open a new one for checkpointing purposes 216 | */ 217 | rpc rollEditLog(RollEditLogRequestProto) returns(RollEditLogResponseProto); 218 | 219 | /** 220 | * Request info about the version running on this NameNode 221 | */ 222 | rpc versionRequest(VersionRequestProto) returns(VersionResponseProto); 223 | 224 | /** 225 | * Report from a sub-ordinate namenode of an error to the active namenode. 226 | * Active namenode may decide to unregister the reporting namenode 227 | * depending on the error. 228 | */ 229 | rpc errorReport(ErrorReportRequestProto) returns(ErrorReportResponseProto); 230 | 231 | /** 232 | * Request to register a sub-ordinate namenode 233 | */ 234 | rpc registerSubordinateNamenode(RegisterRequestProto) returns(RegisterResponseProto); 235 | 236 | /** 237 | * Request to start a checkpoint. 238 | */ 239 | rpc startCheckpoint(StartCheckpointRequestProto) 240 | returns(StartCheckpointResponseProto); 241 | 242 | /** 243 | * End or finalize the previously started checkpoint 244 | */ 245 | rpc endCheckpoint(EndCheckpointRequestProto) 246 | returns(EndCheckpointResponseProto); 247 | 248 | /** 249 | * Get editlog manifests from the active namenode for all the editlogs 250 | */ 251 | rpc getEditLogManifest(GetEditLogManifestRequestProto) 252 | returns(GetEditLogManifestResponseProto); 253 | } 254 | -------------------------------------------------------------------------------- /protocolpb/proto/hdfs/QJournalProtocol.proto: -------------------------------------------------------------------------------- 1 | /** 2 | * Licensed to the Apache Software Foundation (ASF) under one 3 | * or more contributor license agreements. See the NOTICE file 4 | * distributed with this work for additional information 5 | * regarding copyright ownership. The ASF licenses this file 6 | * to you under the Apache License, Version 2.0 (the 7 | * "License"); you may not use this file except in compliance 8 | * with the License. You may obtain a copy of the License at 9 | * 10 | * http://www.apache.org/licenses/LICENSE-2.0 11 | * 12 | * Unless required by applicable law or agreed to in writing, software 13 | * distributed under the License is distributed on an "AS IS" BASIS, 14 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 15 | * See the License for the specific language governing permissions and 16 | * limitations under the License. 17 | */ 18 | 19 | /** 20 | * These .proto interfaces are private and stable. 21 | * Please see http://wiki.apache.org/hadoop/Compatibility 22 | * for what changes are allowed for a *stable* .proto interface.
23 | */ 24 | 25 | option java_package = "org.apache.hadoop.hdfs.qjournal.protocol"; 26 | option java_outer_classname = "QJournalProtocolProtos"; 27 | option java_generic_services = true; 28 | option java_generate_equals_and_hash = true; 29 | package hadoop.hdfs.qjournal; 30 | 31 | import "hdfs.proto"; 32 | 33 | message JournalIdProto { 34 | required string identifier = 1; 35 | } 36 | 37 | message RequestInfoProto { 38 | required JournalIdProto journalId = 1; 39 | required uint64 epoch = 2; 40 | required uint64 ipcSerialNumber = 3; 41 | 42 | // Whenever a writer makes a request, it informs 43 | // the node of the latest committed txid. This may 44 | // be higher than the transaction data included in the 45 | // request itself, eg in the case that the node has 46 | // fallen behind. 47 | optional uint64 committedTxId = 4; 48 | } 49 | 50 | message SegmentStateProto { 51 | required uint64 startTxId = 1; 52 | required uint64 endTxId = 2; 53 | required bool isInProgress = 3; 54 | } 55 | 56 | /** 57 | * The storage format used on local disk for previously 58 | * accepted decisions. 59 | */ 60 | message PersistedRecoveryPaxosData { 61 | required SegmentStateProto segmentState = 1; 62 | required uint64 acceptedInEpoch = 2; 63 | } 64 | 65 | /** 66 | * journal() 67 | */ 68 | 69 | message JournalRequestProto { 70 | required RequestInfoProto reqInfo = 1; 71 | required uint64 firstTxnId = 2; 72 | required uint32 numTxns = 3; 73 | required bytes records = 4; 74 | required uint64 segmentTxnId = 5; 75 | } 76 | 77 | message JournalResponseProto { 78 | } 79 | 80 | /** 81 | * heartbeat() 82 | */ 83 | 84 | message HeartbeatRequestProto { 85 | required RequestInfoProto reqInfo = 1; 86 | } 87 | 88 | message HeartbeatResponseProto { // void response 89 | } 90 | 91 | /** 92 | * startLogSegment() 93 | */ 94 | message StartLogSegmentRequestProto { 95 | required RequestInfoProto reqInfo = 1; 96 | required uint64 txid = 2; // Transaction ID 97 | optional sint32 layoutVersion = 3; // the LayoutVersion in the client 98 | } 99 | 100 | message StartLogSegmentResponseProto { 101 | } 102 | 103 | /** 104 | * finalizeLogSegment() 105 | */ 106 | message FinalizeLogSegmentRequestProto { 107 | required RequestInfoProto reqInfo = 1; 108 | required uint64 startTxId = 2; 109 | required uint64 endTxId = 3; 110 | } 111 | 112 | message FinalizeLogSegmentResponseProto { 113 | } 114 | 115 | /** 116 | * purgeLogs() 117 | */ 118 | message PurgeLogsRequestProto { 119 | required RequestInfoProto reqInfo = 1; 120 | required uint64 minTxIdToKeep = 2; 121 | } 122 | 123 | message PurgeLogsResponseProto { 124 | } 125 | 126 | /** 127 | * isFormatted() 128 | */ 129 | message IsFormattedRequestProto { 130 | required JournalIdProto jid = 1; 131 | } 132 | 133 | message IsFormattedResponseProto { 134 | required bool isFormatted = 1; 135 | } 136 | 137 | /** 138 | * discardSegments() 139 | */ 140 | message DiscardSegmentsRequestProto { 141 | required JournalIdProto jid = 1; 142 | required uint64 startTxId = 2; 143 | } 144 | 145 | message DiscardSegmentsResponseProto { 146 | } 147 | 148 | /** 149 | * getJournalCTime() 150 | */ 151 | message GetJournalCTimeRequestProto { 152 | required JournalIdProto jid = 1; 153 | } 154 | 155 | message GetJournalCTimeResponseProto { 156 | required int64 resultCTime = 1; 157 | } 158 | 159 | /** 160 | * doPreUpgrade() 161 | */ 162 | message DoPreUpgradeRequestProto { 163 | required JournalIdProto jid = 1; 164 | } 165 | 166 | message DoPreUpgradeResponseProto { 167 | } 168 | 169 | /** 170 | * doUpgrade() 171 | */ 172 | 
message DoUpgradeRequestProto { 173 | required JournalIdProto jid = 1; 174 | required StorageInfoProto sInfo = 2; 175 | } 176 | 177 | message DoUpgradeResponseProto { 178 | } 179 | 180 | /** 181 | * doFinalize() 182 | */ 183 | message DoFinalizeRequestProto { 184 | required JournalIdProto jid = 1; 185 | } 186 | 187 | message DoFinalizeResponseProto { 188 | } 189 | 190 | /** 191 | * canRollBack() 192 | */ 193 | message CanRollBackRequestProto { 194 | required JournalIdProto jid = 1; 195 | required StorageInfoProto storage = 2; 196 | required StorageInfoProto prevStorage = 3; 197 | required int32 targetLayoutVersion = 4; 198 | } 199 | 200 | message CanRollBackResponseProto { 201 | required bool canRollBack = 1; 202 | } 203 | 204 | /** 205 | * doRollback() 206 | */ 207 | message DoRollbackRequestProto { 208 | required JournalIdProto jid = 1; 209 | } 210 | 211 | message DoRollbackResponseProto { 212 | } 213 | 214 | /** 215 | * getJournalState() 216 | */ 217 | message GetJournalStateRequestProto { 218 | required JournalIdProto jid = 1; 219 | } 220 | 221 | message GetJournalStateResponseProto { 222 | required uint64 lastPromisedEpoch = 1; 223 | // Deprecated by fromURL 224 | required uint32 httpPort = 2; 225 | optional string fromURL = 3; 226 | } 227 | 228 | /** 229 | * format() 230 | */ 231 | message FormatRequestProto { 232 | required JournalIdProto jid = 1; 233 | required NamespaceInfoProto nsInfo = 2; 234 | } 235 | 236 | message FormatResponseProto { 237 | } 238 | 239 | /** 240 | * newEpoch() 241 | */ 242 | message NewEpochRequestProto { 243 | required JournalIdProto jid = 1; 244 | required NamespaceInfoProto nsInfo = 2; 245 | required uint64 epoch = 3; 246 | } 247 | 248 | message NewEpochResponseProto { 249 | optional uint64 lastSegmentTxId = 1; 250 | } 251 | 252 | /** 253 | * getEditLogManifest() 254 | */ 255 | message GetEditLogManifestRequestProto { 256 | required JournalIdProto jid = 1; 257 | required uint64 sinceTxId = 2; // Transaction ID 258 | // Whether or not the client will be reading from the returned streams. 259 | // optional bool forReading = 3 [default = true]; 260 | optional bool inProgressOk = 4 [default = false]; 261 | } 262 | 263 | message GetEditLogManifestResponseProto { 264 | required RemoteEditLogManifestProto manifest = 1; 265 | // Deprecated by fromURL 266 | required uint32 httpPort = 2; 267 | optional string fromURL = 3; 268 | 269 | // TODO: we should add nsinfo somewhere 270 | // to verify that it matches up with our expectation 271 | // required NamespaceInfoProto nsInfo = 2; 272 | } 273 | 274 | /** 275 | * prepareRecovery() 276 | */ 277 | message PrepareRecoveryRequestProto { 278 | required RequestInfoProto reqInfo = 1; 279 | required uint64 segmentTxId = 2; 280 | } 281 | 282 | message PrepareRecoveryResponseProto { 283 | optional SegmentStateProto segmentState = 1; 284 | optional uint64 acceptedInEpoch = 2; 285 | required uint64 lastWriterEpoch = 3; 286 | 287 | // The highest committed txid that this logger has ever seen. 288 | // This may be higher than the data it actually has, in the case 289 | // that it was lagging before the old writer crashed. 
290 | optional uint64 lastCommittedTxId = 4; 291 | } 292 | 293 | /** 294 | * acceptRecovery() 295 | */ 296 | message AcceptRecoveryRequestProto { 297 | required RequestInfoProto reqInfo = 1; 298 | 299 | /** Details on the segment to recover */ 300 | required SegmentStateProto stateToAccept = 2; 301 | 302 | /** The URL from which the log may be copied */ 303 | required string fromURL = 3; 304 | } 305 | 306 | message AcceptRecoveryResponseProto { 307 | } 308 | 309 | 310 | /** 311 | * Protocol used to journal edits to a JournalNode. 312 | * See the request and response for details of rpc call. 313 | */ 314 | service QJournalProtocolService { 315 | rpc isFormatted(IsFormattedRequestProto) returns (IsFormattedResponseProto); 316 | 317 | rpc discardSegments(DiscardSegmentsRequestProto) returns (DiscardSegmentsResponseProto); 318 | 319 | rpc getJournalCTime(GetJournalCTimeRequestProto) returns (GetJournalCTimeResponseProto); 320 | 321 | rpc doPreUpgrade(DoPreUpgradeRequestProto) returns (DoPreUpgradeResponseProto); 322 | 323 | rpc doUpgrade(DoUpgradeRequestProto) returns (DoUpgradeResponseProto); 324 | 325 | rpc doFinalize(DoFinalizeRequestProto) returns (DoFinalizeResponseProto); 326 | 327 | rpc canRollBack(CanRollBackRequestProto) returns (CanRollBackResponseProto); 328 | 329 | rpc doRollback(DoRollbackRequestProto) returns (DoRollbackResponseProto); 330 | 331 | rpc getJournalState(GetJournalStateRequestProto) returns (GetJournalStateResponseProto); 332 | 333 | rpc newEpoch(NewEpochRequestProto) returns (NewEpochResponseProto); 334 | 335 | rpc format(FormatRequestProto) returns (FormatResponseProto); 336 | 337 | rpc journal(JournalRequestProto) returns (JournalResponseProto); 338 | 339 | rpc heartbeat(HeartbeatRequestProto) returns (HeartbeatResponseProto); 340 | 341 | rpc startLogSegment(StartLogSegmentRequestProto) 342 | returns (StartLogSegmentResponseProto); 343 | 344 | rpc finalizeLogSegment(FinalizeLogSegmentRequestProto) 345 | returns (FinalizeLogSegmentResponseProto); 346 | 347 | rpc purgeLogs(PurgeLogsRequestProto) 348 | returns (PurgeLogsResponseProto); 349 | 350 | rpc getEditLogManifest(GetEditLogManifestRequestProto) 351 | returns (GetEditLogManifestResponseProto); 352 | 353 | rpc prepareRecovery(PrepareRecoveryRequestProto) 354 | returns (PrepareRecoveryResponseProto); 355 | 356 | rpc acceptRecovery(AcceptRecoveryRequestProto) 357 | returns (AcceptRecoveryResponseProto); 358 | } 359 | -------------------------------------------------------------------------------- /protocolpb/proto/hdfs/acl.proto: -------------------------------------------------------------------------------- 1 | /** 2 | * Licensed to the Apache Software Foundation (ASF) under one 3 | * or more contributor license agreements. See the NOTICE file 4 | * distributed with this work for additional information 5 | * regarding copyright ownership. The ASF licenses this file 6 | * to you under the Apache License, Version 2.0 (the 7 | * "License"); you may not use this file except in compliance 8 | * with the License. You may obtain a copy of the License at 9 | * 10 | * http://www.apache.org/licenses/LICENSE-2.0 11 | * 12 | * Unless required by applicable law or agreed to in writing, software 13 | * distributed under the License is distributed on an "AS IS" BASIS, 14 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 15 | * See the License for the specific language governing permissions and 16 | * limitations under the License. 
17 | */ 18 | 19 | option java_package = "org.apache.hadoop.hdfs.protocol.proto"; 20 | option java_outer_classname = "AclProtos"; 21 | option java_generate_equals_and_hash = true; 22 | package hadoop.hdfs; 23 | 24 | import "hdfs.proto"; 25 | 26 | message AclEntryProto { 27 | enum AclEntryScopeProto { 28 | ACCESS = 0x0; 29 | DEFAULT = 0x1; 30 | } 31 | 32 | enum AclEntryTypeProto { 33 | USER = 0x0; 34 | GROUP = 0x1; 35 | MASK = 0x2; 36 | OTHER = 0x3; 37 | } 38 | 39 | enum FsActionProto { 40 | NONE = 0x0; 41 | EXECUTE = 0x1; 42 | WRITE = 0x2; 43 | WRITE_EXECUTE = 0x3; 44 | READ = 0x4; 45 | READ_EXECUTE = 0x5; 46 | READ_WRITE = 0x6; 47 | PERM_ALL = 0x7; 48 | } 49 | 50 | required AclEntryTypeProto type = 1; 51 | required AclEntryScopeProto scope = 2; 52 | required FsActionProto permissions = 3; 53 | optional string name = 4; 54 | } 55 | 56 | message AclStatusProto { 57 | required string owner = 1; 58 | required string group = 2; 59 | required bool sticky = 3; 60 | repeated AclEntryProto entries = 4; 61 | } 62 | 63 | message AclEditLogProto { 64 | required string src = 1; 65 | repeated AclEntryProto entries = 2; 66 | } 67 | 68 | message ModifyAclEntriesRequestProto { 69 | required string src = 1; 70 | repeated AclEntryProto aclSpec = 2; 71 | } 72 | 73 | message ModifyAclEntriesResponseProto { 74 | } 75 | 76 | message RemoveAclRequestProto { 77 | required string src = 1; 78 | } 79 | 80 | message RemoveAclResponseProto { 81 | } 82 | 83 | message RemoveAclEntriesRequestProto { 84 | required string src = 1; 85 | repeated AclEntryProto aclSpec = 2; 86 | } 87 | 88 | message RemoveAclEntriesResponseProto { 89 | } 90 | 91 | message RemoveDefaultAclRequestProto { 92 | required string src = 1; 93 | } 94 | 95 | message RemoveDefaultAclResponseProto { 96 | } 97 | 98 | message SetAclRequestProto { 99 | required string src = 1; 100 | repeated AclEntryProto aclSpec = 2; 101 | } 102 | 103 | message SetAclResponseProto { 104 | } 105 | 106 | message GetAclStatusRequestProto { 107 | required string src = 1; 108 | } 109 | 110 | message GetAclStatusResponseProto { 111 | required AclStatusProto result = 1; 112 | } 113 | -------------------------------------------------------------------------------- /protocolpb/proto/hdfs/datatransfer.proto: -------------------------------------------------------------------------------- 1 | /** 2 | * Licensed to the Apache Software Foundation (ASF) under one 3 | * or more contributor license agreements. See the NOTICE file 4 | * distributed with this work for additional information 5 | * regarding copyright ownership. The ASF licenses this file 6 | * to you under the Apache License, Version 2.0 (the 7 | * "License"); you may not use this file except in compliance 8 | * with the License. You may obtain a copy of the License at 9 | * 10 | * http://www.apache.org/licenses/LICENSE-2.0 11 | * 12 | * Unless required by applicable law or agreed to in writing, software 13 | * distributed under the License is distributed on an "AS IS" BASIS, 14 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 15 | * See the License for the specific language governing permissions and 16 | * limitations under the License. 17 | */ 18 | 19 | /** 20 | * These .proto interfaces are private and stable. 21 | * Please see http://wiki.apache.org/hadoop/Compatibility 22 | * for what changes are allowed for a *stable* .proto interface. 23 | */ 24 | 25 | // This file contains protocol buffers that are used to transfer data 26 | // to and from the datanode, as well as between datanodes. 
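// Framing sketch (an assumption based on the Hadoop 2.x wire format, not spelled out in this file): each operation is written to the datanode socket as a two-byte data transfer protocol version (0x001C, i.e. 28), then a one-byte opcode (e.g. 81 for READ_BLOCK), then the varint-length-delimited request message such as OpReadBlockProto; the datanode replies with a delimited BlockOpResponseProto.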
27 | 28 | option java_package = "org.apache.hadoop.hdfs.protocol.proto"; 29 | option java_outer_classname = "DataTransferProtos"; 30 | option java_generate_equals_and_hash = true; 31 | package hadoop.hdfs; 32 | 33 | import "Security.proto"; 34 | import "hdfs.proto"; 35 | 36 | message DataTransferEncryptorMessageProto { 37 | enum DataTransferEncryptorStatus { 38 | SUCCESS = 0; 39 | ERROR_UNKNOWN_KEY = 1; 40 | ERROR = 2; 41 | } 42 | required DataTransferEncryptorStatus status = 1; 43 | optional bytes payload = 2; 44 | optional string message = 3; 45 | repeated CipherOptionProto cipherOption = 4; 46 | } 47 | 48 | message BaseHeaderProto { 49 | required ExtendedBlockProto block = 1; 50 | optional hadoop.common.TokenProto token = 2; 51 | optional DataTransferTraceInfoProto traceInfo = 3; 52 | } 53 | 54 | message DataTransferTraceInfoProto { 55 | required uint64 traceId = 1; 56 | required uint64 parentId = 2; 57 | } 58 | 59 | message ClientOperationHeaderProto { 60 | required BaseHeaderProto baseHeader = 1; 61 | required string clientName = 2; 62 | } 63 | 64 | message CachingStrategyProto { 65 | optional bool dropBehind = 1; 66 | optional int64 readahead = 2; 67 | } 68 | 69 | message OpReadBlockProto { 70 | required ClientOperationHeaderProto header = 1; 71 | required uint64 offset = 2; 72 | required uint64 len = 3; 73 | optional bool sendChecksums = 4 [default = true]; 74 | optional CachingStrategyProto cachingStrategy = 5; 75 | } 76 | 77 | 78 | message ChecksumProto { 79 | required ChecksumTypeProto type = 1; 80 | required uint32 bytesPerChecksum = 2; 81 | } 82 | 83 | message OpWriteBlockProto { 84 | required ClientOperationHeaderProto header = 1; 85 | repeated DatanodeInfoProto targets = 2; 86 | optional DatanodeInfoProto source = 3; 87 | enum BlockConstructionStage { 88 | PIPELINE_SETUP_APPEND = 0; 89 | // pipeline set up for failed PIPELINE_SETUP_APPEND recovery 90 | PIPELINE_SETUP_APPEND_RECOVERY = 1; 91 | // data streaming 92 | DATA_STREAMING = 2; 93 | // pipeline setup for failed data streaming recovery 94 | PIPELINE_SETUP_STREAMING_RECOVERY = 3; 95 | // close the block and pipeline 96 | PIPELINE_CLOSE = 4; 97 | // Recover a failed PIPELINE_CLOSE 98 | PIPELINE_CLOSE_RECOVERY = 5; 99 | // pipeline set up for block creation 100 | PIPELINE_SETUP_CREATE = 6; 101 | // transfer RBW for adding datanodes 102 | TRANSFER_RBW = 7; 103 | // transfer Finalized for adding datanodes 104 | TRANSFER_FINALIZED = 8; 105 | } 106 | required BlockConstructionStage stage = 4; 107 | required uint32 pipelineSize = 5; 108 | required uint64 minBytesRcvd = 6; 109 | required uint64 maxBytesRcvd = 7; 110 | required uint64 latestGenerationStamp = 8; 111 | 112 | /** 113 | * The requested checksum mechanism for this block write. 114 | */ 115 | required ChecksumProto requestedChecksum = 9; 116 | optional CachingStrategyProto cachingStrategy = 10; 117 | optional StorageTypeProto storageType = 11 [default = DISK]; 118 | repeated StorageTypeProto targetStorageTypes = 12; 119 | 120 | /** 121 | * Hint to the DataNode that the block can be allocated on transient 122 | * storage i.e. memory and written to disk lazily. The DataNode is free 123 | * to ignore this hint. 
124 | */ 125 | optional bool allowLazyPersist = 13 [default = false]; 126 | } 127 | 128 | message OpTransferBlockProto { 129 | required ClientOperationHeaderProto header = 1; 130 | repeated DatanodeInfoProto targets = 2; 131 | repeated StorageTypeProto targetStorageTypes = 3; 132 | } 133 | 134 | message OpReplaceBlockProto { 135 | required BaseHeaderProto header = 1; 136 | required string delHint = 2; 137 | required DatanodeInfoProto source = 3; 138 | optional StorageTypeProto storageType = 4 [default = DISK]; 139 | } 140 | 141 | message OpCopyBlockProto { 142 | required BaseHeaderProto header = 1; 143 | } 144 | 145 | message OpBlockChecksumProto { 146 | required BaseHeaderProto header = 1; 147 | } 148 | 149 | /** 150 | * An ID uniquely identifying a shared memory segment. 151 | */ 152 | message ShortCircuitShmIdProto { 153 | required int64 hi = 1; 154 | required int64 lo = 2; 155 | } 156 | 157 | /** 158 | * An ID uniquely identifying a slot within a shared memory segment. 159 | */ 160 | message ShortCircuitShmSlotProto { 161 | required ShortCircuitShmIdProto shmId = 1; 162 | required int32 slotIdx = 2; 163 | } 164 | 165 | message OpRequestShortCircuitAccessProto { 166 | required BaseHeaderProto header = 1; 167 | 168 | /** In order to get short-circuit access to block data, clients must set this 169 | * to the highest version of the block data that they can understand. 170 | * Currently 1 is the only version, but more versions may exist in the future 171 | * if the on-disk format changes. 172 | */ 173 | required uint32 maxVersion = 2; 174 | 175 | /** 176 | * The shared memory slot to use, if we are using one. 177 | */ 178 | optional ShortCircuitShmSlotProto slotId = 3; 179 | } 180 | 181 | message ReleaseShortCircuitAccessRequestProto { 182 | required ShortCircuitShmSlotProto slotId = 1; 183 | optional DataTransferTraceInfoProto traceInfo = 2; 184 | } 185 | 186 | message ReleaseShortCircuitAccessResponseProto { 187 | required Status status = 1; 188 | optional string error = 2; 189 | } 190 | 191 | message ShortCircuitShmRequestProto { 192 | // The name of the client requesting the shared memory segment. This is 193 | // purely for logging / debugging purposes. 194 | required string clientName = 1; 195 | optional DataTransferTraceInfoProto traceInfo = 2; 196 | } 197 | 198 | message ShortCircuitShmResponseProto { 199 | required Status status = 1; 200 | optional string error = 2; 201 | optional ShortCircuitShmIdProto id = 3; 202 | } 203 | 204 | message PacketHeaderProto { 205 | // All fields must be fixed-length! 206 | required sfixed64 offsetInBlock = 1; 207 | required sfixed64 seqno = 2; 208 | required bool lastPacketInBlock = 3; 209 | required sfixed32 dataLen = 4; 210 | optional bool syncBlock = 5 [default = false]; 211 | } 212 | 213 | enum Status { 214 | SUCCESS = 0; 215 | ERROR = 1; 216 | ERROR_CHECKSUM = 2; 217 | ERROR_INVALID = 3; 218 | ERROR_EXISTS = 4; 219 | ERROR_ACCESS_TOKEN = 5; 220 | CHECKSUM_OK = 6; 221 | ERROR_UNSUPPORTED = 7; 222 | OOB_RESTART = 8; // Quick restart 223 | OOB_RESERVED1 = 9; // Reserved 224 | OOB_RESERVED2 = 10; // Reserved 225 | OOB_RESERVED3 = 11; // Reserved 226 | IN_PROGRESS = 12; 227 | } 228 | 229 | message PipelineAckProto { 230 | required sint64 seqno = 1; 231 | repeated Status status = 2; 232 | optional uint64 downstreamAckTimeNanos = 3 [default = 0]; 233 | } 234 | 235 | /** 236 | * Sent as part of the BlockOpResponseProto 237 | * for READ_BLOCK and COPY_BLOCK operations. 
238 | */ 239 | message ReadOpChecksumInfoProto { 240 | required ChecksumProto checksum = 1; 241 | 242 | /** 243 | * The offset into the block at which the first packet 244 | * will start. This is necessary since reads will align 245 | * backwards to a checksum chunk boundary. 246 | */ 247 | required uint64 chunkOffset = 2; 248 | } 249 | 250 | message BlockOpResponseProto { 251 | required Status status = 1; 252 | 253 | optional string firstBadLink = 2; 254 | optional OpBlockChecksumResponseProto checksumResponse = 3; 255 | optional ReadOpChecksumInfoProto readOpChecksumInfo = 4; 256 | 257 | /** explanatory text which may be useful to log on the client side */ 258 | optional string message = 5; 259 | 260 | /** If the server chooses to agree to the request of a client for 261 | * short-circuit access, it will send a response message with the relevant 262 | * file descriptors attached. 263 | * 264 | * In the body of the message, this version number will be set to the 265 | * specific version number of the block data that the client is about to 266 | * read. 267 | */ 268 | optional uint32 shortCircuitAccessVersion = 6; 269 | } 270 | 271 | /** 272 | * Message sent from the client to the DN after reading the entire 273 | * read request. 274 | */ 275 | message ClientReadStatusProto { 276 | required Status status = 1; 277 | } 278 | 279 | message DNTransferAckProto { 280 | required Status status = 1; 281 | } 282 | 283 | message OpBlockChecksumResponseProto { 284 | required uint32 bytesPerCrc = 1; 285 | required uint64 crcPerBlock = 2; 286 | required bytes md5 = 3; 287 | optional ChecksumTypeProto crcType = 4; 288 | } 289 | -------------------------------------------------------------------------------- /protocolpb/proto/hdfs/encryption.proto: -------------------------------------------------------------------------------- 1 | /** 2 | * Licensed to the Apache Software Foundation (ASF) under one 3 | * or more contributor license agreements. See the NOTICE file 4 | * distributed with this work for additional information 5 | * regarding copyright ownership. The ASF licenses this file 6 | * to you under the Apache License, Version 2.0 (the 7 | * "License"); you may not use this file except in compliance 8 | * with the License. You may obtain a copy of the License at 9 | * 10 | * http://www.apache.org/licenses/LICENSE-2.0 11 | * 12 | * Unless required by applicable law or agreed to in writing, software 13 | * distributed under the License is distributed on an "AS IS" BASIS, 14 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 15 | * See the License for the specific language governing permissions and 16 | * limitations under the License. 17 | */ 18 | 19 | /** 20 | * These .proto interfaces are private and stable. 21 | * Please see http://wiki.apache.org/hadoop/Compatibility 22 | * for what changes are allowed for a *stable* .proto interface. 23 | */ 24 | 25 | // This file contains protocol buffers that are used throughout HDFS -- i.e. 26 | // by the client, server, and data transfer protocols. 
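A note on the datatransfer.proto listing above: on the wire each operation is framed as a 2-byte protocol version, a 1-byte opcode, and then the varint-length-delimited request proto (see org.apache.hadoop.hdfs.protocol.datatransfer.Sender). The sketch below frames an OpReadBlockProto using crates this workspace already depends on (bytes, byteorder, protobuf); the version and opcode constants come from the Hadoop 2.x sources rather than this repository, and the write_length_delimited_to_bytes helper is assumed to exist in the protobuf 1.4 crate, so treat this as an illustrative sketch, not project code.

use bytes::{BufMut, BytesMut};
use byteorder::BigEndian;
use protobuf::Message;
use protocolpb::proto::hdfs::datatransfer::OpReadBlockProto;

// Assumed wire constants (Hadoop 2.x DataTransferProtocol.DATA_TRANSFER_VERSION and Op.READ_BLOCK):
const DATA_TRANSFER_VERSION: u16 = 28;
const OP_READ_BLOCK: u8 = 81;

fn frame_read_block(req: &OpReadBlockProto, dst: &mut BytesMut) -> protobuf::ProtobufResult<()> {
    let body = req.write_length_delimited_to_bytes()?; // varint length prefix + serialized proto
    dst.reserve(3 + body.len());
    dst.put_u16::<BigEndian>(DATA_TRANSFER_VERSION);   // protocol version
    dst.put_u8(OP_READ_BLOCK);                         // opcode
    dst.extend_from_slice(&body);
    Ok(())
}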
27 | 28 | 29 | option java_package = "org.apache.hadoop.hdfs.protocol.proto"; 30 | option java_outer_classname = "EncryptionZonesProtos"; 31 | option java_generate_equals_and_hash = true; 32 | package hadoop.hdfs; 33 | 34 | import "hdfs.proto"; 35 | 36 | message CreateEncryptionZoneRequestProto { 37 | required string src = 1; 38 | optional string keyName = 2; 39 | } 40 | 41 | message CreateEncryptionZoneResponseProto { 42 | } 43 | 44 | message ListEncryptionZonesRequestProto { 45 | required int64 id = 1; 46 | } 47 | 48 | message EncryptionZoneProto { 49 | required int64 id = 1; 50 | required string path = 2; 51 | required CipherSuiteProto suite = 3; 52 | required CryptoProtocolVersionProto cryptoProtocolVersion = 4; 53 | required string keyName = 5; 54 | } 55 | 56 | message ListEncryptionZonesResponseProto { 57 | repeated EncryptionZoneProto zones = 1; 58 | required bool hasMore = 2; 59 | } 60 | 61 | message GetEZForPathRequestProto { 62 | required string src = 1; 63 | } 64 | 65 | message GetEZForPathResponseProto { 66 | optional EncryptionZoneProto zone = 1; 67 | } 68 | -------------------------------------------------------------------------------- /protocolpb/proto/hdfs/fsimage.proto: -------------------------------------------------------------------------------- 1 | /** 2 | * Licensed to the Apache Software Foundation (ASF) under one 3 | * or more contributor license agreements. See the NOTICE file 4 | * distributed with this work for additional information 5 | * regarding copyright ownership. The ASF licenses this file 6 | * to you under the Apache License, Version 2.0 (the 7 | * "License"); you may not use this file except in compliance 8 | * with the License. You may obtain a copy of the License at 9 | * 10 | * http://www.apache.org/licenses/LICENSE-2.0 11 | * 12 | * Unless required by applicable law or agreed to in writing, software 13 | * distributed under the License is distributed on an "AS IS" BASIS, 14 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 15 | * See the License for the specific language governing permissions and 16 | * limitations under the License. 17 | */ 18 | 19 | option java_package = "org.apache.hadoop.hdfs.server.namenode"; 20 | option java_outer_classname = "FsImageProto"; 21 | 22 | package hadoop.hdfs.fsimage; 23 | 24 | import "hdfs.proto"; 25 | import "acl.proto"; 26 | import "xattr.proto"; 27 | 28 | /** 29 | * This file defines the on-disk layout of the file system image. The 30 | * layout is defined by the following EBNF grammar, in which angle 31 | * brackets mark protobuf definitions. (e.g., ) 32 | * 33 | * FILE := MAGIC SECTION* FileSummaryLength 34 | * MAGIC := 'HDFSIMG1' 35 | * SECTION := | ... 36 | * FileSummaryLength := 4 byte int 37 | * 38 | * Some notes: 39 | * 40 | * The codec field in FileSummary describes the compression codec used 41 | * for all sections. The fileheader is always uncompressed. 42 | * 43 | * All protobuf messages are serialized in delimited form, which means 44 | * that there always will be an integer indicates the size of the 45 | * protobuf message. 46 | * 47 | */ 48 | 49 | message FileSummary { 50 | // The version of the above EBNF grammars. 51 | required uint32 ondiskVersion = 1; 52 | // layoutVersion describes which features are available in the 53 | // FSImage. 
54 | required uint32 layoutVersion = 2; 55 | optional string codec = 3; 56 | // index for each section 57 | message Section { 58 | optional string name = 1; 59 | optional uint64 length = 2; 60 | optional uint64 offset = 3; 61 | } 62 | repeated Section sections = 4; 63 | } 64 | 65 | /** 66 | * Name: NS_INFO 67 | */ 68 | message NameSystemSection { 69 | optional uint32 namespaceId = 1; 70 | optional uint64 genstampV1 = 2; 71 | optional uint64 genstampV2 = 3; 72 | optional uint64 genstampV1Limit = 4; 73 | optional uint64 lastAllocatedBlockId = 5; 74 | optional uint64 transactionId = 6; 75 | optional uint64 rollingUpgradeStartTime = 7; 76 | } 77 | 78 | /** 79 | * Permission is serialized as a 64-bit long. [0:24):[24:48):[48:64) (in Big Endian). 80 | * The first and the second parts are the string ids of the user and 81 | * group name, and the last 16 bits are the permission bits. 82 | * 83 | * Name: INODE 84 | */ 85 | message INodeSection { 86 | /** 87 | * under-construction feature for INodeFile 88 | */ 89 | message FileUnderConstructionFeature { 90 | optional string clientName = 1; 91 | optional string clientMachine = 2; 92 | } 93 | 94 | message AclFeatureProto { 95 | /** 96 | * An ACL entry is represented by a 32-bit integer in Big Endian 97 | * format. The bits can be divided into five segments: 98 | * [0:2) || [2:26) || [26:27) || [27:29) || [29:32) 99 | * 100 | * [0:2) -- reserved for future uses. 101 | * [2:26) -- the name of the entry, which is an ID that points to a 102 | * string in the StringTableSection. 103 | * [26:27) -- the scope of the entry (AclEntryScopeProto) 104 | * [27:29) -- the type of the entry (AclEntryTypeProto) 105 | * [29:32) -- the permission of the entry (FsActionProto) 106 | * 107 | */ 108 | repeated fixed32 entries = 2 [packed = true]; 109 | } 110 | 111 | message XAttrCompactProto { 112 | /** 113 | * 114 | * [0:2) -- the namespace of XAttr (XAttrNamespaceProto) 115 | * [2:26) -- the name of the entry, which is an ID that points to a 116 | * string in the StringTableSection. 117 | * [26:27) -- namespace extension. Originally there were only 4 namespaces 118 | * so only 2 bits were needed. At that time, this bit was reserved. When a 119 | * 5th namespace was created (raw) this bit became used as a 3rd namespace 120 | * bit. 121 | * [27:32) -- reserved for future uses.
122 | */ 123 | required fixed32 name = 1; 124 | optional bytes value = 2; 125 | } 126 | 127 | message XAttrFeatureProto { 128 | repeated XAttrCompactProto xAttrs = 1; 129 | } 130 | 131 | message INodeFile { 132 | optional uint32 replication = 1; 133 | optional uint64 modificationTime = 2; 134 | optional uint64 accessTime = 3; 135 | optional uint64 preferredBlockSize = 4; 136 | optional fixed64 permission = 5; 137 | repeated BlockProto blocks = 6; 138 | optional FileUnderConstructionFeature fileUC = 7; 139 | optional AclFeatureProto acl = 8; 140 | optional XAttrFeatureProto xAttrs = 9; 141 | optional uint32 storagePolicyID = 10; 142 | } 143 | 144 | message INodeDirectory { 145 | optional uint64 modificationTime = 1; 146 | // namespace quota 147 | optional uint64 nsQuota = 2; 148 | // diskspace quota 149 | optional uint64 dsQuota = 3; 150 | optional fixed64 permission = 4; 151 | optional AclFeatureProto acl = 5; 152 | optional XAttrFeatureProto xAttrs = 6; 153 | } 154 | 155 | message INodeSymlink { 156 | optional fixed64 permission = 1; 157 | optional bytes target = 2; 158 | optional uint64 modificationTime = 3; 159 | optional uint64 accessTime = 4; 160 | } 161 | 162 | message INode { 163 | enum Type { 164 | FILE = 1; 165 | DIRECTORY = 2; 166 | SYMLINK = 3; 167 | }; 168 | required Type type = 1; 169 | required uint64 id = 2; 170 | optional bytes name = 3; 171 | 172 | optional INodeFile file = 4; 173 | optional INodeDirectory directory = 5; 174 | optional INodeSymlink symlink = 6; 175 | } 176 | 177 | optional uint64 lastInodeId = 1; 178 | optional uint64 numInodes = 2; 179 | // repeated INodes.. 180 | } 181 | 182 | /** 183 | * This section records information about under-construction files for 184 | * reconstructing the lease map. 185 | * NAME: FILES_UNDERCONSTRUCTION 186 | */ 187 | message FilesUnderConstructionSection { 188 | message FileUnderConstructionEntry { 189 | optional uint64 inodeId = 1; 190 | optional string fullPath = 2; 191 | } 192 | // repeated FileUnderConstructionEntry... 193 | } 194 | 195 | /** 196 | * This section records the children of each directory 197 | * NAME: INODE_DIR 198 | */ 199 | message INodeDirectorySection { 200 | /** 201 | * A single DirEntry needs to fit in the default PB max message size of 202 | * 64MB. Please be careful when adding more fields to a DirEntry! 203 | */ 204 | message DirEntry { 205 | optional uint64 parent = 1; 206 | // children that are not reference nodes 207 | repeated uint64 children = 2 [packed = true]; 208 | // children that are reference nodes, each element is a reference node id 209 | repeated uint32 refChildren = 3 [packed = true]; 210 | } 211 | // repeated DirEntry, ended at the boundary of the section. 212 | } 213 | 214 | message INodeReferenceSection { 215 | message INodeReference { 216 | // id of the referred inode 217 | optional uint64 referredId = 1; 218 | // local name recorded in WithName 219 | optional bytes name = 2; 220 | // recorded in DstReference 221 | optional uint32 dstSnapshotId = 3; 222 | // recorded in WithName 223 | optional uint32 lastSnapshotId = 4; 224 | } 225 | // repeated INodeReference...
226 | } 227 | 228 | /** 229 | * This section records information about snapshots 230 | * NAME: SNAPSHOT 231 | */ 232 | message SnapshotSection { 233 | message Snapshot { 234 | optional uint32 snapshotId = 1; 235 | // Snapshot root 236 | optional INodeSection.INode root = 2; 237 | } 238 | 239 | optional uint32 snapshotCounter = 1; 240 | repeated uint64 snapshottableDir = 2 [packed = true]; 241 | // total number of snapshots 242 | optional uint32 numSnapshots = 3; 243 | // repeated Snapshot... 244 | } 245 | 246 | /** 247 | * This section records information about snapshot diffs 248 | * NAME: SNAPSHOT_DIFF 249 | */ 250 | message SnapshotDiffSection { 251 | message CreatedListEntry { 252 | optional bytes name = 1; 253 | } 254 | 255 | message DirectoryDiff { 256 | optional uint32 snapshotId = 1; 257 | optional uint32 childrenSize = 2; 258 | optional bool isSnapshotRoot = 3; 259 | optional bytes name = 4; 260 | optional INodeSection.INodeDirectory snapshotCopy = 5; 261 | optional uint32 createdListSize = 6; 262 | repeated uint64 deletedINode = 7 [packed = true]; // id of deleted inodes 263 | // id of reference nodes in the deleted list 264 | repeated uint32 deletedINodeRef = 8 [packed = true]; 265 | // repeated CreatedListEntry (size is specified by createdListSize) 266 | } 267 | 268 | message FileDiff { 269 | optional uint32 snapshotId = 1; 270 | optional uint64 fileSize = 2; 271 | optional bytes name = 3; 272 | optional INodeSection.INodeFile snapshotCopy = 4; 273 | } 274 | 275 | message DiffEntry { 276 | enum Type { 277 | FILEDIFF = 1; 278 | DIRECTORYDIFF = 2; 279 | } 280 | required Type type = 1; 281 | optional uint64 inodeId = 2; 282 | optional uint32 numOfDiff = 3; 283 | 284 | // repeated DirectoryDiff or FileDiff 285 | } 286 | 287 | // repeated DiffEntry 288 | } 289 | 290 | /** 291 | * This section maps strings to ids 292 | * NAME: STRING_TABLE 293 | */ 294 | message StringTableSection { 295 | message Entry { 296 | optional uint32 id = 1; 297 | optional string str = 2; 298 | } 299 | optional uint32 numEntry = 1; 300 | // repeated Entry 301 | } 302 | 303 | message SecretManagerSection { 304 | message DelegationKey { 305 | optional uint32 id = 1; 306 | optional uint64 expiryDate = 2; 307 | optional bytes key = 3; 308 | } 309 | message PersistToken { 310 | optional uint32 version = 1; 311 | optional string owner = 2; 312 | optional string renewer = 3; 313 | optional string realUser = 4; 314 | optional uint64 issueDate = 5; 315 | optional uint64 maxDate = 6; 316 | optional uint32 sequenceNumber = 7; 317 | optional uint32 masterKeyId = 8; 318 | optional uint64 expiryDate = 9; 319 | } 320 | optional uint32 currentId = 1; 321 | optional uint32 tokenSequenceNumber = 2; 322 | optional uint32 numKeys = 3; 323 | optional uint32 numTokens = 4; 324 | // repeated DelegationKey keys 325 | // repeated PersistToken tokens 326 | } 327 | 328 | message CacheManagerSection { 329 | required uint64 nextDirectiveId = 1; 330 | required uint32 numPools = 2; 331 | required uint32 numDirectives = 3; 332 | // repeated CachePoolInfoProto pools 333 | // repeated CacheDirectiveInfoProto directives 334 | } 335 | -------------------------------------------------------------------------------- /protocolpb/proto/hdfs/inotify.proto: -------------------------------------------------------------------------------- 1 | /** 2 | * Licensed to the Apache Software Foundation (ASF) under one 3 | * or more contributor license agreements.
See the NOTICE file 4 | * distributed with this work for additional information 5 | * regarding copyright ownership. The ASF licenses this file 6 | * to you under the Apache License, Version 2.0 (the 7 | * "License"); you may not use this file except in compliance 8 | * with the License. You may obtain a copy of the License at 9 | * 10 | * http://www.apache.org/licenses/LICENSE-2.0 11 | * 12 | * Unless required by applicable law or agreed to in writing, software 13 | * distributed under the License is distributed on an "AS IS" BASIS, 14 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 15 | * See the License for the specific language governing permissions and 16 | * limitations under the License. 17 | */ 18 | 19 | /** 20 | * These .proto interfaces are private and stable. 21 | * Please see http://wiki.apache.org/hadoop/Compatibility 22 | * for what changes are allowed for a *stable* .proto interface. 23 | */ 24 | 25 | // This file contains protocol buffers used to communicate edits to clients 26 | // as part of the inotify system. 27 | 28 | option java_package = "org.apache.hadoop.hdfs.protocol.proto"; 29 | option java_outer_classname = "InotifyProtos"; 30 | option java_generate_equals_and_hash = true; 31 | package hadoop.hdfs; 32 | 33 | import "acl.proto"; 34 | import "xattr.proto"; 35 | import "hdfs.proto"; 36 | 37 | enum EventType { 38 | EVENT_CREATE = 0x0; 39 | EVENT_CLOSE = 0x1; 40 | EVENT_APPEND = 0x2; 41 | EVENT_RENAME = 0x3; 42 | EVENT_METADATA = 0x4; 43 | EVENT_UNLINK = 0x5; 44 | } 45 | 46 | message EventProto { 47 | required EventType type = 1; 48 | required bytes contents = 2; 49 | } 50 | 51 | enum INodeType { 52 | I_TYPE_FILE = 0x0; 53 | I_TYPE_DIRECTORY = 0x1; 54 | I_TYPE_SYMLINK = 0x2; 55 | } 56 | 57 | enum MetadataUpdateType { 58 | META_TYPE_TIMES = 0x0; 59 | META_TYPE_REPLICATION = 0x1; 60 | META_TYPE_OWNER = 0x2; 61 | META_TYPE_PERMS = 0x3; 62 | META_TYPE_ACLS = 0x4; 63 | META_TYPE_XATTRS = 0x5; 64 | } 65 | 66 | message CreateEventProto { 67 | required INodeType type = 1; 68 | required string path = 2; 69 | required int64 ctime = 3; 70 | required string ownerName = 4; 71 | required string groupName = 5; 72 | required FsPermissionProto perms = 6; 73 | optional int32 replication = 7; 74 | optional string symlinkTarget = 8; 75 | optional bool overwrite = 9; 76 | } 77 | 78 | message CloseEventProto { 79 | required string path = 1; 80 | required int64 fileSize = 2; 81 | required int64 timestamp = 3; 82 | } 83 | 84 | message AppendEventProto { 85 | required string path = 1; 86 | } 87 | 88 | message RenameEventProto { 89 | required string srcPath = 1; 90 | required string destPath = 2; 91 | required int64 timestamp = 3; 92 | } 93 | 94 | message MetadataUpdateEventProto { 95 | required string path = 1; 96 | required MetadataUpdateType type = 2; 97 | optional int64 mtime = 3; 98 | optional int64 atime = 4; 99 | optional int32 replication = 5; 100 | optional string ownerName = 6; 101 | optional string groupName = 7; 102 | optional FsPermissionProto perms = 8; 103 | repeated AclEntryProto acls = 9; 104 | repeated XAttrProto xAttrs = 10; 105 | optional bool xAttrsRemoved = 11; 106 | } 107 | 108 | message UnlinkEventProto { 109 | required string path = 1; 110 | required int64 timestamp = 2; 111 | } 112 | 113 | message EventsListProto { 114 | repeated EventProto events = 1; 115 | required int64 firstTxid = 2; 116 | required int64 lastTxid = 3; 117 | required int64 syncTxid = 4; 118 | } 
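A note on the inotify messages above: EventProto is a tagged union, where `type` selects which concrete event proto is serialized inside the opaque `contents` bytes, so a reader parses in two steps. Below is a minimal sketch using the rust-protobuf API this workspace generates code for; the `get_field_type` accessor name assumes rust-protobuf's usual renaming of the reserved word `type`, so verify it against the generated inotify.rs.

use protobuf::parse_from_bytes;
use protocolpb::proto::hdfs::inotify::{CreateEventProto, EventProto, EventType, UnlinkEventProto};

fn describe_event(ev: &EventProto) -> protobuf::ProtobufResult<String> {
    match ev.get_field_type() {
        EventType::EVENT_CREATE => {
            // second parse: contents holds a serialized CreateEventProto
            let e: CreateEventProto = parse_from_bytes(ev.get_contents())?;
            Ok(format!("create {}", e.get_path()))
        }
        EventType::EVENT_UNLINK => {
            let e: UnlinkEventProto = parse_from_bytes(ev.get_contents())?;
            Ok(format!("unlink {}", e.get_path()))
        }
        other => Ok(format!("{:?}", other)), // remaining variants elided in this sketch
    }
}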
-------------------------------------------------------------------------------- /protocolpb/proto/hdfs/xattr.proto: -------------------------------------------------------------------------------- 1 | /** 2 | * Licensed to the Apache Software Foundation (ASF) under one 3 | * or more contributor license agreements. See the NOTICE file 4 | * distributed with this work for additional information 5 | * regarding copyright ownership. The ASF licenses this file 6 | * to you under the Apache License, Version 2.0 (the 7 | * "License"); you may not use this file except in compliance 8 | * with the License. You may obtain a copy of the License at 9 | * 10 | * http://www.apache.org/licenses/LICENSE-2.0 11 | * 12 | * Unless required by applicable law or agreed to in writing, software 13 | * distributed under the License is distributed on an "AS IS" BASIS, 14 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 15 | * See the License for the specific language governing permissions and 16 | * limitations under the License. 17 | */ 18 | 19 | option java_package = "org.apache.hadoop.hdfs.protocol.proto"; 20 | option java_outer_classname = "XAttrProtos"; 21 | option java_generate_equals_and_hash = true; 22 | package hadoop.hdfs; 23 | 24 | message XAttrProto { 25 | enum XAttrNamespaceProto { 26 | USER = 0; 27 | TRUSTED = 1; 28 | SECURITY = 2; 29 | SYSTEM = 3; 30 | RAW = 4; 31 | } 32 | 33 | required XAttrNamespaceProto namespace = 1; 34 | required string name = 2; 35 | optional bytes value = 3; 36 | } 37 | 38 | message XAttrEditLogProto { 39 | optional string src = 1; 40 | repeated XAttrProto xAttrs = 2; 41 | } 42 | 43 | enum XAttrSetFlagProto { 44 | XATTR_CREATE = 0x01; 45 | XATTR_REPLACE = 0x02; 46 | } 47 | 48 | message SetXAttrRequestProto { 49 | required string src = 1; 50 | optional XAttrProto xAttr = 2; 51 | optional uint32 flag = 3; //bits set using XAttrSetFlagProto 52 | } 53 | 54 | message SetXAttrResponseProto { 55 | } 56 | 57 | message GetXAttrsRequestProto { 58 | required string src = 1; 59 | repeated XAttrProto xAttrs = 2; 60 | } 61 | 62 | message GetXAttrsResponseProto { 63 | repeated XAttrProto xAttrs = 1; 64 | } 65 | 66 | message ListXAttrsRequestProto { 67 | required string src = 1; 68 | } 69 | 70 | message ListXAttrsResponseProto { 71 | repeated XAttrProto xAttrs = 1; 72 | } 73 | 74 | message RemoveXAttrRequestProto { 75 | required string src = 1; 76 | optional XAttrProto xAttr = 2; 77 | } 78 | 79 | message RemoveXAttrResponseProto { 80 | } 81 | -------------------------------------------------------------------------------- /protocolpb/src/lib.rs: -------------------------------------------------------------------------------- 1 | extern crate protobuf; 2 | //extern crate grpc; 3 | //extern crate tls_api; 4 | 5 | pub mod proto; 6 | 7 | /* 8 | #[cfg(test)] 9 | mod tests { 10 | #[test] 11 | fn r0() { 12 | use proto::ClientNamenodeProtocol_grpc::ClientNamenodeProtocol; 13 | use grpc; 14 | use proto; 15 | 16 | let client = proto::ClientNamenodeProtocol_grpc::ClientNamenodeProtocolClient::new_plain("localhost", 8020, Default::default()).unwrap(); 17 | 18 | //9964 2018-01-07 00:23 /tmp/cm_api.py 19 | let mut req/*: ClientNamenodeProtocol::GetBlockLocationsRequestProto*/ = 20 | proto::ClientNamenodeProtocol::GetBlockLocationsRequestProto::new(); 21 | req.set_src("/tmp/cm_api.py".to_owned()); 22 | req.set_offset(0); 23 | req.set_length(9964); 24 | 25 | let resp = client.get_block_locations(grpc::RequestOptions::new(), req); 26 | 27 | println!("RESULT: {:?}", resp.wait()); 
28 | } 29 | } 30 | */ -------------------------------------------------------------------------------- /protocolpb/src/proto/hadoop/RefreshAuthorizationPolicyProtocol.rs: -------------------------------------------------------------------------------- 1 | // This file is generated. Do not edit 2 | // @generated 3 | 4 | // https://github.com/Manishearth/rust-clippy/issues/702 5 | #![allow(unknown_lints)] 6 | #![allow(clippy)] 7 | 8 | #![cfg_attr(rustfmt, rustfmt_skip)] 9 | 10 | #![allow(box_pointers)] 11 | #![allow(dead_code)] 12 | #![allow(missing_docs)] 13 | #![allow(non_camel_case_types)] 14 | #![allow(non_snake_case)] 15 | #![allow(non_upper_case_globals)] 16 | #![allow(trivial_casts)] 17 | #![allow(unsafe_code)] 18 | #![allow(unused_imports)] 19 | #![allow(unused_results)] 20 | 21 | use protobuf::Message as Message_imported_for_functions; 22 | use protobuf::ProtobufEnum as ProtobufEnum_imported_for_functions; 23 | 24 | #[derive(PartialEq,Clone,Default)] 25 | pub struct RefreshServiceAclRequestProto { 26 | // special fields 27 | unknown_fields: ::protobuf::UnknownFields, 28 | cached_size: ::protobuf::CachedSize, 29 | } 30 | 31 | // see codegen.rs for the explanation why impl Sync explicitly 32 | unsafe impl ::std::marker::Sync for RefreshServiceAclRequestProto {} 33 | 34 | impl RefreshServiceAclRequestProto { 35 | pub fn new() -> RefreshServiceAclRequestProto { 36 | ::std::default::Default::default() 37 | } 38 | 39 | pub fn default_instance() -> &'static RefreshServiceAclRequestProto { 40 | static mut instance: ::protobuf::lazy::Lazy = ::protobuf::lazy::Lazy { 41 | lock: ::protobuf::lazy::ONCE_INIT, 42 | ptr: 0 as *const RefreshServiceAclRequestProto, 43 | }; 44 | unsafe { 45 | instance.get(RefreshServiceAclRequestProto::new) 46 | } 47 | } 48 | } 49 | 50 | impl ::protobuf::Message for RefreshServiceAclRequestProto { 51 | fn is_initialized(&self) -> bool { 52 | true 53 | } 54 | 55 | fn merge_from(&mut self, is: &mut ::protobuf::CodedInputStream) -> ::protobuf::ProtobufResult<()> { 56 | while !is.eof()? 
{ 57 | let (field_number, wire_type) = is.read_tag_unpack()?; 58 | match field_number { 59 | _ => { 60 | ::protobuf::rt::read_unknown_or_skip_group(field_number, wire_type, is, self.mut_unknown_fields())?; 61 | }, 62 | }; 63 | } 64 | ::std::result::Result::Ok(()) 65 | } 66 | 67 | // Compute sizes of nested messages 68 | #[allow(unused_variables)] 69 | fn compute_size(&self) -> u32 { 70 | let mut my_size = 0; 71 | my_size += ::protobuf::rt::unknown_fields_size(self.get_unknown_fields()); 72 | self.cached_size.set(my_size); 73 | my_size 74 | } 75 | 76 | fn write_to_with_cached_sizes(&self, os: &mut ::protobuf::CodedOutputStream) -> ::protobuf::ProtobufResult<()> { 77 | os.write_unknown_fields(self.get_unknown_fields())?; 78 | ::std::result::Result::Ok(()) 79 | } 80 | 81 | fn get_cached_size(&self) -> u32 { 82 | self.cached_size.get() 83 | } 84 | 85 | fn get_unknown_fields(&self) -> &::protobuf::UnknownFields { 86 | &self.unknown_fields 87 | } 88 | 89 | fn mut_unknown_fields(&mut self) -> &mut ::protobuf::UnknownFields { 90 | &mut self.unknown_fields 91 | } 92 | 93 | fn as_any(&self) -> &::std::any::Any { 94 | self as &::std::any::Any 95 | } 96 | fn as_any_mut(&mut self) -> &mut ::std::any::Any { 97 | self as &mut ::std::any::Any 98 | } 99 | fn into_any(self: Box) -> ::std::boxed::Box<::std::any::Any> { 100 | self 101 | } 102 | 103 | fn descriptor(&self) -> &'static ::protobuf::reflect::MessageDescriptor { 104 | ::protobuf::MessageStatic::descriptor_static(None::) 105 | } 106 | } 107 | 108 | impl ::protobuf::MessageStatic for RefreshServiceAclRequestProto { 109 | fn new() -> RefreshServiceAclRequestProto { 110 | RefreshServiceAclRequestProto::new() 111 | } 112 | 113 | fn descriptor_static(_: ::std::option::Option) -> &'static ::protobuf::reflect::MessageDescriptor { 114 | static mut descriptor: ::protobuf::lazy::Lazy<::protobuf::reflect::MessageDescriptor> = ::protobuf::lazy::Lazy { 115 | lock: ::protobuf::lazy::ONCE_INIT, 116 | ptr: 0 as *const ::protobuf::reflect::MessageDescriptor, 117 | }; 118 | unsafe { 119 | descriptor.get(|| { 120 | let fields = ::std::vec::Vec::new(); 121 | ::protobuf::reflect::MessageDescriptor::new::( 122 | "RefreshServiceAclRequestProto", 123 | fields, 124 | file_descriptor_proto() 125 | ) 126 | }) 127 | } 128 | } 129 | } 130 | 131 | impl ::protobuf::Clear for RefreshServiceAclRequestProto { 132 | fn clear(&mut self) { 133 | self.unknown_fields.clear(); 134 | } 135 | } 136 | 137 | impl ::std::fmt::Debug for RefreshServiceAclRequestProto { 138 | fn fmt(&self, f: &mut ::std::fmt::Formatter) -> ::std::fmt::Result { 139 | ::protobuf::text_format::fmt(self, f) 140 | } 141 | } 142 | 143 | impl ::protobuf::reflect::ProtobufValue for RefreshServiceAclRequestProto { 144 | fn as_ref(&self) -> ::protobuf::reflect::ProtobufValueRef { 145 | ::protobuf::reflect::ProtobufValueRef::Message(self) 146 | } 147 | } 148 | 149 | #[derive(PartialEq,Clone,Default)] 150 | pub struct RefreshServiceAclResponseProto { 151 | // special fields 152 | unknown_fields: ::protobuf::UnknownFields, 153 | cached_size: ::protobuf::CachedSize, 154 | } 155 | 156 | // see codegen.rs for the explanation why impl Sync explicitly 157 | unsafe impl ::std::marker::Sync for RefreshServiceAclResponseProto {} 158 | 159 | impl RefreshServiceAclResponseProto { 160 | pub fn new() -> RefreshServiceAclResponseProto { 161 | ::std::default::Default::default() 162 | } 163 | 164 | pub fn default_instance() -> &'static RefreshServiceAclResponseProto { 165 | static mut instance: ::protobuf::lazy::Lazy = 
::protobuf::lazy::Lazy { 166 | lock: ::protobuf::lazy::ONCE_INIT, 167 | ptr: 0 as *const RefreshServiceAclResponseProto, 168 | }; 169 | unsafe { 170 | instance.get(RefreshServiceAclResponseProto::new) 171 | } 172 | } 173 | } 174 | 175 | impl ::protobuf::Message for RefreshServiceAclResponseProto { 176 | fn is_initialized(&self) -> bool { 177 | true 178 | } 179 | 180 | fn merge_from(&mut self, is: &mut ::protobuf::CodedInputStream) -> ::protobuf::ProtobufResult<()> { 181 | while !is.eof()? { 182 | let (field_number, wire_type) = is.read_tag_unpack()?; 183 | match field_number { 184 | _ => { 185 | ::protobuf::rt::read_unknown_or_skip_group(field_number, wire_type, is, self.mut_unknown_fields())?; 186 | }, 187 | }; 188 | } 189 | ::std::result::Result::Ok(()) 190 | } 191 | 192 | // Compute sizes of nested messages 193 | #[allow(unused_variables)] 194 | fn compute_size(&self) -> u32 { 195 | let mut my_size = 0; 196 | my_size += ::protobuf::rt::unknown_fields_size(self.get_unknown_fields()); 197 | self.cached_size.set(my_size); 198 | my_size 199 | } 200 | 201 | fn write_to_with_cached_sizes(&self, os: &mut ::protobuf::CodedOutputStream) -> ::protobuf::ProtobufResult<()> { 202 | os.write_unknown_fields(self.get_unknown_fields())?; 203 | ::std::result::Result::Ok(()) 204 | } 205 | 206 | fn get_cached_size(&self) -> u32 { 207 | self.cached_size.get() 208 | } 209 | 210 | fn get_unknown_fields(&self) -> &::protobuf::UnknownFields { 211 | &self.unknown_fields 212 | } 213 | 214 | fn mut_unknown_fields(&mut self) -> &mut ::protobuf::UnknownFields { 215 | &mut self.unknown_fields 216 | } 217 | 218 | fn as_any(&self) -> &::std::any::Any { 219 | self as &::std::any::Any 220 | } 221 | fn as_any_mut(&mut self) -> &mut ::std::any::Any { 222 | self as &mut ::std::any::Any 223 | } 224 | fn into_any(self: Box) -> ::std::boxed::Box<::std::any::Any> { 225 | self 226 | } 227 | 228 | fn descriptor(&self) -> &'static ::protobuf::reflect::MessageDescriptor { 229 | ::protobuf::MessageStatic::descriptor_static(None::) 230 | } 231 | } 232 | 233 | impl ::protobuf::MessageStatic for RefreshServiceAclResponseProto { 234 | fn new() -> RefreshServiceAclResponseProto { 235 | RefreshServiceAclResponseProto::new() 236 | } 237 | 238 | fn descriptor_static(_: ::std::option::Option) -> &'static ::protobuf::reflect::MessageDescriptor { 239 | static mut descriptor: ::protobuf::lazy::Lazy<::protobuf::reflect::MessageDescriptor> = ::protobuf::lazy::Lazy { 240 | lock: ::protobuf::lazy::ONCE_INIT, 241 | ptr: 0 as *const ::protobuf::reflect::MessageDescriptor, 242 | }; 243 | unsafe { 244 | descriptor.get(|| { 245 | let fields = ::std::vec::Vec::new(); 246 | ::protobuf::reflect::MessageDescriptor::new::( 247 | "RefreshServiceAclResponseProto", 248 | fields, 249 | file_descriptor_proto() 250 | ) 251 | }) 252 | } 253 | } 254 | } 255 | 256 | impl ::protobuf::Clear for RefreshServiceAclResponseProto { 257 | fn clear(&mut self) { 258 | self.unknown_fields.clear(); 259 | } 260 | } 261 | 262 | impl ::std::fmt::Debug for RefreshServiceAclResponseProto { 263 | fn fmt(&self, f: &mut ::std::fmt::Formatter) -> ::std::fmt::Result { 264 | ::protobuf::text_format::fmt(self, f) 265 | } 266 | } 267 | 268 | impl ::protobuf::reflect::ProtobufValue for RefreshServiceAclResponseProto { 269 | fn as_ref(&self) -> ::protobuf::reflect::ProtobufValueRef { 270 | ::protobuf::reflect::ProtobufValueRef::Message(self) 271 | } 272 | } 273 | 274 | static file_descriptor_proto_data: &'static [u8] = b"\ 275 | 
\n(RefreshAuthorizationPolicyProtocol.proto\x12\rhadoop.common\"\x1f\n\ 276 | \x1dRefreshServiceAclRequestProto\"\x20\n\x1eRefreshServiceAclResponsePr\ 277 | oto2\x9d\x01\n)RefreshAuthorizationPolicyProtocolService\x12p\n\x11refre\ 278 | shServiceAcl\x12,.hadoop.common.RefreshServiceAclRequestProto\x1a-.hadoo\ 279 | p.common.RefreshServiceAclResponseProtoBR\n\x20org.apache.hadoop.securit\ 280 | y.protoB(RefreshAuthorizationPolicyProtocolProtos\xa0\x01\x01\x88\x01\ 281 | \x01\ 282 | "; 283 | 284 | static mut file_descriptor_proto_lazy: ::protobuf::lazy::Lazy<::protobuf::descriptor::FileDescriptorProto> = ::protobuf::lazy::Lazy { 285 | lock: ::protobuf::lazy::ONCE_INIT, 286 | ptr: 0 as *const ::protobuf::descriptor::FileDescriptorProto, 287 | }; 288 | 289 | fn parse_descriptor_proto() -> ::protobuf::descriptor::FileDescriptorProto { 290 | ::protobuf::parse_from_bytes(file_descriptor_proto_data).unwrap() 291 | } 292 | 293 | pub fn file_descriptor_proto() -> &'static ::protobuf::descriptor::FileDescriptorProto { 294 | unsafe { 295 | file_descriptor_proto_lazy.get(|| { 296 | parse_descriptor_proto() 297 | }) 298 | } 299 | } 300 | -------------------------------------------------------------------------------- /protocolpb/src/proto/hadoop/RefreshCallQueueProtocol.rs: -------------------------------------------------------------------------------- 1 | // This file is generated. Do not edit 2 | // @generated 3 | 4 | // https://github.com/Manishearth/rust-clippy/issues/702 5 | #![allow(unknown_lints)] 6 | #![allow(clippy)] 7 | 8 | #![cfg_attr(rustfmt, rustfmt_skip)] 9 | 10 | #![allow(box_pointers)] 11 | #![allow(dead_code)] 12 | #![allow(missing_docs)] 13 | #![allow(non_camel_case_types)] 14 | #![allow(non_snake_case)] 15 | #![allow(non_upper_case_globals)] 16 | #![allow(trivial_casts)] 17 | #![allow(unsafe_code)] 18 | #![allow(unused_imports)] 19 | #![allow(unused_results)] 20 | 21 | use protobuf::Message as Message_imported_for_functions; 22 | use protobuf::ProtobufEnum as ProtobufEnum_imported_for_functions; 23 | 24 | #[derive(PartialEq,Clone,Default)] 25 | pub struct RefreshCallQueueRequestProto { 26 | // special fields 27 | unknown_fields: ::protobuf::UnknownFields, 28 | cached_size: ::protobuf::CachedSize, 29 | } 30 | 31 | // see codegen.rs for the explanation why impl Sync explicitly 32 | unsafe impl ::std::marker::Sync for RefreshCallQueueRequestProto {} 33 | 34 | impl RefreshCallQueueRequestProto { 35 | pub fn new() -> RefreshCallQueueRequestProto { 36 | ::std::default::Default::default() 37 | } 38 | 39 | pub fn default_instance() -> &'static RefreshCallQueueRequestProto { 40 | static mut instance: ::protobuf::lazy::Lazy = ::protobuf::lazy::Lazy { 41 | lock: ::protobuf::lazy::ONCE_INIT, 42 | ptr: 0 as *const RefreshCallQueueRequestProto, 43 | }; 44 | unsafe { 45 | instance.get(RefreshCallQueueRequestProto::new) 46 | } 47 | } 48 | } 49 | 50 | impl ::protobuf::Message for RefreshCallQueueRequestProto { 51 | fn is_initialized(&self) -> bool { 52 | true 53 | } 54 | 55 | fn merge_from(&mut self, is: &mut ::protobuf::CodedInputStream) -> ::protobuf::ProtobufResult<()> { 56 | while !is.eof()? 
{ 57 | let (field_number, wire_type) = is.read_tag_unpack()?; 58 | match field_number { 59 | _ => { 60 | ::protobuf::rt::read_unknown_or_skip_group(field_number, wire_type, is, self.mut_unknown_fields())?; 61 | }, 62 | }; 63 | } 64 | ::std::result::Result::Ok(()) 65 | } 66 | 67 | // Compute sizes of nested messages 68 | #[allow(unused_variables)] 69 | fn compute_size(&self) -> u32 { 70 | let mut my_size = 0; 71 | my_size += ::protobuf::rt::unknown_fields_size(self.get_unknown_fields()); 72 | self.cached_size.set(my_size); 73 | my_size 74 | } 75 | 76 | fn write_to_with_cached_sizes(&self, os: &mut ::protobuf::CodedOutputStream) -> ::protobuf::ProtobufResult<()> { 77 | os.write_unknown_fields(self.get_unknown_fields())?; 78 | ::std::result::Result::Ok(()) 79 | } 80 | 81 | fn get_cached_size(&self) -> u32 { 82 | self.cached_size.get() 83 | } 84 | 85 | fn get_unknown_fields(&self) -> &::protobuf::UnknownFields { 86 | &self.unknown_fields 87 | } 88 | 89 | fn mut_unknown_fields(&mut self) -> &mut ::protobuf::UnknownFields { 90 | &mut self.unknown_fields 91 | } 92 | 93 | fn as_any(&self) -> &::std::any::Any { 94 | self as &::std::any::Any 95 | } 96 | fn as_any_mut(&mut self) -> &mut ::std::any::Any { 97 | self as &mut ::std::any::Any 98 | } 99 | fn into_any(self: Box) -> ::std::boxed::Box<::std::any::Any> { 100 | self 101 | } 102 | 103 | fn descriptor(&self) -> &'static ::protobuf::reflect::MessageDescriptor { 104 | ::protobuf::MessageStatic::descriptor_static(None::) 105 | } 106 | } 107 | 108 | impl ::protobuf::MessageStatic for RefreshCallQueueRequestProto { 109 | fn new() -> RefreshCallQueueRequestProto { 110 | RefreshCallQueueRequestProto::new() 111 | } 112 | 113 | fn descriptor_static(_: ::std::option::Option) -> &'static ::protobuf::reflect::MessageDescriptor { 114 | static mut descriptor: ::protobuf::lazy::Lazy<::protobuf::reflect::MessageDescriptor> = ::protobuf::lazy::Lazy { 115 | lock: ::protobuf::lazy::ONCE_INIT, 116 | ptr: 0 as *const ::protobuf::reflect::MessageDescriptor, 117 | }; 118 | unsafe { 119 | descriptor.get(|| { 120 | let fields = ::std::vec::Vec::new(); 121 | ::protobuf::reflect::MessageDescriptor::new::( 122 | "RefreshCallQueueRequestProto", 123 | fields, 124 | file_descriptor_proto() 125 | ) 126 | }) 127 | } 128 | } 129 | } 130 | 131 | impl ::protobuf::Clear for RefreshCallQueueRequestProto { 132 | fn clear(&mut self) { 133 | self.unknown_fields.clear(); 134 | } 135 | } 136 | 137 | impl ::std::fmt::Debug for RefreshCallQueueRequestProto { 138 | fn fmt(&self, f: &mut ::std::fmt::Formatter) -> ::std::fmt::Result { 139 | ::protobuf::text_format::fmt(self, f) 140 | } 141 | } 142 | 143 | impl ::protobuf::reflect::ProtobufValue for RefreshCallQueueRequestProto { 144 | fn as_ref(&self) -> ::protobuf::reflect::ProtobufValueRef { 145 | ::protobuf::reflect::ProtobufValueRef::Message(self) 146 | } 147 | } 148 | 149 | #[derive(PartialEq,Clone,Default)] 150 | pub struct RefreshCallQueueResponseProto { 151 | // special fields 152 | unknown_fields: ::protobuf::UnknownFields, 153 | cached_size: ::protobuf::CachedSize, 154 | } 155 | 156 | // see codegen.rs for the explanation why impl Sync explicitly 157 | unsafe impl ::std::marker::Sync for RefreshCallQueueResponseProto {} 158 | 159 | impl RefreshCallQueueResponseProto { 160 | pub fn new() -> RefreshCallQueueResponseProto { 161 | ::std::default::Default::default() 162 | } 163 | 164 | pub fn default_instance() -> &'static RefreshCallQueueResponseProto { 165 | static mut instance: ::protobuf::lazy::Lazy = ::protobuf::lazy::Lazy { 166 | 
lock: ::protobuf::lazy::ONCE_INIT, 167 | ptr: 0 as *const RefreshCallQueueResponseProto, 168 | }; 169 | unsafe { 170 | instance.get(RefreshCallQueueResponseProto::new) 171 | } 172 | } 173 | } 174 | 175 | impl ::protobuf::Message for RefreshCallQueueResponseProto { 176 | fn is_initialized(&self) -> bool { 177 | true 178 | } 179 | 180 | fn merge_from(&mut self, is: &mut ::protobuf::CodedInputStream) -> ::protobuf::ProtobufResult<()> { 181 | while !is.eof()? { 182 | let (field_number, wire_type) = is.read_tag_unpack()?; 183 | match field_number { 184 | _ => { 185 | ::protobuf::rt::read_unknown_or_skip_group(field_number, wire_type, is, self.mut_unknown_fields())?; 186 | }, 187 | }; 188 | } 189 | ::std::result::Result::Ok(()) 190 | } 191 | 192 | // Compute sizes of nested messages 193 | #[allow(unused_variables)] 194 | fn compute_size(&self) -> u32 { 195 | let mut my_size = 0; 196 | my_size += ::protobuf::rt::unknown_fields_size(self.get_unknown_fields()); 197 | self.cached_size.set(my_size); 198 | my_size 199 | } 200 | 201 | fn write_to_with_cached_sizes(&self, os: &mut ::protobuf::CodedOutputStream) -> ::protobuf::ProtobufResult<()> { 202 | os.write_unknown_fields(self.get_unknown_fields())?; 203 | ::std::result::Result::Ok(()) 204 | } 205 | 206 | fn get_cached_size(&self) -> u32 { 207 | self.cached_size.get() 208 | } 209 | 210 | fn get_unknown_fields(&self) -> &::protobuf::UnknownFields { 211 | &self.unknown_fields 212 | } 213 | 214 | fn mut_unknown_fields(&mut self) -> &mut ::protobuf::UnknownFields { 215 | &mut self.unknown_fields 216 | } 217 | 218 | fn as_any(&self) -> &::std::any::Any { 219 | self as &::std::any::Any 220 | } 221 | fn as_any_mut(&mut self) -> &mut ::std::any::Any { 222 | self as &mut ::std::any::Any 223 | } 224 | fn into_any(self: Box) -> ::std::boxed::Box<::std::any::Any> { 225 | self 226 | } 227 | 228 | fn descriptor(&self) -> &'static ::protobuf::reflect::MessageDescriptor { 229 | ::protobuf::MessageStatic::descriptor_static(None::) 230 | } 231 | } 232 | 233 | impl ::protobuf::MessageStatic for RefreshCallQueueResponseProto { 234 | fn new() -> RefreshCallQueueResponseProto { 235 | RefreshCallQueueResponseProto::new() 236 | } 237 | 238 | fn descriptor_static(_: ::std::option::Option) -> &'static ::protobuf::reflect::MessageDescriptor { 239 | static mut descriptor: ::protobuf::lazy::Lazy<::protobuf::reflect::MessageDescriptor> = ::protobuf::lazy::Lazy { 240 | lock: ::protobuf::lazy::ONCE_INIT, 241 | ptr: 0 as *const ::protobuf::reflect::MessageDescriptor, 242 | }; 243 | unsafe { 244 | descriptor.get(|| { 245 | let fields = ::std::vec::Vec::new(); 246 | ::protobuf::reflect::MessageDescriptor::new::( 247 | "RefreshCallQueueResponseProto", 248 | fields, 249 | file_descriptor_proto() 250 | ) 251 | }) 252 | } 253 | } 254 | } 255 | 256 | impl ::protobuf::Clear for RefreshCallQueueResponseProto { 257 | fn clear(&mut self) { 258 | self.unknown_fields.clear(); 259 | } 260 | } 261 | 262 | impl ::std::fmt::Debug for RefreshCallQueueResponseProto { 263 | fn fmt(&self, f: &mut ::std::fmt::Formatter) -> ::std::fmt::Result { 264 | ::protobuf::text_format::fmt(self, f) 265 | } 266 | } 267 | 268 | impl ::protobuf::reflect::ProtobufValue for RefreshCallQueueResponseProto { 269 | fn as_ref(&self) -> ::protobuf::reflect::ProtobufValueRef { 270 | ::protobuf::reflect::ProtobufValueRef::Message(self) 271 | } 272 | } 273 | 274 | static file_descriptor_proto_data: &'static [u8] = b"\ 275 | \n\x1eRefreshCallQueueProtocol.proto\x12\rhadoop.common\"\x1e\n\x1cRefre\ 276 | 
shCallQueueRequestProto\"\x1f\n\x1dRefreshCallQueueResponseProto2\x90\ 277 | \x01\n\x1fRefreshCallQueueProtocolService\x12m\n\x10refreshCallQueue\x12\ 278 | +.hadoop.common.RefreshCallQueueRequestProto\x1a,.hadoop.common.RefreshC\ 279 | allQueueResponseProtoBC\n\x1borg.apache.hadoop.ipc.protoB\x1eRefreshCall\ 280 | QueueProtocolProtos\xa0\x01\x01\x88\x01\x01\ 281 | "; 282 | 283 | static mut file_descriptor_proto_lazy: ::protobuf::lazy::Lazy<::protobuf::descriptor::FileDescriptorProto> = ::protobuf::lazy::Lazy { 284 | lock: ::protobuf::lazy::ONCE_INIT, 285 | ptr: 0 as *const ::protobuf::descriptor::FileDescriptorProto, 286 | }; 287 | 288 | fn parse_descriptor_proto() -> ::protobuf::descriptor::FileDescriptorProto { 289 | ::protobuf::parse_from_bytes(file_descriptor_proto_data).unwrap() 290 | } 291 | 292 | pub fn file_descriptor_proto() -> &'static ::protobuf::descriptor::FileDescriptorProto { 293 | unsafe { 294 | file_descriptor_proto_lazy.get(|| { 295 | parse_descriptor_proto() 296 | }) 297 | } 298 | } 299 | -------------------------------------------------------------------------------- /protocolpb/src/proto/mod.rs: -------------------------------------------------------------------------------- 1 | pub mod hadoop { 2 | pub mod GenericRefreshProtocol; 3 | pub mod GetUserMappingsProtocol; 4 | pub mod HAServiceProtocol; 5 | pub mod IpcConnectionContext; 6 | pub mod ProtobufRpcEngine; 7 | pub mod ProtocolInfo; 8 | pub mod RefreshAuthorizationPolicyProtocol; 9 | pub mod RefreshCallQueueProtocol; 10 | pub mod RefreshUserMappingsProtocol; 11 | pub mod RpcHeader; 12 | pub mod Security; 13 | pub mod TraceAdmin; 14 | pub mod ZKFCProtocol; 15 | } 16 | 17 | 18 | pub mod hdfs { 19 | pub use super::hadoop::Security; 20 | 21 | pub mod acl; 22 | pub mod ClientDatanodeProtocol; 23 | pub mod ClientNamenodeProtocol; 24 | pub mod DatanodeProtocol; 25 | pub mod datatransfer; 26 | pub mod encryption; 27 | pub mod fsimage; 28 | pub mod HAZKInfo; 29 | pub mod hdfs; 30 | pub mod inotify; 31 | pub mod InterDatanodeProtocol; 32 | pub mod JournalProtocol; 33 | pub mod NamenodeProtocol; 34 | pub mod QJournalProtocol; 35 | pub mod xattr; 36 | } 37 | 38 | 39 | -------------------------------------------------------------------------------- /rhdfs-cmd/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "rhdfs-cmd" 3 | version = "0.1.0" 4 | authors = ["Valery Vybornov "] 5 | 6 | [dependencies] 7 | rhdfs = { path = "../rhdfs" } 8 | futures = "0.1" 9 | tokio = "0.1" 10 | log = "0.4" 11 | env_logger = "0.5" 12 | chrono = "0.4" 13 | 14 | [[bin]] 15 | name = "rhdfs" 16 | path = "src/main.rs" -------------------------------------------------------------------------------- /rhdfs-cmd/src/util.rs: -------------------------------------------------------------------------------- 1 | use std::marker::PhantomData; 2 | use futures::prelude::*; 3 | 4 | 5 | pub struct ForEachSink { 6 | f: F, 7 | t_type: PhantomData, 8 | e_type: PhantomData, 9 | } 10 | 11 | impl ForEachSink { 12 | pub fn new(f: F) -> ForEachSink { ForEachSink { f, t_type: PhantomData, e_type: PhantomData } } 13 | } 14 | 15 | impl Sink for ForEachSink where F: FnMut(T) -> Result<(), E> { 16 | type SinkItem = T; 17 | type SinkError = E; 18 | 19 | fn start_send(&mut self, item: T) -> Result, E> { 20 | (self.f)(item).map(|_| AsyncSink::Ready) 21 | } 22 | 23 | fn poll_complete(&mut self) -> Result, E> { 24 | Ok(Async::Ready(())) 25 | } 26 | 27 | fn close(&mut self) -> Result, E> { 28 | Ok(Async::Ready(())) 29 
| } 30 | } 31 | -------------------------------------------------------------------------------- /rhdfs/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "rhdfs" 3 | version = "0.1.0" 4 | authors = ["Valery Vybornov "] 5 | 6 | [dependencies] 7 | futures = "0.1" 8 | tokio-io = "0.1" 9 | tokio-tcp = "0.1" 10 | #tokio-reactor = "0.1" 11 | #tokio-executor = "0.1" 12 | 13 | protobuf = "1.4" 14 | byteorder = "1.2" 15 | bytes = "0.4" 16 | crc = "^1.0.0" 17 | uuid = { version = "0.6", features = ["v4"] } 18 | rand = "0.4" 19 | log = "0.4" 20 | 21 | protocolpb = { path = "../protocolpb" } 22 | 23 | [dev-dependencies] 24 | env_logger = "0.5" -------------------------------------------------------------------------------- /rhdfs/src/cmdx.rs: -------------------------------------------------------------------------------- 1 | 2 | use std::net::SocketAddr; 3 | use std::collections::{VecDeque, HashMap}; 4 | use futures::{Stream, Async, Sink, AsyncSink}; 5 | 6 | use dt::*; 7 | use nn::*; 8 | use *; 9 | 10 | use util::*; 11 | use proto_tools::{ProtoFrontEndSourceQ, UserDeliver}; 12 | 13 | /// Connector abstraction 14 | pub trait Connector { 15 | /// Takes a reference to an address `a` and returns boxed future of the connection `C` 16 | fn connect(&mut self, a: &A) -> BFI; 17 | } 18 | 19 | /// Misc session data 20 | pub struct SessionData { 21 | ///(NN) effective user in IpcConnectionContextProto.UserInformationProto 22 | pub effective_user: String, 23 | ///(NN) force client id (for debugging purposes). Auto-generated if None. 24 | pub forced_client_id: Option> 25 | } 26 | 27 | /// A connector that supports static NAT 28 | pub struct NatConnector { 29 | nat: HashMap, 30 | session_data: SessionData, 31 | connector_id: u64, 32 | connection_n: usize 33 | } 34 | 35 | impl NatConnector { 36 | fn new(session_data: SessionData, init_nat: Vec<(SocketAddr, SocketAddr)>) -> NatConnector { 37 | let connector_id = rand::random(); 38 | let mut nat = HashMap::new(); 39 | for (k, v) in init_nat { nat.insert(k, v); } 40 | NatConnector { nat, session_data, connector_id, connection_n: 0 } 41 | } 42 | fn next_client_name(&mut self) -> String { 43 | //org.apache.hadoop.hdfs.DFSClient.DFSClient: 44 | //this.clientName = "DFSClient_" + dfsClientConf.taskId + "_" + 45 | // DFSUtil.getRandom().nextInt() + "_" + Thread.currentThread().getId(); 46 | 47 | let rv = format!("RDFSClient_{}_{}", self.connector_id, self.connection_n); 48 | self.connection_n += 1; 49 | rv 50 | } 51 | 52 | #[inline] 53 | fn translate<'a>(&'a self, a: &'a SocketAddr) -> &'a SocketAddr { 54 | self.nat.get(a).unwrap_or(a) 55 | } 56 | } 57 | 58 | impl Connector for NatConnector { 59 | fn connect(&mut self, a: &SocketAddr) -> BFI { 60 | match &self.session_data.forced_client_id { 61 | None => 62 | nn::Connection_::connect( 63 | self.translate(a), 64 | self.session_data.effective_user.clone() 65 | ), 66 | Some(cn) => 67 | nn::Connection_::connect_det( 68 | self.translate(a), 69 | cn.clone(), 70 | self.session_data.effective_user.clone() 71 | ) 72 | } 73 | 74 | } 75 | } 76 | 77 | impl Connector for NatConnector { 78 | fn connect(&mut self, a: &SocketAddr) -> BFI { 79 | let cname = self.next_client_name(); 80 | Box::new(dt::Connection_::connect( 81 | self.translate(a), 82 | cname, 83 | )) 84 | } 85 | } 86 | 87 | /// Connection manager state 88 | enum CMS { 89 | None, 90 | Connecting(BFI), 91 | Active(C) 92 | } 93 | 94 | 95 | //TODO add idle timer functionality 96 | ///Connection manager. 
Does connection management for an upper layer together with passing 97 | ///messages up/down 98 | struct CM { 99 | s: CMS, 100 | q: VecDeque, 101 | a: Option 102 | } 103 | 104 | const OUT_Q_SIZE: usize = 16; 105 | 106 | impl CM where 107 | C: Sink + Stream { 108 | 109 | fn new() -> CM { 110 | CM { 111 | s: CMS::None, 112 | q: VecDeque::new(), 113 | a: None 114 | } 115 | } 116 | 117 | fn push_q(&mut self, (req, a): (Q, Option)) -> AsyncSink<(Q, Option)> { 118 | if self.q.len() >= OUT_Q_SIZE { 119 | AsyncSink::NotReady((req, a)) 120 | } else { 121 | if a.is_some() { self.a = a } 122 | self.q.push_back(req); 123 | AsyncSink::Ready 124 | } 125 | } 126 | 127 | fn run(&mut self, c: &mut O) -> Option> where O: Connector { 128 | let (s, q, a) = 129 | (&mut self.s, &mut self.q, &self.a); 130 | 131 | fsm_turn_x(s, |s| match s { 132 | CMS::None => 133 | if !q.is_empty() { //start connecting, turn over 134 | if let Some(a) = a { 135 | SxV::S(CMS::Connecting(c.connect(a))) 136 | } else { 137 | SxV::V(Some(UserDeliver::Err(app_error!(other "Empty peer address")))) 138 | } 139 | } else { 140 | SxV::V(None) 141 | } 142 | CMS::Connecting(cf) => //poll the cf; if ready, turn over 143 | match cf.poll() { 144 | Ok(Async::NotReady) => SxV::V(None), 145 | Ok(Async::Ready(conn)) => SxV::S(CMS::Active(conn)), 146 | Err(e) => SxV::SV(CMS::None, Some(UserDeliver::Err(e.into()))) 147 | } 148 | CMS::Active(conn) => 149 | match q.pop_front() { 150 | Some(req) => { 151 | let w = match conn.start_send(req) { 152 | Ok(AsyncSink::Ready) => 153 | match conn.poll_complete() { 154 | Err(e) => Err(e), //publish error 155 | _ => Ok(()) 156 | } 157 | Ok(AsyncSink::NotReady(req)) => 158 | Ok(q.push_front(req)), 159 | Err(e) => 160 | Err(e) 161 | }; 162 | SxV::V( 163 | if let Err(e) = w { 164 | Some(UserDeliver::Err(e)) 165 | } else { 166 | match conn.poll() { 167 | Ok(Async::Ready(Some(rv))) => Some(UserDeliver::Message(rv)), 168 | Ok(Async::Ready(None)) => Some(UserDeliver::EndOfStream), 169 | Ok(Async::NotReady) => None, 170 | Err(e) => Some(UserDeliver::Err(e)) 171 | } 172 | } 173 | ) 174 | } 175 | None => 176 | SxV::V(match conn.poll() { 177 | Ok(Async::Ready(Some(rv))) => Some(UserDeliver::Message(rv)), 178 | Ok(Async::Ready(None)) => Some(UserDeliver::EndOfStream), 179 | Ok(Async::NotReady) => None, 180 | Err(e) => Some(UserDeliver::Err(e)) 181 | }) 182 | } 183 | }) 184 | } 185 | 186 | fn q_size(&self) -> usize { 187 | self.q.len() 188 | } 189 | } 190 | 191 | type NnCm = CM; 192 | type DtCm = CM; 193 | type Channel = usize; 194 | 195 | #[derive(Debug)] 196 | pub enum MdxQ { 197 | NN(Channel, Option, NnaQ), 198 | DT(Channel, Option, DtaQ) 199 | } 200 | 201 | #[derive(Debug)] 202 | pub enum MdxR { 203 | NN(Channel, NnaR), 204 | DT(Channel, DtaR) 205 | } 206 | 207 | /// Connection multiplexer/demux 208 | pub struct Mdx { 209 | nn: Vec, 210 | dt: Vec, 211 | u: ProtoFrontEndSourceQ, 212 | c: NatConnector, 213 | nna: SocketAddr 214 | } 215 | 216 | impl Mdx { 217 | pub fn new(n_nn: usize, 218 | n_dt: usize, 219 | session_data: SessionData, 220 | nn_address: SocketAddr, 221 | init_nat: Vec<(SocketAddr, SocketAddr)>) -> Mdx { 222 | let mut rv = Mdx { 223 | nn: vec![], 224 | dt: vec![], 225 | u: ProtoFrontEndSourceQ::new(), 226 | nna: nn_address, 227 | c: NatConnector::new(session_data, init_nat) 228 | }; 229 | for _ in 0..n_nn { 230 | rv.nn.push(CM::new()) 231 | } 232 | for _ in 0..n_dt { 233 | rv.dt.push(CM::new()) 234 | } 235 | rv 236 | } 237 | 238 | #[inline] 239 | fn handle_nn(&mut self, channel: usize) -> usize { 240 | if let 
Some(ud) = self.nn[channel].run(&mut self.c) { 241 | self.u.push(ud.map(|r| MdxR::NN(channel, r))) 242 | } 243 | 244 | self.nn[channel].q_size() 245 | } 246 | 247 | #[inline] 248 | fn handle_dt(&mut self, channel: usize) -> usize { 249 | if let Some(ud) = self.dt[channel].run(&mut self.c) { 250 | self.u.push(ud.map(|r| MdxR::DT(channel, r))) 251 | } 252 | 253 | self.dt[channel].q_size() 254 | } 255 | 256 | fn run(&mut self) -> usize { 257 | let mut rv = 0; 258 | for ch in 0..self.nn.len() { 259 | rv += self.handle_nn(ch) 260 | } 261 | for ch in 0..self.dt.len() { 262 | rv += self.handle_dt(ch) 263 | } 264 | rv 265 | } 266 | 267 | #[inline] 268 | fn nn_a(&self, a: Option) -> Option { 269 | a.or_else(|| Some(self.nna.clone())) 270 | } 271 | } 272 | 273 | impl Sink for Mdx { 274 | type SinkItem = MdxQ; 275 | type SinkError = Error; 276 | 277 | fn start_send(&mut self, req: MdxQ) -> Result> { 278 | match req { 279 | MdxQ::NN(channel, a, req) => 280 | match { let a = self.nn_a(a); self.nn[channel].push_q((req, a)) } { 281 | AsyncSink::Ready => { 282 | self.handle_nn(channel); 283 | Ok(AsyncSink::Ready) 284 | } 285 | AsyncSink::NotReady((req, a)) => 286 | Ok(AsyncSink::NotReady(MdxQ::NN(channel, a, req))) 287 | } 288 | MdxQ::DT(channel, a, req) => 289 | match self.dt[channel].push_q((req, a)) { 290 | AsyncSink::Ready => { 291 | self.handle_dt(channel); 292 | Ok(AsyncSink::Ready) 293 | } 294 | AsyncSink::NotReady((req, a)) => 295 | Ok(AsyncSink::NotReady(MdxQ::DT(channel, a, req))) 296 | } 297 | } 298 | } 299 | 300 | fn poll_complete(&mut self) -> Result> { 301 | if self.run() == 0 { 302 | Ok(Async::Ready(())) 303 | } else { 304 | Ok(Async::NotReady) 305 | } 306 | } 307 | 308 | fn close(&mut self) -> Result> { 309 | self.poll_complete() 310 | } 311 | } 312 | 313 | impl Stream for Mdx { 314 | type Item = MdxR; 315 | type Error = Error; 316 | 317 | fn poll(&mut self) -> Result>> { 318 | let _ = self.run(); 319 | self.u.take_deliver() 320 | } 321 | } 322 | -------------------------------------------------------------------------------- /rhdfs/src/config.rs: -------------------------------------------------------------------------------- 1 | 2 | 3 | pub struct Common { 4 | pub nn_hostport: String, 5 | pub effective_user: String, 6 | pub nat: Vec<(String, String)> 7 | } 8 | 9 | impl Default for Common { 10 | fn default() -> Self { 11 | Common { 12 | nn_hostport: "local".to_owned(), 13 | effective_user: "cloudera".to_owned(), 14 | nat: vec![] 15 | } 16 | } 17 | } 18 | 19 | #[derive(Debug)] 20 | pub struct GetListing { 21 | pub src: Vec, 22 | pub need_location: bool 23 | } 24 | 25 | pub struct Get { 26 | pub src: Vec, 27 | pub tgt_dir: Option 28 | } 29 | 30 | -------------------------------------------------------------------------------- /rhdfs/src/dt/checksum.rs: -------------------------------------------------------------------------------- 1 | use std::fmt; 2 | use std::fmt::Debug; 3 | use byteorder::{ByteOrder, BigEndian}; 4 | use crc::crc32; 5 | use protobuf_api::*; 6 | 7 | 8 | pub trait ChecksumValidator: Send + Debug { 9 | fn is_trivial(&self) -> bool; 10 | fn is_checksum_ok(&self, data: &[u8], sums: &[u8]) -> bool; 11 | fn eval(&self, data: &[u8]) -> Vec; 12 | } 13 | 14 | #[derive(Debug)] 15 | pub struct CVTrivial; 16 | 17 | impl ChecksumValidator for CVTrivial { 18 | fn is_trivial(&self) -> bool { true } 19 | fn is_checksum_ok(&self, _data: &[u8], _sums: &[u8]) -> bool { 20 | true 21 | } 22 | fn eval(&self, _data: &[u8]) -> Vec { Vec::new() } 23 | } 24 | 25 | pub struct CVCRC32 { 26 | 
bytes_per_checksum: usize, 27 | algo: fn(&[u8]) -> u32 28 | } 29 | 30 | impl Debug for CVCRC32 { 31 | fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { 32 | fmt.debug_struct("CVCRC32") 33 | .field("bytes_per_checksum", &self.bytes_per_checksum) 34 | .finish() 35 | } 36 | } 37 | 38 | impl ChecksumValidator for CVCRC32 { 39 | fn is_trivial(&self) -> bool { false } 40 | fn is_checksum_ok(&self, data: &[u8], sums: &[u8]) -> bool { 41 | let idata = data.chunks(self.bytes_per_checksum); 42 | let isums = sums.chunks(4); //size of checksum 43 | idata 44 | .zip(isums) 45 | .find(|&(d, s)| (self.algo)(d) != BigEndian::read_u32(s)) 46 | .is_none() 47 | } 48 | fn eval(&self, data: &[u8]) -> Vec { 49 | use bytes::BufMut; 50 | let idata = data.chunks(self.bytes_per_checksum); 51 | let checksum_count = (data.len() - 1) / self.bytes_per_checksum + 1; 52 | let mut rv = Vec::with_capacity(4 * checksum_count); 53 | for chunk in idata { 54 | rv.put_u32::((self.algo)(chunk)) 55 | } 56 | rv 57 | } 58 | } 59 | 60 | impl CVCRC32 { 61 | pub fn new_crc32(bytes_per_checksum: usize) -> CVCRC32 { 62 | CVCRC32 { bytes_per_checksum, algo: crc32::checksum_ieee } 63 | } 64 | pub fn new_crc32c(bytes_per_checksum: usize) -> CVCRC32 { 65 | CVCRC32 { bytes_per_checksum, algo: crc32::checksum_castagnoli } 66 | } 67 | } 68 | 69 | pub fn checksum_from_proto(csp: ChecksumProto) -> Box { 70 | let (ctype, bpc) = pb_decons!(ChecksumProto, csp, type, bytes_per_checksum); 71 | checksum_from_args(ctype, bpc as usize) 72 | } 73 | 74 | pub fn checksum_from_args(ctype: ChecksumTypeProto, bytes_per_checksum: usize) -> Box { 75 | match if bytes_per_checksum == 0 { ChecksumTypeProto::CHECKSUM_NULL } else { ctype } { 76 | ChecksumTypeProto::CHECKSUM_NULL => 77 | Box::new(CVTrivial), 78 | ChecksumTypeProto::CHECKSUM_CRC32 => 79 | Box::new(CVCRC32::new_crc32(bytes_per_checksum)), 80 | ChecksumTypeProto::CHECKSUM_CRC32C => 81 | Box::new(CVCRC32::new_crc32c(bytes_per_checksum)) 82 | } 83 | } -------------------------------------------------------------------------------- /rhdfs/src/dt/mod.rs: -------------------------------------------------------------------------------- 1 | //! Datatransfer protocol implementation (standard port 50010) 2 | 3 | mod packet; 4 | mod codec; 5 | #[macro_use] 6 | mod proto; 7 | mod read_streamer; 8 | mod write_streamer; 9 | mod checksum; 10 | 11 | pub use self::proto::{Connection_, DtaQ, DtaR}; 12 | pub use self::proto::{ExtendedBlock, Token, ReadBlock}; 13 | //pub use self::codec::{DtReq, DtRsp}; 14 | //pub use self::block_reader::*; 15 | -------------------------------------------------------------------------------- /rhdfs/src/dt/packet.rs: -------------------------------------------------------------------------------- 1 | //! Data transfer packet implementation 2 | //! General packet format 3 | //! ```text 4 | //! +-----------------------------------------------------------+ 5 | //! | uint32 length of (this field + checksums + data) | 6 | //! +-----------------------------------------------------------+ 7 | //! | uint16 length + PacketHeaderProto | 8 | //! +-----------------------------------------------------------+ 9 | //! | checksums (variable length) | 10 | //! +-----------------------------------------------------------+ 11 | //! | data | 12 | //! +-----------------------------------------------------------+ 13 | //! ``` 14 | //! See `org.apache.hadoop.hdfs.server.datanode.BlockSender.sendPacket` 15 | //! See `org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead` 16 | //! 
See `org.apache.hadoop.hdfs.protocol.datatransfer.PacketHeader` 17 | 18 | 19 | use tokio_io::codec::{Decoder, Encoder}; 20 | use bytes::{BytesMut, Bytes, BufMut}; 21 | use byteorder::{ByteOrder, BigEndian}; 22 | 23 | use protobuf_api::PacketHeaderProto; 24 | use codec_tools::*; 25 | use ::*; 26 | 27 | // TODO: redefine Debug so only initial bytes of BytesMut are printed, and printed in hex 28 | /// Data packet for block read|write. Note that `header.data_len == data.len()` 29 | #[derive(Debug, PartialEq, Clone)] 30 | pub struct BlockDataPacket { 31 | pub header: PacketHeaderProto, 32 | pub checksum: Bytes, 33 | pub data: Bytes 34 | } 35 | 36 | impl BlockDataPacket { 37 | pub fn is_last(&self) -> bool { self.header.get_lastPacketInBlock() || self.header.get_dataLen() == 0 } 38 | pub fn seq_no(&self) -> i64 { self.header.get_seqno() } 39 | } 40 | 41 | #[derive(Debug)] 42 | struct PacketLengths { 43 | other: u32, //checksums + data + this u32 field itself (everything except the header and its u16 length) 44 | header: u16 //serialized header length 45 | } 46 | 47 | impl PacketLengths { 48 | const LEN: usize = U16_BYTES + U32_BYTES; 49 | } 50 | 51 | impl PduDes for PacketLengths { 52 | fn from_bytes(b: BytesMut) -> Result<Self> { 53 | if b.len() != PacketLengths::LEN { 54 | Err(app_error!{ codec "Invalid packet lengths" }) 55 | } else { 56 | let other = BigEndian::read_u32(&b); 57 | let header = BigEndian::read_u16(&b[4..]); 58 | Ok(PacketLengths { other, header }) 59 | } 60 | } 61 | } 62 | 63 | impl PduSer for PacketLengths { 64 | fn serialized_len(&mut self) -> usize { 65 | Self::LEN 66 | } 67 | 68 | fn encode(self, b: &mut BytesMut) -> Result<()> { 69 | b.put_u32::<BigEndian>(self.other); 70 | b.put_u16::<BigEndian>(self.header); 71 | Ok(()) 72 | } 73 | } 74 | 75 | #[derive(Debug)] 76 | struct PacketBodyDecoder { 77 | packet_len: usize, //len of packet minus the two leading length fields of 6 bytes in total 78 | header_len: usize, 79 | payload_len: usize //packet_len - header_len 80 | } 81 | 82 | impl Decoder for PacketBodyDecoder { 83 | type Item = BlockDataPacket; 84 | type Error = Error; 85 | 86 | fn decode(&mut self, src: &mut BytesMut) -> Result<Option<BlockDataPacket>> { 87 | if src.len() < self.packet_len { 88 | Ok(None) 89 | } else { 90 | let hbytes = src.split_to(self.header_len); 91 | let header = PacketHeaderProto::from_bytes(hbytes)?; 92 | 93 | let datalen = header.get_dataLen(); 94 | if datalen as usize > self.payload_len || datalen < 0 { 95 | return Err(app_error!{ codec "invalid data len" }) 96 | } 97 | let checksum_len = self.payload_len - datalen as usize; 98 | let checksum = src.split_to(checksum_len); 99 | let data = src.split_to(datalen as usize); 100 | Ok(Some(BlockDataPacket { header, checksum: checksum.freeze(), data: data.freeze() })) 101 | } 102 | } 103 | } 104 | 105 | #[derive(Debug)] 106 | pub struct PacketDecoder { 107 | inner: PairDecoder<FixedSizeDecoder<PacketLengths>, PacketBodyDecoder> 108 | } 109 | 110 | impl PacketDecoder { 111 | pub fn new() -> PacketDecoder { 112 | PacketDecoder { 113 | inner: 114 | PairDecoder::new( 115 | FixedSizeDecoder::new_sized(PacketLengths::LEN), 116 | FnTailF::new(|lengths: PacketLengths| Ok(PacketBodyDecoder { 117 | packet_len: lengths.other as usize - U32_BYTES + lengths.header as usize, 118 | header_len: lengths.header as usize, 119 | payload_len: lengths.other as usize - U32_BYTES 120 | }))) 121 | } 122 | } 123 | } 124 | 125 | impl Decoder for PacketDecoder { 126 | type Item = BlockDataPacket; 127 | type Error = Error; 128 | 129 | fn decode(&mut self, src: &mut BytesMut) -> Result<Option<BlockDataPacket>> { 130 | self.inner.decode(src) 131 | } 132 | } 133 | 134 | struct PacketBodyEncoder; 135 | 136 |
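// Worked example of the two length fields (a sketch; the sizes are assumed for illustration):
// for a 20-byte serialized PacketHeaderProto, 12 bytes of checksums and 1024 bytes of data,
// the sender writes other (u32) = 4 + 12 + 1024 = 1040 and header (u16) = 20; PacketBodyDecoder
// is then built with packet_len = 1040 - 4 + 20 = 1056, header_len = 20 and payload_len = 1036.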
impl Encoder for PacketBodyEncoder { 137 | type Item = BlockDataPacket; 138 | type Error = Error; 139 | 140 | fn encode(&mut self, item: BlockDataPacket, dst: &mut BytesMut) -> Result<()> { 141 | let _ = item.header.encode(dst)?; 142 | dst.extend_from_slice(&item.checksum); 143 | dst.extend_from_slice(&item.data); 144 | Ok(()) 145 | } 146 | } 147 | 148 | 149 | pub struct PacketEncoder { 150 | inner: PairEncoder<FixedSizeEncoder<PacketLengths>, PacketBodyEncoder> 151 | } 152 | 153 | impl PacketEncoder { 154 | pub fn new() -> PacketEncoder { 155 | PacketEncoder { 156 | inner: 157 | PairEncoder::new( 158 | FixedSizeEncoder::new(), 159 | PacketBodyEncoder, 160 | | pb | { 161 | Ok(PacketLengths { 162 | other: checked_usize_to_u32(pb.checksum.len() + pb.data.len() + U32_BYTES, "other")?, 163 | header: checked_usize_to_u16(pb.header.serialized_len(), "header")? 164 | }) 165 | } 166 | ) 167 | } 168 | } 169 | } 170 | 171 | impl Encoder for PacketEncoder { 172 | type Item = BlockDataPacket; 173 | type Error = Error; 174 | 175 | fn encode(&mut self, item: BlockDataPacket, dst: &mut BytesMut) -> Result<()> { 176 | self.inner.encode(item, dst) 177 | } 178 | } 179 | 180 | 181 | /// Loopback-tests the encoder via the decoder; decoder correctness is proven in the codec tests 182 | #[test] 183 | fn test_encoder() { 184 | let checksum = [b'a', b'b', b'c']; 185 | let data = [10u8, 11, 12, 13, 14, 15]; 186 | let header = pb_cons!(PacketHeaderProto, 187 | offset_in_block: 100, 188 | seqno: 200, 189 | last_packet_in_block: false, 190 | data_len: data.len() as i32 191 | ); 192 | 193 | let bdp = BlockDataPacket { 194 | header, 195 | checksum: Bytes::from(&checksum[..]), 196 | data: Bytes::from(&data[..]) 197 | }; 198 | 199 | let mut e = PacketEncoder::new(); 200 | let mut b = BytesMut::new(); 201 | 202 | let _ = e.encode(bdp, &mut b).expect("encoding failed"); 203 | 204 | let mut d = PacketDecoder::new(); 205 | 206 | let BlockDataPacket { 207 | header: o_header, 208 | checksum: o_checksum, 209 | data: o_data 210 | } = d.decode(&mut b).expect("decoding failed").unwrap(); 211 | 212 | let (offset_in_block, seqno, last_packet_in_block, data_len) = 213 | pb_decons!(PacketHeaderProto, o_header, 214 | offset_in_block, seqno, last_packet_in_block, data_len); 215 | 216 | assert_eq!(offset_in_block, 100); 217 | assert_eq!(seqno, 200); 218 | assert_eq!(last_packet_in_block, false); 219 | assert_eq!(data_len, data.len() as i32); 220 | 221 | 222 | assert_eq!(&o_checksum[..], &checksum[..]); 223 | assert_eq!(&o_data[..], &data[..]); 224 | 225 | } -------------------------------------------------------------------------------- /rhdfs/src/dt/read_streamer.rs: -------------------------------------------------------------------------------- 1 | use *; 2 | use bytes::*; 3 | use super::{ 4 | packet::BlockDataPacket, 5 | checksum::{ChecksumValidator} 6 | }; 7 | 8 | pub type ReadRange = (i64, i64); 9 | 10 | #[derive(Debug)] 11 | pub(crate) struct ReadStreamer { 12 | checksum: Box<ChecksumValidator>, 13 | read_range: ReadRange, 14 | seqno: i64, 15 | offset: i64 16 | } 17 | 18 | impl ReadStreamer { 19 | pub fn new(read_range: ReadRange, checksum: Box<ChecksumValidator>) -> ReadStreamer { 20 | ReadStreamer { 21 | checksum, 22 | read_range, 23 | seqno: 0, 24 | offset: 0, 25 | } 26 | } 27 | 28 | fn check_sequencing(&mut self, seqno: i64, offset: i64) -> Result<()> { 29 | if self.seqno == seqno && self.offset == offset { 30 | Ok(()) 31 | } else if self.seqno == seqno && seqno == 0 { 32 | self.offset = offset; 33 | Ok(()) 34 | } else { 35 | Err(app_error!(dt DtStatus::ERROR_INVALID, format!( 36 | "BlockReadTracker: 
packet sequencing error: expected s={}, o={}, got s={}, o={}", 37 | self.seqno, self.offset, seqno, offset 38 | ))) 39 | } 40 | } 41 | 42 | #[inline] 43 | fn adjust_sequencing(&mut self, dlen: usize) { 44 | self.seqno += 1; 45 | self.offset += dlen as i64; 46 | } 47 | 48 | fn validate_checksums(&self, data: &[u8], checksums: &[u8]) -> Result<()> { 49 | if self.checksum.is_checksum_ok(data, checksums) { 50 | Ok(()) 51 | } else { 52 | Err(app_error!(dt DtStatus::ERROR_CHECKSUM, "BlockReadTracker: checksum error")) 53 | } 54 | } 55 | 56 | pub fn process_packet(&mut self, p: BlockDataPacket) -> Result<Bytes> { 57 | let (seqno, offset) = pb_decons!(PacketHeaderProto, p.header, seqno, offset_in_block); 58 | let _ = self.check_sequencing(seqno, offset)?; 59 | let _ = self.validate_checksums(&p.data, &p.checksum)?; 60 | self.adjust_sequencing(p.data.len()); 61 | Ok(crop_bytes(p.data, offset, self.read_range)) 62 | } 63 | 64 | pub fn get_success_status(&self) -> DtStatus { 65 | if self.checksum.is_trivial() { DtStatus::SUCCESS } else { DtStatus::CHECKSUM_OK } 66 | } 67 | } 68 | 69 | /// Crops `Bytes` so that it fits into a target range. 70 | /// `b` is the source `Bytes`, covering the source segment `[sl, sl + b.len())`; `[rl, ru)` 71 | /// is the target (bounding) range. Returns `b` cropped to the intersection of the source 72 | /// segment and the target range. The return value is empty if they do not intersect. 73 | fn crop_bytes(mut b: Bytes, sl: i64, (rl, ru): (i64, i64)) -> Bytes { 74 | let su = sl + b.len() as i64; 75 | if su <= ru { //SR 76 | if rl <= sl { //rsSR 77 | //sS 78 | b 79 | } else if su <= rl { //Sr => sSrR 80 | Bytes::new() 81 | } else { //srSR 82 | //rS: cut off initial r - s bytes 83 | b.advance((rl - sl) as usize); 84 | b 85 | } 86 | } else { //RS 87 | if ru <= sl { //Rs => rRsS 88 | Bytes::new() 89 | } else if rl <= sl { //rs => rsRS 90 | //sR: cut off trailing bytes to the length R - s 91 | b.truncate((ru - sl) as usize); 92 | b 93 | } else { //srRS 94 | //rR: advance by r - s bytes and truncate to R - r bytes 95 | b.advance((rl - sl) as usize); 96 | b.truncate((ru - rl) as usize); 97 | b 98 | } 99 | } 100 | } 101 | 102 | #[test] 103 | fn test_crop_bytes() { 104 | let sl = 10; //source segment [10, 20) 105 | let b = Bytes::from_static(&[10, 11, 12, 13, 14, 15, 16, 17, 18, 19]); 106 | 107 | macro_rules!
asrt { 108 | ($b:expr, $rlru:expr => $($n:expr),+) => { assert_eq!(crop_bytes($b.clone(), sl, $rlru), Bytes::from_static(&[$($n),+])); }; 109 | ($b:expr, $rlru:expr => ) => { assert_eq!(crop_bytes($b.clone(), sl, $rlru), Bytes::new()); }; 110 | } 111 | 112 | asrt!(b, (0, 30) => 10, 11, 12, 13, 14, 15, 16, 17, 18, 19); 113 | asrt!(b, (0, 20) => 10, 11, 12, 13, 14, 15, 16, 17, 18, 19); 114 | asrt!(b, (10, 30) => 10, 11, 12, 13, 14, 15, 16, 17, 18, 19); 115 | asrt!(b, (10, 20) => 10, 11, 12, 13, 14, 15, 16, 17, 18, 19); 116 | asrt!(b, (20, 30) => ); 117 | asrt!(b, (0, 10) => ); 118 | asrt!(b, (15, 25) => 15, 16, 17, 18, 19); 119 | asrt!(b, (5, 15) => 10, 11, 12, 13, 14); 120 | asrt!(b, (11, 12) => 11); 121 | asrt!(b, (11, 13) => 11, 12); 122 | 123 | let b = Bytes::from_static(&[10]); 124 | 125 | asrt!(b, (0, 30) => 10); 126 | asrt!(b, (10, 11) => 10); 127 | asrt!(b, (20, 30) => ); 128 | asrt!(b, (0, 10) => ); 129 | asrt!(b, (15, 25) => ); 130 | asrt!(b, (5, 15) => 10); 131 | asrt!(b, (11, 12) => ); 132 | asrt!(b, (11, 13) => ); 133 | 134 | let b = Bytes::new(); 135 | asrt!(b, (0, 10) => ); 136 | 137 | } -------------------------------------------------------------------------------- /rhdfs/src/dt/write_streamer.rs: -------------------------------------------------------------------------------- 1 | use *; 2 | use bytes::*; 3 | use super::{ 4 | packet::BlockDataPacket, 5 | checksum::{ChecksumValidator} 6 | }; 7 | 8 | #[derive(Debug)] 9 | pub struct WriteFramer { 10 | checksum: Box, 11 | max_offset: usize, 12 | packet_len: usize, 13 | seqno: i64, 14 | offset: usize 15 | } 16 | 17 | impl WriteFramer { 18 | pub fn new(checksum: Box, block_len: usize, packet_len: usize) -> WriteFramer { 19 | WriteFramer { 20 | checksum, 21 | max_offset: block_len, 22 | packet_len, 23 | seqno: 0, 24 | offset: 0, 25 | } 26 | } 27 | 28 | pub fn process_packet(&mut self, data: Bytes) -> Result { 29 | fn checked_conv64(a: usize) -> Result { 30 | if a > std::i64::MAX as usize { 31 | Err(app_error!(other "usize -> i64 conversion overflow")) 32 | } else { 33 | Ok(a as i64) 34 | } 35 | } 36 | fn checked_conv32(a: usize) -> Result { 37 | if a > std::i32::MAX as usize { 38 | Err(app_error!(other "usize -> i32 conversion overflow")) 39 | } else { 40 | Ok(a as i32) 41 | } 42 | } 43 | 44 | let dlen = data.len(); 45 | 46 | if dlen != self.packet_len && self.offset + dlen != self.max_offset { 47 | Err(app_error!(other "Short packet not at the end of file")) 48 | } else if self.offset + dlen > self.max_offset { 49 | Err(app_error!(other "Excess packet")) 50 | } else { 51 | let checksum = Bytes::from(self.checksum.eval(&data)); 52 | let header = pb_cons!(PacketHeaderProto, 53 | offset_in_block: checked_conv64(self.offset)?, 54 | seqno: self.seqno, 55 | last_packet_in_block: false, 56 | data_len: checked_conv32(dlen)? 
57 | ); 58 | self.seqno = self.seqno.checked_add(1).ok_or_else(|| app_error!(other "WriteStreamer: Sequence number overflow"))?; 59 | self.offset = self.offset.checked_add(dlen).ok_or_else(|| app_error!(other "WriteStreamer: offset overflow"))?; 60 | Ok(BlockDataPacket { header, checksum, data }) 61 | } 62 | } 63 | 64 | } 65 | 66 | #[derive(Debug)] 67 | pub struct WriteStreamer { 68 | f: WriteFramer 69 | } 70 | 71 | impl WriteStreamer { 72 | pub fn new(checksum: Box<ChecksumValidator>, block_len: usize, packet_len: usize) -> WriteStreamer { 73 | WriteStreamer { f: WriteFramer::new(checksum, block_len, packet_len) } 74 | } 75 | 76 | /* 77 | pub fn push(&mut self, data: Bytes) -> Result<()> { 78 | unimplemented!() 79 | } 80 | 81 | pub fn pull(&mut self) -> Option<BlockDataPacket> { 82 | unimplemented!() 83 | } 84 | 85 | pub fn ack(&mut self, ack: PipelineAckProto) -> Result<()> { 86 | unimplemented!() 87 | } 88 | */ 89 | pub fn push_paused(&self) -> bool { 90 | unimplemented!() 91 | } 92 | /* 93 | /// Returns `Ok(false)` to keep running, `Ok(true)` on success, `Err(e)` otherwise 94 | pub fn process_ack(ack: PipelineAckProto) -> Result<bool> { 95 | //analyze seqno (must be acked seqno + 1) 96 | //analyse statuses (remove failed nodes and resume pipeline) 97 | unimplemented!() 98 | }*/ 99 | } 100 | 101 | 102 | /* 103 | use std::io::Read; 104 | use std::fmt; 105 | use std::fmt::Debug; 106 | 107 | use super::codec::{DtReq, DtRsp, OpBlockWriteMessage}; 108 | use super::packet::BlockDataPacket; 109 | use super::checksum::*; 110 | use super::*; 111 | use protobuf_api::*; 112 | use *; 113 | 114 | /// State of block writer 115 | #[derive(Debug)] 116 | pub struct BlockWriteState<R> { 117 | pub r: R, 118 | ///Write position inside the block: the position written so far, not including any unacked data 119 | pub write_position: i64 120 | } 121 | 122 | impl<R> BlockWriteState<R> { 123 | fn new(r: R, write_position: i64) -> BlockWriteState<R> { 124 | BlockWriteState { r, write_position } 125 | } 126 | } 127 | 128 | pub struct BlockWriteTracker<R> { 129 | r: R, 130 | c: Box<ChecksumValidator>, 131 | seqno: i64, 132 | offset: i64 133 | } 134 | 135 | impl<R> Debug for BlockWriteTracker<R> { 136 | fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { 137 | fmt.debug_struct("BlockWriteTracker") 138 | .field("c", &self.c) 139 | .field("seqno", &self.seqno) 140 | .field("offset", &self.offset) 141 | .finish() 142 | } 143 | } 144 | /// The return value type for `next_packet` 145 | enum PacketResult { 146 | /// There is a packet to be sent out 147 | Packet(BlockDataPacket), 148 | /// There is no packet to send (right now or forever).
Some acks haven't been received yet 149 | Acking, 150 | /// The work is done 151 | Complete, 152 | /// An error occurred 153 | Err(Error) 154 | 155 | } 156 | 157 | impl<R> BlockWriteTracker<R> { 158 | fn new(BlockWriteState { r, write_position }: BlockWriteState<R>, c: Box<ChecksumValidator>) -> BlockWriteTracker<R> { 159 | BlockWriteTracker { 160 | r, c, 161 | seqno: 0, 162 | offset: write_position 163 | } 164 | } 165 | 166 | fn decons(self) -> BlockWriteState<R> { 167 | BlockWriteState::new(self.r, self.offset) 168 | } 169 | 170 | fn next_packet(self) -> (PacketResult, Self) { 171 | unimplemented!() 172 | } 173 | 174 | fn ack(self, ack: PipelineAckProto) -> (PacketResult, Self) { 175 | unimplemented!() 176 | } 177 | } 178 | 179 | fn build_block_write_tracker<R>(csp: ChecksumProto, bws: BlockWriteState<R>) -> BlockWriteTracker<R> { 180 | BlockWriteTracker::new(bws, new_checksum(csp)) 181 | } 182 | 183 | 184 | #[derive(Debug)] 185 | pub enum WriteBlock<R> { 186 | Init(OpWriteBlockProto, BlockWriteState<R>), 187 | ResponseWait(BlockWriteState<R>, ChecksumProto), 188 | Packet(BlockWriteTracker<R>), 189 | End(BlockWriteState<R>) 190 | } 191 | 192 | 193 | impl<R> WriteBlock<R> where 194 | R: Read + Send + Debug + 'static { 195 | pub fn new(h: BaseHeaderProto, 196 | targets: Vec<DatanodeInfoProto>, 197 | storage_types: Vec<StorageTypeProto>, 198 | r: R) -> Self { 199 | let tlen = targets.len() as u32; 200 | let st = storage_types[0]; 201 | 202 | //DFS_BYTES_PER_CHECKSUM_KEY = "dfs.bytes-per-checksum" 203 | //DFS_BYTES_PER_CHECKSUM_DEFAULT = 512 204 | //DFS_CHECKSUM_TYPE_KEY = "dfs.checksum.type" 205 | //DFS_CHECKSUM_TYPE_DEFAULT = "CRC32C" 206 | 207 | let owbp = pb_cons!(OpWriteBlockProto, 208 | header: pb_cons!(ClientOperationHeaderProto, 209 | //client_name: client_name.to_owned(), 210 | base_header: h 211 | ), 212 | targets: targets, 213 | stage: OpWriteBlockProto_BlockConstructionStage::PIPELINE_SETUP_CREATE, 214 | pipeline_size: tlen, 215 | min_bytes_rcvd: 0, 216 | max_bytes_rcvd: 0, 217 | latest_generation_stamp: 0, 218 | requested_checksum: pb_cons!(ChecksumProto, 219 | type: ChecksumTypeProto::CHECKSUM_CRC32C, 220 | bytes_per_checksum: 512), 221 | 222 | storage_type: st, 223 | target_storage_types: storage_types 224 | 225 | //caching_strategy, 226 | //allow_lazy_persist 227 | ); 228 | WriteBlock::Init(owbp, BlockWriteState::new(r, 0 as i64)) 229 | } 230 | pub fn state(self) -> BlockWriteState<R> { 231 | match self { 232 | WriteBlock::Init(_, bws) | 233 | WriteBlock::ResponseWait(bws, _) | 234 | WriteBlock::End(bws) => 235 | bws, 236 | WriteBlock::Packet(bwt) => 237 | bwt.decons() 238 | } 239 | } 240 | 241 | fn packet_result((r, bwt): (PacketResult, BlockWriteTracker<R>)) -> (ProtocolFsmResult, Self) { 242 | match r { 243 | PacketResult::Packet(packet) => 244 | pfsm!(send(DtReq::Packet(packet)) waiting / goto WriteBlock::Packet(bwt)), 245 | PacketResult::Acking => 246 | pfsm!(wait / goto WriteBlock::Packet(bwt)), 247 | PacketResult::Complete => 248 | pfsm!(return success / goto WriteBlock::End(bwt.decons())), 249 | PacketResult::Err(e) => 250 | pfsm!(return error(e) / goto WriteBlock::End(bwt.decons())), 251 | } 252 | } 253 | } 254 | 255 | impl<R> ProtocolFsm for WriteBlock<R> where 256 | R: Read + Send + Debug + 'static { 257 | fn idle(self) -> (ProtocolFsmResult, Self) { 258 | use self::WriteBlock as S; 259 | match self { 260 | S::Init(owbp, w) => { 261 | let csp = pb_get!(OpWriteBlockProto, owbp, requested_checksum).clone(); 262 | pfsm!(send(DtReq::WriteBlock(owbp)) / goto WriteBlock::ResponseWait(w, csp)) 263 | }, 264 | S::Packet(bwt) => 265 | S::packet_result(bwt.next_packet()), 266 | s @
S::ResponseWait(..) | 267 | s @ S::End(..) => 268 | pfsm!(wait / goto s) 269 | //pfsm!(return error(app_error!(other "Invalid s/e {:?}/idle", s))/ goto s) 270 | } 271 | } 272 | 273 | fn incoming(self, rsp: DtRsp) -> (ProtocolFsmResult, Self) { 274 | use self::WriteBlock as S; 275 | match (self, rsp) { 276 | (S::ResponseWait(bws, csp), DtRsp::WriteBlock(OpBlockWriteMessage::Initial(has_data, borp))) => 277 | if has_data { 278 | S::packet_result(build_block_write_tracker(csp, bws).next_packet()) 279 | } else { 280 | let (s, m) = pb_decons!(BlockOpResponseProto, borp, status, message); 281 | pfsm!(return error(app_error!(dt s, m)) / goto S::End(bws)) 282 | }, 283 | (S::Packet(bwt), DtRsp::WriteBlock(OpBlockWriteMessage::Ack(pkt))) => 284 | Self::packet_result(bwt.ack(pkt)), 285 | //abnormal conditions 286 | (S::Init(_, bws), ev) => 287 | pfsm!(return error(app_error!(other "Unexpected s/e Init/{:?}", ev)) / goto S::End(bws)), 288 | (S::Packet(bwt), ev) => 289 | pfsm!(return error(app_error!(other "Unexpected s/e Packet/{:?}", ev)) / goto S::End(bwt.decons())), 290 | (S::ResponseWait(bws, _), ev) => 291 | pfsm!(return error(app_error!(other "Unexpected s/e ResponseWait/{:?}", ev)) / goto S::End(bws)), 292 | (S::End(bws), ev) => 293 | pfsm!(return error(app_error!(other "Unexpected s/e End/{:?}", ev)) / goto S::End(bws)) 294 | } 295 | } 296 | } 297 | */ -------------------------------------------------------------------------------- /rhdfs/src/error.rs: -------------------------------------------------------------------------------- 1 | 2 | use std::fmt::{Display, Formatter, Result}; 3 | pub use std::borrow::Cow; 4 | use protobuf::ProtobufError; 5 | use protocolpb::proto::hdfs::datatransfer::Status as DtStatus; 6 | use protocolpb::proto::hadoop::RpcHeader::{ 7 | RpcResponseHeaderProto_RpcStatusProto as RpcStatusProto, 8 | RpcResponseHeaderProto_RpcErrorCodeProto as RpcErrorCodeProto 9 | }; 10 | use std::io::ErrorKind; 11 | use *; 12 | 13 | #[derive(Debug)] 14 | pub enum Error { 15 | Protobuf(ProtobufError), 16 | IO(IoError), 17 | RPC { 18 | protocol: String, 19 | status: RpcStatusProto, 20 | error_detail: RpcErrorCodeProto, 21 | error_msg: String, 22 | exception_class_name: String 23 | }, 24 | ShortBuffer(usize), 25 | Codec(Cow<'static, str>), 26 | Namenode(Cow<'static, str>), 27 | DataTransfer(DtStatus, String), 28 | Other(Cow<'static, str>) 29 | } 30 | 31 | 32 | #[macro_export] 33 | macro_rules! 
app_error { 34 | {codec $e:expr, $($es:expr),+} => { Error::Codec(Cow::from(format!($e, $($es),+))) }; 35 | {codec $e:expr} => { Error::Codec(Cow::from($e)) }; 36 | {nn $e:expr, $($es:expr),+} => { Error::Namenode(Cow::from(format!($e, $($es),+))) }; 37 | {nn $e:expr} => { Error::Namenode(Cow::from($e)) }; 38 | {dt $s:expr, $m:expr} => { Error::DataTransfer($s, $m.to_owned()) }; 39 | {unreachable} => { Error::Other(Cow::from("got to an unreachable point in code")) }; 40 | {premature eof} => { Error::Other(Cow::from(format!("premature end of input stream in {} {} {}", module_path!(), file!(), line!()))) }; 41 | {other $e:expr} => { Error::Other(Cow::from($e)) }; 42 | {other $($es:expr),+} => { Error::Other(Cow::from(format!($($es),+))) }; 43 | } 44 | 45 | impl StdError for Error { 46 | fn description(&self) -> &str { 47 | match self { 48 | &Error::Protobuf(ref pe) => pe.description(), 49 | &Error::IO(ref io) => io.description(), 50 | &Error::RPC { ref error_msg, protocol:_, status: _, error_detail: _, exception_class_name: _ } => error_msg, 51 | &Error::ShortBuffer(_) => "Buffer short", 52 | &Error::Codec(ref s) => s, 53 | &Error::Namenode(ref s) => s, 54 | &Error::DataTransfer(_, ref s) => s, 55 | &Error::Other(ref s) => s 56 | } 57 | } 58 | 59 | fn cause(&self) -> Option<&StdError> { 60 | match self { 61 | &Error::Protobuf(ref pe) => pe.cause(), 62 | &Error::IO(ref io) => io.cause(), 63 | &Error::RPC { error_msg: _, protocol:_, status: _, error_detail: _, exception_class_name: _} | 64 | &Error::ShortBuffer(..) | 65 | &Error::Codec(..) | 66 | &Error::Namenode(..) | 67 | &Error::DataTransfer(..) | 68 | &Error::Other(..) 69 | => None 70 | } 71 | } 72 | } 73 | 74 | impl Display for Error { 75 | fn fmt(&self, f: &mut Formatter) -> Result { 76 | match self { 77 | &Error::Protobuf(ref pe) => 78 | write!(f, "ProtobufError: {}", pe), 79 | &Error::IO(ref io) => 80 | write!(f, "IoError: {}", io), 81 | &Error::RPC { ref protocol, ref status, ref error_detail, ref error_msg, ref exception_class_name } => 82 | write!(f, "RpcError(protocol={}, status={:?}, error_detail={:?}, error_msg={}, exception_class_name={})", 83 | protocol, status, error_detail, error_msg, exception_class_name), 84 | &Error::ShortBuffer(n) => 85 | write!(f, "Buffer short by({})", n), 86 | &Error::Codec(ref s) => 87 | write!(f, "CodecError: {}", s), 88 | &Error::Namenode(ref m) => 89 | write!(f, "NameNodeError: `{}`", m), 90 | &Error::DataTransfer(ref s, ref m) => 91 | write!(f, "DataTransferError: {:?} `{}`", s, m), 92 | &Error::Other(ref s) => 93 | write!(f, "Error: {}", s) 94 | } 95 | } 96 | } 97 | 98 | impl From for Error { 99 | fn from(e: ProtobufError) -> Self { 100 | Error::Protobuf(e) 101 | } 102 | } 103 | 104 | impl From for Error { 105 | fn from(e: IoError) -> Self { 106 | Error::IO(e) 107 | } 108 | } 109 | 110 | impl From for IoError { 111 | fn from(e: Error) -> Self { 112 | match e { 113 | Error::IO(io) => io, 114 | other => IoError::new(ErrorKind::Other, other) 115 | } 116 | } 117 | } 118 | -------------------------------------------------------------------------------- /rhdfs/src/lib.rs: -------------------------------------------------------------------------------- 1 | //! 
Rust native HDFS client 2 | 3 | #[macro_use] extern crate log; 4 | 5 | extern crate futures; 6 | extern crate tokio_io; 7 | extern crate tokio_tcp; 8 | extern crate protobuf; 9 | extern crate byteorder; 10 | extern crate bytes; 11 | extern crate crc; 12 | extern crate uuid; 13 | extern crate rand; 14 | 15 | extern crate protocolpb; 16 | 17 | mod types; 18 | #[macro_use] mod util; 19 | #[macro_use] mod error; 20 | #[macro_use] mod protobuf_api; 21 | mod result; 22 | mod codec_tools; 23 | #[macro_use] mod proto_tools; 24 | mod dt; 25 | mod nn; 26 | mod cmdx; 27 | 28 | mod op; 29 | 30 | pub mod hdfs; 31 | pub mod config; 32 | 33 | pub use cmdx::SessionData; 34 | pub use types::*; 35 | pub use error::*; 36 | pub use result::*; 37 | pub use util::*; 38 | pub use protobuf_api::*; 39 | 40 | 41 | 42 | -------------------------------------------------------------------------------- /rhdfs/src/nn/mod.rs: -------------------------------------------------------------------------------- 1 | //! Namenode protocol implementation (standard port 8020) 2 | 3 | mod codec; 4 | mod proto; 5 | 6 | pub use self::proto::{Connection_, NnaQ, NnaR}; 7 | pub use self::proto::Connection; 8 | pub use self::codec::{NnQ, NnR}; 9 | -------------------------------------------------------------------------------- /rhdfs/src/op/mod.rs: -------------------------------------------------------------------------------- 1 | pub(crate) mod read_listing; 2 | 3 | use proto_tools::*; 4 | use nn; 5 | use *; 6 | use std::net::SocketAddr; 7 | use futures::prelude::*; 8 | 9 | #[derive(Clone, Debug)] 10 | pub struct NNCD { 11 | pub addr: SocketAddr, 12 | pub eff_user: String, 13 | pub client_id: Option<Vec<u8>> 14 | } 15 | 16 | impl Into<BFI<nn::Connection>> for NNCD { 17 | fn into(self) -> BFI<nn::Connection> { 18 | match self.client_id { 19 | Some(client_id) => nn::Connection::connect_det(&self.addr, client_id, self.eff_user), 20 | None => nn::Connection::connect(&self.addr, self.eff_user), 21 | } 22 | } 23 | } 24 | 25 | pub type NNCQ = CQ; 26 | //pub type NNChannel = AutoChannel; 27 | 28 | pub struct NNChannel { 29 | inner: AutoChannel 30 | } 31 | impl NNChannel { 32 | pub fn new(cdata: NNCD, timeout: Duration) -> NNChannel { 33 | NNChannel { inner: AutoChannel::new(cdata, timeout) } 34 | } 35 | } 36 | impl Stream for NNChannel { 37 | type Item = nn::NnR; 38 | type Error = Error; 39 | 40 | fn poll(&mut self) -> Result<Async<Option<nn::NnR>>> { 41 | self.inner.poll() 42 | } 43 | } 44 | 45 | impl Sink for NNChannel { 46 | type SinkItem = nn::NnQ; 47 | type SinkError = Error; 48 | 49 | fn start_send(&mut self, item: nn::NnQ) -> Result<AsyncSink<nn::NnQ>> { 50 | self.inner.start_send(item) 51 | } 52 | 53 | fn poll_complete(&mut self) -> Result<Async<()>> { 54 | self.inner.poll_complete() 55 | } 56 | 57 | fn close(&mut self) -> Result<Async<()>> { 58 | self.inner.close() 59 | } 60 | } 61 | 62 | 63 | pub use self::read_listing::GetListing; 64 | pub fn get_listing(c: NNChannel, source: String, need_location: bool) -> GetListing { 65 | GetListing::new(c, source, need_location) 66 | } -------------------------------------------------------------------------------- /rhdfs/src/op/read_listing.rs: -------------------------------------------------------------------------------- 1 | use proto_tools::*; 2 | use super::*; 3 | use *; 4 | use futures::Stream; 5 | 6 | 7 | #[derive(Debug)] 8 | pub struct GetListingOps { 9 | source: String, 10 | need_location: bool, 11 | } 12 | 13 | impl GetListingOps { 14 | fn new(source: String, need_location: bool) -> GetListingOps { 15 | GetListingOps { source, need_location } 16 | } 17 | 18 | #[inline] 19 | fn q(&self,
start: Vec<u8>) -> GetListingRequestProto { 20 | pb_cons!(GetListingRequestProto, 21 | src: self.source.clone(), 22 | start_after: start, 23 | need_location: self.need_location 24 | ) 25 | } 26 | 27 | fn s(&self) -> GetListingRequestProto { 28 | self.q(vec![]) 29 | } 30 | 31 | fn n(&mut self, nr: GetListingResponseProto) -> (Vec<HdfsFileStatusProto>, Option<GetListingRequestProto>) { 32 | let dir_list = pb_decons!(GetListingResponseProto, nr, dir_list); 33 | let (fs, remaining_entries) = pb_decons!(DirectoryListingProto, dir_list, 34 | partial_listing, remaining_entries); 35 | 36 | if remaining_entries == 0 { 37 | (fs, None) 38 | } else { 39 | let last_filename = Vec::from( 40 | fs.last().map(|o| pb_get!(HdfsFileStatusProto, o, path)).unwrap_or(&[]) 41 | ); 42 | (fs, Some(self.q(last_filename))) 43 | } 44 | } 45 | } 46 | 47 | 48 | #[derive(Debug)] 49 | pub struct GetListingState { 50 | ops: GetListingOps, 51 | s: Option<GetListingRequestProto> 52 | } 53 | 54 | impl GetListingState { 55 | fn new(source: String, need_location: bool) -> GetListingState { 56 | let ops = GetListingOps::new(source, need_location); 57 | let s = Some(ops.s()); 58 | GetListingState { ops, s } 59 | } 60 | 61 | } 62 | 63 | impl StreamProtocolFsm for GetListingState { 64 | type R = (Vec<HdfsFileStatusProto>, bool); 65 | type LQ = nn::NnQ; 66 | type LR = nn::NnR; 67 | 68 | fn get_downstream(&mut self) -> Result<SendReq<Self::LQ>> { 69 | match self.s.take() { 70 | Some(glrp) => Ok(SendReq::EnqueueAndFlush(nn::NnQ::GetListing(glrp))), 71 | None => Ok(SendReq::NOP) 72 | } 73 | } 74 | 75 | fn handle_upstream(&mut self, lr: Option<Self::LR>) -> Result<Async<Option<(Vec<HdfsFileStatusProto>, bool)>>> { 76 | match lr { 77 | Some(nn::NnR::GetListing(glrp)) => { 78 | let (r, s) = self.ops.n(glrp); 79 | self.s = s; 80 | Ok(Async::Ready(Some((r, self.s.is_some())))) 81 | } 82 | None => 83 | Err(app_error!(other "GetListingState: Premature EOS")), 84 | other => 85 | Err(app_error!(other "GetListingState: invalid response {:?}", other)) 86 | } 87 | } 88 | } 89 | 90 | pub struct GetListing { 91 | s: StreamProtocol<NNChannel, GetListingState>, 92 | eos: bool 93 | } 94 | 95 | impl GetListing { 96 | pub fn new(c: NNChannel, source: String, need_location: bool) -> GetListing { 97 | GetListing { 98 | s: StreamProtocol::new(c, GetListingState::new(source, need_location)), 99 | eos: false 100 | } 101 | } 102 | pub fn into_inner(self) -> NNChannel { 103 | self.s.into_parts().0 104 | } 105 | } 106 | 107 | impl Stream for GetListing { 108 | type Item = Vec<HdfsFileStatusProto>; 109 | type Error = Error; 110 | 111 | fn poll(&mut self) -> Result<Async<Option<Vec<HdfsFileStatusProto>>>> { 112 | if self.eos { 113 | Ok(Async::Ready(None)) 114 | } else { 115 | match self.s.poll()? { 116 | Async::Ready(Some((v, more))) => { 117 | self.eos = !more; 118 | Ok(Async::Ready(Some(v))) 119 | } 120 | Async::Ready(None) => 121 | Err(app_error!(other "GetListing: premature EOS")), 122 | Async::NotReady => 123 | Ok(Async::NotReady) 124 | } 125 | } 126 | } 127 | } 128 | 129 | 130 | #[test] 131 | fn test_get_listing() { 132 | init_env_logger!(); 133 | 134 | use util::test::ptk::*; 135 | let host_port = "127.0.0.1:58019"; 136 | let t = spawn_test_server(host_port, test_script!
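// recorded namenode exchange; in the hex below, "68:72:70:63" is the "hrpc" RPC magic,
// the expect block carries the connection context and the getListing request, and the
// send block replays the captured getListing response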
{ 137 | 138 | expect "68:72:70:63:09:00:00:00:00:00:1e:10:08:02:10:00:18:05:22:08:01:02:03:04:04:03:02:01:\ 139 | 0c:12:0a:0a:08:63:6c:6f:75:64:65:72:61:00:00:00:58:10:08:02:10:00:18:00:22:08:01:02:03:04:04:\ 140 | 03:02:01:3e:0a:0a:67:65:74:4c:69:73:74:69:6e:67:12:2e:6f:72:67:2e:61:70:61:63:68:65:2e:68:61:\ 141 | 64:6f:6f:70:2e:68:64:66:73:2e:70:72:6f:74:6f:63:6f:6c:2e:43:6c:69:65:6e:74:50:72:6f:74:6f:63:\ 142 | 6f:6c:18:01:07:0a:01:2f:12:00:18:00", 143 | 144 | send "00:00:01:70:12:08:00:10:00:18:09:3a:08:01:02:03:04:04:03:02:01:40:01:db:02:0a:d8:02:0a:\ 145 | 3d:08:01:12:0a:62:65:6e:63:68:6d:61:72:6b:73:18:00:22:03:08:ff:03:2a:04:68:64:66:73:32:0a:73:\ 146 | 75:70:65:72:67:72:6f:75:70:38:e1:d7:df:d6:d5:2b:40:00:50:00:58:00:68:8e:80:01:70:00:80:01:00:\ 147 | 0a:39:08:01:12:05:68:62:61:73:65:18:00:22:03:08:ed:03:2a:05:68:62:61:73:65:32:0a:73:75:70:65:\ 148 | 72:67:72:6f:75:70:38:b4:be:e4:95:f7:2b:40:00:50:00:58:00:68:8d:80:01:70:09:80:01:00:0a:31:08:\ 149 | 01:12:04:73:6f:6c:72:18:00:22:03:08:ed:03:2a:04:73:6f:6c:72:32:04:73:6f:6c:72:38:e1:91:e8:d6:\ 150 | d5:2b:40:00:50:00:58:00:68:f9:81:01:70:00:80:01:00:0a:36:08:01:12:03:74:6d:70:18:00:22:03:08:\ 151 | ff:07:2a:04:68:64:66:73:32:0a:73:75:70:65:72:67:72:6f:75:70:38:eb:b2:ab:b4:97:2c:40:00:50:00:\ 152 | 58:00:68:84:80:01:70:05:80:01:00:0a:37:08:01:12:04:75:73:65:72:18:00:22:03:08:ed:03:2a:04:68:\ 153 | 64:66:73:32:0a:73:75:70:65:72:67:72:6f:75:70:38:b7:b5:e6:d6:d5:2b:40:00:50:00:58:00:68:82:80:\ 154 | 01:70:08:80:01:00:0a:36:08:01:12:03:76:61:72:18:00:22:03:08:ed:03:2a:04:68:64:66:73:32:0a:73:\ 155 | 75:70:65:72:67:72:6f:75:70:38:a4:f2:e5:d6:d5:2b:40:00:50:00:58:00:68:85:80:01:70:02:80:01:00:\ 156 | 10:00" 157 | }); 158 | 159 | use std::net::ToSocketAddrs; 160 | 161 | let addr = host_port.to_socket_addrs().unwrap().next().unwrap(); 162 | 163 | let nncd = NNCD { 164 | addr, 165 | eff_user: "cloudera".to_owned(), 166 | client_id: Some(vec![1, 2, 3, 4, 4, 3, 2, 1]) 167 | }; 168 | let c = NNChannel::new(nncd, Duration::new(30, 0)); 169 | 170 | let gls = get_listing(c, "/".to_owned(), false); 171 | 172 | let result: Vec<HdfsFileStatusProto> = 173 | gls.map(|s| futures::stream::iter_ok::<_, Error>(s.into_iter())) 174 | .flatten() 175 | .collect() 176 | .wait() 177 | .expect("gls.wait()"); 178 | 179 | let y: Vec<Cow<str>> = result.iter().map(|fs| String::from_utf8_lossy(fs.get_path())).collect(); 180 | let z: Vec<Cow<str>> = (["benchmarks", "hbase", "solr", "tmp", "user", "var"]).iter().map(|x| Cow::from(*x)).collect(); 181 | assert_eq!(y, z); 182 | 183 | //----------------------------------- 184 | let _ = t.join().unwrap(); 185 | 186 | } -------------------------------------------------------------------------------- /rhdfs/src/result.rs: -------------------------------------------------------------------------------- 1 | use ::Error; 2 | use *; 3 | 4 | pub type Result<T> = StdResult<T, Error>; 5 | 6 | 7 | pub trait ErrorConverter<T> { 8 | fn c_err(self) -> T; 9 | } 10 | 11 | impl<T, E> ErrorConverter<StdResult<T, E>> for Result<T> where E: From<Error> { 12 | fn c_err(self) -> StdResult<T, E> { 13 | self.map_err(|e| E::from(e)) 14 | } 15 | } -------------------------------------------------------------------------------- /rhdfs/src/types.rs: -------------------------------------------------------------------------------- 1 | pub use std::io::Error as IoError; 2 | pub use std::io::Result as IoResult; 3 | pub use std::error::Error as StdError; 4 | pub use std::result::Result as StdResult; 5 | 6 | use futures::Future; 7 | 8 | pub type BFI<I> = Box<Future<Item=I, Error=::Error> + Send>; 9 |
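// `BFI<I>` boxes a future so protocol code can name its return type across module
// boundaries; a minimal usage sketch (the function below is hypothetical, not part
// of the crate):
//
//     fn answer() -> BFI<i32> {
//         Box::new(futures::future::ok::<_, ::Error>(42))
//     }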
-------------------------------------------------------------------------------- /rhdfs/src/util.rs: -------------------------------------------------------------------------------- 1 | #![allow(dead_code)] 2 | 3 | use std::fmt; 4 | use std::fmt::Debug; 5 | 6 | 7 | /// Vector of `n` default values 8 | #[inline] 9 | pub fn vector_of_size<T: Default + Clone>(n: usize) -> Vec<T> { 10 | let mut v = Vec::with_capacity(n); 11 | v.resize(n, T::default()); 12 | v 13 | } 14 | 15 | /* 16 | /// ??? Discard contents of `v` and make it a vector of `n` default values 17 | #[inline] 18 | pub fn recycle_to_vector_of_size<T: Default + Clone>(mut v: Vec<T>, n: usize) -> Vec<T> { 19 | v.resize(n, T::default()); 20 | v.shrink_to_fit(); 21 | v 22 | } 23 | /// `resize` followed by `shrink_to_fit` 24 | #[inline] 25 | pub fn to_vector_of_size<T: Default + Clone>(v: &mut Vec<T>, n: usize) -> () { 26 | v.resize(n, T::default()); 27 | v.shrink_to_fit(); 28 | } 29 | */ 30 | 31 | 32 | #[inline] 33 | pub fn vec_cons<T>(mut v: Vec<T>, t: T) -> Vec<T> { 34 | v.push(t); 35 | v 36 | } 37 | 38 | #[inline] 39 | pub fn vec_cons_opt<T>(mut v: Vec<T>, t: Option<T>) -> Vec<T> { 40 | t.map(|t| v.push(t)); 41 | v 42 | } 43 | 44 | #[inline] 45 | pub fn vec_plus<T>(mut v: Vec<T>, mut t: Vec<T>) -> Vec<T> { 46 | v.append(&mut t); 47 | v 48 | } 49 | 50 | 51 | /* 52 | pub trait LazyMonoid<T> { 53 | fn lazy_plus<F>(self, F) -> T where F: FnOnce() -> T; 54 | } 55 | 56 | impl<T, E> LazyMonoid<Result<Option<T>, E>> for Result<Option<T>, E> { 57 | fn lazy_plus<F>(self, t: F) -> Result<Option<T>, E> where F: FnOnce() -> Result<Option<T>, E> { 58 | match self { 59 | r @ Ok(Some(..)) | r @ Err(..) => r, 60 | Ok(None) => t() 61 | } 62 | } 63 | } 64 | */ 65 | 66 | 67 | /// Opt-State-Delta-and-Value 68 | #[derive(Debug)] 69 | pub enum SnV<S, V> { 70 | SV(S, V), 71 | V(V) 72 | } 73 | 74 | /// Handles `SnV` 75 | #[inline] 76 | pub fn switch_state_v<S, V>(s: &mut S, sr: SnV<S, V>) -> V { 77 | match sr { 78 | SnV::SV(ns, r) => { *s = ns; r } 79 | SnV::V(r) => r 80 | } 81 | } 82 | /// Handles `SnV`, plus trace 83 | #[inline] 84 | pub fn switch_state_vt<S: Debug, V: Debug>(tgt: &'static str, s: &mut S, sr: SnV<S, V>) -> V { 85 | trace!(target: tgt, "switch-state: {:?} => {:?}", s, sr); 86 | switch_state_v(s, sr) 87 | } 88 | 89 | /// Generates (via `f`) and handles `SnV` 90 | #[inline] 91 | pub fn switch_state<S, V, F>(s: &mut S, f: F) -> V where F: FnOnce(&mut S) -> SnV<S, V> { 92 | let snv = f(s); 93 | switch_state_v(s, snv) 94 | } 95 | 96 | 97 | /// Generates (via `f`) and handles `SnV`, plus trace 98 | #[inline] 99 | pub fn switch_state_t<S: Debug, V: Debug, F>(tgt: &'static str, s: &mut S, f: F) -> V where F: FnOnce(&mut S) -> SnV<S, V> { 100 | let snv = f(s); 101 | switch_state_vt(tgt, s, snv) 102 | } 103 | 104 | //FSM handling primitives 105 | 106 | ///Either switch to a (S)tate or return a (V)alue 107 | #[derive(Debug)] 108 | pub enum SV<S, V> { 109 | S(S), 110 | V(V) 111 | } 112 | 113 | #[inline] 114 | pub fn fsm_turn<S, V, F>(s: &mut S, mut f: F) -> V where F: FnMut(&mut S) -> SV<S, V> { 115 | loop { 116 | let sv = f(s); 117 | match sv { 118 | SV::S(ns) => *s = ns, 119 | SV::V(v) => break v 120 | } 121 | } 122 | } 123 | 124 | #[inline] 125 | pub fn fsm_turn_t<S: Debug, V: Debug, F>(tgt: &'static str, s: &mut S, mut f: F) -> V where F: FnMut(&mut S) -> SV<S, V> { 126 | loop { 127 | let sv = f(s); 128 | trace!(target: tgt, "fsm_turn: {:?} => {:?}", s, sv); 129 | match sv { 130 | SV::S(ns) => *s = ns, 131 | SV::V(v) => break v 132 | } 133 | } 134 | } 135 | 136 | #[macro_export] 137 | macro_rules!
fsm_turn_tm { 138 | ($s:expr, $f:expr) => { 139 | loop { 140 | let sv = $f; 141 | trace!("fsm_turn(m): {:?} => {:?}", $s, sv); 142 | match sv { 143 | SV::S(ns) => $s = ns, 144 | SV::V(v) => break v 145 | } 146 | } 147 | }; 148 | } 149 | 150 | #[derive(Debug)] 151 | pub enum SxV { 152 | S(S), 153 | V(V), 154 | SV(S, V) 155 | } 156 | 157 | #[inline] 158 | pub fn fsm_turn_x(s: &mut S, mut f: F) -> V where F: FnMut(&mut S) -> SxV { 159 | loop { 160 | let sv = f(s); 161 | match sv { 162 | SxV::S(ns) => *s = ns, 163 | SxV::V(v) => break v, 164 | SxV::SV(ns, v) => { *s = ns; break v } 165 | } 166 | } 167 | } 168 | 169 | #[inline] 170 | pub fn fsm_turn_xt(tgt: &'static str, s: &mut S, mut f: F) -> V where F: FnMut(&mut S) -> SxV { 171 | loop { 172 | let sv = f(s); 173 | trace!(target: tgt, "fsm_turn_x: {:?} => {:?}", s, sv); 174 | match sv { 175 | SxV::S(ns) => *s = ns, 176 | SxV::V(v) => break v, 177 | SxV::SV(ns, v) => { *s = ns; break v } 178 | } 179 | } 180 | } 181 | 182 | /// Compact binary writer 183 | /// If a slice is longer than `T` bytes, writes only `LH` initial bytes followed by 184 | /// omitted/total bytes count and trailing `LT` bytes 185 | /// Also writes string representation along with byte dump 186 | pub struct CBinary<'a>(pub &'a [u8]); 187 | 188 | impl<'a> CBinary<'a> { 189 | const T: usize = 36; 190 | const LH: usize = 16; 191 | const LT: usize = 16; 192 | } 193 | 194 | impl<'a> fmt::Debug for CBinary<'a> { 195 | fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { 196 | ::fmt(self, fmt) 197 | } 198 | } 199 | 200 | impl<'a> fmt::Display for CBinary<'a> { 201 | fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { 202 | fn cw(a: &[u8], fmt: &mut fmt::Formatter) -> fmt::Result { 203 | write!(fmt, "b\"")?; 204 | for &c in a { 205 | // https://doc.rust-lang.org/reference.html#byte-escapes 206 | if c == b'\n' { 207 | write!(fmt, "\\n")?; 208 | } else if c == b'\r' { 209 | write!(fmt, "\\r")?; 210 | } else if c == b'\t' { 211 | write!(fmt, "\\t")?; 212 | } else if c == b'\\' || c == b'"' { 213 | write!(fmt, "\\{}", c as char)?; 214 | } else if c == b'\0' { 215 | write!(fmt, "\\0")?; 216 | // ASCII printable 217 | } else if c >= 0x20 && c < 0x7f { 218 | write!(fmt, "{}", c as char)?; 219 | } else { 220 | write!(fmt, "\\x{:02x}", c)?; 221 | } 222 | } 223 | write!(fmt, "\"")?; 224 | Ok(()) 225 | } 226 | 227 | fn xw(a: &[u8], fmt: &mut fmt::Formatter) -> fmt::Result { 228 | for byte in a { 229 | write!(fmt, "{:02x} ", byte)?; 230 | } 231 | Ok(()) 232 | } 233 | 234 | let l = self.0.len(); 235 | let (a, b) = if l <= CBinary::T { 236 | (self.0, None) 237 | } else { 238 | (&self.0[..CBinary::LH], Some(( 239 | l - CBinary::LH - CBinary::LT, 240 | &self.0[l - CBinary::LT..] 241 | ))) 242 | }; 243 | 244 | write!(fmt, "[")?; 245 | cw(a, fmt)?; 246 | write!(fmt, " ")?; 247 | xw(a, fmt)?; 248 | if let Some((c, b)) = b { 249 | write!(fmt, "<{}/{} bytes> ", c, l)?; 250 | cw(b, fmt)?; 251 | write!(fmt, " ")?; 252 | xw(b, fmt)?; 253 | } 254 | write!(fmt, "]")?; 255 | Ok(()) 256 | } 257 | } 258 | 259 | #[inline] 260 | pub fn trace_arg_result(comment: &'static str, a: A, f: impl Fn(A) -> R) -> R { 261 | use log::Level::Trace; 262 | if log_enabled!(Trace) { 263 | let s_a = format!("{:?}", a); 264 | let rv = f(a); 265 | trace!("{}: {} -> {:?}", comment, s_a, rv); 266 | rv 267 | } else { 268 | f(a) 269 | } 270 | } 271 | 272 | 273 | macro_rules! 
trace_ar { 274 | { $comment:tt: ($a:expr) $r:expr } => { { 275 | use log::Level::Trace; 276 | if log_enabled!(Trace) { 277 | let s_a = format!("{:?}", $a); 278 | let rv = $r; 279 | trace!("{}: {} -> {:?}", $comment, s_a, rv); 280 | rv 281 | } else { 282 | $r 283 | } 284 | } }; 285 | { $comment:tt: $s:expr => ($a:expr) $r:expr } => { { 286 | use log::Level::Trace; 287 | if log_enabled!(Trace) { 288 | let (s_s, s_a) = (format!("{:?}", $s), format!("{:?}", $a)); 289 | let rv = $r; 290 | trace!("{}: {}/{} -> {:?}/{:?}", $comment, s_s, s_a, rv, $s); 291 | rv 292 | } else { 293 | $r 294 | } 295 | } }; 296 | } 297 | 298 | 299 | 300 | //---------------- 301 | 302 | #[cfg(test)] 303 | #[macro_use] 304 | pub mod test { 305 | use std::fmt::{Display, Debug, Formatter, Result}; 306 | 307 | macro_rules! init_env_logger { 308 | () => { { 309 | extern crate env_logger; 310 | let _ = env_logger::try_init(); 311 | } }; 312 | } 313 | 314 | pub trait ToBytes { 315 | fn to_bytes(&self) -> Vec; 316 | } 317 | 318 | impl ToBytes for str { 319 | fn to_bytes(&self) -> Vec { 320 | enum S { 321 | N, 322 | B(u8) 323 | }; 324 | 325 | let mut rv = Vec::new(); 326 | 327 | self.chars().fold(S::N, |s, ch| match (s, ch) { 328 | (S::N, c) if c.is_digit(16) => S::B(c.to_digit(16).unwrap() as u8), 329 | (S::B(b), c) if c.is_digit(16) => { 330 | rv.push((b << 4) | c.to_digit(16).unwrap() as u8); 331 | S::N 332 | }, 333 | (S::N, ':') => S::N, 334 | (S::N, c) if c == ' ' || c == '\x0a' || c == '\t' => S::N, 335 | _ => panic!("Invalid hex string") 336 | }); 337 | rv 338 | } 339 | } 340 | 341 | #[test] 342 | fn test_to_bytes() { 343 | assert_eq!("00:01:02".to_bytes(), vec![0x00u8, 0x01, 0x02]); 344 | assert_eq!(" 345 | 00: 346 | 01: 347 | 02: 348 | 03".to_bytes(), vec![0x00u8, 0x01, 0x02, 0x03]); 349 | 350 | assert_eq!(" 351 | 00 01 02 03 352 | 07 06 05 04".to_bytes(), 353 | vec![0x00u8, 0x01, 0x02, 0x03, 0x07, 0x06, 0x05, 0x04] 354 | ); 355 | } 356 | 357 | 358 | pub struct HexSlice<'a>(pub &'a [u8]); 359 | 360 | impl<'a> HexSlice<'a> { 361 | pub fn new(data: &'a T) -> HexSlice<'a> 362 | where T: ?Sized + AsRef<[u8]> + 'a 363 | { 364 | HexSlice(data.as_ref()) 365 | } 366 | } 367 | 368 | impl<'a> Display for HexSlice<'a> { 369 | fn fmt(&self, f: &mut Formatter) -> Result { 370 | for byte in self.0 { 371 | write!(f, "{:02x} ", byte)?; 372 | } 373 | Ok(()) 374 | } 375 | } 376 | impl<'a> Debug for HexSlice<'a> { 377 | fn fmt(&self, f: &mut Formatter) -> Result { 378 | ::fmt(self, f) 379 | } 380 | } 381 | 382 | #[macro_export] 383 | macro_rules! assert_enum_variant { 384 | ($e: expr, $p:pat) => { match $e { 385 | $p => (), 386 | other => panic!("assertion failed: expected {}, got {:?}", stringify!($p), other) 387 | }}; 388 | } 389 | 390 | 391 | /// Protocols Test Kit 392 | #[macro_use] 393 | pub mod ptk { 394 | use util::*; 395 | use util::test::*; 396 | use std::net::{TcpListener, TcpStream}; 397 | use std::thread; 398 | use std::io::{Read, Write}; 399 | 400 | /// Test Script Instruction 401 | pub enum TsInstr { 402 | Expect(&'static str), 403 | Send(&'static str) 404 | } 405 | 406 | macro_rules! test_script_cmd { 407 | { expect $v:expr } => { TsInstr::Expect($v) }; 408 | { send $v:expr } => { TsInstr::Send($v) }; 409 | } 410 | 411 | #[macro_export] 412 | macro_rules! 
test_script { 413 | { $($c:ident $v:expr),* } => { vec![$(test_script_cmd!($c $v)),*] }; 414 | } 415 | 416 | fn expect(conn: &mut TcpStream, e: Vec) { 417 | 418 | let mut v = vector_of_size(e.len()); 419 | let r = conn.read_exact(&mut v); 420 | trace!("ptk: expect: received {}", HexSlice(&v)); 421 | assert!(r.is_ok()); 422 | assert_eq!(v, e); 423 | } 424 | 425 | fn send(conn: &mut TcpStream, e: Vec) { 426 | let r = conn.write(&e); 427 | assert_eq!(r.ok(), Some(e.len())); 428 | } 429 | 430 | /// gets addr as `"host:port"` and test script 431 | pub fn spawn_test_server<'a>(bind_addr: &'a str, script: Vec) -> thread::JoinHandle<()> { 432 | let listener = TcpListener::bind(bind_addr).unwrap(); 433 | 434 | let t = thread::spawn(move || { 435 | let mut conn = listener.incoming().next().unwrap().unwrap(); 436 | 437 | for instr in script { 438 | match instr { 439 | TsInstr::Expect(s) => expect(&mut conn, s.to_bytes()), 440 | TsInstr::Send(s) => send(&mut conn, s.to_bytes()) 441 | } 442 | } 443 | }); 444 | 445 | t 446 | } 447 | } 448 | 449 | 450 | } 451 | 452 | 453 | --------------------------------------------------------------------------------