├── jar
│   ├── jar.md5
│   └── chdfs_hadoop_plugin_network-3.7.jar
├── src
└── main
│ └── java
│ ├── chdfsHadoopPluginNetworkVersionInfo.properties
│ └── com
│ └── qcloud
│ └── chdfs
│ ├── fs
│ ├── FileSystemWithCleanerAndSSE.java
│ ├── FileSystemWithLockCleaner.java
│ ├── FileLockCleaner.java
│ ├── ServerSideEncryption.java
│ ├── RangerPermissionChecker.java
│ ├── AlreadyLoadedFileSystemInfo.java
│ ├── utils.java
│ ├── CHDFSDelegateFSAdapter.java
│ ├── CHDFSHadoopFileSystemJarLoader.java
│ └── CHDFSHadoopFileSystemAdapter.java
│ └── permission
│ └── RangerAccessType.java
├── .gitignore
├── README.md
├── LICENSE
└── pom.xml
/jar/jar.md5:
--------------------------------------------------------------------------------
1 | MD5 (chdfs_hadoop_plugin_network-3.7.jar) = 4ed9d21bb6f95e03efd8f4fafc710d43
--------------------------------------------------------------------------------
/src/main/java/chdfsHadoopPluginNetworkVersionInfo.properties:
--------------------------------------------------------------------------------
1 | network_version=${project.version}
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | chdfs_hadoop_plugin_network.iml
2 | .idea/
3 | target/
4 | .DS_Store
5 | dependency-reduced-pom.xml
--------------------------------------------------------------------------------
/jar/chdfs_hadoop_plugin_network-3.7.jar:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/tencentyun/chdfs-hadoop-plugin/HEAD/jar/chdfs_hadoop_plugin_network-3.7.jar
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | ### 背景
2 | 本插件用于hadoop环境下访问chdfs
3 |
4 | ### 使用方法
5 | 1. 将jar目录下的jar包放于hadoop的lib目录下,对于emr构建的hadoop集群, 路径通常是/usr/local/service/hadoop/share/hadoop/common/lib/,放置jar包后建议重启node_manager
6 | 2. 配置请参考官网文档
7 |
--------------------------------------------------------------------------------
/src/main/java/com/qcloud/chdfs/fs/FileSystemWithCleanerAndSSE.java:
--------------------------------------------------------------------------------
1 | package com.qcloud.chdfs.fs;
2 |
/**
 * Convenience base class combining {@link FileSystemWithLockCleaner} (a Hadoop
 * FileSystem with file-lock cleanup support) with {@link ServerSideEncryption}
 * (SSE on/off toggling). The dynamically loaded CHDFS implementation and the
 * adapter both extend this so callers can rely on the combined capabilities.
 */
public abstract class FileSystemWithCleanerAndSSE extends FileSystemWithLockCleaner implements ServerSideEncryption {
}
5 |
--------------------------------------------------------------------------------
/src/main/java/com/qcloud/chdfs/fs/FileSystemWithLockCleaner.java:
--------------------------------------------------------------------------------
1 | package com.qcloud.chdfs.fs;
2 |
3 | import org.apache.hadoop.fs.FileSystem;
4 |
/**
 * Hadoop {@link FileSystem} base class that additionally exposes the
 * {@link FileLockCleaner} capability (explicit release of file locks).
 * Concrete behavior is supplied by the dynamically loaded CHDFS implementation.
 */
public abstract class FileSystemWithLockCleaner extends FileSystem implements FileLockCleaner {
}
7 |
--------------------------------------------------------------------------------
/src/main/java/com/qcloud/chdfs/permission/RangerAccessType.java:
--------------------------------------------------------------------------------
1 | package com.qcloud.chdfs.permission;
2 |
/**
 * Kinds of filesystem access that are authorized through the Ranger
 * permission checker (see {@code RangerPermissionChecker#checkPermission}).
 * Declaration order is part of the wire/ordinal contract — do not reorder.
 */
public enum RangerAccessType {
    LIST,
    WRITE,
    READ,
    DELETE
    // NOTE: the previous explicit no-arg constructor was removed — enum
    // constructors are implicitly private and an empty one is redundant.
}
12 |
--------------------------------------------------------------------------------
/src/main/java/com/qcloud/chdfs/fs/FileLockCleaner.java:
--------------------------------------------------------------------------------
1 | package com.qcloud.chdfs.fs;
2 |
3 | import org.apache.hadoop.fs.Path;
4 |
5 | import java.io.IOException;
6 |
/**
 * Capability interface for filesystems that can explicitly release a lock
 * held on a file. Implemented by the dynamically loaded CHDFS filesystem.
 */
public interface FileLockCleaner {
    /**
     * Releases the file lock associated with the given path.
     *
     * @param p path whose file lock should be released
     * @throws IOException if the lock cannot be released
     */
    void releaseFileLock(Path p) throws IOException;
}
--------------------------------------------------------------------------------
/src/main/java/com/qcloud/chdfs/fs/ServerSideEncryption.java:
--------------------------------------------------------------------------------
1 | package com.qcloud.chdfs.fs;
2 |
3 | import java.io.IOException;
4 |
/**
 * Capability interface for toggling server-side encryption (SSE) on the
 * underlying filesystem. Actual semantics are supplied by the dynamically
 * loaded CHDFS implementation.
 */
public interface ServerSideEncryption {
    /**
     * Enables SSE in COS mode — presumably encryption with COS-managed keys;
     * NOTE(review): confirm key-management semantics against the plugin impl.
     *
     * @throws IOException if SSE cannot be enabled
     */
    void enableSSECos() throws IOException;
    /**
     * Disables server-side encryption.
     *
     * @throws IOException if SSE cannot be disabled
     */
    void disableSSE() throws IOException;
}
--------------------------------------------------------------------------------
/src/main/java/com/qcloud/chdfs/fs/RangerPermissionChecker.java:
--------------------------------------------------------------------------------
1 | package com.qcloud.chdfs.fs;
2 |
3 | import com.qcloud.chdfs.permission.RangerAccessType;
4 | import org.apache.hadoop.fs.Path;
5 |
6 | import java.io.IOException;
7 |
/**
 * Capability interface for filesystems that authorize operations through
 * Apache Ranger-style permission checks.
 */
public interface RangerPermissionChecker {
    /**
     * Checks that the current user may perform the given access type on the path.
     *
     * @param f                path being accessed
     * @param rangerAccessType requested access (LIST/WRITE/READ/DELETE)
     * @throws IOException if the check fails; NOTE(review): whether denial is
     *                     reported as AccessControlException or plain IOException
     *                     is decided by the plugin impl — confirm before relying on it
     */
    void checkPermission(Path f, RangerAccessType rangerAccessType) throws IOException;
}
11 |
--------------------------------------------------------------------------------
/src/main/java/com/qcloud/chdfs/fs/AlreadyLoadedFileSystemInfo.java:
--------------------------------------------------------------------------------
1 | package com.qcloud.chdfs.fs;
2 |
/**
 * Immutable record of a plugin FileSystem class that has already been loaded,
 * used by {@code CHDFSHadoopFileSystemJarLoader} to skip re-downloading and
 * re-loading when jar path, version id and md5 all match.
 */
public class AlreadyLoadedFileSystemInfo {
    // Package-private fields are read directly by CHDFSHadoopFileSystemJarLoader;
    // made final since nothing mutates them after construction.
    final String versionId;
    final String jarPath;
    final String jarMd5;
    // Was a raw Class; Class<?> keeps type safety without changing callers.
    final Class<?> chdfsFSClass;

    /**
     * @param versionId    plugin version id reported by the jar plugin server
     * @param jarPath      URL the jar was downloaded from
     * @param jarMd5       expected md5 of the jar
     * @param chdfsFSClass loaded CHDFSHadoopFileSystem implementation class
     */
    public AlreadyLoadedFileSystemInfo(String versionId, String jarPath, String jarMd5,
            Class<?> chdfsFSClass) {
        this.versionId = versionId;
        this.jarPath = jarPath;
        this.jarMd5 = jarMd5;
        this.chdfsFSClass = chdfsFSClass;
    }
}
17 |
--------------------------------------------------------------------------------
/src/main/java/com/qcloud/chdfs/fs/utils.java:
--------------------------------------------------------------------------------
1 | package com.qcloud.chdfs.fs;
2 |
3 | import java.io.Closeable;
4 | import java.io.IOException;
5 | import java.io.InputStream;
6 |
/**
 * Small I/O helpers shared by the adapter classes.
 *
 * <p>The lowercase class name {@code utils} is intentionally preserved for
 * compatibility with existing callers, despite Java naming conventions.
 */
public class utils {

    // Utility class: prevent instantiation.
    private utils() {
    }

    /**
     * Closes an input stream, ignoring any {@link IOException}. Null-safe.
     *
     * @param input stream to close; may be {@code null}
     */
    public static void closeQuietly(InputStream input) {
        closeQuietly((Closeable) input);
    }

    /**
     * Closes a resource, ignoring any {@link IOException}. Null-safe.
     *
     * @param closeable resource to close; may be {@code null}
     */
    public static void closeQuietly(Closeable closeable) {
        try {
            if (closeable != null) {
                closeable.close();
            }
        } catch (IOException ignore) {
            // best-effort close: failures while closing are deliberately swallowed
        }
    }
}
21 |
--------------------------------------------------------------------------------
/src/main/java/com/qcloud/chdfs/fs/CHDFSDelegateFSAdapter.java:
--------------------------------------------------------------------------------
1 | package com.qcloud.chdfs.fs;
2 |
3 | import org.apache.hadoop.conf.Configuration;
4 | import org.apache.hadoop.fs.DelegateToFileSystem;
5 |
6 | import java.io.IOException;
7 | import java.net.URI;
8 | import java.net.URISyntaxException;
9 |
/**
 * AbstractFileSystem adapter for the Hadoop FileContext API: delegates all
 * "ofs" operations to a {@link CHDFSHadoopFileSystemAdapter} instance.
 */
public class CHDFSDelegateFSAdapter extends DelegateToFileSystem {

    /**
     * @param theUri filesystem URI being initialized
     * @param conf   Hadoop configuration
     * @throws IOException        if the delegate filesystem fails to initialize
     * @throws URISyntaxException if {@code theUri} is malformed
     */
    public CHDFSDelegateFSAdapter(URI theUri, Configuration conf) throws IOException, URISyntaxException {
        // 'false' = authority is not required to carry a port
        super(theUri, new CHDFSHadoopFileSystemAdapter(), conf, CHDFSHadoopFileSystemAdapter.SCHEME, false);
    }

    /** CHDFS URIs have no default port; -1 tells Hadoop none applies. */
    @Override
    public int getUriDefaultPort() {
        return -1;
    }
}
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | MIT License
2 |
3 | Copyright (c) 2019 腾讯云
4 |
5 | Permission is hereby granted, free of charge, to any person obtaining a copy
6 | of this software and associated documentation files (the "Software"), to deal
7 | in the Software without restriction, including without limitation the rights
8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | copies of the Software, and to permit persons to whom the Software is
10 | furnished to do so, subject to the following conditions:
11 |
12 | The above copyright notice and this permission notice shall be included in all
13 | copies or substantial portions of the Software.
14 |
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | SOFTWARE.
22 |
--------------------------------------------------------------------------------
/pom.xml:
--------------------------------------------------------------------------------
1 |
4 | 4.0.0
5 |
6 | com.qcloud
7 | chdfs_hadoop_plugin_network
8 | 3.7
9 | jar
10 |
11 | chdfs_hadoop_plugin_network
12 | Tencent Qcloud chdfs hadoop plugins.
13 | https://cloud.tencent.com/document/product/1105/36368
14 |
15 |
16 |
17 | chdfs-hadoop-plugin
18 | https://github.com/tencentyun/chdfs-hadoop-plugin
19 |
20 |
21 |
22 |
23 |
24 | chengwu
25 | chengwu@tencent.com
26 |
27 |
28 |
29 |
30 |
31 | UTF-8
32 | 1.7
33 | 1.7
34 | 2.8.5
35 | 4.5.13
36 | 4.4.13
37 |
38 |
39 |
40 | scm:git:https://github.com/tencentyun/chdfs-hadoop-plugin.git
41 |
42 |
43 | scm:git:https://github.com/tencentyun/chdfs-hadoop-plugin.git
44 |
45 | https://github.com/tencentyun/chdfs-hadoop-plugin
46 |
47 |
48 |
49 |
50 |
51 | org.apache.hadoop
52 | hadoop-common
53 | ${hadoop.version}
54 | provided
55 |
56 |
57 |
58 | org.apache.httpcomponents
59 | httpclient
60 | ${httpclient.version}
61 |
62 |
63 |
64 | org.apache.httpcomponents
65 | httpcore
66 | ${httpcore.version}
67 |
68 |
69 |
70 | junit
71 | junit
72 | 4.13.1
73 | test
74 |
75 |
76 |
77 |
78 |
79 | oss
80 | chdfs_hadoop_plugin_network
81 |
82 | https://oss.sonatype.org/service/local/staging/deploy/maven2
83 |
84 |
85 |
86 |
87 | oss
88 | chdfs_hadoop_plugin_network
89 |
90 | https://oss.sonatype.org/content/repositories/snapshots
91 |
92 |
93 |
94 |
95 |
96 |
97 |
98 |
99 | src/main/java
100 | true
101 |
102 | chdfsHadoopPluginNetworkVersionInfo.properties
103 |
104 |
105 |
106 |
107 |
108 |
109 | org.apache.maven.plugins
110 | maven-source-plugin
111 | 3.2.0
112 |
113 | true
114 |
115 |
116 |
117 | compile
118 |
119 | jar
120 |
121 |
122 |
123 |
124 |
125 |
126 | org.apache.maven.plugins
127 | maven-javadoc-plugin
128 | 2.9.1
129 |
130 | -Xdoclint:none
131 |
132 |
133 |
134 | javadoc-jar
135 | package
136 |
137 | jar
138 |
139 |
140 |
141 |
142 |
143 |
144 | org.apache.maven.plugins
145 | maven-deploy-plugin
146 | 2.8.2
147 |
148 |
149 | deploy
150 | deploy
151 |
152 | deploy
153 |
154 |
155 |
156 |
157 |
158 |
159 | org.apache.maven.plugins
160 | maven-gpg-plugin
161 | 1.5
162 |
163 |
164 | sign-artifacts
165 | verify
166 |
167 | sign
168 |
169 |
170 |
171 |
172 |
173 |
174 | org.sonatype.central
175 | central-publishing-maven-plugin
176 | 0.8.0
177 | true
178 |
179 | central
180 | required
181 |
182 |
183 |
184 |
185 | org.apache.maven.plugins
186 | maven-shade-plugin
187 | 3.4.1
188 |
189 |
190 | package
191 |
192 | shade
193 |
194 |
195 | false
196 |
197 |
198 | org.apache.http
199 | com.qcloud.shaded.org.apache.http
200 |
201 |
202 | org.apache.commons.logging
203 | com.qcloud.shaded.org.apache.commons.logging
204 |
205 |
206 | org.apache.httpcomponents
207 | com.qcloud.shaded.org.apache.httpcomponents
208 |
209 |
210 | org.apache.http.impl
211 | com.qcloud.shaded.org.apache.http.impl
212 |
213 |
214 | org.apache.http.client
215 | com.qcloud.shaded.org.apache.http.client
216 |
217 |
218 | org.apache.commons.codec
219 | com.qcloud.shaded.org.apache.commons.codec
220 |
221 |
222 |
223 |
224 |
225 |
226 |
227 |
228 |
229 |
230 |
231 |
232 |
233 |
--------------------------------------------------------------------------------
/src/main/java/com/qcloud/chdfs/fs/CHDFSHadoopFileSystemJarLoader.java:
--------------------------------------------------------------------------------
1 | package com.qcloud.chdfs.fs;
2 |
3 | import com.google.gson.JsonObject;
4 | import com.google.gson.JsonParser;
5 | import org.apache.commons.codec.binary.Hex;
6 | import org.apache.hadoop.fs.FileSystem;
7 | import org.apache.hadoop.io.IOUtils;
8 | import org.apache.hadoop.io.MD5Hash;
9 | import org.apache.hadoop.util.VersionInfo;
10 | import org.apache.http.client.config.RequestConfig;
11 | import org.apache.http.client.methods.CloseableHttpResponse;
12 | import org.apache.http.client.methods.HttpGet;
13 | import org.apache.http.impl.client.CloseableHttpClient;
14 | import org.apache.http.impl.client.HttpClients;
15 | import org.slf4j.Logger;
16 | import org.slf4j.LoggerFactory;
17 |
18 | import java.io.BufferedInputStream;
19 | import java.io.BufferedOutputStream;
20 | import java.io.ByteArrayOutputStream;
21 | import java.io.File;
22 | import java.io.FileInputStream;
23 | import java.io.FileOutputStream;
24 | import java.io.IOException;
25 | import java.io.UnsupportedEncodingException;
26 | import java.net.HttpURLConnection;
27 | import java.net.MalformedURLException;
28 | import java.net.URL;
29 | import java.net.URLClassLoader;
30 | import java.net.URLConnection;
31 | import java.net.URLEncoder;
32 | import java.nio.channels.FileLock;
33 | import java.nio.channels.OverlappingFileLockException;
34 | import java.util.concurrent.ThreadLocalRandom;
35 |
36 | class CHDFSHadoopFileSystemJarLoader {
37 |
38 | private static final Logger log = LoggerFactory.getLogger(CHDFSHadoopFileSystemJarLoader.class);
39 | private static AlreadyLoadedFileSystemInfo alreadyLoadedFileSystemInfo;
40 | private String versionId;
41 | private String jarPath;
42 |
43 | private String jarHost;
44 | private String jarMd5;
45 | private FileSystem actualFileSystem;
46 |
47 | CHDFSHadoopFileSystemJarLoader() {
48 | }
49 |
50 | synchronized void init(String mountPointAddr, long appid, int jarPluginServerPort, String tmpDirPath,
51 | boolean jarPluginServerHttps, String cosEndPointSuffix, boolean distinguishHost, String networkVersionId)
52 | throws IOException {
53 | if (this.actualFileSystem == null) {
54 | long queryStartMs = System.currentTimeMillis();
55 | queryJarPluginInfo(mountPointAddr, appid, jarPluginServerPort, jarPluginServerHttps, cosEndPointSuffix);
56 | log.debug("query jar plugin info usedMs: {}", System.currentTimeMillis() - queryStartMs);
57 | this.actualFileSystem = getAlreadyLoadedClassInfo(this.getClass().getClassLoader(), this.jarPath,
58 | this.versionId, this.jarMd5, tmpDirPath, this.jarHost, distinguishHost, networkVersionId);
59 | }
60 | }
61 |
62 | private void parseJarPluginInfoResp(String respStr, String cosEndPointSuffix) throws IOException {
63 | JsonObject respJson = new JsonParser().parse(respStr).getAsJsonObject();
64 | if (!respJson.has("Response")) {
65 | String errMsg = String.format("resp json miss element Response, resp: %s", respStr);
66 | throw new IOException(errMsg);
67 | }
68 |
69 | if (!respJson.get("Response").getAsJsonObject().has("HadoopPluginJar")) {
70 | String errMsg = String.format("resp json miss element Response.HadoopPluginJar, resp: %s", respStr);
71 | throw new IOException(errMsg);
72 | }
73 | JsonObject jarInfoJson = respJson.get("Response").getAsJsonObject().get("HadoopPluginJar").getAsJsonObject();
74 | if (!jarInfoJson.has("VersionId")) {
75 | String errMsg = String.format("resp miss config Response.HadoopPluginJar.VersionId, resp: %s", respStr);
76 | throw new IOException(errMsg);
77 | } else {
78 | this.versionId = jarInfoJson.get("VersionId").getAsString();
79 | }
80 |
81 | if (!jarInfoJson.has("JarPath")) {
82 | String errMsg = String.format("resp miss config Response.HadoopPluginJar.JarPath, resp: %s", respStr);
83 | throw new IOException(errMsg);
84 | } else {
85 | this.jarHost = new URL(jarInfoJson.get("JarPath").getAsString()).getAuthority();
86 | if (cosEndPointSuffix != null) {
87 | String jarPath = jarInfoJson.get("JarPath").getAsString();
88 | int dotIndex = jarPath.indexOf('.');
89 | if (dotIndex == -1) {
90 | String errMsg = String.format("invalid jar path : %s", jarPath);
91 | throw new IOException(errMsg);
92 | }
93 |
94 | int slashIndex = jarPath.indexOf('/', dotIndex);
95 | if (slashIndex == -1) {
96 | String errMsg = String.format("invalid jar path : %s", jarPath);
97 | throw new IOException(errMsg);
98 | }
99 | this.jarPath = jarPath.substring(0, dotIndex + 1) + cosEndPointSuffix + jarPath.substring(slashIndex);
100 | } else {
101 | this.jarPath = jarInfoJson.get("JarPath").getAsString();
102 | }
103 | }
104 |
105 | if (!jarInfoJson.has("JarMd5")) {
106 | String errMsg = String.format("resp miss config Response.HadoopPluginJar.JarMd5, resp: %s", respStr);
107 | throw new IOException(errMsg);
108 | } else {
109 | this.jarMd5 = jarInfoJson.get("JarMd5").getAsString();
110 | }
111 | }
112 |
113 | private void doQueryJarPluginInfo(String mountPointAddr, long appid, int jarPluginServerPort,
114 | boolean jarPluginServerHttpsFlag, String cosEndPointSuffix) throws IOException {
115 | String hadoopVersion = VersionInfo.getVersion();
116 | if (hadoopVersion == null) {
117 | hadoopVersion = "unknown";
118 | }
119 |
120 | URL queryJarUrl;
121 | String queryJarUrlStr = "";
122 | try {
123 | queryJarUrlStr = String.format("%s://%s:%d/chdfs-hadoop-plugin?appid=%d&hadoop_version=%s",
124 | jarPluginServerHttpsFlag ? "https" : "http", mountPointAddr, jarPluginServerPort, appid,
125 | URLEncoder.encode(hadoopVersion.trim(), "UTF-8"));
126 | queryJarUrl = new URL(queryJarUrlStr);
127 | } catch (MalformedURLException | UnsupportedEncodingException e) {
128 | String errMsg = String.format("invalid url %s", queryJarUrlStr);
129 | throw new IOException(errMsg, e);
130 | }
131 |
132 | long startTimeNs = System.nanoTime();
133 | URLConnection conn = null;
134 | BufferedInputStream bis = null;
135 | ByteArrayOutputStream bos = null;
136 | try {
137 | conn = queryJarUrl.openConnection();
138 | conn.setRequestProperty("Connection", "Keep-Alive");
139 | conn.setReadTimeout(60000);
140 | conn.setConnectTimeout(10000);
141 | conn.connect();
142 |
143 | bis = new BufferedInputStream(conn.getInputStream());
144 | bos = new ByteArrayOutputStream();
145 | byte[] buf = new byte[4096];
146 | int readLen = 0;
147 | while ((readLen = bis.read(buf)) != -1) {
148 | bos.write(buf, 0, readLen);
149 | }
150 | String respStr = bos.toString();
151 | parseJarPluginInfoResp(respStr, cosEndPointSuffix);
152 | } catch (IOException e) {
153 | String errMsg = "queryJarPluginInfo occur an io exception";
154 | log.warn(errMsg, e);
155 | throw new IOException(errMsg, e);
156 | } finally {
157 | if (bis != null) {
158 | IOUtils.closeStream(bis);
159 | }
160 | if (bos != null) {
161 | IOUtils.closeStream(bos);
162 | }
163 | if (conn instanceof HttpURLConnection) {
164 | ((HttpURLConnection) conn).disconnect();
165 | }
166 | }
167 | log.debug("query jarPluginInfo, usedTimeMs: {}", (System.nanoTime() - startTimeNs) * 1.0 / 1000000);
168 | }
169 |
170 | private void queryJarPluginInfo(String mountPointAddr, long appid, int jarPluginServerPort,
171 | boolean jarPluginServerHttpsFlag, String cosEndPointSuffix) throws IOException {
172 |
173 | final int maxRetry = 3;
174 | IOException finalException = null;
175 | for (int retryIndex = 0; retryIndex <= maxRetry; retryIndex++) {
176 | try {
177 | doQueryJarPluginInfo(mountPointAddr, appid, jarPluginServerPort, jarPluginServerHttpsFlag,
178 | cosEndPointSuffix);
179 | return;
180 | } catch (IOException e) {
181 | log.warn(String.format("query jar plugin info failed, retryIndex: [%d/%d]", retryIndex, maxRetry), e);
182 | finalException = e;
183 | }
184 |
185 | int sleepInterval = ThreadLocalRandom.current().nextInt(600, 2000);
186 | try {
187 | Thread.sleep(sleepInterval);
188 | } catch (InterruptedException ignored) {
189 | }
190 | }
191 |
192 | log.error("query jar plugin info failed after retry", finalException);
193 | throw finalException;
194 | }
195 |
196 | private static synchronized FileSystem getAlreadyLoadedClassInfo(ClassLoader currentClassLoader, String jarPath,
197 | String versionId, String jarMd5, String tmpDirPath, String jarHost, boolean distinguishHost,
198 | String networkVersionId) throws IOException {
199 | if (alreadyLoadedFileSystemInfo != null && alreadyLoadedFileSystemInfo.jarPath.equals(jarPath)
200 | && alreadyLoadedFileSystemInfo.versionId.equals(versionId) && alreadyLoadedFileSystemInfo.jarMd5.equals(
201 | jarMd5)) {
202 | try {
203 | return (FileSystem) alreadyLoadedFileSystemInfo.chdfsFSClass.newInstance();
204 | } catch (InstantiationException | IllegalAccessException e) {
205 | String errMsg = String.format("load chdfs class failed, className: %s",
206 | alreadyLoadedFileSystemInfo.chdfsFSClass.getName());
207 | log.error(errMsg, e);
208 | throw new IOException(errMsg, e);
209 | }
210 | }
211 |
212 | File jarFile = downloadJarPath(jarPath, versionId, jarMd5, tmpDirPath, jarHost, distinguishHost,
213 | networkVersionId);
214 | URL jarUrl;
215 | try {
216 | jarUrl = jarFile.toURI().toURL();
217 | } catch (MalformedURLException e) {
218 | String errMsg = String.format("get jar file url failed, jarPath: %s", jarFile.getAbsolutePath());
219 | log.error(errMsg, e);
220 | throw new IOException(errMsg, e);
221 | }
222 | URLClassLoader chdfsJarClassLoader = new URLClassLoader(new URL[]{jarUrl}, currentClassLoader);
223 | final String className = String.format("chdfs.%s.com.qcloud.chdfs.fs.CHDFSHadoopFileSystem", versionId);
224 | try {
225 | Class> chdfsFSClass = chdfsJarClassLoader.loadClass(className);
226 | FileSystem actualFileSystem = (FileSystem) chdfsFSClass.newInstance();
227 | alreadyLoadedFileSystemInfo = new AlreadyLoadedFileSystemInfo(versionId, jarPath, jarMd5, chdfsFSClass);
228 | return actualFileSystem;
229 | } catch (ClassNotFoundException | InstantiationException | IllegalAccessException e) {
230 | String errMsg = String.format("load class failed, className: %s", className);
231 | log.error(errMsg, e);
232 | throw new IOException(errMsg, e);
233 | }
234 | }
235 |
236 | private static File downloadJarPath(String jarPath, String versionId, String jarMd5, String tmpDirPath,
237 | String jarHost, boolean distinguishHost, String networkVersionId) throws IOException {
238 | File localCacheJarFile = new File(String.format("%s/chdfs_hadoop_plugin-%s-shaded.jar", tmpDirPath, versionId));
239 | File localCacheJarLockFile = new File(
240 | String.format("%s/chdfs_hadoop_plugin-%s-shaded.jar.LOCK", tmpDirPath, versionId));
241 | if (localCacheJarFile.exists()) {
242 | String md5Hex = getFileHexMd5(localCacheJarFile);
243 | if (md5Hex.equalsIgnoreCase(jarMd5)) {
244 | return localCacheJarFile;
245 | }
246 | }
247 |
248 | FileOutputStream fileLockOutPut;
249 | try {
250 | fileLockOutPut = new FileOutputStream(localCacheJarLockFile);
251 | } catch (IOException e) {
252 | String errMsg = String.format("download jar failed, open lock file failed, lockPath: %s",
253 | localCacheJarLockFile.getAbsolutePath());
254 | log.error(errMsg, e);
255 | throw new IOException(errMsg, e);
256 | }
257 |
258 | FileLock fileLock;
259 | while (true) {
260 | try {
261 | fileLock = fileLockOutPut.getChannel().lock();
262 | break;
263 | } catch (OverlappingFileLockException ofle) {
264 | try {
265 | Thread.sleep(10L);
266 | } catch (InterruptedException e) {
267 | try {
268 | fileLockOutPut.close();
269 | } catch (IOException ignore) {
270 | }
271 | String errMsg = String.format("download jar failed, lock file failed, lockPath: %s",
272 | localCacheJarLockFile.getAbsolutePath());
273 | log.error(errMsg, e);
274 | throw new IOException(errMsg, e);
275 | }
276 | } catch (IOException e) {
277 | try {
278 | fileLockOutPut.close();
279 | } catch (IOException ignore) {
280 | }
281 | String errMsg = String.format("download jar failed, lock file failed, lockPath: %s",
282 | localCacheJarLockFile.getAbsolutePath());
283 | log.error(errMsg, e);
284 | throw new IOException(errMsg, e);
285 | }
286 | }
287 |
288 | BufferedInputStream bis = null;
289 | BufferedOutputStream fos = null;
290 | try {
291 |
292 | // judge again may be other process has download the jar
293 | if (localCacheJarFile.exists()) {
294 | String md5Hex = getFileHexMd5(localCacheJarFile);
295 | if (md5Hex.equalsIgnoreCase(jarMd5)) {
296 | return localCacheJarFile;
297 | }
298 | }
299 | CloseableHttpClient httpclient = null;
300 | CloseableHttpResponse response = null;
301 | HttpGet httpGet = null;
302 | try {
303 | httpclient = HttpClients.createDefault();
304 | httpGet = new HttpGet(jarPath);
305 | httpGet.setHeader("User-Agent", String.format("chdfs_hadoop-plugin_network-%s", networkVersionId));
306 | if (distinguishHost) {
307 | httpGet.addHeader("Host", jarHost);
308 | log.debug("host: {} already set", jarHost);
309 | }
310 |
311 | RequestConfig requestConfig = RequestConfig.custom().setConnectionRequestTimeout(
312 | 10000).setConnectTimeout(10000).setSocketTimeout(30000).build();
313 | httpGet.setConfig(requestConfig);
314 |
315 | // execute request
316 | response = httpclient.execute(httpGet);
317 | // judge status code == 200
318 | if (response.getStatusLine().getStatusCode() == 200) {
319 | // get content
320 | bis = new BufferedInputStream(response.getEntity().getContent());
321 | fos = new BufferedOutputStream(new FileOutputStream(localCacheJarFile));
322 | IOUtils.copyBytes(bis, fos, 4096, true);
323 |
324 | // set jar and lock file permission 777
325 | localCacheJarFile.setReadable(true, false);
326 | localCacheJarFile.setWritable(true, false);
327 | localCacheJarFile.setExecutable(true, false);
328 |
329 | localCacheJarLockFile.setReadable(true, false);
330 | localCacheJarLockFile.setWritable(true, false);
331 | localCacheJarLockFile.setExecutable(true, false);
332 | }
333 |
334 | httpGet.releaseConnection();
335 | } catch (IOException e) {
336 | httpGet.abort();
337 | String errMsg = String.format("download jar failed, localJarPath: %s",
338 | localCacheJarFile.getAbsolutePath());
339 | log.error(errMsg, e);
340 | throw new IOException(errMsg);
341 | } finally {
342 | if (response != null) {
343 | try {
344 | response.close();
345 | } catch (IOException ignored) {
346 | }
347 | }
348 | if (httpclient != null) {
349 | try {
350 | httpclient.close();
351 | } catch (IOException ignored) {
352 | }
353 | }
354 | }
355 |
356 | String md5Hex = getFileHexMd5(localCacheJarFile);
357 | if (!md5Hex.equalsIgnoreCase(jarMd5)) {
358 | String errMsg = String.format("download jar md5 check failed, local jar md5: %s, query jar md5: %s",
359 | md5Hex, jarMd5);
360 | log.error(errMsg);
361 | throw new IOException(errMsg);
362 | }
363 | return localCacheJarFile;
364 | } finally {
365 | if (fileLock != null) {
366 | try {
367 | fileLock.release();
368 | } catch (IOException ignored) {
369 | }
370 | }
371 |
372 | if (fileLockOutPut != null) {
373 | try {
374 | fileLockOutPut.close();
375 | } catch (IOException ignored) {
376 | }
377 | }
378 |
379 | if (bis != null) {
380 | try {
381 | bis.close();
382 | } catch (IOException ignored) {
383 | }
384 | }
385 |
386 | if (fos != null) {
387 | try {
388 | fos.close();
389 | } catch (IOException ignored) {
390 | }
391 | }
392 | }
393 | }
394 |
395 | private static String getFileHexMd5(File inFile) throws IOException {
396 | FileInputStream in = null;
397 | try {
398 | in = new FileInputStream(inFile);
399 | byte[] md5Byte = MD5Hash.digest(in).getDigest();
400 | return Hex.encodeHexString(md5Byte);
401 | } catch (IOException e) {
402 | String errMsg = String.format("getFileHexMd5 failed, inFile path: %s", inFile.getAbsolutePath());
403 | log.error(errMsg, e);
404 | throw new IOException(errMsg, e);
405 | } finally {
406 | IOUtils.closeStream(in);
407 | }
408 | }
409 |
410 | FileSystem getActualFileSystem() {
411 | return actualFileSystem;
412 | }
413 | }
414 |
--------------------------------------------------------------------------------
/src/main/java/com/qcloud/chdfs/fs/CHDFSHadoopFileSystemAdapter.java:
--------------------------------------------------------------------------------
1 | package com.qcloud.chdfs.fs;
2 |
3 | import com.qcloud.chdfs.permission.RangerAccessType;
4 | import org.apache.hadoop.conf.Configuration;
5 | import org.apache.hadoop.fs.ContentSummary;
6 | import org.apache.hadoop.fs.CreateFlag;
7 | import org.apache.hadoop.fs.FSDataInputStream;
8 | import org.apache.hadoop.fs.FSDataOutputStream;
9 | import org.apache.hadoop.fs.FileAlreadyExistsException;
10 | import org.apache.hadoop.fs.FileChecksum;
11 | import org.apache.hadoop.fs.FileStatus;
12 | import org.apache.hadoop.fs.FileSystem;
13 | import org.apache.hadoop.fs.FsStatus;
14 | import org.apache.hadoop.fs.ParentNotDirectoryException;
15 | import org.apache.hadoop.fs.Path;
16 | import org.apache.hadoop.fs.UnsupportedFileSystemException;
17 | import org.apache.hadoop.fs.XAttrSetFlag;
18 | import org.apache.hadoop.fs.permission.AclEntry;
19 | import org.apache.hadoop.fs.permission.AclStatus;
20 | import org.apache.hadoop.fs.permission.FsPermission;
21 | import org.apache.hadoop.security.AccessControlException;
22 | import org.apache.hadoop.security.token.Token;
23 | import org.apache.hadoop.util.Progressable;
24 | import org.slf4j.Logger;
25 | import org.slf4j.LoggerFactory;
26 |
27 | import java.io.File;
28 | import java.io.FileNotFoundException;
29 | import java.io.IOException;
30 | import java.io.InputStream;
31 | import java.net.URI;
32 | import java.util.EnumSet;
33 | import java.util.List;
34 | import java.util.Map;
35 | import java.util.Properties;
36 | import java.util.concurrent.ThreadLocalRandom;
37 | import java.util.regex.Pattern;
38 |
39 | public class CHDFSHadoopFileSystemAdapter extends FileSystemWithCleanerAndSSE implements RangerPermissionChecker {
40 | static final String SCHEME = "ofs";
41 | private static final Logger log = LoggerFactory.getLogger(CHDFSHadoopFileSystemAdapter.class);
42 | private static final String MOUNT_POINT_ADDR_PATTERN_CHDFS_TYPE =
43 | "^([a-zA-Z0-9-]+)\\.chdfs(-dualstack)?(\\.inner)?\\.([a-z0-9-]+)\\.([a-z0-9-.]+)";
44 | private static final String MOUNT_POINT_ADDR_PATTERN_COS_TYPE =
45 | "^([a-z0-9-]+)-([a-zA-Z0-9]+)$";
46 | private static final String CHDFS_USER_APPID_KEY = "fs.ofs.user.appid";
47 | private static final String CHDFS_DOWNLOAD_JAR_APPID_KEY = "fs.ofs.downloadjar.appid";
48 | /**
49 | * This configuration item has been deprecated, please use fs.ofs.jar.cache.dir instead.
50 | */
51 | @Deprecated
52 | private static final String CHDFS_TMP_CACHE_DIR_KEY = "fs.ofs.tmp.cache.dir";
53 |
54 | private static final String CHDFS_JAR_CACHE_DIR_KEY = "fs.ofs.jar.cache.dir";
55 | private static final String CHDFS_META_SERVER_PORT_KEY = "fs.ofs.meta.server.port";
56 | private static final String CHDFS_META_TRANSFER_USE_TLS_KEY = "fs.ofs.meta.transfer.tls";
57 | private static final String CHDFS_BUCKET_REGION = "fs.ofs.bucket.region";
58 | private static final String COS_ENDPOINT_SUFFIX = "fs.ofs.data.transfer.endpoint.suffix";
59 |
60 | private static final String CHDFS_META_ENDPOINT_SUFFIX_KEY = "fs.ofs.meta.endpoint.suffix";
61 | private static final boolean DEFAULT_CHDFS_META_TRANSFER_USE_TLS = true;
62 | private static final int DEFAULT_CHDFS_META_SERVER_PORT = 443;
63 |
64 | public static final String CHDFS_DATA_TRANSFER_DISTINGUISH_HOST = "fs.ofs.data.transfer.distinguish.host";
65 |
66 | public static final boolean DEFAULT_CHDFS_DATA_TRANSFER_DISTINGUISH_FLAG = false;
67 |
68 | public static final String CHDFS_USE_SHORT_BUCKETNAME_KEY = "fs.ofs.use.short.bucketname";
69 |
70 | public static final boolean DEFAULT_CHDFS_USE_SHORT_BUCKETNAME = false;
71 |
72 | private final CHDFSHadoopFileSystemJarLoader jarLoader = new CHDFSHadoopFileSystemJarLoader();
73 | private FileSystem actualImplFS = null;
74 | private URI uri = null;
75 | private Path workingDir = null;
76 |
77 | @Override
78 | public String getScheme() {
79 | return CHDFSHadoopFileSystemAdapter.SCHEME;
80 | }
81 |
    /**
     * Initializes the adapter: resolves the mount point host, downloads/loads the real
     * CHDFS implementation jar (with retry), then initializes the loaded filesystem.
     *
     * @param name filesystem URI; the host part is the mount point address
     * @param conf Hadoop configuration
     * @throws IOException if the mount point is invalid or the implementation cannot be loaded
     */
    @Override
    public void initialize(URI name, Configuration conf) throws IOException {
        log.debug("CHDFSHadoopFileSystemAdapter adapter initialize");
        long initStartMs = System.currentTimeMillis();
        log.debug("CHDFSHadoopFileSystemAdapter start-init-start time: {}", initStartMs);
        try {
            super.initialize(name, conf);
            this.setConf(conf);
            String mountPointAddr = name.getHost();
            if (mountPointAddr == null) {
                String errMsg = String.format("mountPointAddr is null, fullUri: %s, exp. f4mabcdefgh-xyzw.chdfs"
                        + ".ap-guangzhou.myqcloud.com or examplebucket-1250000000 or f4mabcdefgh-xyzw", name);
                log.error(errMsg);
                throw new IOException(errMsg);
            }

            if (isUseShortBucketName(conf) && getAppid(conf) != 0) {
                // if a short bucket name is used, the appid must be appended
                mountPointAddr = mountPointAddr + "-" + getAppid(conf);
            }
            String ofsHost;
            if (isValidMountPointAddrChdfsType(mountPointAddr)) {
                // already a fully-qualified chdfs host
                ofsHost = mountPointAddr;
            } else if (isValidMountPointAddrCosType(mountPointAddr)) {
                String metaEndpointSuffix = getMetaEndpointSuffix(conf);
                if (!metaEndpointSuffix.isEmpty()) {
                    ofsHost = mountPointAddr + "." + metaEndpointSuffix;
                    // force close tls
                    conf.setBoolean(CHDFS_META_TRANSFER_USE_TLS_KEY, false);
                } else {
                    // no custom suffix: build the default chdfs host from the bucket region
                    String bucketRegion = getChdfsBucketRegion(conf);
                    ofsHost = String.format("%s.chdfs.%s.myqcloud.com", mountPointAddr, bucketRegion);
                }
            } else {
                String errMsg = String.format("mountPointAddr %s is invalid, fullUri: %s, exp. f4mabcdefgh-xyzw.chdfs"
                        + ".ap-guangzhou.myqcloud.com or examplebucket-1250000000 or f4mabcdefgh-xyzw",
                        mountPointAddr, name);
                log.error(errMsg);
                throw new IOException(errMsg);
            }

            // expose the plugin's own version to the loaded implementation via the configuration
            String networkVersionId = initPluginNetworkVersion();
            conf.set("chdfs.hadoop.plugin.network.version", String.format("network:%s", networkVersionId));

            long appid = getDownLoadJarAppid(conf);
            int jarPluginServerPort = getJarPluginServerPort(conf);
            String tmpDirPath = initCacheTmpDir(conf);
            boolean jarPluginServerHttpsFlag = isJarPluginServerHttps(conf);
            String cosEndPointSuffix = getCosEndPointSuffix(conf);
            boolean distinguishHost = isDistinguishHost(conf);
            log.debug("fs.ofs.data.transfer.distinguish.host: {}", distinguishHost);
            initJarLoadWithRetry(ofsHost, appid, jarPluginServerPort, tmpDirPath, jarPluginServerHttpsFlag,
                    cosEndPointSuffix, distinguishHost, networkVersionId);

            this.actualImplFS = jarLoader.getActualFileSystem();
            if (this.actualImplFS == null) {
                // should never reach here
                throw new IOException("impl filesystem is null");
            }

            long actualInitStartMs = System.currentTimeMillis();
            // delegate the real initialization to the loaded implementation
            this.actualImplFS.initialize(name, conf);
            log.debug("init actual file system, [elapse-ms: {}]", System.currentTimeMillis() - actualInitStartMs);
            this.uri = this.actualImplFS.getUri();
            this.workingDir = this.actualImplFS.getWorkingDirectory();
        } catch (IOException ioe) {
            log.error("initialize failed! a ioException occur!", ioe);
            throw ioe;
        } catch (Exception e) {
            log.error("initialize failed! a unexpected exception occur!", e);
            throw new IOException("initialize failed! oops! a unexpected exception occur! " + e, e);
        }
        log.debug("total init file system, [elapse-ms: {}]", System.currentTimeMillis() - initStartMs);
    }
156 |
157 | boolean isValidMountPointAddrChdfsType(String mountPointAddr) {
158 | return Pattern.matches(MOUNT_POINT_ADDR_PATTERN_CHDFS_TYPE, mountPointAddr);
159 | }
160 |
161 | boolean isValidMountPointAddrCosType(String mountPointAddr) {
162 | return Pattern.matches(MOUNT_POINT_ADDR_PATTERN_COS_TYPE, mountPointAddr);
163 | }
164 |
165 | private String getCosEndPointSuffix(Configuration conf) throws IOException {
166 | return conf.get(COS_ENDPOINT_SUFFIX);
167 | }
168 |
169 | private String getChdfsBucketRegion(Configuration conf) throws IOException {
170 | String bucketRegion = conf.get(CHDFS_BUCKET_REGION);
171 | if (bucketRegion == null) {
172 | String errMsg = String.format("ofs config %s is missing", CHDFS_BUCKET_REGION);
173 | log.error(errMsg);
174 | throw new IOException(errMsg);
175 | }
176 | return bucketRegion;
177 | }
178 |
179 | private boolean isUseShortBucketName(Configuration conf) throws IOException {
180 | return conf.getBoolean(CHDFS_USE_SHORT_BUCKETNAME_KEY, DEFAULT_CHDFS_USE_SHORT_BUCKETNAME);
181 | }
182 |
183 | private String getMetaEndpointSuffix(Configuration conf) throws IOException {
184 | return initStringValue(conf, CHDFS_META_ENDPOINT_SUFFIX_KEY, "").toLowerCase();
185 | }
186 |
187 | private String initStringValue(Configuration conf, String configKey, String defaultValue)
188 | throws IOException {
189 | String retValue = conf.get(configKey);
190 | if (retValue == null) {
191 | if (defaultValue == null) {
192 | String errMsg = String.format("chdfs config %s missing", configKey);
193 | log.error(errMsg);
194 | throw new IOException(errMsg);
195 | } else {
196 | retValue = defaultValue;
197 | log.debug("chdfs config {} missing, use default value {}", configKey, defaultValue);
198 | }
199 | } else {
200 | if (retValue.trim().isEmpty()) {
201 | String errMsg = String.format("chdfs config %s value %s is invalid, value should not be empty string",
202 | configKey, retValue);
203 | log.error(errMsg);
204 | throw new IOException(errMsg);
205 | }
206 | }
207 | return retValue.trim();
208 | }
209 |
210 | private long getDownLoadJarAppid(Configuration conf) throws IOException {
211 | long appid = 0;
212 | try {
213 | appid = conf.getLong(CHDFS_DOWNLOAD_JAR_APPID_KEY, 0);
214 | } catch (NumberFormatException e) {
215 | throw new IOException(String.format("config for %s is invalid appid number", CHDFS_DOWNLOAD_JAR_APPID_KEY));
216 | }
217 | if (appid > 0) {
218 | return appid;
219 | }
220 | return getAppid(conf);
221 | }
222 |
223 | private long getAppid(Configuration conf) throws IOException {
224 | long appid = 0;
225 | try {
226 | appid = conf.getLong(CHDFS_USER_APPID_KEY, 0);
227 | } catch (NumberFormatException e) {
228 | throw new IOException(String.format("config for %s is invalid appid number", CHDFS_USER_APPID_KEY));
229 | }
230 | if (appid <= 0) {
231 | throw new IOException(
232 | String.format("config for %s is missing or invalid appid number", CHDFS_USER_APPID_KEY));
233 | }
234 | return appid;
235 | }
236 |
237 | private int getJarPluginServerPort(Configuration conf) {
238 | return conf.getInt(CHDFS_META_SERVER_PORT_KEY, DEFAULT_CHDFS_META_SERVER_PORT);
239 | }
240 |
    /**
     * Resolves and validates the local jar cache directory, creating it (world rwx) if absent.
     * The new key fs.ofs.jar.cache.dir takes precedence over the deprecated
     * fs.ofs.tmp.cache.dir.
     *
     * @return the validated absolute cache directory path
     * @throws IOException if no path is configured, the path is not absolute, creation fails,
     *         or the path is not a readable/writable directory
     */
    private String initCacheTmpDir(Configuration conf) throws IOException {
        String chdfsTmpCacheDirPath = conf.get(CHDFS_TMP_CACHE_DIR_KEY);
        String jarTmpCacheDirPath = conf.get(CHDFS_JAR_CACHE_DIR_KEY);
        if (jarTmpCacheDirPath != null && !jarTmpCacheDirPath.isEmpty()) {
            chdfsTmpCacheDirPath = jarTmpCacheDirPath; // compatible with old configuration items
        }

        // NOTE(review): the error messages below always cite the deprecated key even when the
        // value came from fs.ofs.jar.cache.dir — potentially confusing, but kept as-is.
        if (chdfsTmpCacheDirPath == null) {
            String errMsg = String.format("chdfs config %s is missing", CHDFS_TMP_CACHE_DIR_KEY);
            log.error(errMsg);
            throw new IOException(errMsg);
        }
        if (!chdfsTmpCacheDirPath.startsWith("/")) {
            String errMsg = String.format("chdfs config [%s: %s] must be absolute path", CHDFS_TMP_CACHE_DIR_KEY,
                    chdfsTmpCacheDirPath);
            log.error(errMsg);
            throw new IOException(errMsg);
        }

        File chdfsTmpCacheDir = new File(chdfsTmpCacheDirPath);
        if (!chdfsTmpCacheDir.exists()) {
            // judge exists again, may many map-reduce processes create cache dir parallel
            if (!chdfsTmpCacheDir.mkdirs() && !chdfsTmpCacheDir.exists()) {
                String errMsg = String.format("mkdir for chdfs tmp dir %s failed", chdfsTmpCacheDir.getAbsolutePath());
                log.error(errMsg);
                throw new IOException(errMsg);
            }
            // open the directory to every user so concurrent jobs under other accounts can share it
            chdfsTmpCacheDir.setReadable(true, false);
            chdfsTmpCacheDir.setWritable(true, false);
            chdfsTmpCacheDir.setExecutable(true, false);
        }

        if (!chdfsTmpCacheDir.isDirectory()) {
            String errMsg = String.format("chdfs config [%s: %s] is invalid directory", CHDFS_TMP_CACHE_DIR_KEY,
                    chdfsTmpCacheDir.getAbsolutePath());
            log.error(errMsg);
            throw new IOException(errMsg);
        }

        if (!chdfsTmpCacheDir.canRead()) {
            String errMsg = String.format("chdfs config [%s: %s] is not readable", CHDFS_TMP_CACHE_DIR_KEY,
                    chdfsTmpCacheDirPath);
            log.error(errMsg);
            throw new IOException(errMsg);
        }

        if (!chdfsTmpCacheDir.canWrite()) {
            String errMsg = String.format("chdfs config [%s: %s] is not writeable", CHDFS_TMP_CACHE_DIR_KEY,
                    chdfsTmpCacheDirPath);
            log.error(errMsg);
            throw new IOException(errMsg);
        }
        return chdfsTmpCacheDirPath;
    }
295 |
296 | private boolean isJarPluginServerHttps(Configuration conf) {
297 | return conf.getBoolean(CHDFS_META_TRANSFER_USE_TLS_KEY, DEFAULT_CHDFS_META_TRANSFER_USE_TLS);
298 | }
299 |
300 | private boolean isDistinguishHost(Configuration conf) {
301 | return conf.getBoolean(CHDFS_DATA_TRANSFER_DISTINGUISH_HOST, DEFAULT_CHDFS_DATA_TRANSFER_DISTINGUISH_FLAG);
302 | }
303 |
304 |
305 | private void initJarLoadWithRetry(String mountPointAddr, long appid, int jarPluginServerPort, String tmpDirPath,
306 | boolean jarPluginServerHttps, String cosEndPointSuffix, boolean distinguishHost
307 | , String networkVersionId) throws IOException {
308 | int maxRetry = 5;
309 | for (int retryIndex = 0; retryIndex <= maxRetry; retryIndex++) {
310 | try {
311 | jarLoader.init(mountPointAddr, appid, jarPluginServerPort, tmpDirPath, jarPluginServerHttps,
312 | cosEndPointSuffix, distinguishHost, networkVersionId);
313 | return;
314 | } catch (Exception e) {
315 | if (retryIndex < maxRetry) {
316 | log.warn(String.format("init chdfs impl failed, we will retry again, retryInfo: %d/%d", retryIndex,
317 | maxRetry), e);
318 | } else {
319 | log.error("init chdfs impl failed", e);
320 | throw new IOException("init chdfs impl failed", e);
321 | }
322 | }
323 | try {
324 | Thread.sleep(ThreadLocalRandom.current().nextLong(500, 2000));
325 | } catch (InterruptedException ignore) {
326 | // ignore
327 | }
328 | }
329 | }
330 |
331 | @java.lang.Override
332 | public java.net.URI getUri() {
333 | return this.uri;
334 | }
335 |
336 | private void judgeActualFSInitialized() {
337 | if (this.actualImplFS == null) {
338 | throw new RuntimeException("please init the fileSystem first!");
339 | }
340 | }
341 |
342 | private String initPluginNetworkVersion() {
343 | String networkVersionId = "unknown";
344 |
345 | Properties versionProp = new Properties();
346 | InputStream in = null;
347 | final String versionPropName = "chdfsHadoopPluginNetworkVersionInfo.properties";
348 | try {
349 | in = this.getClass().getClassLoader().getResourceAsStream(versionPropName);
350 | if (in != null) {
351 | versionProp.load(in);
352 | networkVersionId = versionProp.getProperty("network_version");
353 |
354 | } else {
355 | log.error("load versionInfo properties failed, propName: {} ", versionPropName);
356 | }
357 | } catch (IOException e) {
358 | log.error("load versionInfo properties failed", e);
359 | } finally {
360 | utils.closeQuietly(in);
361 | }
362 | return networkVersionId;
363 | }
364 |
365 | @java.lang.Override
366 | public FSDataInputStream open(Path f, int bufferSize) throws IOException {
367 | judgeActualFSInitialized();
368 | return this.actualImplFS.open(f, bufferSize);
369 | }
370 |
    /** Creates a file without creating missing parent directories, delegating to the loaded implementation. */
    @java.lang.Override
    public FSDataOutputStream createNonRecursive(Path f, FsPermission permission, EnumSet flags,
            int bufferSize, short replication, long blockSize, Progressable progress) throws IOException {
        judgeActualFSInitialized();
        return this.actualImplFS.createNonRecursive(f, permission, flags, bufferSize, replication, blockSize, progress);
    }
377 |
378 | @java.lang.Override
379 | public FSDataOutputStream create(Path f, FsPermission permission, boolean overwrite, int bufferSize,
380 | short replication, long blockSize, Progressable progress) throws IOException {
381 | judgeActualFSInitialized();
382 | return this.actualImplFS.create(f, permission, overwrite, bufferSize, replication, blockSize, progress);
383 | }
384 |
385 | @java.lang.Override
386 | public FSDataOutputStream append(Path f, int bufferSize, Progressable progress) throws IOException {
387 | judgeActualFSInitialized();
388 | return this.actualImplFS.append(f, bufferSize, progress);
389 | }
390 |
391 | public boolean truncate(Path f, long newLength) throws IOException {
392 | judgeActualFSInitialized();
393 | return this.actualImplFS.truncate(f, newLength);
394 | }
395 |
396 | @Override
397 | public void concat(Path trg, Path[] psrcs) throws IOException {
398 | judgeActualFSInitialized();
399 | this.actualImplFS.concat(trg, psrcs);
400 | }
401 |
402 | @java.lang.Override
403 | public boolean rename(Path src, Path dst) throws IOException {
404 | judgeActualFSInitialized();
405 | return this.actualImplFS.rename(src, dst);
406 | }
407 |
408 | @java.lang.Override
409 | public boolean delete(Path f, boolean recursive) throws IOException {
410 | judgeActualFSInitialized();
411 | return this.actualImplFS.delete(f, recursive);
412 | }
413 |
414 | @java.lang.Override
415 | public boolean deleteOnExit(Path f) throws IOException {
416 | judgeActualFSInitialized();
417 | return this.actualImplFS.deleteOnExit(f);
418 | }
419 |
420 | @java.lang.Override
421 | public boolean cancelDeleteOnExit(Path f) {
422 | judgeActualFSInitialized();
423 | return this.actualImplFS.cancelDeleteOnExit(f);
424 | }
425 |
426 |
427 | @java.lang.Override
428 | public FileStatus[] listStatus(Path f) throws FileNotFoundException, IOException {
429 | judgeActualFSInitialized();
430 | return this.actualImplFS.listStatus(f);
431 | }
432 |
433 | @java.lang.Override
434 | public Path getWorkingDirectory() {
435 | return this.workingDir;
436 | }
437 |
438 | @java.lang.Override
439 | public void setWorkingDirectory(Path new_dir) {
440 | this.workingDir = new_dir;
441 | if (this.actualImplFS == null) {
442 | log.warn("fileSystem is not init yet!");
443 | } else {
444 | this.actualImplFS.setWorkingDirectory(new_dir);
445 | }
446 | }
447 |
448 | @Override
449 | public Path getHomeDirectory() {
450 | if (this.actualImplFS == null) {
451 | return super.getHomeDirectory();
452 | }
453 | return this.actualImplFS.getHomeDirectory();
454 | }
455 |
456 | @java.lang.Override
457 | public boolean mkdirs(Path f, FsPermission permission) throws IOException {
458 | judgeActualFSInitialized();
459 | return this.actualImplFS.mkdirs(f, permission);
460 | }
461 |
462 | @java.lang.Override
463 | public FileStatus getFileStatus(Path f) throws IOException {
464 | judgeActualFSInitialized();
465 | return this.actualImplFS.getFileStatus(f);
466 | }
467 |
468 | @Override
469 | public void createSymlink(Path target, Path link, boolean createParent)
470 | throws AccessControlException, FileAlreadyExistsException, FileNotFoundException,
471 | ParentNotDirectoryException, UnsupportedFileSystemException, IOException {
472 | judgeActualFSInitialized();
473 | this.actualImplFS.createSymlink(target, link, createParent);
474 | }
475 |
476 | @Override
477 | public FileStatus getFileLinkStatus(final Path f)
478 | throws AccessControlException, FileNotFoundException, UnsupportedFileSystemException, IOException {
479 | judgeActualFSInitialized();
480 | return this.actualImplFS.getFileLinkStatus(f);
481 | }
482 |
483 | @Override
484 | public boolean supportsSymlinks() {
485 | if (this.actualImplFS == null) {
486 | return super.supportsSymlinks();
487 | }
488 | return this.actualImplFS.supportsSymlinks();
489 | }
490 |
491 |
492 | @Override
493 | public Path getLinkTarget(Path f) throws IOException {
494 | judgeActualFSInitialized();
495 | return this.actualImplFS.getLinkTarget(f);
496 | }
497 |
    /** Resolves a symlink by delegating to {@link #getLinkTarget(Path)}. */
    @Override
    protected Path resolveLink(Path f) throws IOException {
        return getLinkTarget(f);
    }
502 |
503 | @Override
504 | public FileChecksum getFileChecksum(Path f, long length) throws IOException {
505 | judgeActualFSInitialized();
506 | return this.actualImplFS.getFileChecksum(f, length);
507 | }
508 |
509 | @Override
510 | public void setVerifyChecksum(boolean verifyChecksum) {
511 | judgeActualFSInitialized();
512 | this.actualImplFS.setVerifyChecksum(verifyChecksum);
513 | }
514 |
515 | @Override
516 | public void setWriteChecksum(boolean writeChecksum) {
517 | judgeActualFSInitialized();
518 | this.actualImplFS.setWriteChecksum(writeChecksum);
519 | }
520 |
521 | @Override
522 | public FsStatus getStatus(Path p) throws IOException {
523 | judgeActualFSInitialized();
524 | return this.actualImplFS.getStatus(p);
525 | }
526 |
527 | @Override
528 | public void setPermission(Path p, FsPermission permission) throws IOException {
529 | judgeActualFSInitialized();
530 | this.actualImplFS.setPermission(p, permission);
531 | }
532 |
533 | @Override
534 | public void setOwner(Path p, String username, String groupname) throws IOException {
535 | judgeActualFSInitialized();
536 | this.actualImplFS.setOwner(p, username, groupname);
537 | }
538 |
539 | @Override
540 | public void setTimes(Path p, long mtime, long atime) throws IOException {
541 | judgeActualFSInitialized();
542 | this.actualImplFS.setTimes(p, mtime, atime);
543 | }
544 |
545 | @Override
546 | public Path createSnapshot(Path path, String snapshotName) throws IOException {
547 | judgeActualFSInitialized();
548 | return this.actualImplFS.createSnapshot(path, snapshotName);
549 | }
550 |
551 | @Override
552 | public void renameSnapshot(Path path, String snapshotOldName, String snapshotNewName) throws IOException {
553 | judgeActualFSInitialized();
554 | this.actualImplFS.renameSnapshot(path, snapshotOldName, snapshotNewName);
555 | }
556 |
557 | @Override
558 | public void deleteSnapshot(Path path, String snapshotName) throws IOException {
559 | judgeActualFSInitialized();
560 | this.actualImplFS.deleteSnapshot(path, snapshotName);
561 | }
562 |
    /** Modifies ACL entries on a path, delegating to the loaded implementation. */
    @Override
    public void modifyAclEntries(Path path, List aclSpec) throws IOException {
        judgeActualFSInitialized();
        this.actualImplFS.modifyAclEntries(path, aclSpec);
    }
568 |
    /** Removes the given ACL entries from a path, delegating to the loaded implementation. */
    @Override
    public void removeAclEntries(Path path, List aclSpec) throws IOException {
        judgeActualFSInitialized();
        this.actualImplFS.removeAclEntries(path, aclSpec);
    }
574 |
575 | @Override
576 | public void removeDefaultAcl(Path path) throws IOException {
577 | judgeActualFSInitialized();
578 | this.actualImplFS.removeDefaultAcl(path);
579 | }
580 |
581 | @Override
582 | public void removeAcl(Path path) throws IOException {
583 | judgeActualFSInitialized();
584 | this.actualImplFS.removeAcl(path);
585 | }
586 |
    /** Replaces the ACL on a path, delegating to the loaded implementation. */
    @Override
    public void setAcl(Path path, List aclSpec) throws IOException {
        judgeActualFSInitialized();
        this.actualImplFS.setAcl(path, aclSpec);
    }
592 |
593 | @Override
594 | public AclStatus getAclStatus(Path path) throws IOException {
595 | judgeActualFSInitialized();
596 | return this.actualImplFS.getAclStatus(path);
597 | }
598 |
    /** Sets an extended attribute on a path, delegating to the loaded implementation. */
    @Override
    public void setXAttr(Path path, String name, byte[] value, EnumSet flag) throws IOException {
        judgeActualFSInitialized();
        this.actualImplFS.setXAttr(path, name, value, flag);
    }
604 |
605 | @Override
606 | public byte[] getXAttr(Path path, String name) throws IOException {
607 | judgeActualFSInitialized();
608 | return this.actualImplFS.getXAttr(path, name);
609 | }
610 |
    /** Reads all extended attributes of a path, delegating to the loaded implementation. */
    @Override
    public Map getXAttrs(Path path) throws IOException {
        judgeActualFSInitialized();
        return this.actualImplFS.getXAttrs(path);
    }
616 |
    /** Reads the named extended attributes of a path, delegating to the loaded implementation. */
    @Override
    public Map getXAttrs(Path path, List names) throws IOException {
        judgeActualFSInitialized();
        return this.actualImplFS.getXAttrs(path, names);
    }
622 |
    /** Lists the extended-attribute names of a path, delegating to the loaded implementation. */
    @Override
    public List listXAttrs(Path path) throws IOException {
        judgeActualFSInitialized();
        return this.actualImplFS.listXAttrs(path);
    }
628 |
629 | @Override
630 | public void removeXAttr(Path path, String name) throws IOException {
631 | judgeActualFSInitialized();
632 | this.actualImplFS.removeXAttr(path, name);
633 | }
634 |
    /** Obtains a delegation token from the loaded implementation. */
    @Override
    public Token> getDelegationToken(String renewer) throws IOException {
        judgeActualFSInitialized();
        return this.actualImplFS.getDelegationToken(renewer);
    }
640 |
641 | @Override
642 | public String getCanonicalServiceName() {
643 | if (this.actualImplFS == null) {
644 | return null;
645 | } else {
646 | return this.actualImplFS.getCanonicalServiceName();
647 | }
648 | }
649 |
650 | @Override
651 | public ContentSummary getContentSummary(Path f) throws IOException {
652 | judgeActualFSInitialized();
653 | return this.actualImplFS.getContentSummary(f);
654 | }
655 |
656 | @Override
657 | public void releaseFileLock(Path p) throws IOException {
658 | judgeActualFSInitialized();
659 | if (this.actualImplFS instanceof FileLockCleaner) {
660 | ((FileLockCleaner) this.actualImplFS).releaseFileLock(p);
661 | } else {
662 | throw new IOException("the actual fileSystem not implemented the lock cleaner interface!");
663 | }
664 | }
665 |
666 | @Override
667 | public void enableSSECos() throws IOException {
668 | judgeActualFSInitialized();
669 |
670 | if (this.actualImplFS instanceof ServerSideEncryption) {
671 | ((ServerSideEncryption) this.actualImplFS).enableSSECos();
672 | } else {
673 | throw new IOException("the actual fileSystem not implemented the enable sse interface!");
674 | }
675 | }
676 |
677 | @Override
678 | public void disableSSE() throws IOException {
679 | judgeActualFSInitialized();
680 | if (this.actualImplFS instanceof ServerSideEncryption) {
681 | ((ServerSideEncryption) this.actualImplFS).disableSSE();
682 | } else {
683 | throw new IOException("the actual fileSystem not implemented the enable sse interface!");
684 | }
685 | }
686 |
687 | @Override
688 | public void checkPermission(Path f, RangerAccessType rangerAccessType) throws IOException {
689 | judgeActualFSInitialized();
690 | if (this.actualImplFS instanceof RangerPermissionChecker) {
691 | ((RangerPermissionChecker) this.actualImplFS).checkPermission(f, rangerAccessType);
692 | } else {
693 | throw new IOException("the actual fileSystem not implemented the permission check interface!");
694 | }
695 | }
696 |
697 | @Override
698 | public void close() throws IOException {
699 | judgeActualFSInitialized();
700 | super.close();
701 | this.actualImplFS.close();
702 | }
703 |
704 |
705 | }
706 |
--------------------------------------------------------------------------------