├── .github
│   └── ISSUE_TEMPLATE
│       ├── bug_report.md
│       └── feature_request.md
├── .gitignore
├── LICENSE
├── README.md
├── compile.sh
├── deploy.sh
├── pom.xml
└── src
    ├── main
    │   └── java
    │       ├── com
    │       │   └── qcloud
    │       │       └── chdfs
    │       │           └── permission
    │       │               └── RangerAccessType.java
    │       ├── hadoopCosPluginVersionInfo.properties
    │       └── org
    │           └── apache
    │               └── hadoop
    │                   └── fs
    │                       ├── CosFileSystem.java
    │                       ├── CosN.java
    │                       ├── CosNConfigKeys.java
    │                       ├── CosNCopyFileContext.java
    │                       ├── CosNCopyFileTask.java
    │                       ├── CosNDeleteFileContext.java
    │                       ├── CosNDeleteFileTask.java
    │                       ├── CosNEncryptionMethods.java
    │                       ├── CosNEncryptionSecrets.java
    │                       ├── CosNExtendedFSDataOutputStream.java
    │                       ├── CosNFSBufferedFSInputStream.java
    │                       ├── CosNFSDataOutputStream.java
    │                       ├── CosNFSInputStream.java
    │                       ├── CosNFileReadTask.java
    │                       ├── CosNFileStatus.java
    │                       ├── CosNFileSystem.java
    │                       ├── CosNFileSystemExt.java
    │                       ├── CosNPartialListing.java
    │                       ├── CosNResultInfo.java
    │                       ├── CosNSeekableFSDataOutputStream.java
    │                       ├── CosNSymlinkMetadata.java
    │                       ├── CosNUtils.java
    │                       ├── CosNXAttr.java
    │                       ├── CosNativeFileSystemStore.java
    │                       ├── FileMetadata.java
    │                       ├── NativeFileSystemStore.java
    │                       ├── RangerCredentialsClient.java
    │                       ├── auth
    │                       │   ├── AbstractCOSCredentialProvider.java
    │                       │   ├── COSCredentialProviderList.java
    │                       │   ├── CPMInstanceCredentialsProvider.java
    │                       │   ├── CVMInstanceCredentialsProvider.java
    │                       │   ├── CustomDefinedCredentialsProvider.java
    │                       │   ├── DLFInstanceCredentialsProvider.java
    │                       │   ├── EMRInstanceCredentialsProvider.java
    │                       │   ├── EnvironmentVariableCredentialProvider.java
    │                       │   ├── NoAuthWithCOSException.java
    │                       │   ├── OIDCRoleArnCredentialsProvider.java
    │                       │   ├── RangerCredentialsProvider.java
    │                       │   ├── SessionCredentialProvider.java
    │                       │   ├── SessionTokenCredentialProvider.java
    │                       │   ├── SimpleCredentialProvider.java
    │                       │   └── package-info.java
    │                       └── cosn
    │                           ├── Abortable.java
    │                           ├── BufferInputStream.java
    │                           ├── BufferOutputStream.java
    │                           ├── BufferPool.java
    │                           ├── CRC32CCheckSum.java
    │                           ├── CRC64Checksum.java
    │                           ├── ConsistencyChecker.java
    │                           ├── Constants.java
    │                           ├── CosNOutOfMemoryException.java
    │                           ├── CosNPartListing.java
    │                           ├── CrcUtils.java
    │                           ├── CustomerDomainEndpointResolver.java
    │                           ├── FileStatusProbeEnum.java
    │                           ├── LocalRandomAccessMappedBufferPool.java
    │                           ├── MD5Utils.java
    │                           ├── MemoryAllocator.java
    │                           ├── OperationCancellingStatusProvider.java
    │                           ├── ReadBufferHolder.java
    │                           ├── ResettableFileInputStream.java
    │                           ├── Unit.java
    │                           ├── buffer
    │                           │   ├── CosNBufferFactory.java
    │                           │   ├── CosNBufferType.java
    │                           │   ├── CosNByteBuffer.java
    │                           │   ├── CosNDirectBuffer.java
    │                           │   ├── CosNDirectBufferFactory.java
    │                           │   ├── CosNMappedBuffer.java
    │                           │   ├── CosNMappedBufferFactory.java
    │                           │   ├── CosNNonDirectBuffer.java
    │                           │   ├── CosNNonDirectBufferFactory.java
    │                           │   ├── CosNRandomAccessMappedBuffer.java
    │                           │   └── CosNRandomAccessMappedBufferFactory.java
    │                           ├── common
    │                           │   └── Pair.java
    │                           └── multipart
    │                               └── upload
    │                                   ├── MultipartManager.java
    │                                   ├── UploadPart.java
    │                                   └── UploadPartCopy.java
    └── test
        ├── java
        │   └── org
        │       └── apache
        │           └── hadoop
        │               └── fs
        │                   ├── CosNFileSystemTestBase.java
        │                   ├── CosNFileSystemTestWithTimeout.java
        │                   ├── ITestCosNFileSystemAppend.java
        │                   ├── ITestCosNFileSystemCreate.java
        │                   ├── ITestCosNFileSystemDelete.java
        │                   ├── ITestCosNFileSystemGetFileStatus.java
        │                   ├── ITestCosNFileSystemListStatus.java
        │                   ├── ITestCosNFileSystemMkDirs.java
        │                   ├── ITestCosNFileSystemRename.java
        │                   ├── ITestCosNFileSystemSetAndGetXAttr.java
        │                   ├── ITestCosNFileSystemSymlink.java
        │                   ├── ITestCosNFileSystemTruncate.java
        │                   └── cosn
        │                       └── MemoryAllocatorTest.java
        └── resources
            ├── core-site.xml
            └── log4j.properties

/.github/ISSUE_TEMPLATE/bug_report.md:
-------------------------------------------------------------------------------- 1 | --- 2 | name: Bug report 3 | about: Create a report to help us improve 4 | title: '' 5 | labels: '' 6 | assignees: '' 7 | 8 | --- 9 | 10 | **Describe the bug** 11 | A clear and concise description of what the bug is. 12 | 13 | **To Reproduce** 14 | Steps to reproduce the behavior: 15 | 1. Go to '...' 16 | 2. Click on '....' 17 | 3. Scroll down to '....' 18 | 4. See error 19 | 20 | **Expected behavior** 21 | A clear and concise description of what you expected to happen. 22 | 23 | **Screenshots** 24 | If applicable, add screenshots to help explain your problem. 25 | 26 | **Desktop (please complete the following information):** 27 | - OS: [e.g. iOS] 28 | - Browser [e.g. chrome, safari] 29 | - Version [e.g. 22] 30 | 31 | **Smartphone (please complete the following information):** 32 | - Device: [e.g. iPhone6] 33 | - OS: [e.g. iOS8.1] 34 | - Browser [e.g. stock browser, safari] 35 | - Version [e.g. 22] 36 | 37 | **Additional context** 38 | Add any other context about the problem here. 39 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/feature_request.md: -------------------------------------------------------------------------------- 1 | --- 2 | name: Feature request 3 | about: Suggest an idea for this project 4 | title: '' 5 | labels: '' 6 | assignees: '' 7 | 8 | --- 9 | 10 | **Is your feature request related to a problem? Please describe.** 11 | A clear and concise description of what the problem is. Ex. I'm always frustrated when [...] 12 | 13 | **Describe the solution you'd like** 14 | A clear and concise description of what you want to happen. 15 | 16 | **Describe alternatives you've considered** 17 | A clear and concise description of any alternative solutions or features you've considered. 18 | 19 | **Additional context** 20 | Add any other context or screenshots about the feature request here. 21 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Created by .ignore support plugin (hsz.mobi) 2 | # User-specific stuff 3 | .DS_Store 4 | .idea/ 5 | 6 | # Gradle and Maven with auto-import 7 | # When using Gradle or Maven with auto-import, you should exclude module files, 8 | # since they will be recreated, and may cause churn. Uncomment if using 9 | # auto-import. 
10 | *.iml 11 | *.ipr 12 | 13 | ### Java template 14 | # Compiled class file 15 | *.class 16 | target/ 17 | 18 | # Log file 19 | *.log 20 | 21 | # BlueJ files 22 | *.ctxt 23 | 24 | # Mobile Tools for Java (J2ME) 25 | .mtj.tmp/ 26 | 27 | # Package Files # 28 | *.war 29 | *.nar 30 | *.ear 31 | *.zip 32 | *.tar.gz 33 | *.rar 34 | 35 | pom.xml-E 36 | pom.xml.versionsBackup 37 | 38 | # virtual machine crash logs, see http://www.java.com/en/download/help/error_hotspot.xml 39 | hs_err_pid* 40 | 41 | # local dep 42 | dep 43 | 44 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2018 腾讯云 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 
-------------------------------------------------------------------------------- /compile.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | base_dir=$(cd `dirname $0`;pwd) 4 | cd ${base_dir} 5 | hadoop_version_array=("2.7.5" "2.8.5" "3.1.0" "3.2.2" "3.3.0" "3.4.0") 6 | 7 | origin_version=$(mvn -q -Dexec.executable="echo" -Dexec.args='${project.version}' --non-recursive exec:exec) 8 | 9 | for hadoop_version in ${hadoop_version_array[@]} 10 | do 11 | sed -i -E "s/<hadoop\.version>.*<\/hadoop\.version>/<hadoop\.version>${hadoop_version}<\/hadoop\.version>/g" pom.xml 12 | mvn versions:set -DnewVersion=${hadoop_version}-${origin_version} 13 | mvn clean verify 14 | rm -rf dep/${hadoop_version} 15 | mkdir -p dep/${hadoop_version} 16 | cp target/*.jar dep/${hadoop_version}/ 17 | cp target/*.asc dep/${hadoop_version}/ 18 | cp target/*.pom dep/${hadoop_version}/ 19 | 20 | mvn versions:set -DnewVersion=${origin_version} 21 | done 22 | -------------------------------------------------------------------------------- /deploy.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | base_dir=$(cd `dirname $0`;pwd) 4 | 5 | hadoop_version_array=("2.7.5" "2.8.5" "3.1.0" "3.2.2" "3.3.0" "3.4.0") 6 | 7 | NORMAL="normal" 8 | INTER="inter" 9 | origin_artifact_version=$(mvn -q -Dexec.executable="echo" -Dexec.args='${project.version}' --non-recursive exec:exec) 10 | 11 | 12 | deploy_repository_id="" 13 | deploy_repository_url="" 14 | while getopts ":m:h" optname 15 | do 16 | case "$optname" in 17 | "m") 18 | echo "deploy mode is $OPTARG" 19 | OPT="${OPTARG}" 20 | if [ "$OPT" = "$NORMAL" ]; then 21 | # External Maven Central (staging) repository 22 | deploy_repository_id="oss" 23 | deploy_repository_url="https://oss.sonatype.org/service/local/staging/deploy/maven2" 24 | elif [ "$OPT" = "$INTER" ]; then 25 | deploy_repository_id="cos-inner-maven-repository" 26 | deploy_repository_url="http://mirrors.tencent.com/repository/maven/QCLOUD_COS" 27 | fi 28 | ;; 29 | "h") 30 | echo "-m normal or -m inter" 31 | ;; 32 | "?") 33 | echo "Unknown option $OPTARG" 34 | ;; 35 | *) 36 | echo "Unknown error" 37 | ;; 38 | esac 39 | done 40 | 41 | for hadoop_version in ${hadoop_version_array[@]} 42 | do 43 | cd ${base_dir}/dep/${hadoop_version} 44 | prefix="hadoop-cos-${hadoop_version}-${origin_artifact_version}" 45 | pom_file=${prefix}.pom 46 | read groupId artifactId versionId <<< $(mvn -f ${pom_file} -q -Dexec.executable="echo" -Dexec.args='${project.groupId} ${project.artifactId} ${project.version}' --non-recursive exec:exec) 47 | echo ${prefix} 48 | mvn deploy:deploy-file \ 49 | -DrepositoryId=${deploy_repository_id} \ 50 | -Durl=${deploy_repository_url} \ 51 | -Dfile=${prefix}.jar \ 52 | -DpomFile=${pom_file} 53 | 54 | 55 | mvn deploy:deploy-file \ 56 | -DrepositoryId=${deploy_repository_id} \ 57 | -Durl=${deploy_repository_url} \ 58 | -Dpackaging=jar.asc \ 59 | -Dfile=${prefix}.jar.asc \ 60 | -DgeneratePom=false \ 61 | -DgroupId=${groupId} \ 62 | -DartifactId=${artifactId} \ 63 | -Dversion=${versionId} 64 | 65 | mvn deploy:deploy-file \ 66 | -DrepositoryId=${deploy_repository_id} \ 67 | -Durl=${deploy_repository_url} \ 68 | -Dfile=${prefix}-sources.jar \ 69 | -Dpackaging=jar \ 70 | -Dclassifier=sources \ 71 | -DgeneratePom=false \ 72 | -DgroupId=${groupId} \ 73 | -DartifactId=${artifactId} \ 74 | -Dversion=${versionId} 75 | 76 | mvn deploy:deploy-file \ 77 | -DrepositoryId=${deploy_repository_id} \ 78 | -Durl=${deploy_repository_url} \ 79 | -Dfile=${prefix}-sources.jar.asc \ 80 | 
-Dpackaging=jar.asc \ 81 | -Dclassifier=sources \ 82 | -DgeneratePom=false \ 83 | -DgroupId=${groupId} \ 84 | -DartifactId=${artifactId} \ 85 | -Dversion=${versionId} 86 | 87 | mvn deploy:deploy-file \ 88 | -DrepositoryId=${deploy_repository_id} \ 89 | -Durl=${deploy_repository_url} \ 90 | -Dfile=${prefix}-javadoc.jar \ 91 | -Dpackaging=jar \ 92 | -Dclassifier=javadoc \ 93 | -DgeneratePom=false \ 94 | -DgroupId=${groupId} \ 95 | -DartifactId=${artifactId} \ 96 | -Dversion=${versionId} 97 | 98 | mvn deploy:deploy-file \ 99 | -DrepositoryId=${deploy_repository_id} \ 100 | -Durl=${deploy_repository_url} \ 101 | -Dfile=${prefix}-javadoc.jar.asc \ 102 | -Dpackaging=jar.asc \ 103 | -Dclassifier=javadoc \ 104 | -DgeneratePom=false \ 105 | -DgroupId=${groupId} \ 106 | -DartifactId=${artifactId} \ 107 | -Dversion=${versionId} 108 | 109 | mvn deploy:deploy-file \ 110 | -DrepositoryId=${deploy_repository_id} \ 111 | -Durl=${deploy_repository_url} \ 112 | -Dpackaging=pom.asc \ 113 | -Dfile=${prefix}.pom.asc \ 114 | -DgeneratePom=false \ 115 | -DgroupId=${groupId} \ 116 | -DartifactId=${artifactId} \ 117 | -Dversion=${versionId} 118 | done -------------------------------------------------------------------------------- /src/main/java/com/qcloud/chdfs/permission/RangerAccessType.java: -------------------------------------------------------------------------------- 1 | package com.qcloud.chdfs.permission; 2 | 3 | public enum RangerAccessType { 4 | 5 | LIST, 6 | WRITE, 7 | READ, 8 | DELETE; 9 | 10 | private RangerAccessType() { 11 | } 12 | } 13 | -------------------------------------------------------------------------------- /src/main/java/hadoopCosPluginVersionInfo.properties: -------------------------------------------------------------------------------- 1 | plugin_version=${project.version} -------------------------------------------------------------------------------- /src/main/java/org/apache/hadoop/fs/CosN.java: -------------------------------------------------------------------------------- 1 | package org.apache.hadoop.fs; 2 | 3 | import org.apache.hadoop.conf.Configuration; 4 | 5 | import java.io.IOException; 6 | import java.net.URI; 7 | import java.net.URISyntaxException; 8 | 9 | public class CosN extends DelegateToFileSystem { 10 | public CosN(URI theUri, Configuration conf) throws IOException, 11 | URISyntaxException { 12 | super(theUri, new CosFileSystem(), conf, CosFileSystem.SCHEME, false); 13 | } 14 | 15 | @Override 16 | public int getUriDefaultPort() { 17 | return -1; 18 | } 19 | } 20 | -------------------------------------------------------------------------------- /src/main/java/org/apache/hadoop/fs/CosNCopyFileContext.java: -------------------------------------------------------------------------------- 1 | package org.apache.hadoop.fs; 2 | 3 | import java.io.IOException; 4 | import java.util.concurrent.atomic.AtomicBoolean; 5 | import java.util.concurrent.atomic.AtomicInteger; 6 | import java.util.concurrent.locks.Condition; 7 | import java.util.concurrent.locks.ReentrantLock; 8 | 9 | public class CosNCopyFileContext { 10 | private final ReentrantLock lock = new ReentrantLock(); 11 | private Condition readyCondition = lock.newCondition(); 12 | 13 | private AtomicBoolean copySuccess = new AtomicBoolean(true); 14 | private AtomicInteger copiesFinish = new AtomicInteger(0); 15 | 16 | private IOException copyException = null; 17 | 18 | public void lock() { 19 | this.lock.lock(); 20 | } 21 | 22 | public void unlock() { 23 | this.lock.unlock(); 24 | } 25 | 26 | public void 
awaitAllFinish(int copiesFinish) throws InterruptedException { 27 | while (this.copiesFinish.get() != copiesFinish && !this.hasException()) { 28 | this.readyCondition.await(); 29 | } 30 | } 31 | 32 | public IOException getCopyException() { 33 | return copyException; 34 | } 35 | 36 | public void setCopyException(IOException copyException) { 37 | this.copyException = copyException; 38 | } 39 | 40 | public boolean hasException() { 41 | return this.copyException != null; 42 | } 43 | 44 | public void signalAll() { 45 | this.readyCondition.signalAll(); 46 | } 47 | 48 | public boolean isCopySuccess() { 49 | return this.copySuccess.get(); 50 | } 51 | 52 | public void setCopySuccess(boolean copySuccess) { 53 | this.copySuccess.set(copySuccess); 54 | } 55 | 56 | public void incCopiesFinish(int copiesFinish) { 57 | this.copiesFinish.addAndGet(copiesFinish); 58 | } 59 | 60 | public void incCopiesFinish() { 61 | this.copiesFinish.addAndGet(1); 62 | } 63 | } 64 | -------------------------------------------------------------------------------- /src/main/java/org/apache/hadoop/fs/CosNCopyFileTask.java: -------------------------------------------------------------------------------- 1 | package org.apache.hadoop.fs; 2 | 3 | import org.slf4j.Logger; 4 | import org.slf4j.LoggerFactory; 5 | 6 | import java.io.IOException; 7 | 8 | public class CosNCopyFileTask implements Runnable { 9 | private static final Logger LOG = LoggerFactory.getLogger(CosNCopyFileTask.class); 10 | 11 | private final NativeFileSystemStore store; 12 | 13 | private final String srcKey; 14 | private final FileMetadata srcFileMetadata; 15 | private final String dstKey; 16 | private final CosNCopyFileContext cosCopyFileContext; 17 | 18 | public CosNCopyFileTask(NativeFileSystemStore store, 19 | String srcKey, FileMetadata srcFileMetadata, 20 | String dstKey, 21 | CosNCopyFileContext cosCopyFileContext) { 22 | this.store = store; 23 | this.srcKey = srcKey; 24 | this.srcFileMetadata = srcFileMetadata; 25 | this.dstKey = dstKey; 26 | this.cosCopyFileContext = cosCopyFileContext; 27 | } 28 | 29 | @Override 30 | public void run() { 31 | boolean fail = false; 32 | try { 33 | this.store.copy(srcKey, this.srcFileMetadata, dstKey); 34 | } catch (IOException e) { 35 | LOG.warn("Exception thrown when copy from {} to {}", this.srcKey, this.dstKey, e); 36 | fail = true; 37 | } finally { 38 | this.cosCopyFileContext.lock(); 39 | if (fail) { 40 | cosCopyFileContext.setCopySuccess(false); 41 | } 42 | cosCopyFileContext.incCopiesFinish(); 43 | cosCopyFileContext.signalAll(); 44 | cosCopyFileContext.unlock(); 45 | } 46 | } 47 | } 48 | -------------------------------------------------------------------------------- /src/main/java/org/apache/hadoop/fs/CosNDeleteFileContext.java: -------------------------------------------------------------------------------- 1 | package org.apache.hadoop.fs; 2 | 3 | import java.io.IOException; 4 | import java.util.concurrent.atomic.AtomicBoolean; 5 | import java.util.concurrent.atomic.AtomicInteger; 6 | import java.util.concurrent.atomic.AtomicLong; 7 | import java.util.concurrent.locks.Condition; 8 | import java.util.concurrent.locks.ReentrantLock; 9 | 10 | public class CosNDeleteFileContext { 11 | private final ReentrantLock lock = new ReentrantLock(); 12 | private Condition readyCondition = lock.newCondition(); 13 | 14 | private AtomicBoolean deleteSuccess = new AtomicBoolean(true); 15 | private AtomicInteger deletesFinish = new AtomicInteger(0); 16 | 17 | private IOException deleteException = null; 18 | 19 | public void 
lock() { 20 | this.lock.lock(); 21 | } 22 | 23 | public void unlock() { 24 | this.lock.unlock(); 25 | } 26 | 27 | public IOException getIOException() { 28 | // todo whether need to sprite the exception from the delete interface 29 | return this.deleteException; 30 | } 31 | 32 | public void setIOException(IOException e) { 33 | this.deleteException = e; 34 | } 35 | 36 | public boolean hasException() { 37 | return this.deleteException != null; 38 | } 39 | 40 | public void awaitAllFinish(int deletesFinish) throws InterruptedException { 41 | while (this.deletesFinish.get() != deletesFinish && !this.hasException()) { 42 | this.readyCondition.await(); 43 | } 44 | } 45 | 46 | public void signalAll() { 47 | this.readyCondition.signalAll(); 48 | } 49 | 50 | public boolean isDeleteSuccess() { 51 | return this.deleteSuccess.get(); 52 | } 53 | 54 | public void setDeleteSuccess(boolean deleteSuccess) { 55 | this.deleteSuccess.set(deleteSuccess); 56 | } 57 | 58 | public void incDeletesFinish() { 59 | this.deletesFinish.addAndGet(1); 60 | } 61 | 62 | public void incDeletesFinish(int deletesFinish) { 63 | this.deletesFinish.addAndGet(deletesFinish); 64 | } 65 | } 66 | 67 | -------------------------------------------------------------------------------- /src/main/java/org/apache/hadoop/fs/CosNDeleteFileTask.java: -------------------------------------------------------------------------------- 1 | package org.apache.hadoop.fs; 2 | 3 | import org.slf4j.Logger; 4 | import org.slf4j.LoggerFactory; 5 | 6 | import java.io.IOException; 7 | import java.util.ArrayList; 8 | import java.util.List; 9 | 10 | public class CosNDeleteFileTask implements Runnable { 11 | private static final Logger LOG = LoggerFactory.getLogger(CosNDeleteFileTask.class); 12 | 13 | private final NativeFileSystemStore store; 14 | private final List deletingKeys; 15 | private final CosNDeleteFileContext cosDeleteFileContext; 16 | 17 | public CosNDeleteFileTask(NativeFileSystemStore store, String srcKey, 18 | CosNDeleteFileContext cosDeleteFileContext) { 19 | this.store = store; 20 | this.deletingKeys = new ArrayList<>(1); 21 | this.deletingKeys.add(srcKey); 22 | this.cosDeleteFileContext = cosDeleteFileContext; 23 | } 24 | 25 | public CosNDeleteFileTask(NativeFileSystemStore store, List deletingKeys, 26 | CosNDeleteFileContext cosDeleteFileContext) { 27 | this.store = store; 28 | this.deletingKeys = deletingKeys; 29 | this.cosDeleteFileContext = cosDeleteFileContext; 30 | } 31 | 32 | @Override 33 | public void run() { 34 | boolean fail = false; 35 | int deleteFinishCounter = 0; 36 | try { 37 | for (String srcKey : deletingKeys) { 38 | try { 39 | LOG.debug("Delete the cos key: {}.", srcKey); 40 | this.store.delete(srcKey); 41 | deleteFinishCounter++; 42 | } catch (IOException e) { 43 | LOG.warn("Exception thrown when delete file [{}], exception: ", srcKey, e); 44 | fail = true; 45 | cosDeleteFileContext.setIOException(e); 46 | } 47 | } 48 | } finally { 49 | this.cosDeleteFileContext.lock(); 50 | if (fail) { 51 | cosDeleteFileContext.setDeleteSuccess(false); 52 | } 53 | cosDeleteFileContext.incDeletesFinish(deleteFinishCounter); 54 | cosDeleteFileContext.signalAll(); 55 | cosDeleteFileContext.unlock(); 56 | } 57 | } 58 | } 59 | 60 | -------------------------------------------------------------------------------- /src/main/java/org/apache/hadoop/fs/CosNEncryptionMethods.java: -------------------------------------------------------------------------------- 1 | package org.apache.hadoop.fs; 2 | 3 | import com.qcloud.cos.utils.StringUtils; 4 | 5 | 
import java.io.IOException; 6 | 7 | public enum CosNEncryptionMethods { 8 | 9 | SSE_COS("SSE-COS", true), 10 | SSE_C("SSE-C", true), 11 | SSE_KMS("SSE-KMS", true), 12 | NONE("", false); 13 | 14 | static final String UNKNOWN_ALGORITHM_MESSAGE 15 | = "COSN unknown the encryption algorithm "; 16 | 17 | private String method; 18 | private boolean serverSide; 19 | 20 | CosNEncryptionMethods(String method, final boolean serverSide) { 21 | this.method = method; 22 | this.serverSide = serverSide; 23 | } 24 | 25 | public String getMethod() { 26 | return method; 27 | } 28 | 29 | /** 30 | * Get the encryption mechanism from the value provided. 31 | * @param name algorithm name 32 | * @return the method 33 | * @throws IOException if the algorithm is unknown 34 | */ 35 | public static CosNEncryptionMethods getMethod(String name) throws IOException { 36 | if (StringUtils.isNullOrEmpty(name)) { 37 | return NONE; 38 | } 39 | for (CosNEncryptionMethods v : values()) { 40 | if (v.getMethod().equals(name)) { 41 | return v; 42 | } 43 | } 44 | throw new IOException(UNKNOWN_ALGORITHM_MESSAGE + name); 45 | } 46 | 47 | /** 48 | * Flag to indicate this is a server-side encryption option. 49 | * @return true if this is server side. 50 | */ 51 | public boolean isServerSide() { 52 | return serverSide; 53 | } 54 | 55 | } 56 | -------------------------------------------------------------------------------- /src/main/java/org/apache/hadoop/fs/CosNEncryptionSecrets.java: -------------------------------------------------------------------------------- 1 | package org.apache.hadoop.fs; 2 | 3 | import com.qcloud.cos.utils.StringUtils; 4 | import org.apache.hadoop.io.LongWritable; 5 | import org.apache.hadoop.io.Text; 6 | import org.apache.hadoop.io.Writable; 7 | 8 | import java.io.*; 9 | import java.util.Objects; 10 | 11 | 12 | public class CosNEncryptionSecrets implements Writable, Serializable { 13 | 14 | public static final int MAX_SECRET_LENGTH = 2048; 15 | 16 | private static final long serialVersionUID = 1208329045511296375L; 17 | 18 | /** 19 | * Encryption algorithm to use: must match one in 20 | * {@link CosNEncryptionMethods}. 21 | */ 22 | private String encryptionAlgorithm = ""; 23 | 24 | /** 25 | * Encryption key: possibly sensitive information. 26 | */ 27 | private String encryptionKey = ""; 28 | 29 | /** 30 | * Encryption context used by sse-kms 31 | */ 32 | private String encryptionContext = ""; 33 | 34 | /** 35 | * This field isn't serialized/marshalled; it is rebuilt from the 36 | * encryptionAlgorithm field. 37 | */ 38 | private transient CosNEncryptionMethods encryptionMethod = 39 | CosNEncryptionMethods.NONE; 40 | 41 | /** 42 | * Empty constructor, for use in marshalling. 43 | */ 44 | public CosNEncryptionSecrets() { 45 | } 46 | 47 | /** 48 | * Create a pair of secrets. 49 | * 50 | * @param encryptionAlgorithm algorithm enumeration. 51 | * @param encryptionKey key/key reference. 52 | * @throws IOException failure to initialize. 53 | */ 54 | public CosNEncryptionSecrets(final CosNEncryptionMethods encryptionAlgorithm, 55 | final String encryptionKey) throws IOException { 56 | this(encryptionAlgorithm.getMethod(), encryptionKey); 57 | } 58 | 59 | public CosNEncryptionSecrets(final CosNEncryptionMethods encryptionAlgorithm, 60 | final String encryptionKey, final String encryptionContext) throws IOException { 61 | this(encryptionAlgorithm.getMethod(), encryptionKey, encryptionContext); 62 | } 63 | /** 64 | * Create a pair of secrets. 
65 | * 66 | * @param encryptionAlgorithm algorithm name 67 | * @param encryptionKey key/key reference. 68 | * @throws IOException failure to initialize. 69 | */ 70 | public CosNEncryptionSecrets(final String encryptionAlgorithm, 71 | final String encryptionKey) throws IOException { 72 | this.encryptionAlgorithm = encryptionAlgorithm; 73 | this.encryptionKey = encryptionKey; 74 | init(); 75 | } 76 | 77 | public CosNEncryptionSecrets(final String encryptionAlgorithm, 78 | final String encryptionKey, final String encryptionContext) throws IOException { 79 | this.encryptionAlgorithm = encryptionAlgorithm; 80 | this.encryptionKey = encryptionKey; 81 | this.encryptionContext = encryptionContext; 82 | init(); 83 | } 84 | /** 85 | * Write out the encryption secrets. 86 | * 87 | * @param out {@code DataOutput} to serialize this object into. 88 | * @throws IOException IO failure 89 | */ 90 | @Override 91 | public void write(final DataOutput out) throws IOException { 92 | new LongWritable(serialVersionUID).write(out); 93 | Text.writeString(out, encryptionAlgorithm); 94 | Text.writeString(out, encryptionKey); 95 | } 96 | 97 | /** 98 | * Read in from the writable stream. 99 | * After reading, call {@link #init()}. 100 | * 101 | * @param in {@code DataInput} to deserialize this object from. 102 | * @throws IOException failure to read/validate data. 103 | */ 104 | @Override 105 | public void readFields(final DataInput in) throws IOException { 106 | final LongWritable version = new LongWritable(); 107 | version.readFields(in); 108 | if (version.get() != serialVersionUID) { 109 | throw new IOException( 110 | "Incompatible EncryptionSecrets version"); 111 | } 112 | encryptionAlgorithm = Text.readString(in, MAX_SECRET_LENGTH); 113 | encryptionKey = Text.readString(in, MAX_SECRET_LENGTH); 114 | init(); 115 | } 116 | 117 | /** 118 | * For java serialization: read and then call {@link #init()}. 119 | * 120 | * @param in input 121 | * @throws IOException IO problem 122 | * @throws ClassNotFoundException problem loading inner class. 123 | */ 124 | private void readObject(ObjectInputStream in) 125 | throws IOException, ClassNotFoundException { 126 | in.defaultReadObject(); 127 | init(); 128 | } 129 | 130 | /** 131 | * Init all state, including after any read. 132 | * 133 | * @throws IOException error rebuilding state. 134 | */ 135 | private void init() throws IOException { 136 | encryptionMethod = CosNEncryptionMethods.getMethod( 137 | encryptionAlgorithm); 138 | } 139 | 140 | public String getEncryptionAlgorithm() { 141 | return encryptionAlgorithm; 142 | } 143 | 144 | public String getEncryptionKey() { 145 | return encryptionKey; 146 | } 147 | 148 | public String getEncryptionContext() { return encryptionContext; } 149 | /** 150 | * Does this instance have encryption options? 151 | * That is: is the algorithm non-null. 152 | * 153 | * @return true if there's an encryption algorithm. 154 | */ 155 | public boolean hasEncryptionAlgorithm() { 156 | return !StringUtils.isNullOrEmpty(encryptionAlgorithm); 157 | } 158 | 159 | /** 160 | * Does this instance have an encryption key? 161 | * 162 | * @return true if there's an encryption key. 
163 | */ 164 | public boolean hasEncryptionKey() { 165 | return !StringUtils.isNullOrEmpty(encryptionKey); 166 | } 167 | 168 | @Override 169 | public boolean equals(final Object o) { 170 | if (this == o) { 171 | return true; 172 | } 173 | if (o == null || getClass() != o.getClass()) { 174 | return false; 175 | } 176 | final CosNEncryptionSecrets that = (CosNEncryptionSecrets) o; 177 | return Objects.equals(encryptionAlgorithm, that.encryptionAlgorithm) 178 | && Objects.equals(encryptionKey, that.encryptionKey); 179 | } 180 | 181 | @Override 182 | public int hashCode() { 183 | return Objects.hash(encryptionAlgorithm, encryptionKey); 184 | } 185 | 186 | /** 187 | * Get the encryption method. 188 | * 189 | * @return the encryption method 190 | */ 191 | public CosNEncryptionMethods getEncryptionMethod() { 192 | return encryptionMethod; 193 | } 194 | 195 | /** 196 | * String function returns the encryption mode but not any other 197 | * secrets. 198 | * 199 | * @return a string safe for logging. 200 | */ 201 | @Override 202 | public String toString() { 203 | return CosNEncryptionMethods.NONE.equals(encryptionMethod) 204 | ? "(no encryption)" 205 | : encryptionMethod.getMethod(); 206 | } 207 | } 208 | -------------------------------------------------------------------------------- /src/main/java/org/apache/hadoop/fs/CosNExtendedFSDataOutputStream.java: -------------------------------------------------------------------------------- 1 | package org.apache.hadoop.fs; 2 | 3 | import com.google.common.util.concurrent.ListenableFuture; 4 | import com.google.common.util.concurrent.ListeningExecutorService; 5 | import com.google.common.util.concurrent.MoreExecutors; 6 | import com.qcloud.cos.model.PartETag; 7 | import com.qcloud.cos.utils.CRC64; 8 | import org.apache.hadoop.conf.Configuration; 9 | import org.apache.hadoop.fs.cosn.ConsistencyChecker; 10 | import org.apache.hadoop.fs.cosn.Unit; 11 | import org.apache.hadoop.fs.cosn.multipart.upload.UploadPartCopy; 12 | import org.slf4j.Logger; 13 | import org.slf4j.LoggerFactory; 14 | 15 | import java.io.IOException; 16 | import java.io.InputStream; 17 | import java.math.BigInteger; 18 | import java.util.concurrent.Callable; 19 | import java.util.concurrent.ExecutorService; 20 | 21 | /** 22 | * Extended supports: append/truncate and visible flush. 
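* "Visible flush" means that flush() commits the data written so far, making it immediately readable, and a later write() transparently resumes the upload (see resumeForWrite() below).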
23 | */ 24 | public class CosNExtendedFSDataOutputStream extends CosNFSDataOutputStream { 25 | private static final Logger LOG = LoggerFactory.getLogger(CosNExtendedFSDataOutputStream.class); 26 | 27 | private final ListeningExecutorService copyExecutoService; 28 | 29 | public CosNExtendedFSDataOutputStream(Configuration conf, NativeFileSystemStore nativeStore, 30 | String cosKey, ExecutorService ioExecutorService, ExecutorService copyExecutorService) throws IOException { 31 | this(conf, nativeStore, cosKey, ioExecutorService, copyExecutorService, false); 32 | } 33 | 34 | public CosNExtendedFSDataOutputStream(Configuration conf, NativeFileSystemStore nativeStore, 35 | String cosKey, ExecutorService ioExecutorService, ExecutorService copyExecutorService, boolean appendFlag) throws IOException { 36 | super(conf, nativeStore, cosKey, ioExecutorService); 37 | this.copyExecutoService = MoreExecutors.listeningDecorator(copyExecutorService); 38 | if (appendFlag) { 39 | this.resumeForWrite(); 40 | } 41 | } 42 | 43 | @Override 44 | public synchronized void write(byte[] b, int off, int len) throws IOException { 45 | if (super.committed) { 46 | this.resumeForWrite(); 47 | } 48 | super.write(b, off, len); 49 | } 50 | 51 | @Override 52 | public synchronized void write(int b) throws IOException { 53 | if (super.committed) { 54 | this.resumeForWrite(); 55 | } 56 | super.write(b); 57 | } 58 | 59 | @Override 60 | public synchronized void flush() throws IOException { 61 | super.flush(); 62 | // Visible immediately after flushing. 63 | super.commit(); 64 | } 65 | 66 | private void resumeForWrite() throws IOException { 67 | FileMetadata fileMetadata = super.nativeStore.retrieveMetadata(super.cosKey); 68 | if (null == fileMetadata) { 69 | throw new IOException(String.format("The cos key [%s] is not found.", super.cosKey)); 70 | } 71 | if (!fileMetadata.isFile()) { 72 | throw new IOException("The cos key is a directory object. Can not resume the write operation for it."); 73 | } 74 | 75 | super.resetContext(); 76 | super.initNewCurrentPartResource(); 77 | 78 | // resume for write operation. 
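// Two resume paths follow: an object smaller than one part is re-read and re-written
// through the local buffer, while a larger object is rebuilt as a multipart upload whose
// full parts are copied server-side (uploadPartCopy), so only the trailing partial part
// needs to be re-read over the network.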
79 | try { 80 | if (fileMetadata.getLength() < super.partSize) { 81 | // Single file resume 82 | try (InputStream inputStream = super.nativeStore.retrieve(super.cosKey)) { 83 | byte[] chunk = new byte[(int) (4 * Unit.KB)]; 84 | int readBytes = inputStream.read(chunk); 85 | while (readBytes != -1) { 86 | super.write(chunk, 0, readBytes); 87 | readBytes = inputStream.read(chunk); 88 | } 89 | } 90 | } else { 91 | // Multipart copy resume 92 | super.multipartUpload = new MultipartUploadEx(super.cosKey); 93 | long copyRemaining = fileMetadata.getLength(); 94 | long firstByte = 0; 95 | long lastByte = firstByte + super.partSize - 1; 96 | while (copyRemaining >= super.partSize) { 97 | UploadPartCopy uploadPartCopy = new UploadPartCopy(super.cosKey, super.cosKey, super.currentPartNumber++, 98 | firstByte, lastByte); 99 | ((MultipartUploadEx) super.multipartUpload).uploadPartCopyAsync(uploadPartCopy); 100 | copyRemaining -= ((lastByte - firstByte) + 1); 101 | firstByte = lastByte + 1; 102 | lastByte = firstByte + super.partSize - 1; 103 | } 104 | 105 | // initialize the last part 106 | if (copyRemaining > 0) { 107 | lastByte = firstByte + copyRemaining - 1; 108 | try (InputStream inputStream = super.nativeStore.retrieveBlock(super.cosKey, firstByte, lastByte)) { 109 | byte[] chunk = new byte[(int) (4 * Unit.KB)]; 110 | int readBytes = inputStream.read(chunk); 111 | while (readBytes != -1) { 112 | super.write(chunk, 0, readBytes); 113 | readBytes = inputStream.read(chunk); 114 | } 115 | } 116 | } 117 | 118 | // initialize the consistency checker. 119 | BigInteger bigInteger = new BigInteger(fileMetadata.getCrc64ecm()); 120 | this.consistencyChecker = new ConsistencyChecker(super.nativeStore, super.cosKey, 121 | new CRC64(bigInteger.longValue()), fileMetadata.getLength(), this.clientEncryptionEnabled); 122 | } 123 | } catch (Exception e) { 124 | LOG.error("Failed to resume for writing. Abort it.", e); 125 | super.doAbort(); 126 | throw new IOException(e); 127 | } 128 | } 129 | 130 | protected class MultipartUploadEx extends MultipartUpload { 131 | protected MultipartUploadEx(String cosKey) throws IOException { 132 | this(cosKey, null); 133 | } 134 | 135 | protected MultipartUploadEx(String cosKey, String uploadId) throws IOException { 136 | super(cosKey, uploadId); 137 | } 138 | 139 | protected void uploadPartCopyAsync(final UploadPartCopy uploadPartCopy) throws IOException { 140 | if (super.isCompleted() || super.isAborted()) { 141 | throw new IOException(String.format("The MPU [%s] has been closed or aborted. 
" + 142 | "Can not execute the upload part copy operation.", this)); 143 | } 144 | 145 | partsSubmitted.incrementAndGet(); 146 | bytesSubmitted.addAndGet(uploadPartCopy.getLastByte() - uploadPartCopy.getFirstByte() + 1); 147 | ListenableFuture partETagListenableFuture = 148 | CosNExtendedFSDataOutputStream.this.copyExecutoService.submit(new Callable() { 149 | @Override 150 | public PartETag call() throws Exception { 151 | LOG.debug("Start to copy the part: {}.", uploadPartCopy); 152 | PartETag partETag = nativeStore.uploadPartCopy(getUploadId(), 153 | uploadPartCopy.getSrcKey(), uploadPartCopy.getDestKey(), uploadPartCopy.getPartNumber(), 154 | uploadPartCopy.getFirstByte(), uploadPartCopy.getLastByte()); 155 | partsUploaded.incrementAndGet(); 156 | bytesUploaded.addAndGet(uploadPartCopy.getLastByte() - uploadPartCopy.getFirstByte() + 1); 157 | return partETag; 158 | } 159 | }); 160 | super.partETagFutures.put(uploadPartCopy.getPartNumber(), partETagListenableFuture); 161 | } 162 | } 163 | } 164 | -------------------------------------------------------------------------------- /src/main/java/org/apache/hadoop/fs/CosNFSBufferedFSInputStream.java: -------------------------------------------------------------------------------- 1 | package org.apache.hadoop.fs; 2 | 3 | import java.io.IOException; 4 | import java.nio.ByteBuffer; 5 | 6 | public class CosNFSBufferedFSInputStream extends BufferedFSInputStream implements ByteBufferReadable { 7 | 8 | public CosNFSBufferedFSInputStream(FSInputStream in, int size) { 9 | super(in, size); 10 | } 11 | 12 | @Override 13 | public int read(ByteBuffer byteBuffer) throws IOException { 14 | final int bufSize = 1024; 15 | byte[] buf = new byte[bufSize]; 16 | int totalRead = 0; 17 | while (byteBuffer.hasRemaining()) { 18 | int maxReadSize = Math.min(bufSize, byteBuffer.remaining()); 19 | int readLen = this.read(buf, 0, maxReadSize); 20 | if (readLen <= 0) { 21 | return totalRead == 0 ? 
-1 : totalRead; 22 | } 23 | byteBuffer.put(buf, 0, readLen); 24 | totalRead += readLen; 25 | } 26 | return totalRead; 27 | } 28 | } 29 | -------------------------------------------------------------------------------- /src/main/java/org/apache/hadoop/fs/CosNFileReadTask.java: -------------------------------------------------------------------------------- 1 | package org.apache.hadoop.fs; 2 | 3 | import org.apache.hadoop.conf.Configuration; 4 | import org.apache.hadoop.io.IOUtils; 5 | import org.slf4j.Logger; 6 | import org.slf4j.LoggerFactory; 7 | 8 | import java.io.IOException; 9 | import java.io.InputStream; 10 | import java.net.SocketException; 11 | import java.util.Objects; 12 | import java.util.concurrent.ThreadLocalRandom; 13 | import java.util.concurrent.TimeUnit; 14 | import java.util.concurrent.atomic.AtomicBoolean; 15 | 16 | import static org.apache.hadoop.fs.CosNConfigKeys.DEFAULT_READ_BUFFER_ALLOCATE_TIMEOUT_SECONDS; 17 | 18 | public class CosNFileReadTask implements Runnable { 19 | static final Logger LOG = LoggerFactory.getLogger(CosNFileReadTask.class); 20 | 21 | private final Configuration conf; 22 | private final String key; 23 | private final NativeFileSystemStore store; 24 | private final CosNFSInputStream.ReadBuffer readBuffer; 25 | private final int socketErrMaxRetryTimes; 26 | private final AtomicBoolean closed; 27 | 28 | /** 29 | * cos file read task 30 | * @param conf config 31 | * @param key cos key 32 | * @param store native file system 33 | * @param readBuffer read buffer 34 | */ 35 | public CosNFileReadTask(Configuration conf, String key, 36 | NativeFileSystemStore store, 37 | CosNFSInputStream.ReadBuffer readBuffer, 38 | int socketErrMaxRetryTimes, 39 | AtomicBoolean closed) { 40 | this.conf = conf; 41 | this.key = key; 42 | this.store = store; 43 | this.readBuffer = readBuffer; 44 | this.socketErrMaxRetryTimes = socketErrMaxRetryTimes; 45 | this.closed = closed; 46 | } 47 | 48 | @Override 49 | public void run() { 50 | try { 51 | this.readBuffer.lock(); 52 | checkStreamClosed(); 53 | try { 54 | this.readBuffer.allocate( 55 | conf.getLong(CosNConfigKeys.COSN_READ_BUFFER_ALLOCATE_TIMEOUT_SECONDS, 56 | DEFAULT_READ_BUFFER_ALLOCATE_TIMEOUT_SECONDS), TimeUnit.SECONDS); 57 | } catch (Exception e) { 58 | this.setFailResult("allocate read buffer failed.", new IOException(e)); 59 | return; 60 | } 61 | int retryIndex = 1; 62 | boolean needRetry = false; 63 | while (true) { 64 | try { 65 | this.retrieveBlock(); 66 | needRetry = false; 67 | } catch (SocketException socketException) { 68 | // if we get stream success, but exceptions occurs when read cos input stream 69 | String errMsg = String.format("retrieve block sdk socket failed, " + 70 | "retryIndex: [%d / %d], key: %s, range: [%d , %d], exception: %s", 71 | retryIndex, this.socketErrMaxRetryTimes, this.key, 72 | this.readBuffer.getStart(), this.readBuffer.getEnd(), socketException.toString()); 73 | if (retryIndex <= this.socketErrMaxRetryTimes) { 74 | LOG.info(errMsg, socketException); 75 | long sleepLeast = retryIndex * 300L; 76 | long sleepBound = retryIndex * 500L; 77 | try { 78 | Thread.sleep(ThreadLocalRandom.current(). 
79 | nextLong(sleepLeast, sleepBound)); 80 | ++retryIndex; 81 | needRetry = true; 82 | } catch (InterruptedException interruptedException) { 83 | this.setFailResult(errMsg, new IOException(interruptedException.toString())); 84 | break; 85 | } 86 | } else { 87 | this.setFailResult(errMsg, socketException); 88 | break; 89 | } 90 | } catch (IOException ioException) { 91 | String errMsg = String.format("retrieve block failed, " + 92 | "retryIndex: [%d / %d], key: %s, range: [%d , %d], io exception: %s", 93 | retryIndex, this.socketErrMaxRetryTimes, this.key, 94 | this.readBuffer.getStart(), this.readBuffer.getEnd(), ioException); 95 | this.setFailResult(errMsg, ioException); 96 | break; 97 | } 98 | 99 | if (!needRetry) { 100 | break; 101 | } 102 | } // end of retry 103 | } catch (Throwable throwable) { 104 | this.setFailResult( 105 | String.format("retrieve block failed, key: %s, range: [%d , %d], exception: %s", 106 | this.key, this.readBuffer.getStart(), this.readBuffer.getEnd(), throwable), 107 | new IOException(throwable)); 108 | } finally { 109 | this.readBuffer.signalAll(); 110 | this.readBuffer.unLock(); 111 | } 112 | } 113 | 114 | public void setFailResult(String msg, IOException e) { 115 | this.readBuffer.setStatus(CosNFSInputStream.ReadBuffer.ERROR); 116 | this.readBuffer.setException(e); 117 | if (e.getCause() != null && e.getCause() instanceof CancelledException) { 118 | // An expected cancellation; log it at the WARN level. 119 | LOG.warn(msg); 120 | } else { 121 | LOG.error(msg); 122 | } 123 | } 124 | 125 | // not thread safe 126 | private void retrieveBlock() throws IOException, CancelledException { 127 | byte[] dataBuf = readBuffer.getBuffer(); 128 | checkStreamClosed(); 129 | Objects.requireNonNull(dataBuf); 130 | InputStream inputStream; 131 | inputStream = this.store.retrieveBlock( 132 | this.key, this.readBuffer.getStart(), this.readBuffer.getEnd()); 133 | IOUtils.readFully( 134 | inputStream, dataBuf, 0, 135 | dataBuf.length); 136 | int readEof = inputStream.read(); 137 | if (readEof != -1) { 138 | LOG.error("Expected EOF (-1) after reading the block, but more data was returned. 
key: {}.", this.key); 139 | } 140 | inputStream.close(); 141 | this.readBuffer.setStatus(CosNFSInputStream.ReadBuffer.SUCCESS); 142 | } 143 | 144 | private void checkStreamClosed() throws CancelledException { 145 | if (closed.get()) { 146 | throw new CancelledException("the input stream has been canceled."); 147 | } 148 | } 149 | 150 | 151 | private static class CancelledException extends Exception { 152 | public CancelledException(String message) { 153 | super(message); 154 | } 155 | } 156 | } 157 | 158 | -------------------------------------------------------------------------------- /src/main/java/org/apache/hadoop/fs/CosNFileStatus.java: -------------------------------------------------------------------------------- 1 | package org.apache.hadoop.fs; 2 | 3 | import org.apache.hadoop.fs.permission.FsPermission; 4 | 5 | import javax.annotation.Nullable; 6 | import java.util.Map; 7 | 8 | public class CosNFileStatus extends FileStatus { 9 | private final String ETag; 10 | private final String crc64ecma; 11 | private final String crc32cm; 12 | private final String storageClass; 13 | private final String versionId; 14 | private final Map userAttributes; 15 | 16 | public CosNFileStatus(long length, boolean isdir, int block_replication, long blocksize, long modification_time, 17 | long access_time, FsPermission permission, String owner, String group, Path path) { 18 | this(length, isdir, block_replication, blocksize, modification_time, access_time, permission, owner, group, 19 | path, null); 20 | } 21 | 22 | public CosNFileStatus(long length, boolean isdir, int block_replication, long blocksize, long modification_time, 23 | long access_time, FsPermission permission, String owner, String group, Path path, 24 | String ETag) { 25 | this(length, isdir, block_replication, blocksize, modification_time, access_time, permission, owner, group, 26 | path, ETag, null, null, null, null, null); 27 | } 28 | 29 | public CosNFileStatus(long length, boolean isdir, int block_replication, long blocksize, long modification_time, 30 | long access_time, FsPermission permission, String owner, String group, Path path, 31 | String ETag, String crc64ecma, String crc32cm, String versionId, String storageClass, Map userAttributes) { 32 | super(length, isdir, block_replication, blocksize, modification_time, access_time, permission, owner, group, 33 | path); 34 | this.ETag = ETag; 35 | this.crc64ecma = crc64ecma; 36 | this.crc32cm = crc32cm; 37 | this.storageClass = storageClass; 38 | this.versionId = versionId; 39 | this.userAttributes = userAttributes; 40 | } 41 | 42 | public String getETag() { 43 | return ETag; 44 | } 45 | 46 | @Nullable 47 | public String getCrc64ecma() { 48 | return crc64ecma; 49 | } 50 | 51 | @Nullable 52 | public String getCrc32cm() { 53 | return crc32cm; 54 | } 55 | 56 | public String getStorageClass() { 57 | return storageClass; 58 | } 59 | 60 | @Nullable 61 | public String getVersionId() { 62 | return versionId; 63 | } 64 | 65 | @Nullable 66 | public Map getUserAttributes() { 67 | return userAttributes; 68 | } 69 | } 70 | -------------------------------------------------------------------------------- /src/main/java/org/apache/hadoop/fs/CosNFileSystemExt.java: -------------------------------------------------------------------------------- 1 | package org.apache.hadoop.fs; 2 | 3 | import org.apache.hadoop.fs.impl.AbstractFSBuilderImpl; 4 | import org.apache.hadoop.fs.impl.OpenFileParameters; 5 | import org.apache.hadoop.util.LambdaUtils; 6 | 7 | import java.io.IOException; 8 | import 
java.util.Collections; 9 | import java.util.concurrent.CompletableFuture; 10 | 11 | /** 12 | * Extends the CosNFileSystem to provide additional functionality for some internal systems, such as GooseFS. 13 | * 14 | * This class is compiled over the Hadoop 3.3.0+ version. 15 | */ 16 | public class CosNFileSystemExt extends CosNFileSystem { 17 | @Override 18 | protected CompletableFuture openFileWithOptions(Path path, OpenFileParameters parameters) 19 | throws IOException { 20 | AbstractFSBuilderImpl.rejectUnknownMandatoryKeys( 21 | parameters.getMandatoryKeys(), 22 | Collections.emptySet(), 23 | "for " + path); 24 | return LambdaUtils.eval(new CompletableFuture<>(), () -> 25 | open(path, parameters.getBufferSize(), parameters.getStatus())); 26 | } 27 | } 28 | -------------------------------------------------------------------------------- /src/main/java/org/apache/hadoop/fs/CosNPartialListing.java: -------------------------------------------------------------------------------- 1 | package org.apache.hadoop.fs; 2 | 3 | import org.apache.hadoop.classification.InterfaceAudience; 4 | import org.apache.hadoop.classification.InterfaceStability; 5 | 6 | /** 7 | *
* <p> 8 | * Holds information on a directory listing for a 9 | * {@link NativeFileSystemStore}. 10 | * This includes the {@link FileMetadata files} and directories 11 | * (their names) contained in a directory. 12 | * </p> 13 | * <p> 14 | * This listing may be returned in chunks, so a priorLastKey 15 | * is provided so that the next chunk may be requested. 16 | * </p>
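* (Callers typically loop, passing getPriorLastKey() back into the next list() call, until the listing is exhausted.)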
17 | * 18 | * @see NativeFileSystemStore#list(String, int) 19 | */ 20 | @InterfaceAudience.Private 21 | @InterfaceStability.Unstable 22 | public class CosNPartialListing { 23 | 24 | private final String priorLastKey; 25 | private final FileMetadata[] files; 26 | private final FileMetadata[] commonPrefixes; 27 | 28 | public CosNPartialListing(String priorLastKey, FileMetadata[] files, 29 | FileMetadata[] commonPrefixes) { 30 | this.priorLastKey = priorLastKey; 31 | this.files = files; 32 | this.commonPrefixes = commonPrefixes; 33 | } 34 | 35 | public FileMetadata[] getFiles() { 36 | return files; 37 | } 38 | 39 | public FileMetadata[] getCommonPrefixes() { 40 | return commonPrefixes; 41 | } 42 | 43 | public String getPriorLastKey() { 44 | return priorLastKey; 45 | } 46 | 47 | } 48 | -------------------------------------------------------------------------------- /src/main/java/org/apache/hadoop/fs/CosNResultInfo.java: -------------------------------------------------------------------------------- 1 | package org.apache.hadoop.fs; 2 | 3 | /** 4 | * Used to record the cos client query result 5 | */ 6 | public class CosNResultInfo { 7 | private String requestID; 8 | private boolean isKeySameToPrefix; 9 | 10 | CosNResultInfo() { 11 | requestID = ""; 12 | isKeySameToPrefix = false; 13 | } 14 | 15 | public void setRequestID(String requestID) { 16 | this.requestID = requestID; 17 | } 18 | public String getRequestID() { 19 | return this.requestID; 20 | } 21 | 22 | public boolean isKeySameToPrefix() { 23 | return this.isKeySameToPrefix; 24 | } 25 | 26 | public void setKeySameToPrefix(boolean isKeySameToPrefix) { 27 | this.isKeySameToPrefix = isKeySameToPrefix; 28 | } 29 | } 30 | -------------------------------------------------------------------------------- /src/main/java/org/apache/hadoop/fs/CosNSeekableFSDataOutputStream.java: -------------------------------------------------------------------------------- 1 | package org.apache.hadoop.fs; 2 | 3 | import com.google.common.base.Preconditions; 4 | import org.apache.hadoop.conf.Configuration; 5 | import org.apache.hadoop.fs.chdfs.PosixSeekable; 6 | import org.apache.hadoop.fs.cosn.Abortable; 7 | import org.apache.hadoop.fs.cosn.Constants; 8 | import org.apache.hadoop.fs.cosn.multipart.upload.MultipartManager; 9 | import org.slf4j.Logger; 10 | import org.slf4j.LoggerFactory; 11 | 12 | import java.io.IOException; 13 | import java.io.OutputStream; 14 | import java.util.concurrent.ExecutorService; 15 | 16 | /** 17 | * The POSIX seekable writing semantics. 18 | * Not belong to the Hadoop Compatible FileSystem semantic system. 
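* (It offers random-access writes through the seek() and ftruncate() methods below, which the append-only HCFS output-stream contract does not provide.)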
19 | */ 20 | public class CosNSeekableFSDataOutputStream extends FSDataOutputStream 21 | implements PosixSeekable, Abortable { 22 | private static final Logger LOG = LoggerFactory.getLogger(CosNSeekableFSDataOutputStream.class); 23 | private final SeekableOutputStream seekableOutputStream; 24 | 25 | public CosNSeekableFSDataOutputStream(SeekableOutputStream seekableOutputStream, 26 | FileSystem.Statistics stats) throws IOException { 27 | super(seekableOutputStream, stats); 28 | this.seekableOutputStream = seekableOutputStream; 29 | } 30 | 31 | @Override 32 | public synchronized int ftruncate(long length) throws IOException { 33 | try { 34 | return this.seekableOutputStream.ftruncate(length); 35 | } catch (IOException ioException) { 36 | LOG.error("Failed to truncate the outputStream to length [{}].", length); 37 | return -1; 38 | } 39 | } 40 | 41 | @Override 42 | public synchronized void seek(long pos) throws IOException { 43 | this.seekableOutputStream.seek(pos); 44 | } 45 | 46 | @Override 47 | public synchronized boolean seekToNewSource(long pos) throws IOException { 48 | return this.seekableOutputStream.seekToNewSource(pos); 49 | } 50 | 51 | @Override 52 | public synchronized void doAbort() { 53 | this.seekableOutputStream.doAbort(); 54 | } 55 | 56 | @Override 57 | public synchronized long getPos() { 58 | return this.seekableOutputStream.getPos(); 59 | } 60 | 61 | public static class SeekableOutputStream extends OutputStream implements PosixSeekable, Abortable { 62 | private final NativeFileSystemStore nativeStore; 63 | private final String cosKey; 64 | private final MultipartManager multipartManager; 65 | private long pos; 66 | private boolean dirty; 67 | private boolean committed; 68 | private boolean closed; 69 | 70 | public SeekableOutputStream(Configuration conf, NativeFileSystemStore nativeStore, 71 | String cosKey, ExecutorService executorService, ExecutorService copyExecutor) 72 | throws IOException { 73 | Preconditions.checkNotNull(conf, "hadoop configuration"); 74 | this.nativeStore = Preconditions.checkNotNull(nativeStore, "nativeStore"); 75 | this.cosKey = Preconditions.checkNotNull(cosKey, "cosKey"); 76 | 77 | // Choose the part size: take the configured partSize and clamp it to the min/max limits. 78 | long partSize = conf.getLong( 79 | CosNConfigKeys.COSN_UPLOAD_PART_SIZE_KEY, CosNConfigKeys.DEFAULT_UPLOAD_PART_SIZE); 80 | if (partSize < Constants.MIN_PART_SIZE) { 81 | LOG.warn("The minimum size of a single block is limited to " + 82 | "greater than or equal to {}.", Constants.MIN_PART_SIZE); 83 | } else if (partSize > Constants.MAX_PART_SIZE) { 84 | LOG.warn("The maximum size of a single block is limited to " + 85 | "smaller than or equal to {}.", Constants.MAX_PART_SIZE); 86 | partSize = Constants.MAX_PART_SIZE; 87 | } 88 | this.multipartManager = new MultipartManager( 89 | this.nativeStore, this.cosKey, partSize, executorService, copyExecutor); 90 | this.multipartManager.resumeForWrite(); 91 | // Position pos at the end of the existing data. 92 | this.pos = this.multipartManager.getCurrentSize(); 93 | this.dirty = false; 94 | this.committed = false; 95 | this.closed = false; 96 | } 97 | 98 | @Override 99 | public synchronized void write(int b) throws IOException { 100 | this.checkOpened(); 101 | byte[] singleBytes = new byte[1]; 102 | singleBytes[0] = (byte) b; 103 | this.write(singleBytes); 104 | } 105 | 106 | @Override 107 | public synchronized void write(byte[] b, int off, int len) throws IOException { 108 | this.checkOpened(); 109 | 110 | if (this.committed) { 111 | this.multipartManager.resumeForWrite(); 112 | this.committed = false; 113 | } 114 | 115 | // Compute the part number and the intra-part offset to write to from the current pos. 116 | while (len > 0) { 117 | int partIndex = (int) (this.pos / this.multipartManager.getPartSize()); 118 | int partOffset = (int) (this.pos % this.multipartManager.getPartSize()); 119 | 120 | MultipartManager.LocalPart part = this.multipartManager.getPart(partIndex + 1); 121 | part.getBuffer().flipWrite(); 122 | part.getBuffer().position(partOffset); 123 | int writeBytes = Math.min(part.getBuffer().remaining(), len); 124 | part.getBuffer().put(b, off, writeBytes); 125 | part.setDirty(true); 126 | len -= writeBytes; 127 | off += writeBytes; 128 | this.pos += writeBytes; 129 | this.dirty = true; 130 | } 131 | } 132 | 133 | @Override 134 | public synchronized int ftruncate(long newLen) throws IOException { 135 | this.checkOpened(); 136 | Preconditions.checkArgument(newLen >= 0 && newLen < this.multipartManager.getMaxFileSizeLimit(), 137 | String.format("The new length must be a non-negative integer and less than the max file limit [%d].", 138 | this.multipartManager.getMaxFileSizeLimit())); 139 | LOG.debug("Call the ftruncate({}) on the cos key [{}].", newLen, this.cosKey); 140 | // First flush so that the current content is committed. 141 | this.flush(); 142 | // Then resize to the requested length. 143 | this.multipartManager.splitParts(newLen); 144 | this.dirty = true; 145 | this.committed = false; 146 | return 0; 147 | } 148 | 149 | @Override 150 | public synchronized void seek(long pos) throws IOException { 151 | this.checkOpened(); 152 | Preconditions.checkArgument(pos >= 0, 153 | "The new position must be a non-negative integer."); 154 | Preconditions.checkArgument(pos < this.multipartManager.getMaxFileSizeLimit(), 155 | String.format("The seek position [%d] exceeds the maximum file limit [%d].", 156 | pos, this.multipartManager.getMaxFileSizeLimit())); 157 | LOG.debug("Call the output seek({}) on the cos key [{}].", pos, this.cosKey); 158 | // Seeking beyond the current file length is allowed. 159 | this.pos = pos; 160 | } 161 | 162 | @Override 163 | public synchronized long getPos() { 164 | return this.pos; 165 | } 166 | 167 | @Override 168 | public synchronized boolean seekToNewSource(long l) throws IOException { 169 | this.checkOpened(); 170 | return false; 171 | } 172 | 173 | @Override 174 | public synchronized void doAbort() { 175 | if (this.closed) { 176 | // Already closed; no extra doAbort is needed. 177 | return; 178 | } 179 | 180 | LOG.info("Aborting the output stream [{}].", this); 181 | try { 182 | if (null != this.multipartManager) { 183 | this.multipartManager.abort(); 184 | } 185 | } finally { 186 | this.closed = true; 187 | } 188 | } 189 | 190 | @Override 191 | public synchronized void flush() throws IOException { 192 | this.checkOpened(); 193 | if (!this.dirty) { 194 | // Nothing has changed since the last flush. 195 | return; 196 | } 197 | this.commit(); 198 | this.dirty = false; 199 | } 200 | 201 | @Override 202 | public synchronized void close() throws IOException { 203 | if (this.closed) { 204 | return; 205 | } 206 | 207 | LOG.info("Closing the outputStream [{}].", this); 208 | try { 209 | this.flush(); 210 | this.multipartManager.close(); 211 | } finally { 212 | this.closed = true; 213 | } 214 | } 215 | 216 | private void commit() throws IOException { 217 | if (this.committed) { 218 | // Already committed. 219 | return; 220 | } 221 | // Commit all locally held parts so that they become visible remotely. 222 | this.multipartManager.commitLocalToRemote(); 223 | this.committed = true; 224 | } 225 | 226 | private void checkOpened() throws IOException { 227 | if (this.closed) { 228 | throw new IOException(FSExceptionMessages.STREAM_IS_CLOSED); 229 | } 230 | } 231 | } 232 | } 233 | 
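A minimal, self-contained sketch of the position-to-part arithmetic that SeekableOutputStream.write() above relies on; the part size, write position, and byte count here are assumed example values, and MultipartManager.getPart() is called with partIndex + 1 because COS part numbers are 1-based:

public class SeekablePartMathDemo {
    public static void main(String[] args) {
        final long partSize = 8L * 1024 * 1024;      // assumed example part size (8 MB)
        long pos = 8L * 1024 * 1024 - 4096;          // assumed write position, 4 KB before a part boundary
        int remaining = 10 * 1024;                   // assumed number of bytes left to write
        while (remaining > 0) {
            int partIndex = (int) (pos / partSize);  // 0-based index of the part containing pos
            int partOffset = (int) (pos % partSize); // offset inside that part
            // Write only up to the end of the current part, then roll over to the next one,
            // mirroring the Math.min(part.getBuffer().remaining(), len) step in write().
            int writable = (int) Math.min(partSize - partOffset, remaining);
            System.out.printf("write %d bytes to part %d at offset %d%n",
                    writable, partIndex + 1, partOffset);
            pos += writable;
            remaining -= writable;
        }
        // Prints: write 4096 bytes to part 1 at offset 8384512
        //         write 6144 bytes to part 2 at offset 0
    }
}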
-------------------------------------------------------------------------------- /src/main/java/org/apache/hadoop/fs/CosNSymlinkMetadata.java: -------------------------------------------------------------------------------- 1 | package org.apache.hadoop.fs; 2 | 3 | public class CosNSymlinkMetadata extends FileMetadata { 4 | private String target; 5 | 6 | public CosNSymlinkMetadata(String key, long length, long lastModified, boolean isFile, String eTag, 7 | String crc64ecm, String crc32cm, 8 | String versionId, String storageClass, 9 | String target) { 10 | super(key, length, lastModified, isFile, eTag, crc64ecm, crc32cm, versionId, storageClass); 11 | this.target = target; 12 | } 13 | 14 | public String getTarget() { 15 | return target; 16 | } 17 | 18 | public void setTarget(String target) { 19 | this.target = target; 20 | } 21 | 22 | @Override 23 | public boolean isFile() { 24 | return false; 25 | } 26 | } 27 | -------------------------------------------------------------------------------- /src/main/java/org/apache/hadoop/fs/CosNXAttr.java: -------------------------------------------------------------------------------- 1 | package org.apache.hadoop.fs; 2 | 3 | import java.io.Serializable; 4 | 5 | public class CosNXAttr implements Serializable { 6 | private String name; 7 | private String value; 8 | 9 | public String getName() { 10 | return name; 11 | } 12 | 13 | public void setName(String name) { 14 | this.name = name; 15 | } 16 | 17 | public String getValue() { 18 | return value; 19 | } 20 | 21 | public void setValue(String value) { 22 | this.value = value; 23 | } 24 | } -------------------------------------------------------------------------------- /src/main/java/org/apache/hadoop/fs/FileMetadata.java: -------------------------------------------------------------------------------- 1 | package org.apache.hadoop.fs; 2 | 3 | import org.apache.hadoop.classification.InterfaceAudience; 4 | import org.apache.hadoop.classification.InterfaceStability; 5 | 6 | import java.util.Map; 7 | 8 | /** 9 | *

10 | * Holds basic metadata for a file stored in a {@link NativeFileSystemStore}. 11 | *

12 | */ 13 | @InterfaceAudience.Private 14 | @InterfaceStability.Unstable 15 | public class FileMetadata { 16 | private final String key; 17 | private long length; 18 | private final long lastModified; 19 | private final boolean isFile; 20 | private final String ETag; 21 | private final String crc64ecm; 22 | private final String crc32cm; 23 | private final String versionId; 24 | private final String storageClass; 25 | private final Map userAttributes; 26 | 27 | static FileMetadata fromCosNFileStatus(CosNFileStatus fileStatus) { 28 | if (null == fileStatus) { 29 | return null; 30 | } 31 | String key = CosNUtils.pathToKey(fileStatus.getPath()); 32 | return new FileMetadata(key, fileStatus.getLen(), fileStatus.getModificationTime(), 33 | fileStatus.isFile(), fileStatus.getETag(), fileStatus.getCrc64ecma(), fileStatus.getCrc32cm(), 34 | fileStatus.getVersionId(), fileStatus.getStorageClass(), fileStatus.getUserAttributes()); 35 | } 36 | 37 | public FileMetadata(String key, long length, long lastModified) { 38 | this(key, length, lastModified, true); 39 | } 40 | 41 | public FileMetadata(String key, long length, long lastModified, 42 | boolean isFile) { 43 | this(key, length, lastModified, isFile, null); 44 | } 45 | 46 | public FileMetadata(String key, long length, long lastModified, boolean isFile, String ETag) { 47 | this(key, length, lastModified, isFile, ETag, null, null, null); 48 | } 49 | 50 | public FileMetadata(String key, long length, long lastModified, boolean isFile, String eTag, String crc64ecm, 51 | String crc32cm, String versionId) { 52 | this(key, length, lastModified, isFile, eTag, crc64ecm, crc32cm, versionId, null, null); 53 | } 54 | 55 | public FileMetadata(String key, long length, long lastModified, boolean isFile, String eTag, String crc64ecm, 56 | String crc32cm, String versionId, String storageClass) { 57 | this(key, length, lastModified, isFile, eTag, crc64ecm, crc32cm, versionId, storageClass, null); 58 | } 59 | 60 | public FileMetadata(String key, long length, long lastModified, boolean isFile, String eTag, String crc64ecm, 61 | String crc32cm, String versionId, String storageClass, Map userAttributes) { 62 | this.key = key; 63 | this.length = length; 64 | this.lastModified = lastModified; 65 | this.isFile = isFile; 66 | this.ETag = eTag; 67 | this.crc64ecm = crc64ecm; 68 | this.crc32cm = crc32cm; 69 | this.versionId = versionId; 70 | this.storageClass = storageClass; 71 | this.userAttributes = userAttributes; 72 | } 73 | 74 | public String getKey() { 75 | return key; 76 | } 77 | 78 | public long getLength() { 79 | return length; 80 | } 81 | 82 | public void setLength(long length) { 83 | this.length = length; 84 | } 85 | 86 | public long getLastModified() { 87 | return lastModified; 88 | } 89 | 90 | public String getETag() { 91 | return ETag; 92 | } 93 | 94 | public String getVersionId() { 95 | return versionId; 96 | } 97 | 98 | public String getCrc64ecm() { 99 | return crc64ecm; 100 | } 101 | 102 | public String getCrc32cm() { 103 | return crc32cm; 104 | } 105 | 106 | public String getStorageClass() { 107 | return storageClass; 108 | } 109 | 110 | public Map getUserAttributes() { 111 | return userAttributes; 112 | } 113 | 114 | @Override 115 | public String toString() { 116 | return "FileMetadata{" + 117 | "key='" + key + '\'' + 118 | ", length=" + length + 119 | ", lastModified=" + lastModified + 120 | ", isFile=" + isFile + 121 | ", ETag='" + ETag + '\'' + 122 | ", crc64ecm='" + crc64ecm + '\'' + 123 | ", crc32cm='" + crc32cm + '\'' + 124 | ", versionId='" + 
versionId + '\'' + 125 | ", storageClass='" + storageClass + '\'' + 126 | ", userAttributes=" + userAttributes + 127 | '}'; 128 | } 129 | 130 | public boolean isFile() { 131 | return isFile; 132 | } 133 | } 134 | -------------------------------------------------------------------------------- /src/main/java/org/apache/hadoop/fs/NativeFileSystemStore.java: -------------------------------------------------------------------------------- 1 | package org.apache.hadoop.fs; 2 | 3 | import com.qcloud.cos.model.CompleteMultipartUploadResult; 4 | import com.qcloud.cos.model.HeadBucketResult; 5 | import com.qcloud.cos.model.PartETag; 6 | import com.qcloud.cos.model.PutObjectResult; 7 | import org.apache.hadoop.classification.InterfaceAudience; 8 | import org.apache.hadoop.classification.InterfaceStability; 9 | import org.apache.hadoop.conf.Configuration; 10 | import org.apache.hadoop.fs.cosn.CosNPartListing; 11 | 12 | import java.io.File; 13 | import java.io.IOException; 14 | import java.io.InputStream; 15 | import java.net.URI; 16 | import java.util.List; 17 | 18 | /** 19 | *

20 | * An abstraction for a key-based {@link File} store. 21 | *

22 | */ 23 | @InterfaceAudience.Private 24 | @InterfaceStability.Stable 25 | public interface NativeFileSystemStore { 26 | 27 | void initialize(URI uri, Configuration conf) throws IOException; 28 | 29 | void initialize(URI uri, Configuration conf, RangerCredentialsClient rangerClient) throws IOException; 30 | 31 | HeadBucketResult headBucket(String bucketName) throws IOException; 32 | 33 | PutObjectResult storeFile(String key, File file, byte[] md5Hash) throws IOException; 34 | 35 | PutObjectResult storeFile(String key, InputStream inputStream, byte[] md5Hash, 36 | long contentLength) throws IOException; 37 | 38 | void storeEmptyFile(String key) throws IOException; 39 | 40 | // Note: an MPU chunk error may trigger a double head check, 41 | // which means the returned CompleteMultipartUploadResult can sometimes be null. 42 | CompleteMultipartUploadResult completeMultipartUpload(String key, 43 | String uploadId, 44 | List<PartETag> partETagList) throws IOException; 45 | 46 | void abortMultipartUpload(String key, String uploadId) throws IOException; 47 | 48 | String getUploadId(String key) throws IOException; 49 | 50 | PartETag uploadPart(File file, String key, String uploadId, 51 | int partNum, byte[] md5hash, Boolean isLastPart) throws IOException; 52 | 53 | PartETag uploadPart(InputStream inputStream, String key, String uploadId, 54 | int partNum, long partSize, byte[] md5hash, Boolean isLastPart) throws IOException; 55 | 56 | PartETag uploadPart(File file, String key, String uploadId, 57 | int partNum, byte[] md5hash) throws IOException; 58 | 59 | PartETag uploadPart(InputStream inputStream, String key, String uploadId, 60 | int partNum, long partSize, byte[] md5hash) throws IOException; 61 | 62 | PartETag uploadPartCopy(String uploadId, String srcKey, String destKey, int partNum, 63 | long firstByte, long lastByte) throws IOException; 64 | 65 | FileMetadata retrieveMetadata(String key) throws IOException; 66 | 67 | FileMetadata retrieveMetadata(String key, CosNResultInfo info) throws IOException; 68 | 69 | FileMetadata queryObjectMetadata(String key) throws IOException; 70 | 71 | FileMetadata queryObjectMetadata(String key, CosNResultInfo info) throws IOException; 72 | 73 | CosNSymlinkMetadata retrieveSymlinkMetadata(String symlink) throws IOException; 74 | 75 | CosNSymlinkMetadata retrieveSymlinkMetadata(String symlink, CosNResultInfo info) throws IOException; 76 | 77 | byte[] retrieveAttribute(String key, String attribute) throws IOException; 78 | 79 | void storeDirAttribute(String key, String attribute, byte[] value) throws IOException; 80 | 81 | void storeFileAttribute(String key, String attribute, byte[] value) throws IOException; 82 | 83 | void removeDirAttribute(String key, String attribute) throws IOException; 84 | 85 | void removeFileAttribute(String key, String attribute) throws IOException; 86 | 87 | InputStream retrieve(String key) throws IOException; 88 | 89 | InputStream retrieve(String key, FileMetadata fileMetadata) throws IOException; 90 | 91 | @Deprecated 92 | InputStream retrieve(String key, long byteRangeStart) throws IOException; 93 | 94 | InputStream retrieveBlock(String key, long byteRangeStart, 95 | long byteRangeEnd) throws IOException; 96 | 97 | InputStream retrieveBlock(String key, FileMetadata fileMetadata, long byteRangeStart, long byteRangeEnd) throws IOException; 98 | 99 | @Deprecated 100 | boolean retrieveBlock(String key, long byteRangeStart, long blockSize, 101 | String localBlockPath) throws IOException; 102 | 103 | long getFileLength(String key) throws IOException; 
104 | 105 | void ModifyDataSize(String key, long fileSize) throws IOException; 106 | 107 | CosNPartialListing list(String prefix, int maxListingLength) throws IOException; 108 | 109 | CosNPartialListing list(String prefix, int maxListingLength, CosNResultInfo info) throws IOException; 110 | 111 | CosNPartialListing list(String prefix, int maxListingLength, 112 | String priorLastKey, boolean recursive) 113 | throws IOException; 114 | 115 | CosNPartialListing list(String prefix, int maxListingLength, 116 | String priorLastKey, boolean recursive, CosNResultInfo info) 117 | throws IOException; 118 | 119 | void delete(String key) throws IOException; 120 | 121 | void deleteRecursive(String key) throws IOException; 122 | 123 | void copy(String srcKey, String dstKey) throws IOException; 124 | 125 | void copy(String srcKey, FileMetadata srcFileMetadata, String dstKey) throws IOException; 126 | 127 | void rename(String srcKey, String dstKey) throws IOException; 128 | 129 | void createSymlink(String symLink, String targetKey) throws IOException; 130 | 131 | String getSymlink(String symlink) throws IOException; 132 | 133 | CosNPartListing listParts(String key, String uploadId) throws IOException; 134 | 135 | /** 136 | * Delete all keys with the given prefix. Used for testing. 137 | * 138 | * @throws IOException 139 | */ 140 | void purge(String prefix) throws IOException; 141 | 142 | /** 143 | * Diagnostic method to dump state to the console. 144 | * 145 | * @throws IOException 146 | */ 147 | void dump() throws IOException; 148 | 149 | /** 150 | * Used by callers to steer the internal processing. 151 | * If you use the CosNFileSystem gateway mode, 152 | * the native store must be set to the POSIX processing path. 153 | */ 154 | void setPosixBucket(boolean isPosixBucket); 155 | 156 | boolean isPosixBucket(); 157 | 158 | RangerCredentialsClient getRangerCredentialsClient(); 159 | 160 | void close(); 161 | } 162 | -------------------------------------------------------------------------------- /src/main/java/org/apache/hadoop/fs/auth/AbstractCOSCredentialProvider.java: -------------------------------------------------------------------------------- 1 | package org.apache.hadoop.fs.auth; 2 | 3 | import com.qcloud.cos.auth.COSCredentialsProvider; 4 | import org.apache.hadoop.conf.Configuration; 5 | 6 | import javax.annotation.Nullable; 7 | import java.net.URI; 8 | 9 | /** 10 | * The base class for COS credential providers which take a URI or 11 | * configuration in their constructor. 
12 | */ 13 | public abstract class AbstractCOSCredentialProvider 14 | implements COSCredentialsProvider { 15 | private final URI uri; 16 | private final Configuration conf; 17 | 18 | public AbstractCOSCredentialProvider(@Nullable URI uri, 19 | Configuration conf) { 20 | this.uri = uri; 21 | this.conf = conf; 22 | } 23 | 24 | public URI getUri() { 25 | return uri; 26 | } 27 | 28 | public Configuration getConf() { 29 | return conf; 30 | } 31 | } 32 | -------------------------------------------------------------------------------- /src/main/java/org/apache/hadoop/fs/auth/COSCredentialProviderList.java: -------------------------------------------------------------------------------- 1 | package org.apache.hadoop.fs.auth; 2 | 3 | import com.google.common.base.Preconditions; 4 | import com.qcloud.cos.auth.AnonymousCOSCredentials; 5 | import com.qcloud.cos.auth.COSCredentials; 6 | import com.qcloud.cos.auth.COSCredentialsProvider; 7 | import com.qcloud.cos.utils.StringUtils; 8 | import org.slf4j.Logger; 9 | import org.slf4j.LoggerFactory; 10 | 11 | import java.io.Closeable; 12 | import java.util.ArrayList; 13 | import java.util.Collection; 14 | import java.util.List; 15 | import java.util.concurrent.atomic.AtomicBoolean; 16 | import java.util.concurrent.atomic.AtomicInteger; 17 | 18 | /** 19 | * A list of COS credentials providers. 20 | */ 21 | public class COSCredentialProviderList implements 22 | COSCredentialsProvider, AutoCloseable { 23 | private static final Logger LOG = 24 | LoggerFactory.getLogger(COSCredentialProviderList.class); 25 | 26 | private static final String NO_COS_CREDENTIAL_PROVIDERS = 27 | "No COS Credential Providers"; 28 | private static final String CREDENTIALS_REQUESTED_WHEN_CLOSED = 29 | "Credentials requested after provider list was closed"; 30 | 31 | private final List<COSCredentialsProvider> providers = 32 | new ArrayList<>(1); 33 | private boolean reuseLastProvider = true; 34 | private COSCredentialsProvider lastProvider; 35 | 36 | private final AtomicInteger refCount = new AtomicInteger(1); 37 | private final AtomicBoolean isClosed = new AtomicBoolean(false); 38 | 39 | public COSCredentialProviderList() { 40 | } 41 | 42 | public COSCredentialProviderList( 43 | Collection<COSCredentialsProvider> providers) { 44 | this.providers.addAll(providers); 45 | } 46 | 47 | public void add(COSCredentialsProvider provider) { 48 | this.providers.add(provider); 49 | } 50 | 51 | public int getRefCount() { 52 | return this.refCount.get(); 53 | } 54 | 55 | public void checkNotEmpty() { 56 | if (this.providers.isEmpty()) { 57 | throw new NoAuthWithCOSException(NO_COS_CREDENTIAL_PROVIDERS); 58 | } 59 | } 60 | 61 | public COSCredentialProviderList share() { 62 | Preconditions.checkState(!this.closed(), "Provider list is closed"); 63 | this.refCount.incrementAndGet(); 64 | return this; 65 | } 66 | 67 | public boolean closed() { 68 | return this.isClosed.get(); 69 | } 70 | 71 | @Override 72 | public COSCredentials getCredentials() { 73 | if (this.closed()) { 74 | throw new NoAuthWithCOSException(CREDENTIALS_REQUESTED_WHEN_CLOSED); 75 | } 76 | 77 | this.checkNotEmpty(); 78 | 79 | if (this.reuseLastProvider && this.lastProvider != null) { 80 | return this.lastProvider.getCredentials(); 81 | } 82 | 83 | for (COSCredentialsProvider provider : this.providers) { 84 | COSCredentials credentials = provider.getCredentials(); 85 | if ((null != credentials 86 | && !StringUtils.isNullOrEmpty(credentials.getCOSAccessKeyId()) 87 | && !StringUtils.isNullOrEmpty(credentials.getCOSSecretKey())) 88 | || credentials instanceof AnonymousCOSCredentials) { 89 | 
this.lastProvider = provider; 90 | return credentials; 91 | } 92 | } 93 | 94 | throw new NoAuthWithCOSException( 95 | "No COS Credentials provided by " + this.providers.toString()); 96 | } 97 | 98 | @Override 99 | public void refresh() { 100 | if (this.closed()) { 101 | return; 102 | } 103 | 104 | for (COSCredentialsProvider cosCredentialsProvider : this.providers) { 105 | cosCredentialsProvider.refresh(); 106 | } 107 | } 108 | 109 | @Override 110 | public void close() throws Exception { 111 | if (this.closed()) { 112 | return; 113 | } 114 | 115 | int remainder = this.refCount.decrementAndGet(); 116 | if (remainder != 0) { 117 | return; 118 | } 119 | this.isClosed.set(true); 120 | 121 | for (COSCredentialsProvider provider : this.providers) { 122 | if (provider instanceof Closeable) { 123 | ((Closeable) provider).close(); 124 | } 125 | } 126 | } 127 | 128 | } 129 | -------------------------------------------------------------------------------- /src/main/java/org/apache/hadoop/fs/auth/CPMInstanceCredentialsProvider.java: -------------------------------------------------------------------------------- 1 | package org.apache.hadoop.fs.auth; 2 | 3 | import com.qcloud.cos.auth.*; 4 | import com.qcloud.cos.exception.CosClientException; 5 | import org.apache.hadoop.conf.Configuration; 6 | import org.apache.hadoop.fs.CosNConfigKeys; 7 | import org.slf4j.Logger; 8 | import org.slf4j.LoggerFactory; 9 | 10 | import javax.annotation.Nullable; 11 | import java.net.URI; 12 | 13 | /** 14 | * Provide the credentials when the CosN connector is instantiated on Tencent Cloud Physical Machine (CPM) 15 | */ 16 | public class CPMInstanceCredentialsProvider extends AbstractCOSCredentialProvider implements COSCredentialsProvider { 17 | private static final Logger LOG = LoggerFactory.getLogger(CPMInstanceCredentialsProvider.class); 18 | 19 | private String appId; 20 | private final COSCredentialsProvider cosCredentialsProvider; 21 | 22 | public CPMInstanceCredentialsProvider(@Nullable URI uri, 23 | Configuration conf) { 24 | super(uri, conf); 25 | if (null != conf) { 26 | this.appId = conf.get(CosNConfigKeys.COSN_APPID_KEY); 27 | } 28 | InstanceMetadataCredentialsEndpointProvider endpointProvider = 29 | new InstanceMetadataCredentialsEndpointProvider( 30 | InstanceMetadataCredentialsEndpointProvider.Instance.CPM); 31 | InstanceCredentialsFetcher instanceCredentialsFetcher = new InstanceCredentialsFetcher(endpointProvider); 32 | this.cosCredentialsProvider = new InstanceCredentialsProvider(instanceCredentialsFetcher); 33 | } 34 | 35 | @Override 36 | public COSCredentials getCredentials() { 37 | try { 38 | COSCredentials cosCredentials = this.cosCredentialsProvider.getCredentials(); 39 | // Compatible appId 40 | if (null != this.appId) { 41 | if (cosCredentials instanceof InstanceProfileCredentials) { 42 | return new InstanceProfileCredentials(this.appId, cosCredentials.getCOSAccessKeyId(), 43 | cosCredentials.getCOSSecretKey(), 44 | ((InstanceProfileCredentials) cosCredentials).getSessionToken(), 45 | ((InstanceProfileCredentials) cosCredentials).getExpiredTime()); 46 | } 47 | } 48 | return cosCredentials; 49 | } catch (CosClientException e) { 50 | LOG.error("Failed to obtain the credentials from CPMInstanceCredentialsProvider.", e); 51 | } 52 | 53 | return null; 54 | } 55 | 56 | @Override 57 | public void refresh() { 58 | this.cosCredentialsProvider.refresh(); 59 | } 60 | } 61 | --------------------------------------------------------------------------------
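
Editor's note: the instance providers in this package all follow the same pattern — fetch temporary credentials from an instance-metadata endpoint and, when an appId is configured (CosNConfigKeys.COSN_APPID_KEY), re-wrap them with it. A hedged sketch of composing them into the COSCredentialProviderList defined above; the chain order here is illustrative, and in the connector the list is normally assembled from the fs.cosn.credentials.provider setting rather than by hand:

```java
import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import com.qcloud.cos.auth.COSCredentials;
import org.apache.hadoop.fs.auth.COSCredentialProviderList;
import org.apache.hadoop.fs.auth.CPMInstanceCredentialsProvider;
import org.apache.hadoop.fs.auth.EnvironmentVariableCredentialProvider;
import org.apache.hadoop.fs.auth.SimpleCredentialProvider;

public class ProviderChainSketch {
    public static COSCredentials resolve(URI uri, Configuration conf) throws Exception {
        COSCredentialProviderList chain = new COSCredentialProviderList();
        chain.add(new SimpleCredentialProvider(uri, conf));              // static keys first
        chain.add(new EnvironmentVariableCredentialProvider(uri, conf)); // then the environment
        chain.add(new CPMInstanceCredentialsProvider(uri, conf));        // instance metadata last
        chain.checkNotEmpty();
        try {
            // The first provider returning a usable id/key pair (or anonymous credentials) wins.
            return chain.getCredentials();
        } finally {
            chain.close(); // drops this reference; closeable providers are closed at refcount zero
        }
    }
}
```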
/src/main/java/org/apache/hadoop/fs/auth/CVMInstanceCredentialsProvider.java: -------------------------------------------------------------------------------- 1 | package org.apache.hadoop.fs.auth; 2 | 3 | import com.qcloud.cos.auth.*; 4 | import com.qcloud.cos.exception.CosClientException; 5 | import org.apache.hadoop.conf.Configuration; 6 | import org.apache.hadoop.fs.CosNConfigKeys; 7 | import org.slf4j.Logger; 8 | import org.slf4j.LoggerFactory; 9 | 10 | import javax.annotation.Nullable; 11 | import java.net.URI; 12 | 13 | /** 14 | * Provide the credentials when the CosN connector is instantiated on Tencent Cloud Virtual Machine(CVM) 15 | */ 16 | public class CVMInstanceCredentialsProvider extends AbstractCOSCredentialProvider implements COSCredentialsProvider { 17 | private static final Logger LOG = LoggerFactory.getLogger(CVMInstanceCredentialsProvider.class); 18 | 19 | private String appId; 20 | private final COSCredentialsProvider cosCredentialsProvider; 21 | 22 | public CVMInstanceCredentialsProvider(@Nullable URI uri, Configuration conf) { 23 | super(uri, conf); 24 | if (null != conf) { 25 | this.appId = conf.get(CosNConfigKeys.COSN_APPID_KEY); 26 | } 27 | InstanceMetadataCredentialsEndpointProvider endpointProvider = 28 | new InstanceMetadataCredentialsEndpointProvider( 29 | InstanceMetadataCredentialsEndpointProvider.Instance.CVM); 30 | InstanceCredentialsFetcher instanceCredentialsFetcher = new InstanceCredentialsFetcher(endpointProvider); 31 | this.cosCredentialsProvider = new InstanceCredentialsProvider(instanceCredentialsFetcher); 32 | } 33 | 34 | @Override 35 | public COSCredentials getCredentials() { 36 | try { 37 | COSCredentials cosCredentials = this.cosCredentialsProvider.getCredentials(); 38 | // Compatible appId 39 | if (null != this.appId) { 40 | if (cosCredentials instanceof InstanceProfileCredentials) { 41 | return new InstanceProfileCredentials(this.appId, cosCredentials.getCOSAccessKeyId(), 42 | cosCredentials.getCOSSecretKey(), 43 | ((InstanceProfileCredentials) cosCredentials).getSessionToken(), 44 | ((InstanceProfileCredentials) cosCredentials).getExpiredTime()); 45 | } 46 | } 47 | return cosCredentials; 48 | } catch (CosClientException e) { 49 | LOG.error("Failed to obtain the credentials from CVMInstanceCredentialsProvider.", e); 50 | } 51 | 52 | return null; 53 | } 54 | 55 | @Override 56 | public void refresh() { 57 | this.cosCredentialsProvider.refresh(); 58 | } 59 | } 60 | -------------------------------------------------------------------------------- /src/main/java/org/apache/hadoop/fs/auth/CustomDefinedCredentialsProvider.java: -------------------------------------------------------------------------------- 1 | package org.apache.hadoop.fs.auth; 2 | 3 | import com.qcloud.cos.auth.COSCredentials; 4 | import com.qcloud.cos.auth.COSCredentialsProvider; 5 | import com.qcloud.cos.auth.InstanceCredentialsFetcher; 6 | import com.qcloud.cos.auth.InstanceCredentialsProvider; 7 | import com.qcloud.cos.auth.InstanceMetadataCredentialsEndpointProvider; 8 | import com.qcloud.cos.auth.InstanceProfileCredentials; 9 | import com.qcloud.cos.exception.CosClientException; 10 | import org.apache.hadoop.conf.Configuration; 11 | import org.apache.hadoop.fs.CosNConfigKeys; 12 | import org.slf4j.Logger; 13 | import org.slf4j.LoggerFactory; 14 | 15 | import java.net.URI; 16 | import java.net.URISyntaxException; 17 | 18 | import javax.annotation.Nullable; 19 | 20 | /** 21 | * Fetch credential from a specific url. 22 | *

23 | * The url response should look like: 24 | * { 25 | * "TmpSecretId": "AKIDxxxxxxxxxxxxxxxxxxxx", 26 | * "TmpSecretKey": "xxxxxxxxxxexxxxxxxxxgA=", 27 | * "ExpiredTime": 1615590047, 28 | * "Expiration": "2021-03-12T23:00:47Z", 29 | * "Token": "xxxxxxxxxxx", 30 | * "Code": "Success" 31 | * } 32 | */ 33 | public class CustomDefinedCredentialsProvider extends AbstractCOSCredentialProvider 34 | implements COSCredentialsProvider { 35 | 36 | private static final Logger LOG = LoggerFactory.getLogger(CustomDefinedCredentialsProvider.class); 37 | 38 | private final String appId; 39 | private final COSCredentialsProvider cosCredentialsProvider; 40 | 41 | public CustomDefinedCredentialsProvider(@Nullable URI uri, Configuration conf) { 42 | super(uri, conf); 43 | if (conf == null) { 44 | throw new IllegalArgumentException("Configuration is null. Please check the core-site.xml."); 45 | } 46 | this.appId = conf.get(CosNConfigKeys.COSN_APPID_KEY); 47 | final String providerUrl = conf.get(CosNConfigKeys.COS_CUSTOM_CREDENTIAL_PROVIDER_URL); 48 | if (providerUrl == null) { 49 | throw new IllegalArgumentException( 50 | "fs.cosn.remote-credential-provider.url should not be null."); 51 | } 52 | InstanceMetadataCredentialsEndpointProvider endpointProvider = 53 | new InstanceMetadataCredentialsEndpointProvider(null) { 54 | @Override 55 | public URI getCredentialsEndpoint() throws URISyntaxException { 56 | return new URI(providerUrl); 57 | } 58 | }; 59 | InstanceCredentialsFetcher instanceCredentialsFetcher = 60 | new InstanceCredentialsFetcher(endpointProvider); 61 | this.cosCredentialsProvider = new InstanceCredentialsProvider(instanceCredentialsFetcher); 62 | // try to fetch credentials and parse. 63 | getCredentials(); 64 | } 65 | 66 | @Override 67 | public COSCredentials getCredentials() { 68 | try { 69 | COSCredentials cosCredentials = this.cosCredentialsProvider.getCredentials(); 70 | // Compatible appId 71 | if (null != this.appId) { 72 | if (cosCredentials instanceof InstanceProfileCredentials) { 73 | return new InstanceProfileCredentials(this.appId, cosCredentials.getCOSAccessKeyId(), 74 | cosCredentials.getCOSSecretKey(), 75 | ((InstanceProfileCredentials) cosCredentials).getSessionToken(), 76 | ((InstanceProfileCredentials) cosCredentials).getExpiredTime()); 77 | } 78 | } 79 | return cosCredentials; 80 | } catch (CosClientException e) { 81 | LOG.error("Failed to obtain the credentials from CustomDefinedCredentialsProvider.", e); 82 | } catch (Exception e) { 83 | LOG.error("getCredentials failed", e); 84 | } 85 | 86 | return null; 87 | } 88 | 89 | @Override 90 | public void refresh() { 91 | this.cosCredentialsProvider.refresh(); 92 | } 93 | 94 | } 95 | -------------------------------------------------------------------------------- /src/main/java/org/apache/hadoop/fs/auth/DLFInstanceCredentialsProvider.java: -------------------------------------------------------------------------------- 1 | package org.apache.hadoop.fs.auth; 2 | 3 | import com.google.common.collect.ImmutableMap; 4 | import com.qcloud.cos.auth.COSCredentials; 5 | import com.qcloud.cos.auth.COSCredentialsProvider; 6 | import com.qcloud.cos.auth.HttpCredentialsEndpointProvider; 7 | import com.qcloud.cos.auth.InstanceCredentialsFetcher; 8 | import com.qcloud.cos.auth.InstanceCredentialsProvider; 9 | import com.qcloud.cos.exception.CosClientException; 10 | import org.apache.hadoop.conf.Configuration; 11 | import org.apache.hadoop.fs.CosNConfigKeys; 12 | import org.slf4j.Logger; 13 | import org.slf4j.LoggerFactory; 14 | 15 | import 
javax.annotation.Nullable; 16 | import java.net.URI; 17 | import java.util.Map; 18 | 19 | public class DLFInstanceCredentialsProvider extends AbstractCOSCredentialProvider implements COSCredentialsProvider { 20 | private static final Logger LOG = LoggerFactory.getLogger(DLFInstanceCredentialsProvider.class); 21 | private final COSCredentialsProvider cosCredentialsProvider; 22 | private static final String UIN = "Uin"; 23 | private static final String REQUEST_ID = "RequestId"; 24 | private static final String TYPE = "Type"; 25 | 26 | private String url; 27 | private String path; 28 | private String uin; 29 | private String requestId; 30 | 31 | public DLFInstanceCredentialsProvider(@Nullable URI uri, Configuration conf) { 32 | super(uri, conf); 33 | if (null != conf) { 34 | this.url = conf.get(CosNConfigKeys.COS_REMOTE_CREDENTIAL_PROVIDER_URL); 35 | this.path = conf.get(CosNConfigKeys.COS_REMOTE_CREDENTIAL_PROVIDER_PATH); 36 | this.uin = conf.get(CosNConfigKeys.COSN_UIN_KEY); 37 | this.requestId = conf.get(CosNConfigKeys.COSN_REQUEST_ID); 38 | 39 | } 40 | 41 | if (uin == null || requestId == null) { 42 | throw new IllegalArgumentException("uin and request id must exist"); 43 | } 44 | 45 | Map<String, String> header = ImmutableMap.of(UIN, uin, REQUEST_ID, requestId, TYPE, "DLF"); 46 | 47 | HttpCredentialsEndpointProvider endpointProvider = new HttpCredentialsEndpointProvider(url, path, header); 48 | InstanceCredentialsFetcher instanceCredentialsFetcher = new InstanceCredentialsFetcher(endpointProvider); 49 | this.cosCredentialsProvider = new InstanceCredentialsProvider(instanceCredentialsFetcher); 50 | } 51 | @Override 52 | public COSCredentials getCredentials() { 53 | try { 54 | return this.cosCredentialsProvider.getCredentials(); 55 | } catch (CosClientException e) { 56 | LOG.error("Failed to obtain the credentials from DLFInstanceCredentialsProvider.", e); 57 | } 58 | 59 | return null; 60 | } 61 | 62 | @Override 63 | public void refresh() { 64 | this.cosCredentialsProvider.refresh(); 65 | } 66 | } 67 | -------------------------------------------------------------------------------- /src/main/java/org/apache/hadoop/fs/auth/EMRInstanceCredentialsProvider.java: -------------------------------------------------------------------------------- 1 | package org.apache.hadoop.fs.auth; 2 | 3 | import com.qcloud.cos.auth.*; 4 | import com.qcloud.cos.exception.CosClientException; 5 | import org.apache.hadoop.conf.Configuration; 6 | import org.apache.hadoop.fs.CosNConfigKeys; 7 | import org.slf4j.Logger; 8 | import org.slf4j.LoggerFactory; 9 | 10 | import javax.annotation.Nullable; 11 | import java.net.URI; 12 | 13 | /** 14 | * Provide the credentials when the CosN connector is instantiated on Tencent Cloud EMR instances. 15 | */ 16 | public class EMRInstanceCredentialsProvider extends AbstractCOSCredentialProvider implements COSCredentialsProvider { 17 | private static final Logger LOG = LoggerFactory.getLogger(EMRInstanceCredentialsProvider.class); 18 | 19 | private String appId; 20 | private final COSCredentialsProvider cosCredentialsProvider; 21 | private boolean emrV2InstanceEnabled; 22 | 23 | public EMRInstanceCredentialsProvider(@Nullable URI uri, Configuration conf) { 24 | super(uri, conf); 25 | if (null != conf) { 26 | this.appId = conf.get(CosNConfigKeys.COSN_APPID_KEY); 27 | this.emrV2InstanceEnabled = conf.getBoolean(CosNConfigKeys.COSN_EMRV2_INSTANCE_PROVIDER_ENABLED, 28 | CosNConfigKeys.DEFAULT_COSN_EMRV2_INSTANCE_PROVIDER_ENABLED); 29 | } 30 | 31 | InstanceMetadataCredentialsEndpointProvider 
endpointProvider; 32 | if (emrV2InstanceEnabled) { 33 | endpointProvider = new InstanceMetadataCredentialsEndpointProvider( 34 | InstanceMetadataCredentialsEndpointProvider.Instance.EMRV2); 35 | } else { 36 | endpointProvider = new InstanceMetadataCredentialsEndpointProvider( 37 | InstanceMetadataCredentialsEndpointProvider.Instance.EMR); 38 | } 39 | InstanceCredentialsFetcher instanceCredentialsFetcher = new InstanceCredentialsFetcher(endpointProvider); 40 | this.cosCredentialsProvider = new InstanceCredentialsProvider(instanceCredentialsFetcher); 41 | } 42 | 43 | @Override 44 | public COSCredentials getCredentials() { 45 | try { 46 | COSCredentials cosCredentials = this.cosCredentialsProvider.getCredentials(); 47 | // Compatible appId 48 | if (null != this.appId) { 49 | if (cosCredentials instanceof InstanceProfileCredentials) { 50 | return new InstanceProfileCredentials(this.appId, cosCredentials.getCOSAccessKeyId(), 51 | cosCredentials.getCOSSecretKey(), 52 | ((InstanceProfileCredentials) cosCredentials).getSessionToken(), 53 | ((InstanceProfileCredentials) cosCredentials).getExpiredTime()); 54 | } 55 | } 56 | return cosCredentials; 57 | } catch (CosClientException e) { 58 | LOG.error("Failed to obtain the credentials from EMRInstanceCredentialsProvider.", e); 59 | } catch (Exception e) { 60 | LOG.error("getCredentials failed", e); 61 | } 62 | 63 | return null; 64 | } 65 | 66 | @Override 67 | public void refresh() { 68 | this.cosCredentialsProvider.refresh(); 69 | } 70 | } 71 | -------------------------------------------------------------------------------- /src/main/java/org/apache/hadoop/fs/auth/EnvironmentVariableCredentialProvider.java: -------------------------------------------------------------------------------- 1 | package org.apache.hadoop.fs.auth; 2 | 3 | import com.qcloud.cos.auth.BasicCOSCredentials; 4 | import com.qcloud.cos.auth.COSCredentials; 5 | import com.qcloud.cos.auth.COSCredentialsProvider; 6 | import com.qcloud.cos.utils.StringUtils; 7 | import org.apache.hadoop.conf.Configuration; 8 | import org.apache.hadoop.fs.cosn.Constants; 9 | import org.apache.hadoop.fs.CosNConfigKeys; 10 | 11 | import javax.annotation.Nullable; 12 | import java.net.URI; 13 | 14 | /** 15 | * The provider obtaining the cos credentials from the environment variables. 
16 | */ 17 | public class EnvironmentVariableCredentialProvider 18 | extends AbstractCOSCredentialProvider implements COSCredentialsProvider { 19 | private String appId; 20 | 21 | public EnvironmentVariableCredentialProvider(@Nullable URI uri, 22 | Configuration conf) { 23 | super(uri, conf); 24 | if (null != conf) { 25 | this.appId = conf.get(CosNConfigKeys.COSN_APPID_KEY); 26 | } 27 | } 28 | 29 | @Override 30 | public COSCredentials getCredentials() { 31 | String secretId = System.getenv(Constants.COSN_SECRET_ID_ENV); 32 | String secretKey = System.getenv(Constants.COSN_SECRET_KEY_ENV); 33 | 34 | secretId = StringUtils.trim(secretId); 35 | secretKey = StringUtils.trim(secretKey); 36 | 37 | if (!StringUtils.isNullOrEmpty(secretId) 38 | && !StringUtils.isNullOrEmpty(secretKey)) { 39 | if (null != this.appId) { 40 | return new BasicCOSCredentials(this.appId, secretId, secretKey); 41 | } else { 42 | return new BasicCOSCredentials(secretId, secretKey); 43 | } 44 | } 45 | 46 | return null; 47 | } 48 | 49 | @Override 50 | public void refresh() { 51 | } 52 | 53 | @Override 54 | public String toString() { 55 | return String.format("EnvironmentVariableCredentialProvider{%s, %s}", Constants.COSN_SECRET_ID_ENV, 56 | Constants.COSN_SECRET_KEY_ENV); 57 | } 58 | } 59 | -------------------------------------------------------------------------------- /src/main/java/org/apache/hadoop/fs/auth/NoAuthWithCOSException.java: -------------------------------------------------------------------------------- 1 | /** 2 | * Licensed to the Apache Software Foundation (ASF) under one 3 | * or more contributor license agreements. See the NOTICE file 4 | * distributed with this work for additional information 5 | * regarding copyright ownership. The ASF licenses this file 6 | * to you under the Apache License, Version 2.0 (the 7 | * "License"); you may not use this file except in compliance 8 | * with the License. You may obtain a copy of the License at 9 | *

10 | * http://www.apache.org/licenses/LICENSE-2.0 11 | *

12 | * Unless required by applicable law or agreed to in writing, software 13 | * distributed under the License is distributed on an "AS IS" BASIS, 14 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 15 | * See the License for the specific language governing permissions and 16 | * limitations under the License. 17 | */ 18 | 19 | package org.apache.hadoop.fs.auth; 20 | 21 | import com.qcloud.cos.exception.CosClientException; 22 | 23 | /** 24 | * Exception thrown when no credentials can be obtained. 25 | */ 26 | public class NoAuthWithCOSException extends CosClientException { 27 | public NoAuthWithCOSException(String message, Throwable t) { 28 | super(message, t); 29 | } 30 | 31 | public NoAuthWithCOSException(String message) { 32 | super(message); 33 | } 34 | 35 | public NoAuthWithCOSException(Throwable t) { 36 | super(t); 37 | } 38 | } 39 | -------------------------------------------------------------------------------- /src/main/java/org/apache/hadoop/fs/auth/OIDCRoleArnCredentialsProvider.java: -------------------------------------------------------------------------------- 1 | package org.apache.hadoop.fs.auth; 2 | 3 | import com.qcloud.cos.auth.BasicSessionCredentials; 4 | import com.qcloud.cos.auth.COSCredentials; 5 | import com.qcloud.cos.auth.COSCredentialsProvider; 6 | import com.tencentcloudapi.common.Credential; 7 | import com.tencentcloudapi.common.provider.OIDCRoleArnProvider; 8 | import org.apache.hadoop.conf.Configuration; 9 | import org.slf4j.Logger; 10 | import org.slf4j.LoggerFactory; 11 | 12 | import javax.annotation.Nullable; 13 | import java.net.URI; 14 | import java.util.concurrent.atomic.AtomicReference; 15 | 16 | public class OIDCRoleArnCredentialsProvider extends AbstractCOSCredentialProvider implements COSCredentialsProvider { 17 | private static final Logger log = LoggerFactory.getLogger(OIDCRoleArnCredentialsProvider.class); 18 | 19 | private OIDCRoleArnProvider provider; 20 | private boolean initialized = false; 21 | private AtomicReference<Credential> lastCredentialsRef; 22 | 23 | public OIDCRoleArnCredentialsProvider(@Nullable URI uri, Configuration conf) { 24 | super(uri, conf); 25 | try { 26 | this.provider = new OIDCRoleArnProvider(); 27 | lastCredentialsRef = new AtomicReference<>(); 28 | initialized = true; 29 | } catch (Exception e) { 30 | log.error("Failed to initialize OIDC Role Arn Credentials Provider", e); 31 | } 32 | } 33 | 34 | @Override 35 | public COSCredentials getCredentials() { 36 | if (!initialized) { 37 | return null; 38 | } 39 | COSCredentials cosCredentials = null; 40 | try { 41 | Credential cred; 42 | // TODO: the downstream SDK has a bug: calling provider.getCredentials() twice returns null the second time. 43 | // Downstream, the fetch logic first initializes empty credentials and then calls the update method. 44 | // The update method only refreshes when the time elapsed since the last fetch exceeds a threshold. 45 | // Therefore we cache the first credentials here and renew them through refresh. 46 | if (lastCredentialsRef.get() != null) { 47 | cred = lastCredentialsRef.get(); 48 | provider.update(cred); 49 | } else { 50 | cred = this.provider.getCredentials(); 51 | } 52 | lastCredentialsRef.set(cred); 53 | cosCredentials = new BasicSessionCredentials(cred.getSecretId(), cred.getSecretKey(), 54 | cred.getToken()); 55 | } catch (Exception e) { 56 | log.error("Failed to get credentials from OIDC Role Arn Credentials Provider", e); 57 | return null; 58 | } 59 | return cosCredentials; 60 | } 61 | 62 | @Override 63 | public void refresh() { 64 | if (!initialized) { 65 | return; 66 | } 67 | try { 68 | this.provider.update(lastCredentialsRef.get()); 69 | } catch (Exception e) { 70 | 
log.error("Failed to refresh OIDC Role Arn Credentials Provider", e); 71 | } 72 | } 73 | } 74 | -------------------------------------------------------------------------------- /src/main/java/org/apache/hadoop/fs/auth/RangerCredentialsProvider.java: -------------------------------------------------------------------------------- 1 | package org.apache.hadoop.fs.auth; 2 | 3 | import com.qcloud.cos.auth.BasicSessionCredentials; 4 | import com.qcloud.cos.auth.COSCredentials; 5 | import com.qcloud.cos.auth.COSCredentialsProvider; 6 | import org.apache.hadoop.conf.Configuration; 7 | import org.apache.hadoop.fs.CosNConfigKeys; 8 | import org.apache.hadoop.fs.CosNUtils; 9 | import org.apache.hadoop.fs.RangerCredentialsClient; 10 | import org.apache.hadoop.fs.cosn.ranger.security.sts.GetSTSResponse; 11 | import org.slf4j.Logger; 12 | import org.slf4j.LoggerFactory; 13 | 14 | import javax.annotation.Nullable; 15 | import java.io.IOException; 16 | import java.net.URI; 17 | import java.util.concurrent.atomic.AtomicLong; 18 | import java.util.concurrent.atomic.AtomicReference; 19 | 20 | public class RangerCredentialsProvider extends AbstractCOSCredentialProvider implements COSCredentialsProvider { 21 | private static final Logger log = LoggerFactory.getLogger(RangerCredentialsProvider.class); 22 | private RangerCredentialsFetcher rangerCredentialsFetcher; 23 | private RangerCredentialsClient rangerClient; 24 | private String bucketNameWithoutAppid; 25 | private String bucketRegion; 26 | private String appId; 27 | 28 | 29 | public RangerCredentialsProvider(@Nullable URI uri, Configuration conf, 30 | RangerCredentialsClient rangerClient) { 31 | super(uri, conf); 32 | if (null != conf) { 33 | this.appId = conf.get(CosNConfigKeys.COSN_APPID_KEY); 34 | this.bucketNameWithoutAppid = CosNUtils.getBucketNameWithAppid( 35 | uri.getHost(), conf.get(CosNConfigKeys.COSN_APPID_KEY)); 36 | this.bucketRegion = conf.get(CosNConfigKeys.COSN_REGION_KEY); 37 | // native store keep the ranger client not null. 
38 | this.rangerClient = rangerClient; 39 | 40 | if (this.bucketRegion == null || this.bucketRegion.isEmpty()) { 41 | this.bucketRegion = conf.get(CosNConfigKeys.COSN_REGION_PREV_KEY); 42 | } 43 | 44 | rangerCredentialsFetcher = new RangerCredentialsFetcher( 45 | conf.getInt( 46 | CosNConfigKeys.COSN_RANGER_TEMP_TOKEN_REFRESH_INTERVAL, 47 | CosNConfigKeys.DEFAULT_COSN_RANGER_TEMP_TOKEN_REFRESH_INTERVAL)); 48 | } 49 | } 50 | 51 | class RangerCredentialsFetcher { 52 | private int refreshIntervalSeconds; 53 | private AtomicReference<COSCredentials> lastCredentialsRef; 54 | private AtomicLong lastGetCredentialsTimeStamp; 55 | 56 | RangerCredentialsFetcher(int refreshIntervalSeconds) { 57 | this.refreshIntervalSeconds = refreshIntervalSeconds; 58 | this.lastCredentialsRef = new AtomicReference<>(); 59 | this.lastGetCredentialsTimeStamp = new AtomicLong(); 60 | } 61 | 62 | COSCredentials getCredentials() { 63 | if (needSyncFetchNewCredentials()) { 64 | synchronized (this) { 65 | if (needSyncFetchNewCredentials()) { 66 | return fetchNewCredentials(); 67 | } 68 | } 69 | } 70 | return lastCredentialsRef.get(); 71 | } 72 | 73 | private boolean needSyncFetchNewCredentials() { 74 | if (lastCredentialsRef.get() == null) { 75 | return true; 76 | } 77 | long currentSec = System.currentTimeMillis() / 1000; 78 | return currentSec - lastGetCredentialsTimeStamp.get() > this.refreshIntervalSeconds; 79 | } 80 | 81 | private COSCredentials fetchNewCredentials() { 82 | try { 83 | if (rangerClient == null) { 84 | log.error("The ranger provider's ranger client is null; this should be impossible."); 85 | } 86 | GetSTSResponse stsResp = rangerClient.getSTS(bucketRegion, bucketNameWithoutAppid); 87 | /* 88 | * Some customers find Kerberos authentication heavyweight, so a relatively 89 | * lightweight authentication method is implemented. 90 | */ 91 | // if the custom authentication fails, there will be no temporary AK/SK 92 | if (stsResp == null) { 93 | log.error("The ranger provider fetched new credentials, but the STS response is null."); 94 | return null; 95 | } 96 | if (!stsResp.isCheckAuthPass()) { 97 | return null; 98 | } 99 | COSCredentials cosCredentials = null; 100 | if (appId != null) { 101 | cosCredentials = new BasicSessionCredentials(appId, stsResp.getTempAK(), stsResp.getTempSK(), 102 | stsResp.getTempToken()); 103 | } else { 104 | cosCredentials = new BasicSessionCredentials(stsResp.getTempAK(), stsResp.getTempSK(), 105 | stsResp.getTempToken()); 106 | } 107 | 108 | this.lastCredentialsRef.set(cosCredentials); 109 | this.lastGetCredentialsTimeStamp.set(System.currentTimeMillis() / 1000); 110 | return cosCredentials; 111 | } catch (IOException e) { 112 | log.error("fetch credentials failed", e); 113 | return null; 114 | } 115 | } 116 | } 117 | 118 | @Override 119 | public COSCredentials getCredentials() { 120 | return rangerCredentialsFetcher.getCredentials(); 121 | } 122 | 123 | @Override 124 | public void refresh() { 125 | } 126 | } 127 | -------------------------------------------------------------------------------- /src/main/java/org/apache/hadoop/fs/auth/SessionCredentialProvider.java: -------------------------------------------------------------------------------- 1 | package org.apache.hadoop.fs.auth; 2 | 3 | import com.qcloud.cos.auth.BasicCOSCredentials; 4 | import com.qcloud.cos.auth.COSCredentials; 5 | import com.qcloud.cos.auth.COSCredentialsProvider; 6 | import com.qcloud.cos.utils.StringUtils; 7 | import org.apache.hadoop.conf.Configuration; 8 | import org.apache.hadoop.fs.CosNConfigKeys; 9 | 10 | import javax.annotation.Nullable; 
11 | import java.net.URI; 12 | 13 | /** 14 | * The provider getting the credential from the specified uri. 15 | */ 16 | public class SessionCredentialProvider 17 | extends AbstractCOSCredentialProvider implements COSCredentialsProvider { 18 | 19 | private String appId; // compatible 20 | 21 | public SessionCredentialProvider(@Nullable URI uri, Configuration conf) { 22 | super(uri, conf); 23 | if (null != conf) { 24 | this.appId = conf.get(CosNConfigKeys.COSN_APPID_KEY); 25 | } 26 | } 27 | 28 | @Override 29 | public COSCredentials getCredentials() { 30 | if (null == super.getUri()) { 31 | return null; 32 | } 33 | 34 | String authority = super.getUri().getAuthority(); 35 | if (null == authority) { 36 | return null; 37 | } 38 | 39 | int authoritySplitIndex = authority.indexOf('@'); 40 | if (authoritySplitIndex < 0) { 41 | return null; 42 | } 43 | 44 | String credential = authority.substring(0, authoritySplitIndex); 45 | int credentialSplitIndex = credential.indexOf(':'); 46 | if (credentialSplitIndex < 0) { 47 | return null; 48 | } 49 | String secretId = credential.substring(0, credentialSplitIndex); 50 | String secretKey = credential.substring(credentialSplitIndex + 1); 51 | 52 | if (!StringUtils.isNullOrEmpty(secretId) 53 | && !StringUtils.isNullOrEmpty(secretKey)) { 54 | if (null != this.appId) { 55 | return new BasicCOSCredentials(this.appId, secretId, secretKey); 56 | } else { 57 | return new BasicCOSCredentials(secretId, secretKey); 58 | } 59 | } 60 | 61 | return null; 62 | } 63 | 64 | @Override 65 | public void refresh() { 66 | } 67 | } 68 | -------------------------------------------------------------------------------- /src/main/java/org/apache/hadoop/fs/auth/SessionTokenCredentialProvider.java: -------------------------------------------------------------------------------- 1 | package org.apache.hadoop.fs.auth; 2 | 3 | import com.qcloud.cos.auth.BasicSessionCredentials; 4 | import com.qcloud.cos.auth.COSCredentials; 5 | import com.qcloud.cos.auth.COSCredentialsProvider; 6 | import com.qcloud.cos.utils.StringUtils; 7 | import org.apache.hadoop.conf.Configuration; 8 | import org.apache.hadoop.fs.CosNConfigKeys; 9 | 10 | import javax.annotation.Nullable; 11 | import java.net.URI; 12 | 13 | public class SessionTokenCredentialProvider extends AbstractCOSCredentialProvider 14 | implements COSCredentialsProvider { 15 | private String appId; 16 | private String secretId; 17 | private String secretKey; 18 | private String sessionToken; 19 | 20 | public SessionTokenCredentialProvider(@Nullable URI uri, Configuration conf) { 21 | super(uri, conf); 22 | if (null != conf) { 23 | this.appId = conf.get(CosNConfigKeys.COSN_APPID_KEY); 24 | this.secretId = conf.get( 25 | CosNConfigKeys.COSN_USERINFO_SECRET_ID_KEY); 26 | this.secretKey = conf.get( 27 | CosNConfigKeys.COSN_USERINFO_SECRET_KEY_KEY); 28 | this.sessionToken = conf.get( 29 | CosNConfigKeys.COSN_USERINFO_SESSION_TOKEN); 30 | } 31 | } 32 | 33 | @Override 34 | public COSCredentials getCredentials() { 35 | if (!StringUtils.isNullOrEmpty(this.secretId) 36 | && !StringUtils.isNullOrEmpty(this.secretKey)) { 37 | if (null != this.appId) { 38 | return new BasicSessionCredentials(this.appId, this.secretId, this.secretKey,this.sessionToken); 39 | } else { 40 | return new BasicSessionCredentials(this.secretId, this.secretKey, this.sessionToken); 41 | } 42 | } 43 | return null; 44 | } 45 | 46 | @Override 47 | public void refresh() { 48 | } 49 | } 50 | -------------------------------------------------------------------------------- 
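
Editor's note: the two providers just shown differ only in where the secrets come from — SessionCredentialProvider parses `secretId:secretKey` out of the URI authority, while SessionTokenCredentialProvider additionally reads an STS session token from the configuration. A small sketch of wiring the latter from a Configuration; the key constants come from CosNConfigKeys as referenced above, and the credential values and bucket URI are placeholders:

```java
import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CosNConfigKeys;
import org.apache.hadoop.fs.auth.SessionTokenCredentialProvider;
import com.qcloud.cos.auth.COSCredentials;

public class SessionTokenSketch {
    public static void main(String[] args) {
        Configuration conf = new Configuration();
        // Temporary credentials, e.g. issued by STS; the literal values are placeholders.
        conf.set(CosNConfigKeys.COSN_USERINFO_SECRET_ID_KEY, "AKIDxxxxxxxx");
        conf.set(CosNConfigKeys.COSN_USERINFO_SECRET_KEY_KEY, "xxxxxxxxxxxx");
        conf.set(CosNConfigKeys.COSN_USERINFO_SESSION_TOKEN, "tokenxxxxxxx");

        SessionTokenCredentialProvider provider = new SessionTokenCredentialProvider(
                URI.create("cosn://examplebucket-125xxxxxxx/"), conf);
        COSCredentials credentials = provider.getCredentials(); // null when id/key are missing
        System.out.println(credentials != null ? "session credentials resolved" : "not configured");
    }
}
```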
/src/main/java/org/apache/hadoop/fs/auth/SimpleCredentialProvider.java: -------------------------------------------------------------------------------- 1 | package org.apache.hadoop.fs.auth; 2 | 3 | import com.qcloud.cos.auth.BasicCOSCredentials; 4 | import com.qcloud.cos.auth.COSCredentials; 5 | import com.qcloud.cos.auth.COSCredentialsProvider; 6 | import com.qcloud.cos.utils.StringUtils; 7 | import org.apache.hadoop.conf.Configuration; 8 | import org.apache.hadoop.fs.CosNConfigKeys; 9 | 10 | import javax.annotation.Nullable; 11 | import java.net.URI; 12 | 13 | /** 14 | * Get the credentials from the hadoop configuration. 15 | */ 16 | public class SimpleCredentialProvider 17 | extends AbstractCOSCredentialProvider implements COSCredentialsProvider { 18 | private String appId; 19 | private String secretId; 20 | private String secretKey; 21 | 22 | public SimpleCredentialProvider(@Nullable URI uri, Configuration conf) { 23 | super(uri, conf); 24 | if (null != conf) { 25 | this.appId = conf.get(CosNConfigKeys.COSN_APPID_KEY); 26 | this.secretId = conf.get( 27 | CosNConfigKeys.COSN_USERINFO_SECRET_ID_KEY); 28 | this.secretKey = conf.get( 29 | CosNConfigKeys.COSN_USERINFO_SECRET_KEY_KEY); 30 | } 31 | } 32 | 33 | @Override 34 | public COSCredentials getCredentials() { 35 | if (!StringUtils.isNullOrEmpty(this.secretId) 36 | && !StringUtils.isNullOrEmpty(this.secretKey)) { 37 | if (null != this.appId) { 38 | return new BasicCOSCredentials(this.appId, this.secretId, this.secretKey); 39 | } else { 40 | return new BasicCOSCredentials(this.secretId, this.secretKey); 41 | } 42 | } 43 | return null; 44 | } 45 | 46 | @Override 47 | public void refresh() { 48 | } 49 | } 50 | -------------------------------------------------------------------------------- /src/main/java/org/apache/hadoop/fs/auth/package-info.java: -------------------------------------------------------------------------------- 1 | package org.apache.hadoop.fs.auth; 2 | -------------------------------------------------------------------------------- /src/main/java/org/apache/hadoop/fs/cosn/Abortable.java: -------------------------------------------------------------------------------- 1 | package org.apache.hadoop.fs.cosn; 2 | 3 | import java.io.IOException; 4 | 5 | public interface Abortable { 6 | /** 7 | * hadoop 3.3.0+ also defines an abort interface on the output stream base classes, so classes 8 | * implementing this interface could clash with the inherited base class; hence it is named doAbort. 9 | * 10 | * @throws IOException if the underlying CosN layer fails while aborting. 11 | */ 12 | void doAbort() throws IOException; 13 | } 14 | -------------------------------------------------------------------------------- /src/main/java/org/apache/hadoop/fs/cosn/BufferInputStream.java: -------------------------------------------------------------------------------- 1 | package org.apache.hadoop.fs.cosn; 2 | 3 | import org.apache.hadoop.fs.cosn.buffer.CosNByteBuffer; 4 | 5 | import java.io.IOException; 6 | import java.io.InputStream; 7 | import java.nio.InvalidMarkException; 8 | 9 | public class BufferInputStream extends InputStream { 10 | private CosNByteBuffer buffer; 11 | private boolean isClosed = true; 12 | 13 | public BufferInputStream(CosNByteBuffer buffer) throws IOException { 14 | if (null == buffer) { 15 | throw new IOException("The buffer is null"); 16 | } 17 | this.buffer = buffer; 18 | this.buffer.flipRead(); 19 | this.isClosed = false; 20 | } 21 | 22 | @Override 23 | public synchronized int read() throws IOException { 24 | this.checkOpened(); 25 | 26 | if (!this.buffer.hasRemaining()) { 27 | return -1; 28 | } 29 | return this.buffer.get() & 0xFF; 30 | } 31 | 32 | 
@Override 33 | public synchronized int read(byte[] b, int off, int len) throws IOException { 34 | this.checkOpened(); 35 | 36 | if (!this.buffer.hasRemaining()) { 37 | return -1; 38 | } 39 | 40 | int readLength = Math.min(this.buffer.remaining(), len); 41 | this.buffer.get(b, off, readLength); 42 | return readLength; 43 | } 44 | 45 | @Override 46 | public synchronized void mark(int readLimit) { 47 | if (!this.markSupported()) { 48 | return; 49 | } 50 | this.buffer.mark(); 51 | // Parameter readLimit is ignored 52 | } 53 | 54 | @Override 55 | public boolean markSupported() { 56 | return true; 57 | } 58 | 59 | @Override 60 | public synchronized void reset() throws IOException { 61 | this.checkOpened(); 62 | 63 | try { 64 | this.buffer.reset(); 65 | } catch (InvalidMarkException e) { 66 | throw new IOException("Invalid mark"); 67 | } 68 | } 69 | 70 | @Override 71 | public synchronized int available() throws IOException { 72 | this.checkOpened(); 73 | return this.buffer.remaining(); 74 | } 75 | 76 | @Override 77 | public synchronized void close() throws IOException { 78 | this.isClosed = true; 79 | this.buffer = null; 80 | } 81 | 82 | private void checkOpened() throws IOException { 83 | if (this.isClosed) { 84 | throw new IOException( 85 | String.format("The BufferInputStream[%d] has been closed", this.hashCode())); 86 | } 87 | } 88 | } 89 | -------------------------------------------------------------------------------- /src/main/java/org/apache/hadoop/fs/cosn/BufferOutputStream.java: -------------------------------------------------------------------------------- 1 | package org.apache.hadoop.fs.cosn; 2 | 3 | import org.apache.hadoop.fs.cosn.buffer.CosNByteBuffer; 4 | 5 | import java.io.IOException; 6 | import java.io.OutputStream; 7 | 8 | /** 9 | * The output stream class used for buffered files. 10 | * The purpose of providing this class is to optimize buffer put performance. 11 | */ 12 | public class BufferOutputStream extends OutputStream { 13 | private CosNByteBuffer buffer; 14 | private boolean isFlush; 15 | private boolean isClosed; 16 | 17 | public BufferOutputStream(CosNByteBuffer buffer) throws IOException { 18 | if (null == buffer) { 19 | throw new IOException("buffer is null"); 20 | } 21 | this.buffer = buffer; 22 | this.buffer.flipWrite(); 23 | this.isFlush = false; 24 | this.isClosed = false; 25 | } 26 | 27 | @Override 28 | public synchronized void write(int b) throws IOException { 29 | this.checkOpened(); 30 | 31 | if (this.buffer.remaining() == 0) { 32 | throw new IOException("The buffer is full"); 33 | } 34 | 35 | byte[] singleBytes = new byte[1]; 36 | singleBytes[0] = (byte) b; 37 | this.buffer.put(singleBytes, 0, 1); 38 | this.isFlush = false; 39 | } 40 | 41 | @Override 42 | public synchronized void write(byte[] b, int off, int len) throws IOException { 43 | this.checkOpened(); 44 | 45 | if (b == null) { 46 | throw new NullPointerException(); 47 | } else if ((off < 0) || (off > b.length) || (len < 0) || 48 | ((off + len) > b.length) || ((off + len) < 0)) { 49 | throw new IndexOutOfBoundsException(); 50 | } else if (len == 0) { 51 | return; 52 | } 53 | 54 | this.buffer.put(b, off, len); 55 | this.isFlush = false; 56 | } 57 | 58 | @Override 59 | public synchronized void flush() throws IOException { 60 | this.checkOpened(); 61 | 62 | if (this.isFlush) { 63 | return; 64 | } 65 | // TODO MappedByteBuffer can call the force method to flush. 
66 | this.isFlush = true; 67 | } 68 | 69 | @Override 70 | public synchronized void close() throws IOException { 71 | if (this.isClosed) { 72 | return; 73 | } 74 | 75 | this.flush(); 76 | 77 | this.isClosed = true; 78 | this.isFlush = true; 79 | this.buffer = null; 80 | } 81 | 82 | private void checkOpened() throws IOException { 83 | if (this.isClosed) { 84 | throw new IOException( 85 | String.format("The BufferOutputStream[%d] has been closed.", this.hashCode())); 86 | } 87 | } 88 | } 89 | -------------------------------------------------------------------------------- /src/main/java/org/apache/hadoop/fs/cosn/CRC32CCheckSum.java: -------------------------------------------------------------------------------- 1 | package org.apache.hadoop.fs.cosn; 2 | 3 | import org.apache.hadoop.fs.FileChecksum; 4 | 5 | import java.io.DataInput; 6 | import java.io.DataOutput; 7 | import java.io.IOException; 8 | import java.math.BigInteger; 9 | 10 | /** 11 | * A composite CRC32C value exposed as a file checksum. 12 | * Consider it suitable for checking whether an object has changed, but 13 | * not suitable for comparing two different objects for equivalence, 14 | * especially between hadoop compatible filesystems. 15 | */ 16 | public class CRC32CCheckSum extends FileChecksum { 17 | private static final String ALGORITHM_NAME = "COMPOSITE-CRC32C"; 18 | 19 | private int crc32c = 0; 20 | 21 | public CRC32CCheckSum() { 22 | } 23 | 24 | 25 | public CRC32CCheckSum(String crc32cecma) { 26 | try { 27 | BigInteger bigInteger = new BigInteger(crc32cecma); 28 | this.crc32c = bigInteger.intValue(); 29 | } catch (NumberFormatException e) { 30 | this.crc32c = 0; 31 | } 32 | } 33 | 34 | @Override 35 | public String getAlgorithmName() { 36 | return CRC32CCheckSum.ALGORITHM_NAME; 37 | } 38 | 39 | @Override 40 | public int getLength() { 41 | return Integer.SIZE / Byte.SIZE; 42 | } 43 | 44 | @Override 45 | public byte[] getBytes() { 46 | return CrcUtils.intToBytes(crc32c); 47 | } 48 | 49 | @Override 50 | public void write(DataOutput dataOutput) throws IOException { 51 | dataOutput.writeInt(this.crc32c); 52 | } 53 | 54 | @Override 55 | public void readFields(DataInput dataInput) throws IOException { 56 | this.crc32c = dataInput.readInt(); 57 | } 58 | 59 | @Override 60 | public String toString() { 61 | return getAlgorithmName() + ":" + String.format("0x%08x", crc32c); 62 | } 63 | } 64 | -------------------------------------------------------------------------------- /src/main/java/org/apache/hadoop/fs/cosn/CRC64Checksum.java: -------------------------------------------------------------------------------- 1 | package org.apache.hadoop.fs.cosn; 2 | 3 | import org.apache.hadoop.fs.FileChecksum; 4 | import org.apache.hadoop.io.WritableUtils; 5 | 6 | import java.io.DataInput; 7 | import java.io.DataOutput; 8 | import java.io.IOException; 9 | import java.math.BigInteger; 10 | 11 | /** 12 | * A CRC64 value exposed as a file checksum. 13 | * Consider it suitable for checking whether an object has changed, but 14 | * not suitable for comparing two different objects for equivalence, 15 | * especially between hadoop compatible filesystems. 
16 | */ 17 | public class CRC64Checksum extends FileChecksum { 18 | private static final String ALGORITHM_NAME = "CRC64"; 19 | 20 | private long crc64 = 0; 21 | 22 | public CRC64Checksum() { 23 | } 24 | 25 | public CRC64Checksum(String crc64ecma) { 26 | try { 27 | BigInteger bigInteger = new BigInteger(crc64ecma); 28 | this.crc64 = bigInteger.longValue(); 29 | } catch (NumberFormatException e) { 30 | this.crc64 = 0; 31 | } 32 | } 33 | 34 | @Override 35 | public String getAlgorithmName() { 36 | return CRC64Checksum.ALGORITHM_NAME; 37 | } 38 | 39 | @Override 40 | public int getLength() { 41 | return Long.SIZE / Byte.SIZE; 42 | } 43 | 44 | @Override 45 | public byte[] getBytes() { 46 | return this.crc64 != 0 ? WritableUtils.toByteArray(this) : new byte[0]; 47 | } 48 | 49 | @Override 50 | public void write(DataOutput dataOutput) throws IOException { 51 | dataOutput.writeLong(this.crc64); 52 | } 53 | 54 | @Override 55 | public void readFields(DataInput dataInput) throws IOException { 56 | this.crc64 = dataInput.readLong(); 57 | } 58 | 59 | @Override 60 | public String toString() { 61 | return "CRC64Checksum{" + 62 | "crc64=" + crc64 + 63 | '}'; 64 | } 65 | } 66 | -------------------------------------------------------------------------------- /src/main/java/org/apache/hadoop/fs/cosn/ConsistencyChecker.java: -------------------------------------------------------------------------------- 1 | package org.apache.hadoop.fs.cosn; 2 | 3 | import com.qcloud.cos.utils.CRC64; 4 | import org.apache.hadoop.fs.FileMetadata; 5 | import org.apache.hadoop.fs.NativeFileSystemStore; 6 | import org.slf4j.Logger; 7 | import org.slf4j.LoggerFactory; 8 | 9 | import java.io.FileNotFoundException; 10 | import java.io.IOException; 11 | import java.math.BigInteger; 12 | 13 | public class ConsistencyChecker { 14 | private static final Logger LOG = LoggerFactory.getLogger(ConsistencyChecker.class); 15 | 16 | private final NativeFileSystemStore nativeStore; 17 | private String key; 18 | private volatile long writtenBytesLength; 19 | 20 | private CRC64 crc64; 21 | private volatile boolean finished; 22 | private CheckResult checkResult; 23 | 24 | private boolean useClientSideEncryption; 25 | 26 | public static final class CheckResult { 27 | private String fsScheme; // FileSystem scheme 28 | private String key; 29 | private long expectedLength; 30 | private long realLength; 31 | 32 | private long expectedCrc64Value; 33 | private long realCrc64Value; 34 | 35 | private Exception exception; 36 | 37 | private String description; 38 | 39 | private boolean useClientSideEncryption; 40 | 41 | public CheckResult() { 42 | this("", "", -1, -1, -1, -1, false, null); 43 | } 44 | 45 | public CheckResult(String scheme, String cosKey, 46 | long expectedLength, long realLength, 47 | long expectedCrc64Value, long realCrc64Value, boolean useClientSideEncryption, Exception e) { 48 | this.fsScheme = scheme; 49 | this.key = cosKey; 50 | this.expectedLength = expectedLength; 51 | this.realLength = realLength; 52 | this.expectedCrc64Value = expectedCrc64Value; 53 | this.realCrc64Value = realCrc64Value; 54 | this.exception = e; 55 | this.description = ""; 56 | this.useClientSideEncryption = useClientSideEncryption; 57 | } 58 | 59 | public String getFsScheme() { 60 | return fsScheme; 61 | } 62 | 63 | public String getKey() { 64 | return key; 65 | } 66 | 67 | /** 68 | * judge the success operation 69 | * @return whether success 70 | */ 71 | public boolean isSucceeded() { 72 | boolean succeeded = true; 73 | do { 74 | if (null != this.exception) { 75 | 
this.description = String.format("Failed to check the data due to an exception: %s.", 76 | this.exception); 77 | succeeded = false; 78 | break; 79 | } 80 | 81 | if (this.expectedLength < 0 || this.realLength < 0) { 82 | // The expected length and the real length are invalid. 83 | this.description = String.format("Invalid check data. expected length: %d, real length: %d.", 84 | this.expectedLength, this.realLength); 85 | succeeded = false; 86 | break; 87 | } 88 | if (this.expectedLength != this.realLength) { 89 | this.description = String.format("The expected length is not equal to the the real length. " + 90 | "expected length: %d, real length: %d.", this.expectedLength, this.realLength); 91 | succeeded = false; 92 | break; 93 | } 94 | if(this.useClientSideEncryption){ 95 | break; 96 | } 97 | if (this.expectedCrc64Value != this.realCrc64Value) { 98 | this.description = String.format("The CRC64 checksum verify failed. " + 99 | "expected CRC64 value: %d, real CRC64 value: %d", 100 | this.expectedCrc64Value, this.realCrc64Value); 101 | succeeded = false; 102 | break; 103 | } 104 | } while(false); 105 | 106 | if (succeeded) { 107 | this.description = String.format("File verification succeeded. " + 108 | "expected length: %d, real length: %d", this.expectedLength, this.realLength); 109 | if(!useClientSideEncryption){ 110 | this.description = this.description + String.format(", expected CRC64 value: %d, real CRC64 value: %d", 111 | this.expectedCrc64Value, this.realCrc64Value); 112 | } 113 | } else { 114 | this.description = String.format("File verification failure. %s", this.description); 115 | } 116 | 117 | return succeeded; 118 | } 119 | 120 | public long getExpectedLength() { 121 | return expectedLength; 122 | } 123 | 124 | public long getRealLength() { 125 | return realLength; 126 | } 127 | 128 | /** 129 | * get description string 130 | * @return description 131 | */ 132 | public String getDescription() { 133 | return this.description; 134 | } 135 | 136 | public void setFsScheme(String fsScheme) { 137 | this.fsScheme = fsScheme; 138 | } 139 | 140 | public void setKey(String key) { 141 | this.key = key; 142 | } 143 | 144 | public void setExpectedLength(long expectedLength) { 145 | this.expectedLength = expectedLength; 146 | } 147 | 148 | public void setRealLength(long realLength) { 149 | this.realLength = realLength; 150 | } 151 | 152 | public void setExpectedCrc64Value(long expectedCrc64Value) { 153 | this.expectedCrc64Value = expectedCrc64Value; 154 | } 155 | 156 | public void setRealCrc64Value(long realCrc64Value) { 157 | this.realCrc64Value = realCrc64Value; 158 | } 159 | 160 | public void setException(Exception exception) { 161 | this.exception = exception; 162 | } 163 | } 164 | 165 | public ConsistencyChecker(final NativeFileSystemStore nativeStore, final String cosKey, boolean useClientSideEncryption) throws IOException { 166 | this(nativeStore, cosKey, null, 0, useClientSideEncryption); 167 | } 168 | 169 | public ConsistencyChecker(final NativeFileSystemStore nativeStore, final String cosKey, 170 | CRC64 crc64, long writtenBytesLength, boolean useClientSideEncryption) throws IOException { 171 | if (null == nativeStore || null == cosKey || cosKey.isEmpty()) { 172 | throw new IOException(String.format( 173 | "Native FileSystem store [%s] or key [%s] is illegal.", nativeStore, cosKey)); 174 | } 175 | this.nativeStore = nativeStore; 176 | this.key = cosKey; 177 | 178 | if (null != crc64 && writtenBytesLength > 0) { 179 | this.crc64 = crc64; 180 | this.writtenBytesLength = 
writtenBytesLength; 181 | } else { 182 | this.crc64 = new CRC64(); 183 | this.writtenBytesLength = 0; 184 | } 185 | 186 | this.finished = false; 187 | this.useClientSideEncryption = useClientSideEncryption; 188 | this.checkResult = new CheckResult("cosn", this.key, -1, -1, -1, -1, useClientSideEncryption, null); 189 | } 190 | 191 | public synchronized void writeBytes(byte[] writeBytes, int offset, int length) { 192 | if (this.finished) { 193 | LOG.error("The cos key [{}] has ended statistics.", this.key); 194 | return; 195 | } 196 | 197 | // update the crc64 checksum. 198 | this.crc64.update(writeBytes, offset, length); 199 | this.writtenBytesLength += length; 200 | } 201 | 202 | public synchronized void finish() { 203 | if (this.finished) { 204 | return; 205 | } 206 | this.finished = true; 207 | 208 | FileMetadata fileMetadata; 209 | try { 210 | fileMetadata = this.nativeStore.retrieveMetadata(this.key); 211 | if (null == fileMetadata) { 212 | throw new FileNotFoundException("The target object is not found. " + 213 | "Please terminate your application immediately."); 214 | } 215 | LOG.debug("Get the target key [{}]'s length: {} and crc64 checksum: {}.", 216 | this.key, fileMetadata.getLength(), fileMetadata.getCrc64ecm()); 217 | } catch (IOException e) { 218 | LOG.error("Failed to get the target key [{}]'s length and crc64 checksum.", this.key, e); 219 | this.checkResult.setFsScheme("cosn"); 220 | this.checkResult.setKey(this.key); 221 | this.checkResult.setExpectedLength(this.writtenBytesLength); 222 | this.checkResult.setRealLength(-1); 223 | this.checkResult.setExpectedCrc64Value(this.crc64.getValue()); 224 | this.checkResult.setRealCrc64Value(-1); 225 | // The result that an exception has occurred is unreliable. 226 | this.checkResult.setException(e); 227 | return; 228 | } 229 | 230 | this.checkResult.setFsScheme("cosn"); 231 | this.checkResult.setKey(this.key); 232 | this.checkResult.setExpectedLength(this.writtenBytesLength); 233 | this.checkResult.setRealLength(fileMetadata.getLength()); 234 | this.checkResult.setExpectedCrc64Value(this.crc64.getValue()); 235 | this.checkResult.setRealCrc64Value(new BigInteger(fileMetadata.getCrc64ecm()).longValue()); 236 | } 237 | 238 | public CheckResult getCheckResult() { 239 | return checkResult; 240 | } 241 | 242 | public void setKey(String key) { 243 | this.key = key; 244 | } 245 | 246 | public synchronized void reset() { 247 | this.writtenBytesLength = 0; 248 | this.crc64.reset(); 249 | this.finished = false; 250 | this.checkResult = new CheckResult("cosn", this.key, -1, -1, -1, -1, this.useClientSideEncryption, null); 251 | } 252 | } 253 | -------------------------------------------------------------------------------- /src/main/java/org/apache/hadoop/fs/cosn/Constants.java: -------------------------------------------------------------------------------- 1 | package org.apache.hadoop.fs.cosn; 2 | 3 | public final class Constants { 4 | private Constants() { 5 | } 6 | 7 | // The block file prefix for multipart upload op. 
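    // Together with BLOCK_TMP_FILE_SUFFIX below, temporary part files come out
    // roughly as cos_<random>_local_block_cache under the configured buffer
    // directory (see CosNMappedBufferFactory#create).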
8 | public static final String BLOCK_TMP_FILE_PREFIX = "cos_"; 9 | 10 | // Suffix for local cache file name 11 | public static final String BLOCK_TMP_FILE_SUFFIX = "_local_block_cache"; 12 | 13 | // Crc32c server response header key 14 | public static final String CRC32C_RESP_HEADER = "x-cos-hash-crc32c"; 15 | // Crc32c agent request header key 16 | public static final String CRC32C_REQ_HEADER = "x-cos-crc32c-flag"; 17 | // Crc32c agent request header value 18 | public static final String CRC32C_REQ_HEADER_VAL = "cosn"; 19 | 20 | // Maximum number of blocks uploaded in trunks. 21 | public static final int MAX_PART_NUM = 10000; 22 | // The maximum size of a single block. 23 | public static final long MAX_PART_SIZE = 2 * Unit.GB; 24 | // The minimum size of a single block. 25 | public static final long MIN_PART_SIZE = Unit.MB; 26 | // The maximum size of the buffer is 8GB 27 | public static final long MAX_BUFFER_SIZE = 2 * Unit.GB; 28 | 29 | // Environments variables for the COS secretId and secretKey. 30 | public static final String COSN_SECRET_ID_ENV = "COSN_SECRET_ID"; 31 | public static final String COSN_SECRET_KEY_ENV = "COSN_SECRET_KEY"; 32 | 33 | public static final String COSN_OFS_CONFIG_PREFIX = "fs.ofs."; 34 | public static final String COSN_CONFIG_TRANSFER_PREFIX = "fs.cosn.trsf."; 35 | 36 | public static final String COSN_BUCKET_FS_COSN_IMPL = "org.apache.hadoop.fs.CosNFileSystem"; 37 | public static final String COSN_POSIX_BUCKET_FS_COSN_IMPL = "org.apache.hadoop.fs.CosNFileSystem"; 38 | public static final String COSN_POSIX_BUCKET_FS_CHDFS_IMPL="com.qcloud.chdfs.fs.CHDFSHadoopFileSystemAdapter"; 39 | 40 | public static final String CUSTOM_AUTHENTICATION = "custom authentication"; 41 | 42 | // posix bucket ranger config need to pass through 43 | public static final String COSN_POSIX_BUCKET_RANGER_POLICY_URL = "fs.ofs.cosn.ranger.policy.url"; 44 | public static final String COSN_POSIX_BUCKET_RANGER_AUTH_JAR_MD5 = "fs.ofs.cosn.ranger.auth.jar.md5"; 45 | public static final String COSN_POSIX_BUCKCET_OFS_RANGER_FLAG = "fs.ofs.ranger.enable.flag"; 46 | 47 | public static final String COSN_POSIX_BUCKET_APPID_CONFIG = "fs.ofs.user.appid"; 48 | public static final String COSN_POSIX_BUCKET_REGION_CONFIG = "fs.ofs.bucket.region"; 49 | 50 | // ofs relate config 51 | public static final String COSN_POSIX_BUCKET_SSE_MODE = "fs.ofs.sse.mode"; 52 | public static final String COSN_POSIX_BUCKET_SSE_C_KEY = "fs.ofs.sse.c.key"; 53 | public static final String COSN_POSIX_BUCKET_SSE_KMS_KEYID = "fs.ofs.sse.kms.keyid"; 54 | public static final String COSN_POSIX_BUCKET_SSE_KMS_CONTEXT = "fs.ofs.sse.kms.context"; 55 | 56 | // sse relate 57 | public static final String COSN_SSE_MODE_COS = "SSE-COS"; 58 | public static final String COSN_SSE_MODE_C = "SSE-C"; 59 | public static final String COSN_SSE_MODE_KMS = "SSE-KMS"; 60 | 61 | // Prefix for all cosn properties: {@value}. 62 | public static final String FS_COSN_PREFIX = "fs.cosn."; 63 | // Prefix for cosn bucket-specific properties: {@value}. 
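    // Presumably used for per-bucket overrides in the style of S3A, e.g. a key
    // such as fs.cosn.bucket.examplebucket-125000000.upload.part.size taking
    // precedence over the global fs.cosn.upload.part.size; the resolution logic
    // lives outside this class. (The example key is illustrative.)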
64 | public static final String FS_COSN_BUCKET_PREFIX = "fs.cosn.bucket."; 65 | 66 | } 67 | -------------------------------------------------------------------------------- /src/main/java/org/apache/hadoop/fs/cosn/CosNOutOfMemoryException.java: -------------------------------------------------------------------------------- 1 | package org.apache.hadoop.fs.cosn; 2 | 3 | public class CosNOutOfMemoryException extends Exception { 4 | 5 | public CosNOutOfMemoryException(String message) { 6 | super(message); 7 | } 8 | 9 | } 10 | -------------------------------------------------------------------------------- /src/main/java/org/apache/hadoop/fs/cosn/CosNPartListing.java: -------------------------------------------------------------------------------- 1 | package org.apache.hadoop.fs.cosn; 2 | 3 | import com.qcloud.cos.model.PartSummary; 4 | import org.apache.hadoop.classification.InterfaceAudience; 5 | import org.apache.hadoop.classification.InterfaceStability; 6 | import org.apache.hadoop.fs.NativeFileSystemStore; 7 | 8 | import java.util.List; 9 | 10 | /** 11 | *
12 | * Holds information of one upload id listing for part summary 13 | * {@link NativeFileSystemStore}. 14 | * This includes the {@link PartSummary part summary} 15 | * (their names) contained in a single MPU. 16 | *
17 | * 18 | * @see NativeFileSystemStore#listParts(String, String) 19 | */ 20 | 21 | @InterfaceAudience.Private 22 | @InterfaceStability.Unstable 23 | public class CosNPartListing { 24 | private final List partSummaries; 25 | 26 | public CosNPartListing(List partSummaries) { 27 | this.partSummaries = partSummaries; 28 | } 29 | 30 | public List getPartSummaries() { 31 | return this.partSummaries; 32 | } 33 | } 34 | -------------------------------------------------------------------------------- /src/main/java/org/apache/hadoop/fs/cosn/CrcUtils.java: -------------------------------------------------------------------------------- 1 | package org.apache.hadoop.fs.cosn; 2 | 3 | import java.io.IOException; 4 | 5 | public final class CrcUtils { 6 | private CrcUtils() { 7 | } 8 | 9 | /** 10 | * int turn to bytes 11 | * @return 4-byte array holding the big-endian representation of 12 | * {@code value}. 13 | */ 14 | public static byte[] intToBytes(int value) { 15 | byte[] buf = new byte[4]; 16 | try { 17 | writeInt(buf, 0, value); 18 | } catch (IOException ioe) { 19 | // Since this should only be able to occur from code bugs within this 20 | // class rather than user input, we throw as a RuntimeException 21 | // rather than requiring this method to declare throwing IOException 22 | // for something the caller can't control. 23 | throw new RuntimeException(ioe); 24 | } 25 | return buf; 26 | } 27 | 28 | /** 29 | * Writes big-endian representation of {@code value} into {@code buf} 30 | * starting at {@code offset}. buf.length must be greater than or 31 | * equal to offset + 4. 32 | */ 33 | public static void writeInt(byte[] buf, int offset, int value) 34 | throws IOException { 35 | if (offset + 4 > buf.length) { 36 | throw new IOException(String.format( 37 | "writeInt out of bounds: buf.length=%d, offset=%d", 38 | buf.length, offset)); 39 | } 40 | buf[offset + 0] = (byte) ((value >>> 24) & 0xff); 41 | buf[offset + 1] = (byte) ((value >>> 16) & 0xff); 42 | buf[offset + 2] = (byte) ((value >>> 8) & 0xff); 43 | buf[offset + 3] = (byte) (value & 0xff); 44 | } 45 | 46 | } 47 | -------------------------------------------------------------------------------- /src/main/java/org/apache/hadoop/fs/cosn/CustomerDomainEndpointResolver.java: -------------------------------------------------------------------------------- 1 | package org.apache.hadoop.fs.cosn; 2 | 3 | import com.qcloud.cos.endpoint.EndpointBuilder; 4 | import org.slf4j.Logger; 5 | import org.slf4j.LoggerFactory; 6 | 7 | 8 | public class CustomerDomainEndpointResolver implements EndpointBuilder { 9 | private static final Logger log = LoggerFactory.getLogger(CustomerDomainEndpointResolver.class); 10 | 11 | private String endPoint; 12 | 13 | public CustomerDomainEndpointResolver() { 14 | super(); 15 | endPoint = null; 16 | } 17 | 18 | public CustomerDomainEndpointResolver(String endPoint) { 19 | super(); 20 | this.endPoint = endPoint; 21 | } 22 | 23 | public void setEndpoint(String endPoint) { 24 | this.endPoint = endPoint; 25 | } 26 | 27 | public String getEndpoint() { 28 | return this.endPoint; 29 | } 30 | 31 | @Override 32 | public String buildGeneralApiEndpoint(String bucketName) { 33 | if (this.endPoint != null) { 34 | return this.endPoint.replace("", bucketName); 35 | } else { 36 | log.error("Get customer domain is null"); 37 | } 38 | return null; 39 | } 40 | 41 | @Override 42 | public String buildGetServiceApiEndpoint() { 43 | return "service.cos.myqcloud.com"; 44 | } 45 | } 46 | 
-------------------------------------------------------------------------------- /src/main/java/org/apache/hadoop/fs/cosn/FileStatusProbeEnum.java: -------------------------------------------------------------------------------- 1 | package org.apache.hadoop.fs.cosn; 2 | 3 | import java.util.EnumSet; 4 | import java.util.Set; 5 | 6 | /** 7 | * Define the status detection action to be performed according to different situations. 8 | */ 9 | public enum FileStatusProbeEnum { 10 | // Head the actual path. 11 | HEAD, 12 | 13 | // Head the path + /. 14 | DIR_MARKER, 15 | 16 | // List under the path. 17 | LIST; 18 | 19 | /** 20 | * Look for files and directories. 21 | * 1. check if a file with the same name exists. 22 | * 2. check if a directory (actual directory) with the same name exists. 23 | * 3. check if a directory (marker directory, the commonPrefix of a object that its key name contains '/') with the same name exists. 24 | */ 25 | public static final Set ALL = EnumSet.of(HEAD, DIR_MARKER, LIST); 26 | 27 | /** 28 | * Only check if a file with the same name exists. 29 | */ 30 | public static final Set HEAD_ONLY = EnumSet.of(HEAD); 31 | 32 | /** 33 | * Only check if a directory with the same name exists. 34 | */ 35 | public static final Set LIST_ONLY = EnumSet.of(LIST); 36 | 37 | /** 38 | * Only check if a file / directory with the same name exists. 39 | */ 40 | public static final Set FILE_DIRECTORY = EnumSet.of(HEAD, DIR_MARKER); 41 | 42 | /** 43 | * Only check if a directory with the same name exists. 44 | */ 45 | public static final Set DIRECTORIES = LIST_ONLY; 46 | } 47 | 48 | -------------------------------------------------------------------------------- /src/main/java/org/apache/hadoop/fs/cosn/LocalRandomAccessMappedBufferPool.java: -------------------------------------------------------------------------------- 1 | package org.apache.hadoop.fs.cosn; 2 | 3 | import com.google.common.base.Preconditions; 4 | import org.apache.hadoop.conf.Configuration; 5 | import org.apache.hadoop.fs.CosNConfigKeys; 6 | import org.apache.hadoop.fs.cosn.buffer.CosNRandomAccessMappedBuffer; 7 | import org.apache.hadoop.fs.cosn.buffer.CosNRandomAccessMappedBufferFactory; 8 | import org.slf4j.Logger; 9 | import org.slf4j.LoggerFactory; 10 | 11 | import java.io.File; 12 | import java.io.IOException; 13 | import java.util.concurrent.atomic.AtomicBoolean; 14 | import java.util.concurrent.atomic.AtomicInteger; 15 | 16 | import static org.apache.hadoop.fs.CosNConfigKeys.DEFAULT_TMP_DIR; 17 | 18 | /** 19 | * Current temporarily used to support the seek write part cache. 20 | * Support the seek read/write by the RandomAccessFile. 21 | * It is used by the seek write. 22 | */ 23 | public final class LocalRandomAccessMappedBufferPool { 24 | private static final Logger LOG = LoggerFactory.getLogger(LocalRandomAccessMappedBufferPool.class); 25 | 26 | // Singleton. 
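    // Eagerly initialized; initialize() and close() below keep a reference count
    // so multiple CosN filesystem instances can share the one pool. Only the
    // final close() marks it uninitialized; cleanup of the cache directory is
    // governed by the factory's deleteOnExit flag.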
27 | private static final LocalRandomAccessMappedBufferPool instance = new LocalRandomAccessMappedBufferPool(); 28 | 29 | public static LocalRandomAccessMappedBufferPool getInstance() { 30 | return instance; 31 | } 32 | 33 | private final AtomicInteger referCount = new AtomicInteger(0); 34 | private final AtomicBoolean isInitialized = new AtomicBoolean(false); 35 | 36 | private File cacheDir; 37 | // disk remaining 38 | private long remainingSpace; 39 | private long highWaterMarkRemainingSpace; 40 | private long lowWaterMarkRemainingSpace; 41 | private CosNRandomAccessMappedBufferFactory mappedBufferFactory; 42 | 43 | public synchronized void initialize(Configuration configuration) throws IOException { 44 | Preconditions.checkNotNull(configuration, "configuration"); 45 | LOG.info("Initialize the local cache."); 46 | if (this.isInitialized.get()) { 47 | LOG.info("The local file cache [{}] has been initialized and referenced once." + 48 | "current reference count: [{}].", this, this.referCount); 49 | this.referCount.incrementAndGet(); 50 | return; 51 | } 52 | 53 | // 获取用户配置的 POSIX extension 特性目录 54 | String tmpDir = configuration.get(CosNConfigKeys.COSN_TMP_DIR, DEFAULT_TMP_DIR); 55 | String cacheDirPath = configuration.get(CosNConfigKeys.COSN_POSIX_EXTENSION_TMP_DIR, 56 | String.format("%s/posix_extension", 57 | tmpDir.endsWith("/") ? tmpDir.substring(0, tmpDir.length() - 1) : tmpDir)); 58 | // 正式构建 MappedFactory 用于后续创建本地缓存文件 59 | boolean deleteOnExit = configuration.getBoolean( 60 | CosNConfigKeys.COSN_MAPDISK_DELETEONEXIT_ENABLED, CosNConfigKeys.DEFAULT_COSN_MAPDISK_DELETEONEXIT_ENABLED); 61 | this.mappedBufferFactory = new CosNRandomAccessMappedBufferFactory(cacheDirPath, deleteOnExit); 62 | 63 | this.cacheDir = new File(cacheDirPath); 64 | // 检查当前目录空间是否足够 65 | long usableSpace = this.cacheDir.getParentFile().getUsableSpace(); 66 | long quotaSize = configuration.getLong(CosNConfigKeys.COSN_POSIX_EXTENSION_TMP_DIR_QUOTA, 67 | CosNConfigKeys.DEFAULT_COSN_POSIX_EXTENSION_TMP_DIR_QUOTA); 68 | Preconditions.checkArgument(quotaSize <= usableSpace, 69 | String.format("The quotaSize [%d] configured should be less than the usableSpace [%d].", quotaSize, usableSpace)); 70 | this.remainingSpace = quotaSize; 71 | 72 | // 检查高低水位是否配置正确 73 | float lowWaterMark = configuration.getFloat(CosNConfigKeys.COSN_POSIX_EXTENSION_TMP_DIR_WATERMARK_LOW, 74 | CosNConfigKeys.DEFAULT_COSN_POSIX_EXTENSION_TMP_DIR_WATERMARK_LOW); 75 | float highWaterMark = configuration.getFloat(CosNConfigKeys.COSN_POSIX_EXTENSION_TMP_DIR_WATERMARK_HIGH, 76 | CosNConfigKeys.DEFAULT_COSN_POSIX_EXTENSION_TMP_DIR_WATERMARK_HIGH); 77 | Preconditions.checkArgument(Math.floor(lowWaterMark * 100) > 0 && Math.floor(lowWaterMark * 100) < 100, 78 | String.format("The low watermark [%f] should be in (0,1).", lowWaterMark)); 79 | Preconditions.checkArgument(Math.floor(highWaterMark * 100) > 0 && Math.floor(highWaterMark * 100) < 100, 80 | String.format("The high watermark [%f] should be in (0,1).", highWaterMark)); 81 | Preconditions.checkArgument(Float.compare(lowWaterMark, highWaterMark) < 0, 82 | String.format("The low watermark [%f] should be less than the high watermark [%f].", lowWaterMark, highWaterMark)); 83 | // 粗略地计算高低水位的容量 84 | this.highWaterMarkRemainingSpace = (long) (quotaSize * (1 - highWaterMark)); 85 | this.lowWaterMarkRemainingSpace = (long) (quotaSize * (1 - lowWaterMark)); 86 | 87 | this.referCount.incrementAndGet(); 88 | this.isInitialized.set(true); 89 | } 90 | 91 | /** 92 | * create a local cache file specified size. 
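     * Callers should return the buffer via releaseFile(...) so its capacity is
     * added back to remainingSpace; the pool does not evict on its own and only
     * signals pressure through shouldRelease().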
93 | * 94 | * @param fileName the local cache file name. 95 | * @param size specified size. 96 | * @return random access file supporting the seekable write and read. 97 | */ 98 | public synchronized CosNRandomAccessMappedBuffer create(String fileName, int size) 99 | throws CacheSpaceFullException, IOException { 100 | Preconditions.checkArgument(size > 0, "The file size should be a positive integer."); 101 | if (size > this.remainingSpace) { 102 | throw new CacheSpaceFullException(String.format("The requested size [%d] exceeds the remaining space [%d].", 103 | size, this.remainingSpace)); 104 | } 105 | 106 | CosNRandomAccessMappedBuffer randomAccessMappedBuffer = 107 | this.mappedBufferFactory.create(fileName, size); 108 | randomAccessMappedBuffer.clear(); 109 | this.remainingSpace -= size; 110 | return randomAccessMappedBuffer; 111 | } 112 | 113 | public synchronized boolean shouldRelease() { 114 | // Need to release. 115 | return this.remainingSpace < this.highWaterMarkRemainingSpace; 116 | } 117 | 118 | public synchronized void releaseFile(CosNRandomAccessMappedBuffer localFile) { 119 | int returnSpace = localFile.capacity(); 120 | this.mappedBufferFactory.release(localFile); 121 | this.remainingSpace += returnSpace; 122 | } 123 | 124 | public synchronized void close() { 125 | LOG.info("Close the local file cache instance."); 126 | 127 | if (!this.isInitialized.get()) { 128 | LOG.warn("The local file cache has been closed. no changes would be exeucte."); 129 | return; 130 | } 131 | 132 | if (this.referCount.decrementAndGet() > 0) { 133 | return; 134 | } 135 | 136 | // POSIX extension 特性目录的清理由 MappedFactory 的标志决定 137 | this.isInitialized.set(false); 138 | } 139 | 140 | /** 141 | * Throw when the cache is full. 142 | */ 143 | public static class CacheSpaceFullException extends IOException { 144 | public CacheSpaceFullException() { 145 | } 146 | 147 | public CacheSpaceFullException(String message) { 148 | super(message); 149 | } 150 | 151 | public CacheSpaceFullException(String message, Throwable cause) { 152 | super(message, cause); 153 | } 154 | 155 | public CacheSpaceFullException(Throwable cause) { 156 | super(cause); 157 | } 158 | } 159 | } 160 | -------------------------------------------------------------------------------- /src/main/java/org/apache/hadoop/fs/cosn/MD5Utils.java: -------------------------------------------------------------------------------- 1 | package org.apache.hadoop.fs.cosn; 2 | 3 | import org.apache.hadoop.fs.cosn.buffer.CosNByteBuffer; 4 | 5 | import java.io.IOException; 6 | import java.io.InputStream; 7 | import java.nio.charset.Charset; 8 | import java.nio.charset.StandardCharsets; 9 | import java.security.DigestInputStream; 10 | import java.security.MessageDigest; 11 | import java.security.NoSuchAlgorithmException; 12 | 13 | public final class MD5Utils { 14 | public static byte[] calculate(CosNByteBuffer buffer) 15 | throws NoSuchAlgorithmException, IOException { 16 | if (null == buffer) { 17 | return null; 18 | } 19 | 20 | MessageDigest md5 = MessageDigest.getInstance("MD5"); 21 | InputStream inputStream = new DigestInputStream(new BufferInputStream(buffer), md5); 22 | byte[] chunk = new byte[(int) (4 * Unit.KB)]; 23 | while (inputStream.read(chunk) != -1) ; 24 | return md5.digest(); 25 | } 26 | 27 | public static byte[] calculate(String str) throws NoSuchAlgorithmException { 28 | if (null == str) { 29 | return null; 30 | } 31 | MessageDigest md5 = MessageDigest.getInstance("MD5"); 32 | return md5.digest(str.getBytes(StandardCharsets.UTF_8)); 33 | } 34 | } 
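A minimal usage sketch for MD5Utils#calculate(String), hex-encoding the digest with the JDK only; the key string is illustrative.

import java.security.NoSuchAlgorithmException;
import org.apache.hadoop.fs.cosn.MD5Utils;

public class MD5UtilsExample {
    public static void main(String[] args) throws NoSuchAlgorithmException {
        byte[] digest = MD5Utils.calculate("cosn://examplebucket/path/to/object");
        StringBuilder hex = new StringBuilder();
        for (byte b : digest) {
            hex.append(String.format("%02x", b)); // two lowercase hex chars per byte
        }
        System.out.println("md5=" + hex); // 32-character digest
    }
}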
35 | -------------------------------------------------------------------------------- /src/main/java/org/apache/hadoop/fs/cosn/OperationCancellingStatusProvider.java: -------------------------------------------------------------------------------- 1 | package org.apache.hadoop.fs.cosn; 2 | 3 | public interface OperationCancellingStatusProvider { 4 | boolean isCancelled(); 5 | } 6 | -------------------------------------------------------------------------------- /src/main/java/org/apache/hadoop/fs/cosn/ReadBufferHolder.java: -------------------------------------------------------------------------------- 1 | package org.apache.hadoop.fs.cosn; 2 | 3 | 4 | public final class ReadBufferHolder { 5 | 6 | private static MemoryAllocator memoryAllocator; 7 | 8 | public static synchronized void initialize(long capacity) { 9 | if (memoryAllocator != null) { 10 | return; 11 | } 12 | if (capacity == 0) { 13 | capacity = (long) (Runtime.getRuntime().maxMemory() * 0.8); 14 | } 15 | memoryAllocator = MemoryAllocator.Factory.create(capacity); 16 | } 17 | 18 | 19 | public static MemoryAllocator getBufferAllocator() { 20 | return memoryAllocator; 21 | } 22 | 23 | public synchronized static void clear() { 24 | memoryAllocator = null; 25 | } 26 | 27 | 28 | } 29 | -------------------------------------------------------------------------------- /src/main/java/org/apache/hadoop/fs/cosn/ResettableFileInputStream.java: -------------------------------------------------------------------------------- 1 | package org.apache.hadoop.fs.cosn; 2 | 3 | import java.io.BufferedInputStream; 4 | import java.io.File; 5 | import java.io.FileInputStream; 6 | import java.io.IOException; 7 | import java.io.InputStream; 8 | 9 | public class ResettableFileInputStream 10 | extends InputStream { 11 | private static final int DEFAULT_BUFFER_SIZE = 1024; 12 | 13 | private final String fileName; 14 | private int bufferSize; 15 | private InputStream inputStream; 16 | private long position; 17 | private long mark; 18 | private boolean isMarkSet; 19 | 20 | public ResettableFileInputStream(final File file) 21 | throws IOException { 22 | this(file.getCanonicalPath()); 23 | } 24 | 25 | public ResettableFileInputStream(final String filename) 26 | throws IOException { 27 | this(filename, DEFAULT_BUFFER_SIZE); 28 | } 29 | 30 | public ResettableFileInputStream(final String filename, final int bufferSize) 31 | throws IOException { 32 | this.bufferSize = bufferSize; 33 | fileName = filename; 34 | position = 0; 35 | 36 | inputStream = newStream(); 37 | } 38 | 39 | public void mark(final int readLimit) { 40 | isMarkSet = true; 41 | mark = position; 42 | inputStream.mark(readLimit); 43 | } 44 | 45 | public boolean markSupported() { 46 | return true; 47 | } 48 | 49 | public void reset() 50 | throws IOException { 51 | if (!isMarkSet) { 52 | throw new IOException("Unmarked Stream"); 53 | } 54 | try { 55 | inputStream.reset(); 56 | } catch (final IOException ioe) { 57 | try { 58 | inputStream.close(); 59 | inputStream = newStream(); 60 | inputStream.skip(mark); 61 | position = mark; 62 | } catch (final Exception e) { 63 | throw new IOException("Cannot reset current Stream: " + e.getMessage()); 64 | } 65 | } 66 | } 67 | 68 | protected InputStream newStream() 69 | throws IOException { 70 | return new BufferedInputStream(new FileInputStream(fileName), bufferSize); 71 | } 72 | 73 | public int available() 74 | throws IOException { 75 | return inputStream.available(); 76 | } 77 | 78 | public void close() throws IOException { 79 | inputStream.close(); 80 | } 81 | 
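    // Note: the read methods below add the stream's raw return value to position;
    // read(byte[], int, int) returns -1 at EOF, which drifts position backwards,
    // so callers that mark() near EOF should take care.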
82 | public int read() throws IOException { 83 | position++; 84 | return inputStream.read(); 85 | } 86 | 87 | public int read(final byte[] bytes, final int offset, final int length) 88 | throws IOException { 89 | final int count = inputStream.read(bytes, offset, length); 90 | position += count; 91 | return count; 92 | } 93 | 94 | public long skip(final long count) 95 | throws IOException { 96 | position += count; 97 | return inputStream.skip(count); 98 | } 99 | } -------------------------------------------------------------------------------- /src/main/java/org/apache/hadoop/fs/cosn/Unit.java: -------------------------------------------------------------------------------- 1 | package org.apache.hadoop.fs.cosn; 2 | 3 | public final class Unit { 4 | private Unit() { 5 | } 6 | 7 | public static final long KB = 1024; 8 | public static final long MB = 1024 * KB; 9 | public static final long GB = 1024 * MB; 10 | public static final long TB = 1024 * GB; 11 | public static final long PB = 1024 * TB; 12 | } 13 | -------------------------------------------------------------------------------- /src/main/java/org/apache/hadoop/fs/cosn/buffer/CosNBufferFactory.java: -------------------------------------------------------------------------------- 1 | package org.apache.hadoop.fs.cosn.buffer; 2 | 3 | public interface CosNBufferFactory { 4 | CosNByteBuffer create(int size); 5 | 6 | void release(CosNByteBuffer cosNByteBuffer); 7 | } 8 | -------------------------------------------------------------------------------- /src/main/java/org/apache/hadoop/fs/cosn/buffer/CosNBufferType.java: -------------------------------------------------------------------------------- 1 | package org.apache.hadoop.fs.cosn.buffer; 2 | 3 | /** 4 | * The type of upload buffer. 5 | */ 6 | public enum CosNBufferType { 7 | NON_DIRECT_MEMORY("non_direct_memory"), 8 | DIRECT_MEMORY("direct_memory"), 9 | MAPPED_DISK("mapped_disk"); 10 | 11 | private final String name; 12 | 13 | CosNBufferType(String str) { 14 | this.name = str; 15 | } 16 | 17 | public String getName() { 18 | return name; 19 | } 20 | 21 | public static CosNBufferType typeFactory(String typeName) { 22 | if (typeName.compareToIgnoreCase(NON_DIRECT_MEMORY.getName()) == 0) { 23 | return NON_DIRECT_MEMORY; 24 | } 25 | if (typeName.compareToIgnoreCase(DIRECT_MEMORY.getName()) == 0) { 26 | return DIRECT_MEMORY; 27 | } 28 | if (typeName.compareToIgnoreCase(MAPPED_DISK.getName()) == 0) { 29 | return MAPPED_DISK; 30 | } 31 | 32 | return null; 33 | } 34 | } 35 | -------------------------------------------------------------------------------- /src/main/java/org/apache/hadoop/fs/cosn/buffer/CosNByteBuffer.java: -------------------------------------------------------------------------------- 1 | package org.apache.hadoop.fs.cosn.buffer; 2 | 3 | import org.slf4j.Logger; 4 | import org.slf4j.LoggerFactory; 5 | 6 | import java.io.Closeable; 7 | import java.io.IOException; 8 | import java.nio.ByteBuffer; 9 | 10 | /** 11 | * The base class for all CosN byte buffers. 
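 * Unlike a raw ByteBuffer, it tracks nextWritePosition so that flipRead() and
 * flipWrite() can alternate between draining and appending without losing the
 * write cursor.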
12 | */ 13 | public abstract class CosNByteBuffer implements Closeable { 14 | private static final Logger LOG = 15 | LoggerFactory.getLogger(CosNByteBuffer.class); 16 | 17 | protected ByteBuffer byteBuffer; 18 | protected int nextWritePosition; 19 | 20 | public CosNByteBuffer(ByteBuffer byteBuffer) { 21 | this.byteBuffer = byteBuffer; 22 | this.nextWritePosition = this.byteBuffer.position(); 23 | } 24 | 25 | public CosNByteBuffer put(byte b) throws IOException { 26 | if (!this.byteBuffer.hasRemaining()) { 27 | throw new IOException("There is no remaining in the buffer."); 28 | } 29 | this.byteBuffer.put(b); 30 | this.nextWritePosition = this.byteBuffer.position(); 31 | return this; 32 | } 33 | 34 | public CosNByteBuffer put(byte[] src, int offset, int length) throws IOException { 35 | // 检查缓冲区是否还可以继续写 36 | if (this.byteBuffer.remaining() < length) { 37 | throw new IOException( 38 | String.format("The buffer remaining[%d] is less than the write length[%d].", 39 | this.byteBuffer.remaining(), length)); 40 | } 41 | 42 | this.byteBuffer.put(src, offset, length); 43 | this.nextWritePosition = this.byteBuffer.position(); 44 | return this; 45 | } 46 | 47 | public byte get() { 48 | return this.byteBuffer.get(); 49 | } 50 | 51 | public CosNByteBuffer get(byte[] dst, int offset, int length) { 52 | this.byteBuffer.get(dst, offset, length); 53 | return this; 54 | } 55 | 56 | public int capacity() { 57 | return this.byteBuffer.capacity(); 58 | } 59 | 60 | public int position() { 61 | return this.byteBuffer.position(); 62 | } 63 | 64 | public CosNByteBuffer position(int newPosition) { 65 | this.byteBuffer.position(newPosition); 66 | return this; 67 | } 68 | 69 | public int limit() { 70 | return this.byteBuffer.limit(); 71 | } 72 | 73 | public CosNByteBuffer limit(int newLimit) { 74 | this.byteBuffer.limit(newLimit); 75 | return this; 76 | } 77 | 78 | public CosNByteBuffer mark() { 79 | this.byteBuffer.mark(); 80 | return this; 81 | } 82 | 83 | public CosNByteBuffer reset() { 84 | this.byteBuffer.reset(); 85 | return this; 86 | } 87 | 88 | public CosNByteBuffer clear() { 89 | this.byteBuffer.clear(); 90 | this.nextWritePosition = 0; 91 | return this; 92 | } 93 | 94 | public CosNByteBuffer flip() { 95 | this.byteBuffer.flip(); 96 | return this; 97 | } 98 | 99 | public CosNByteBuffer rewind() { 100 | this.byteBuffer.rewind(); 101 | return this; 102 | } 103 | 104 | public CosNByteBuffer flipRead() { 105 | this.limit(this.nextWritePosition); 106 | this.position(0); 107 | return this; 108 | } 109 | 110 | public CosNByteBuffer flipWrite() { 111 | this.position(this.nextWritePosition); 112 | this.limit(this.byteBuffer.capacity()); 113 | return this; 114 | } 115 | 116 | public int remaining() { 117 | return this.byteBuffer.remaining(); 118 | } 119 | 120 | public boolean hasRemaining() { 121 | return this.byteBuffer.hasRemaining(); 122 | } 123 | 124 | protected abstract boolean isDirect(); 125 | 126 | protected abstract boolean isMapped(); 127 | 128 | @Override 129 | public void close() throws IOException { 130 | if (null == this.byteBuffer) { 131 | return; 132 | } 133 | this.byteBuffer.clear(); 134 | 135 | this.byteBuffer = null; 136 | this.nextWritePosition = -1; 137 | } 138 | } 139 | -------------------------------------------------------------------------------- /src/main/java/org/apache/hadoop/fs/cosn/buffer/CosNDirectBuffer.java: -------------------------------------------------------------------------------- 1 | package org.apache.hadoop.fs.cosn.buffer; 2 | 3 | import java.io.IOException; 4 | import 
java.nio.ByteBuffer; 5 | 6 | /** 7 | * The direct buffer. 8 | */ 9 | class CosNDirectBuffer extends CosNByteBuffer { 10 | public CosNDirectBuffer(ByteBuffer byteBuffer) { 11 | super(byteBuffer); 12 | } 13 | 14 | ByteBuffer getByteBuffer() { 15 | return super.byteBuffer; 16 | } 17 | 18 | @Override 19 | public boolean isDirect() {return true;} 20 | 21 | @Override 22 | protected boolean isMapped() { 23 | return false; 24 | } 25 | 26 | @Override 27 | public void close() throws IOException { 28 | super.close(); 29 | } 30 | } 31 | -------------------------------------------------------------------------------- /src/main/java/org/apache/hadoop/fs/cosn/buffer/CosNDirectBufferFactory.java: -------------------------------------------------------------------------------- 1 | package org.apache.hadoop.fs.cosn.buffer; 2 | 3 | import java.io.IOException; 4 | import java.nio.ByteBuffer; 5 | 6 | import org.apache.hadoop.util.DirectBufferPool; 7 | import org.slf4j.Logger; 8 | import org.slf4j.LoggerFactory; 9 | 10 | public class CosNDirectBufferFactory implements CosNBufferFactory { 11 | private static final Logger LOG = 12 | LoggerFactory.getLogger(CosNDirectBufferFactory.class); 13 | 14 | private final DirectBufferPool directBufferPool = new DirectBufferPool(); 15 | 16 | @Override 17 | public CosNByteBuffer create(int size) { 18 | ByteBuffer byteBuffer = this.directBufferPool.getBuffer(size); 19 | return new CosNDirectBuffer(byteBuffer); 20 | } 21 | 22 | @Override 23 | public void release(CosNByteBuffer cosNByteBuffer) { 24 | if (null == cosNByteBuffer) { 25 | LOG.debug("The buffer returned is null. Ignore it."); 26 | return; 27 | } 28 | 29 | if (null == ((CosNDirectBuffer)cosNByteBuffer).getByteBuffer()) { 30 | LOG.warn("The byte buffer returned is null. can not be released."); 31 | return; 32 | } 33 | this.directBufferPool.returnBuffer(((CosNDirectBuffer)cosNByteBuffer).getByteBuffer()); 34 | 35 | try { 36 | cosNByteBuffer.close(); 37 | } catch (IOException e) { 38 | LOG.error("Release the direct byte buffer failed.", e); 39 | } 40 | } 41 | } 42 | -------------------------------------------------------------------------------- /src/main/java/org/apache/hadoop/fs/cosn/buffer/CosNMappedBuffer.java: -------------------------------------------------------------------------------- 1 | package org.apache.hadoop.fs.cosn.buffer; 2 | 3 | import org.slf4j.Logger; 4 | import org.slf4j.LoggerFactory; 5 | import sun.nio.ch.FileChannelImpl; 6 | 7 | import java.io.File; 8 | import java.io.IOException; 9 | import java.io.RandomAccessFile; 10 | import java.lang.reflect.InvocationTargetException; 11 | import java.lang.reflect.Method; 12 | import java.nio.ByteBuffer; 13 | import java.nio.MappedByteBuffer; 14 | 15 | /** 16 | * The buffer based on the memory file mapped. 
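 * close() unmaps the MappedByteBuffer via reflection on
 * sun.nio.ch.FileChannelImpl#unmap so the backing temp file can be deleted
 * promptly; this depends on JDK-internal API and may need extra JVM flags on
 * Java 9+.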
17 | */ 18 | class CosNMappedBuffer extends CosNByteBuffer { 19 | private static final Logger LOG = 20 | LoggerFactory.getLogger(CosNMappedBuffer.class); 21 | 22 | private File file; 23 | private RandomAccessFile randomAccessFile; 24 | 25 | public CosNMappedBuffer(ByteBuffer byteBuffer, 26 | RandomAccessFile randomAccessFile, File file) { 27 | super(byteBuffer); 28 | this.randomAccessFile = randomAccessFile; 29 | this.file = file; 30 | } 31 | 32 | @Override 33 | protected boolean isDirect() { 34 | return true; 35 | } 36 | 37 | @Override 38 | protected boolean isMapped() { 39 | return true; 40 | } 41 | 42 | @Override 43 | public void close() throws IOException { 44 | IOException e = null; 45 | 46 | // unmap 47 | try { 48 | Method method = FileChannelImpl.class.getDeclaredMethod("unmap", MappedByteBuffer.class); 49 | method.setAccessible(true); 50 | method.invoke(FileChannelImpl.class, (MappedByteBuffer)super.byteBuffer); 51 | } catch (NoSuchMethodException noSuchMethodException) { 52 | LOG.error("Failed to get the reflect unmap method.", noSuchMethodException); 53 | e = new IOException("Failed to release the mapped buffer.", noSuchMethodException); 54 | } catch (InvocationTargetException invocationTargetException) { 55 | LOG.error("Failed to invoke the reflect unmap method.", invocationTargetException); 56 | throw new IOException("Failed to release the mapped buffer.", invocationTargetException); 57 | } catch (IllegalAccessException illegalAccessException) { 58 | LOG.error("Failed to access the reflect unmap method.", illegalAccessException); 59 | throw new IOException("Failed to release the mapped buffer.", illegalAccessException); 60 | } 61 | 62 | // Memory must be unmapped successfully before files can be deleted. 63 | // Close the random access file. 64 | try { 65 | if (null != this.randomAccessFile) { 66 | this.randomAccessFile.close(); 67 | } 68 | } catch (IOException randomAccessFileClosedException) { 69 | LOG.error("Failed to close the random access file.", randomAccessFileClosedException); 70 | e = randomAccessFileClosedException; 71 | } 72 | 73 | // Delete the disk file to release the resource. 74 | if (null != this.file && this.file.exists()) { 75 | if (!this.file.delete()) { 76 | LOG.warn("Failed to clean up the temporary file: [{}].", 77 | this.file); 78 | } 79 | } 80 | 81 | // Call super close to release the resource of the base class. 82 | try { 83 | super.close(); 84 | } catch (IOException superClosedException) { 85 | // XXX exception chain of responsibility 86 | e = superClosedException; 87 | } 88 | 89 | // Finally, throw the error that occurred in the process. 
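        // Note that only the most recent failure is rethrown; earlier ones are
        // logged and then overwritten (see the XXX above about exception chaining).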
90 | if (null != e) { 91 | throw e; 92 | } 93 | } 94 | } 95 | -------------------------------------------------------------------------------- /src/main/java/org/apache/hadoop/fs/cosn/buffer/CosNMappedBufferFactory.java: -------------------------------------------------------------------------------- 1 | package org.apache.hadoop.fs.cosn.buffer; 2 | 3 | import org.apache.hadoop.fs.CosNUtils; 4 | import org.apache.hadoop.fs.cosn.Constants; 5 | import org.slf4j.Logger; 6 | import org.slf4j.LoggerFactory; 7 | 8 | import java.io.File; 9 | import java.io.IOException; 10 | import java.io.RandomAccessFile; 11 | import java.nio.MappedByteBuffer; 12 | import java.nio.channels.FileChannel; 13 | import java.util.ArrayList; 14 | import java.util.List; 15 | import java.util.concurrent.atomic.AtomicInteger; 16 | 17 | 18 | public class CosNMappedBufferFactory implements CosNBufferFactory { 19 | private static final Logger LOG = 20 | LoggerFactory.getLogger(CosNMappedBufferFactory.class); 21 | 22 | private final List tmpDirs = new ArrayList<>(); 23 | private final boolean deleteOnExit; 24 | 25 | public CosNMappedBufferFactory(String[] tmpDirList, boolean deleteOnExit) throws IOException { 26 | for (String tmpDir : tmpDirList) { 27 | File createDir = CosNMappedBufferFactory.createDir(tmpDir); 28 | tmpDirs.add(createDir); 29 | } 30 | this.deleteOnExit = deleteOnExit; 31 | } 32 | 33 | private static File createDir(String tmpDir) throws IOException { 34 | File file = new File(tmpDir); 35 | if (!file.exists()) { 36 | LOG.debug("Buffer dir: [{}] does not exist. Create it first.", 37 | file); 38 | if (file.mkdirs()) { 39 | if (!file.setWritable(true, false) || !file.setReadable(true, false) 40 | || !file.setExecutable(true, false)) { 41 | LOG.warn("Set the buffer dir: [{}]'s permission [writable," 42 | + "readable, executable] failed.", file); 43 | } 44 | LOG.debug("Buffer dir: [{}] is created successfully.", 45 | file.getAbsolutePath()); 46 | } else { 47 | // Once again, check if it has been created successfully. 48 | // Prevent problems created by multiple processes at the same 49 | // time. 50 | if (!file.exists()) { 51 | throw new IOException("buffer dir:" + file.getAbsolutePath() 52 | + " is created unsuccessfully"); 53 | } 54 | } 55 | } else { 56 | LOG.debug("buffer dir: {} already exists.", 57 | file.getAbsolutePath()); 58 | // Check whether you have read and write permissions for the directory during initialization 59 | if (!CosNUtils.checkDirectoryRWPermissions(tmpDir)) { 60 | String exceptionMsg = String.format("The tmp dir does not have read or write permissions." + 61 | "dir: %s", tmpDir); 62 | throw new IllegalArgumentException(exceptionMsg); 63 | } 64 | } 65 | 66 | return file; 67 | } 68 | 69 | @Override 70 | public CosNByteBuffer create(int size) { 71 | return this.create(Constants.BLOCK_TMP_FILE_PREFIX, 72 | Constants.BLOCK_TMP_FILE_SUFFIX, size); 73 | } 74 | 75 | private final AtomicInteger currentIndex = new AtomicInteger(); 76 | 77 | private File getTmpDir() { 78 | return tmpDirs.get(Math.abs(currentIndex.getAndIncrement() % tmpDirs.size())); 79 | } 80 | 81 | public CosNByteBuffer create(String prefix, String suffix, int size) { 82 | File tmpDir = getTmpDir(); 83 | if (null == tmpDir) { 84 | LOG.error("The tmp dir is null. no mapped buffer will be created."); 85 | return null; 86 | } 87 | 88 | if (!tmpDir.exists()) { 89 | LOG.warn("The tmp dir does not exist."); 90 | // try to create the tmp directory. 
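        // The buffer directory can disappear at runtime (e.g. tmp cleaners), so
        // create() re-creates it on demand instead of failing outright.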
91 | try { 92 | CosNMappedBufferFactory.createDir(tmpDir.getAbsolutePath()); 93 | } catch (IOException e) { 94 | LOG.error("Try to create the tmp dir [{}] failed.", tmpDir.getAbsolutePath(), e); 95 | return null; 96 | } 97 | } 98 | 99 | try { 100 | File tmpFile = File.createTempFile( 101 | Constants.BLOCK_TMP_FILE_PREFIX, 102 | Constants.BLOCK_TMP_FILE_SUFFIX, 103 | tmpDir 104 | ); 105 | 106 | if (this.deleteOnExit) { 107 | tmpFile.deleteOnExit(); 108 | } 109 | RandomAccessFile randomAccessFile = new RandomAccessFile(tmpFile, 110 | "rw"); 111 | randomAccessFile.setLength(size); 112 | MappedByteBuffer buf = 113 | randomAccessFile.getChannel().map(FileChannel.MapMode.READ_WRITE, 0, size); 114 | return (null != buf) ? new CosNMappedBuffer(buf, randomAccessFile, tmpFile) : null; 115 | } catch (IOException e) { 116 | LOG.error("Create tmp file failed. Tmp dir: {}", tmpDir, e); 117 | return null; 118 | } 119 | } 120 | 121 | @Override 122 | public void release(CosNByteBuffer cosNByteBuffer) { 123 | if (null == cosNByteBuffer) { 124 | LOG.debug("The buffer returned is null. Ignore it."); 125 | return; 126 | } 127 | 128 | try { 129 | cosNByteBuffer.close(); 130 | } catch (IOException e) { 131 | LOG.error("Release the mapped byte buffer failed.", e); 132 | } 133 | } 134 | } 135 | -------------------------------------------------------------------------------- /src/main/java/org/apache/hadoop/fs/cosn/buffer/CosNNonDirectBuffer.java: -------------------------------------------------------------------------------- 1 | package org.apache.hadoop.fs.cosn.buffer; 2 | 3 | import java.nio.ByteBuffer; 4 | 5 | /** 6 | * The direct buffer based on the JVM heap memory. 7 | */ 8 | class CosNNonDirectBuffer extends CosNByteBuffer { 9 | 10 | public CosNNonDirectBuffer(ByteBuffer byteBuffer) { 11 | super(byteBuffer); 12 | } 13 | 14 | @Override 15 | protected boolean isDirect() { 16 | return false; 17 | } 18 | 19 | @Override 20 | protected boolean isMapped() { 21 | return false; 22 | } 23 | } 24 | -------------------------------------------------------------------------------- /src/main/java/org/apache/hadoop/fs/cosn/buffer/CosNNonDirectBufferFactory.java: -------------------------------------------------------------------------------- 1 | package org.apache.hadoop.fs.cosn.buffer; 2 | 3 | import java.io.IOException; 4 | import java.nio.ByteBuffer; 5 | 6 | import org.slf4j.Logger; 7 | import org.slf4j.LoggerFactory; 8 | 9 | public class CosNNonDirectBufferFactory implements CosNBufferFactory { 10 | private static final Logger LOG = 11 | LoggerFactory.getLogger(CosNNonDirectBufferFactory.class); 12 | 13 | @Override 14 | public CosNByteBuffer create(int size) { 15 | ByteBuffer byteBuffer = ByteBuffer.allocate(size); 16 | return new CosNNonDirectBuffer(byteBuffer); 17 | } 18 | 19 | @Override 20 | public void release(CosNByteBuffer cosNByteBuffer) { 21 | if (null == cosNByteBuffer) { 22 | LOG.debug("The buffer returned is null. 
Ignore it."); 23 | return; 24 | } 25 | 26 | try { 27 | cosNByteBuffer.close(); 28 | } catch (IOException e) { 29 | LOG.error("Release the non direct buffer failed.", e); 30 | } 31 | } 32 | } 33 | -------------------------------------------------------------------------------- /src/main/java/org/apache/hadoop/fs/cosn/buffer/CosNRandomAccessMappedBuffer.java: -------------------------------------------------------------------------------- 1 | package org.apache.hadoop.fs.cosn.buffer; 2 | 3 | import org.apache.hadoop.fs.cosn.buffer.CosNByteBuffer; 4 | import org.slf4j.Logger; 5 | import org.slf4j.LoggerFactory; 6 | import sun.nio.ch.FileChannelImpl; 7 | 8 | import java.io.File; 9 | import java.io.IOException; 10 | import java.io.RandomAccessFile; 11 | import java.lang.reflect.InvocationTargetException; 12 | import java.lang.reflect.Method; 13 | import java.nio.ByteBuffer; 14 | import java.nio.MappedByteBuffer; 15 | 16 | /** 17 | * 专用于随机写的本地缓存 18 | */ 19 | public class CosNRandomAccessMappedBuffer extends CosNByteBuffer { 20 | private static final Logger LOG = LoggerFactory.getLogger(CosNRandomAccessMappedBuffer.class); 21 | 22 | // 有效的可读长度 23 | private final RandomAccessFile randomAccessFile; 24 | private final File file; 25 | 26 | private int maxReadablePosition; 27 | 28 | public CosNRandomAccessMappedBuffer(ByteBuffer byteBuffer, 29 | RandomAccessFile randomAccessFile, File file) { 30 | super(byteBuffer); 31 | this.randomAccessFile = randomAccessFile; 32 | this.file = file; 33 | this.maxReadablePosition = 0; 34 | } 35 | 36 | @Override 37 | public CosNByteBuffer put(byte b) throws IOException { 38 | super.put(b); 39 | this.maxReadablePosition = (Math.max(super.nextWritePosition, this.maxReadablePosition)); 40 | return this; 41 | } 42 | 43 | @Override 44 | public CosNByteBuffer put(byte[] src, int offset, int length) throws IOException { 45 | super.put(src, offset, length); 46 | this.maxReadablePosition = (Math.max(super.nextWritePosition, this.maxReadablePosition)); 47 | return this; 48 | } 49 | 50 | @Override 51 | public CosNByteBuffer flipRead() { 52 | super.limit(this.maxReadablePosition); 53 | super.position(0); 54 | return this; 55 | } 56 | 57 | public int getMaxReadablePosition() { 58 | return this.maxReadablePosition; 59 | } 60 | 61 | @Override 62 | public CosNByteBuffer clear() { 63 | super.clear(); 64 | this.maxReadablePosition = 0; 65 | return this; 66 | } 67 | 68 | @Override 69 | public void close() throws IOException { 70 | IOException ioException = null; 71 | 72 | try { 73 | Method method = FileChannelImpl.class.getDeclaredMethod("unmap", MappedByteBuffer.class); 74 | method.setAccessible(true); 75 | method.invoke(FileChannelImpl.class, (MappedByteBuffer) super.byteBuffer); 76 | } catch (InvocationTargetException invocationTargetException) { 77 | LOG.error("Failed to invoke the reflect unmap method.", invocationTargetException); 78 | throw new IOException("Failed to release the mapped buffer.", invocationTargetException); 79 | } catch (NoSuchMethodException noSuchMethodException) { 80 | LOG.error("Failed to get the reflect unmap method.", noSuchMethodException); 81 | ioException = new IOException("Failed to release the mapped buffer.", noSuchMethodException); 82 | } catch (IllegalAccessException illegalAccessException) { 83 | LOG.error("Failed to access the reflect unmap method.", illegalAccessException); 84 | throw new IOException("Failed to release the mapped buffer.", illegalAccessException); 85 | } 86 | 87 | try { 88 | if (null != this.randomAccessFile) { 89 | 
this.randomAccessFile.close(); 90 | } 91 | } catch (IOException randomAccessFileClosedException) { 92 | LOG.error("Failed to close the random access file.", randomAccessFileClosedException); 93 | ioException = randomAccessFileClosedException; 94 | } 95 | if (null != this.file && this.file.exists()) { 96 | if (!this.file.delete()) { 97 | LOG.warn("Failed to clean up the temporary file: [{}].", 98 | this.file); 99 | } 100 | } 101 | 102 | try { 103 | super.close(); 104 | } catch (IOException superClosedException) { 105 | // XXX exception chain of responsibility 106 | ioException = superClosedException; 107 | } 108 | 109 | if (null != ioException) { 110 | throw ioException; 111 | } 112 | } 113 | 114 | @Override 115 | protected boolean isDirect() { 116 | return true; 117 | } 118 | 119 | @Override 120 | protected boolean isMapped() { 121 | return true; 122 | } 123 | } 124 | -------------------------------------------------------------------------------- /src/main/java/org/apache/hadoop/fs/cosn/buffer/CosNRandomAccessMappedBufferFactory.java: -------------------------------------------------------------------------------- 1 | package org.apache.hadoop.fs.cosn.buffer; 2 | 3 | import com.google.common.base.Preconditions; 4 | import org.slf4j.Logger; 5 | import org.slf4j.LoggerFactory; 6 | 7 | import java.io.File; 8 | import java.io.IOException; 9 | import java.io.RandomAccessFile; 10 | import java.nio.MappedByteBuffer; 11 | import java.nio.channels.FileChannel; 12 | 13 | /** 14 | * 专用于创建随机写的 MappedBuffer 的工厂方法。 15 | */ 16 | public class CosNRandomAccessMappedBufferFactory implements CosNBufferFactory { 17 | private static final Logger LOG = LoggerFactory.getLogger(CosNRandomAccessMappedBufferFactory.class); 18 | 19 | private final File cacheDir; 20 | private final boolean deleteOnExit; 21 | 22 | public CosNRandomAccessMappedBufferFactory(String cacheDir) throws IOException { 23 | this(cacheDir, false); 24 | } 25 | 26 | public CosNRandomAccessMappedBufferFactory(String cacheDir, boolean deleteOnExit) throws IOException { 27 | this.cacheDir = CosNRandomAccessMappedBufferFactory.createDir(cacheDir); 28 | this.deleteOnExit = deleteOnExit; 29 | } 30 | 31 | public CosNRandomAccessMappedBuffer create(String randomAccessFileName, int size) throws IOException { 32 | Preconditions.checkNotNull(randomAccessFileName, "randomAccessFileName"); 33 | Preconditions.checkArgument(size > 0, "The size should be a positive integer."); 34 | 35 | if (null == this.cacheDir) { 36 | throw new IOException("The cache directory is not initialized. " + 37 | "No random access file can be created."); 38 | } 39 | 40 | if (!this.cacheDir.exists()) { 41 | LOG.warn("The cache directory dose not exist. Try to create it."); 42 | CosNRandomAccessMappedBufferFactory.createDir(this.cacheDir.getAbsolutePath()); 43 | } 44 | 45 | // 创建指定大小的 RandomAccessFile 46 | File tmpFile = File.createTempFile(randomAccessFileName, ".cache", this.cacheDir); 47 | if (this.deleteOnExit) { 48 | tmpFile.deleteOnExit(); 49 | } 50 | RandomAccessFile randomAccessFile = new RandomAccessFile(tmpFile, "rw"); 51 | randomAccessFile.setLength(size); 52 | MappedByteBuffer buf = randomAccessFile.getChannel().map(FileChannel.MapMode.READ_WRITE, 0, size); 53 | return (null != buf) ? new CosNRandomAccessMappedBuffer(buf, randomAccessFile, tmpFile) : null; 54 | } 55 | 56 | @Override 57 | public CosNByteBuffer create(int size) { 58 | throw new UnsupportedOperationException("the method is not available. 
" + 59 | "a filename should be specified."); 60 | } 61 | 62 | @Override 63 | public void release(CosNByteBuffer cosNByteBuffer) { 64 | if (null == cosNByteBuffer) { 65 | LOG.warn("The Null buffer can not be released. Ignore it."); 66 | return; 67 | } 68 | 69 | try { 70 | cosNByteBuffer.close(); 71 | } catch (IOException e) { 72 | LOG.error("Failed to release the buffer [{}].", cosNByteBuffer); 73 | } 74 | } 75 | 76 | private static File createDir(String cacheDir) throws IOException { 77 | File cacheDirFile = new File(cacheDir); 78 | if (!cacheDirFile.exists()) { 79 | LOG.info("The cache directory [{}] does not exist. Create it first.", cacheDirFile); 80 | if (cacheDirFile.mkdirs()) { 81 | if (!cacheDirFile.setWritable(true, false) 82 | || !cacheDirFile.setReadable(true, false) 83 | || !cacheDirFile.setExecutable(true, false)) { 84 | LOG.warn("Set the buffer dir: [{}]'s permission [writable," 85 | + "readable, executable] failed.", cacheDirFile); 86 | } 87 | LOG.info("Create the cache directory [{}] successfully.", cacheDirFile); 88 | } else { 89 | if (!cacheDirFile.exists()) { 90 | throw new IOException(String.format("Failed to create the cache directory [%s].", 91 | cacheDirFile)); 92 | } 93 | } 94 | } else { 95 | LOG.info("The cache directory [{}] already exists.", cacheDirFile); 96 | } 97 | return cacheDirFile; 98 | } 99 | } 100 | -------------------------------------------------------------------------------- /src/main/java/org/apache/hadoop/fs/cosn/common/Pair.java: -------------------------------------------------------------------------------- 1 | package org.apache.hadoop.fs.cosn.common; 2 | 3 | import javax.annotation.Nullable; 4 | import java.util.Objects; 5 | 6 | /** 7 | * A pair of values. 8 | * 9 | * @param 10 | * @param 11 | */ 12 | public class Pair { 13 | private First first; 14 | private Second second; 15 | 16 | public Pair() { 17 | } 18 | 19 | public Pair(First first, Second second) { 20 | this.first = first; 21 | this.second = second; 22 | } 23 | 24 | @Nullable 25 | public First getFirst() { 26 | return first; 27 | } 28 | 29 | @Nullable 30 | public Second getSecond() { 31 | return second; 32 | } 33 | 34 | public void setFirst(@Nullable First first) { 35 | this.first = first; 36 | } 37 | 38 | public void setSecond(@Nullable Second second) { 39 | this.second = second; 40 | } 41 | 42 | @Override 43 | public boolean equals(Object o) { 44 | if (!(o instanceof Pair)) return false; 45 | Pair pair = (Pair) o; 46 | return Objects.equals(first, pair.first) && Objects.equals(second, pair.second); 47 | } 48 | 49 | @Override 50 | public int hashCode() { 51 | return Objects.hash(first, second); 52 | } 53 | 54 | @Override 55 | public String toString() { 56 | return "Pair{" + 57 | "first=" + first + 58 | ", second=" + second + 59 | '}'; 60 | } 61 | } 62 | -------------------------------------------------------------------------------- /src/main/java/org/apache/hadoop/fs/cosn/multipart/upload/UploadPart.java: -------------------------------------------------------------------------------- 1 | package org.apache.hadoop.fs.cosn.multipart.upload; 2 | 3 | import com.qcloud.cos.thirdparty.org.apache.commons.codec.binary.Hex; 4 | import org.apache.hadoop.fs.cosn.MD5Utils; 5 | import org.apache.hadoop.fs.cosn.buffer.CosNByteBuffer; 6 | import org.slf4j.Logger; 7 | import org.slf4j.LoggerFactory; 8 | 9 | import java.io.IOException; 10 | import java.security.NoSuchAlgorithmException; 11 | 12 | public final class UploadPart { 13 | private static final Logger LOG = 
LoggerFactory.getLogger(UploadPart.class); 14 | private final int partNumber; 15 | private final CosNByteBuffer cosNByteBuffer; 16 | private final byte[] md5Hash; 17 | private final boolean isLast; 18 | 19 | public UploadPart(int partNumber, CosNByteBuffer cosNByteBuffer) { 20 | this(partNumber, cosNByteBuffer, false); 21 | } 22 | 23 | public UploadPart(int partNumber, CosNByteBuffer cosNByteBuffer, boolean isLast) { 24 | this.partNumber = partNumber; 25 | this.cosNByteBuffer = cosNByteBuffer; 26 | // Calculate the MD5 hash of the part. 27 | byte[] md5Hash = null; 28 | try { 29 | md5Hash = MD5Utils.calculate(cosNByteBuffer); 30 | } catch (NoSuchAlgorithmException | IOException exception) { 31 | LOG.warn("Failed to calculate the md5Hash for the part [{}].", 32 | partNumber, exception); 33 | } 34 | this.md5Hash = md5Hash; 35 | this.isLast = isLast; 36 | } 37 | 38 | public UploadPart(int partNumber, CosNByteBuffer cosNByteBuffer, byte[] md5Hash, boolean isLast) { 39 | this.partNumber = partNumber; 40 | this.cosNByteBuffer = cosNByteBuffer; 41 | this.md5Hash = md5Hash; 42 | this.isLast = isLast; 43 | } 44 | 45 | public int getPartNumber() { 46 | return this.partNumber; 47 | } 48 | 49 | public CosNByteBuffer getCosNByteBuffer() { 50 | return this.cosNByteBuffer; 51 | } 52 | 53 | public long getPartSize() { 54 | return this.cosNByteBuffer.remaining(); 55 | } 56 | 57 | public byte[] getMd5Hash() { 58 | return this.md5Hash; 59 | } 60 | 61 | public boolean isLast() { 62 | return isLast; 63 | } 64 | 65 | @Override 66 | public String toString() { 67 | return String.format("UploadPart{partNumber:%d, partSize: %d, md5Hash: %s, isLast: %b}", this.partNumber, this.cosNByteBuffer.flipRead().remaining(), (this.md5Hash != null ? Hex.encodeHexString(this.md5Hash) : "NULL"), this.isLast); 68 | } 69 | } 70 | -------------------------------------------------------------------------------- /src/main/java/org/apache/hadoop/fs/cosn/multipart/upload/UploadPartCopy.java: -------------------------------------------------------------------------------- 1 | package org.apache.hadoop.fs.cosn.multipart.upload; 2 | 3 | public final class UploadPartCopy { 4 | private final String srcKey; 5 | 6 | private final String destKey; 7 | private final int partNumber; 8 | private final long firstByte; 9 | private final long lastByte; 10 | 11 | public UploadPartCopy(String srcKey, String destKey, int partNumber, long firstByte, long lastByte) { 12 | this.srcKey = srcKey; 13 | this.destKey = destKey; 14 | this.partNumber = partNumber; 15 | this.firstByte = firstByte; 16 | this.lastByte = lastByte; 17 | } 18 | 19 | public String getSrcKey() { 20 | return srcKey; 21 | } 22 | 23 | public String getDestKey() { 24 | return destKey; 25 | } 26 | 27 | public int getPartNumber() { 28 | return partNumber; 29 | } 30 | 31 | public long getFirstByte() { 32 | return firstByte; 33 | } 34 | 35 | public long getLastByte() { 36 | return lastByte; 37 | } 38 | 39 | @Override 40 | public String toString() { 41 | return "UploadPartCopy{" + 42 | "srcKey='" + srcKey + '\'' + 43 | ", partNumber=" + partNumber + 44 | ", firstByte=" + firstByte + 45 | ", lastByte=" + lastByte + 46 | '}'; 47 | } 48 | } 49 | -------------------------------------------------------------------------------- /src/test/java/org/apache/hadoop/fs/CosNFileSystemTestBase.java: -------------------------------------------------------------------------------- 1 | package org.apache.hadoop.fs; 2 | 3 | import org.apache.hadoop.conf.Configuration; 4 | import org.junit.After; 5 | import org.junit.AfterClass; 6 | import 
org.junit.Before; 7 | import org.junit.BeforeClass; 8 | 9 | import java.io.IOException; 10 | import java.util.Arrays; 11 | import java.util.Random; 12 | import java.security.SecureRandom; 13 | 14 | public class CosNFileSystemTestBase extends CosNFileSystemTestWithTimeout { 15 | protected static Configuration configuration; 16 | protected static FileSystem fs; 17 | static Random random = new Random(); 18 | 19 | protected static final Path unittestDirPath = new Path("/unittest-dir" + random.nextInt()); 20 | protected final Path testDirPath = new Path(unittestDirPath, "test-dir"); 21 | protected final Path testFilePath = new Path(unittestDirPath, "test-file"); 22 | 23 | @BeforeClass 24 | public static void beforeClass() throws IOException { 25 | String configFilePath = System.getProperty("config.file"); 26 | configuration = new Configuration(); 27 | // Initialize the FileSystem object. Since core-site.xml lives under the test resources, it should be loaded correctly. 28 | if (configFilePath != null) { 29 | // Load the configuration file via addResource. 30 | configuration.addResource(new Path(configFilePath)); 31 | } 32 | // These tests target CosNFileSystem, so force fs.cosn.impl to CosNFileSystem. 33 | configuration.set("fs.cosn.impl", "org.apache.hadoop.fs.CosNFileSystem"); 34 | fs = FileSystem.get(configuration); 35 | 36 | if (null != fs && !fs.exists(unittestDirPath)) { 37 | fs.mkdirs(unittestDirPath); 38 | } 39 | } 40 | 41 | @AfterClass 42 | public static void afterClass() throws IOException { 43 | if (null != fs && fs.exists(unittestDirPath)) { 44 | fs.delete(unittestDirPath, true); 45 | } 46 | if (null != fs) { 47 | fs.close(); 48 | } 49 | } 50 | 51 | @Before 52 | public void before() throws IOException { 53 | if (!fs.exists(testDirPath)) { 54 | fs.mkdirs(testDirPath); 55 | } 56 | if (!fs.exists(testFilePath)) { 57 | try (FSDataOutputStream fsDataOutputStream = fs.create(testFilePath)) { 58 | fsDataOutputStream.write("Hello, World!".getBytes()); 59 | fsDataOutputStream.write("\n".getBytes()); 60 | fsDataOutputStream.write("Hello, COS!".getBytes()); 61 | } 62 | } 63 | } 64 | 65 | @After 66 | public void after() throws IOException { 67 | if (fs.exists(testFilePath)) { 68 | fs.delete(testFilePath, true); 69 | } 70 | if (fs.exists(testDirPath)) { 71 | fs.delete(testDirPath, true); 72 | } 73 | } 74 | 75 | /** 76 | * Return a path bound to this method name, unique to this fork during 77 | * parallel execution. 78 | * 79 | * @return a method name unique to (fork, method). 80 | * @throws IOException IO problems 81 | */ 82 | protected Path methodPath() throws IOException { 83 | return new Path(unittestDirPath, methodName.getMethodName()); 84 | } 85 | 86 | /* 87 | * Helper method that creates test data of size provided by the 88 | * "size" parameter. 89 | */ 90 | protected static byte[] getTestData(int size) { 91 | byte[] testData = new byte[size]; 92 | SecureRandom secureRandom = new SecureRandom(); 93 | secureRandom.nextBytes(testData); // Fill the array with random bytes. 94 | return testData; 95 | } 96 | 97 | // Helper method to create a file and write fileSize bytes of data to it. 98 | protected byte[] createBaseFileWithData(int fileSize, Path testPath) throws Throwable { 99 | 100 | try (FSDataOutputStream createStream = fs.create(testPath)) { 101 | byte[] fileData = null; 102 | 103 | if (fileSize != 0) { 104 | fileData = getTestData(fileSize); 105 | createStream.write(fileData); 106 | } 107 | return fileData; 108 | } 109 | } 110 | 111 | /* 112 | * Helper method to verify the data of a test file. 
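* The file is read back in 2048-byte chunks via verifyFileData, and each chunk is compared against the corresponding slice of the expected data; any short read or mismatch fails the check.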
113 | */ 114 | protected boolean verifyFile(byte[] testData, Path testFile) { 115 | 116 | try (FSDataInputStream srcStream = fs.open(testFile)) { 117 | 118 | int baseBufferSize = 2048; 119 | int testDataSize = testData.length; 120 | int testDataIndex = 0; 121 | 122 | while (testDataSize > baseBufferSize) { 123 | 124 | if (!verifyFileData(baseBufferSize, testData, testDataIndex, srcStream)) { 125 | return false; 126 | } 127 | testDataIndex += baseBufferSize; 128 | testDataSize -= baseBufferSize; 129 | } 130 | 131 | if (!verifyFileData(testDataSize, testData, testDataIndex, srcStream)) { 132 | return false; 133 | } 134 | 135 | return true; 136 | } catch (Exception ex) { 137 | return false; 138 | } 139 | } 140 | 141 | /* 142 | * Helper method to verify file data of the length given by the "dataLength" parameter. 143 | */ 144 | protected boolean verifyFileData(int dataLength, byte[] testData, int testDataIndex, FSDataInputStream srcStream) { 145 | 146 | try { 147 | 148 | byte[] fileBuffer = new byte[dataLength]; 149 | byte[] testDataBuffer = new byte[dataLength]; 150 | 151 | int fileBytesRead = srcStream.read(fileBuffer); 152 | 153 | if (fileBytesRead < dataLength) { 154 | return false; 155 | } 156 | 157 | System.arraycopy(testData, testDataIndex, testDataBuffer, 0, dataLength); 158 | 159 | if (!Arrays.equals(fileBuffer, testDataBuffer)) { 160 | return false; 161 | } 162 | 163 | return true; 164 | 165 | } catch (Exception ex) { 166 | return false; 167 | } 168 | 169 | } 170 | } 171 | -------------------------------------------------------------------------------- /src/test/java/org/apache/hadoop/fs/CosNFileSystemTestWithTimeout.java: -------------------------------------------------------------------------------- 1 | package org.apache.hadoop.fs; 2 | 3 | import org.junit.Assert; 4 | import org.junit.Before; 5 | import org.junit.BeforeClass; 6 | import org.junit.Rule; 7 | import org.junit.rules.TestName; 8 | import org.junit.rules.Timeout; 9 | 10 | import java.util.concurrent.TimeUnit; 11 | 12 | public class CosNFileSystemTestWithTimeout extends Assert { 13 | /** 14 | * The name of the current method. 15 | */ 16 | @Rule 17 | public TestName methodName = new TestName(); 18 | /** 19 | * Set the timeout for every test. 20 | * This is driven by the value returned by {@link #getTestTimeoutMillis()}. 21 | */ 22 | @Rule 23 | public Timeout testTimeout = new Timeout(getTestTimeoutMillis(), TimeUnit.MILLISECONDS); 24 | 25 | /** 26 | * Name the junit thread for the class. This will be overridden 27 | * before the individual test methods are run. 28 | */ 29 | @BeforeClass 30 | public static void nameTestThread() { 31 | Thread.currentThread().setName("JUnit"); 32 | } 33 | 34 | /** 35 | * Name the thread to the current test method. 36 | */ 37 | @Before 38 | public void nameThread() { 39 | Thread.currentThread().setName("JUnit-" + methodName.getMethodName()); 40 | } 41 | 42 | /** 43 | * Override point: the test timeout in milliseconds. 
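* Subclasses may override this to tighten or relax the limit; the default below works out to ten minutes (60 * 10 * 1000 ms).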
44 | * @return a timeout in milliseconds 45 | */ 46 | protected int getTestTimeoutMillis() { 47 | return 60 * 10 * 1000; 48 | } 49 | 50 | } 51 | -------------------------------------------------------------------------------- /src/test/java/org/apache/hadoop/fs/ITestCosNFileSystemAppend.java: -------------------------------------------------------------------------------- 1 | package org.apache.hadoop.fs; 2 | 3 | import org.apache.hadoop.fs.cosn.Unit; 4 | import org.junit.Before; 5 | import org.junit.Test; 6 | 7 | import java.io.IOException; 8 | 9 | public class ITestCosNFileSystemAppend extends CosNFileSystemTestBase { 10 | 11 | private Path testPath; 12 | 13 | @Before 14 | public void before() throws IOException { 15 | super.before(); 16 | testPath = methodPath(); 17 | } 18 | 19 | @Test 20 | public void testSingleAppend() throws Throwable { 21 | FSDataOutputStream appendStream = null; 22 | try { 23 | int baseDataSize = 50; 24 | byte[] baseDataBuffer = createBaseFileWithData(baseDataSize, testPath); 25 | 26 | int appendDataSize = 20; 27 | byte[] appendDataBuffer = getTestData(appendDataSize); 28 | appendStream = fs.append(testPath, 10); 29 | appendStream.write(appendDataBuffer); 30 | appendStream.close(); 31 | byte[] testData = new byte[baseDataSize + appendDataSize]; 32 | System.arraycopy(baseDataBuffer, 0, testData, 0, baseDataSize); 33 | System.arraycopy(appendDataBuffer, 0, testData, baseDataSize, appendDataSize); 34 | 35 | assertTrue(verifyFile(testData, testPath)); 36 | } finally { 37 | if (appendStream != null) { 38 | appendStream.close(); 39 | } 40 | } 41 | } 42 | 43 | /* 44 | * Test case to verify append to an empty file. 45 | */ 46 | @Test 47 | public void testSingleAppendOnEmptyFile() throws Throwable { 48 | FSDataOutputStream appendStream = null; 49 | 50 | try { 51 | createBaseFileWithData(0, testPath); 52 | 53 | int appendDataSize = 20; 54 | byte[] appendDataBuffer = getTestData(appendDataSize); 55 | appendStream = fs.append(testPath, 10); 56 | appendStream.write(appendDataBuffer); 57 | appendStream.close(); 58 | 59 | assertTrue(verifyFile(appendDataBuffer, testPath)); 60 | } finally { 61 | if (appendStream != null) { 62 | appendStream.close(); 63 | } 64 | } 65 | } 66 | 67 | /* 68 | * Tests to verify multiple appends to a file. 69 | */ 70 | @Test 71 | public void testMultipleAppends() throws Throwable { 72 | int baseDataSize = 50; 73 | byte[] baseDataBuffer = createBaseFileWithData(baseDataSize, testPath); 74 | 75 | int appendDataSize = 100; 76 | int targetAppendCount = 5; 77 | byte[] testData = new byte[baseDataSize + (appendDataSize * targetAppendCount)]; 78 | int testDataIndex = 0; 79 | System.arraycopy(baseDataBuffer, 0, testData, testDataIndex, baseDataSize); 80 | testDataIndex += baseDataSize; 81 | 82 | int appendCount = 0; 83 | 84 | FSDataOutputStream appendStream = null; 85 | 86 | try { 87 | while (appendCount < targetAppendCount) { 88 | 89 | byte[] appendDataBuffer = getTestData(appendDataSize); 90 | appendStream = fs.append(testPath, 30); 91 | appendStream.write(appendDataBuffer); 92 | appendStream.close(); 93 | 94 | System.arraycopy(appendDataBuffer, 0, testData, testDataIndex, appendDataSize); 95 | testDataIndex += appendDataSize; 96 | appendCount++; 97 | } 98 | 99 | assertTrue(verifyFile(testData, testPath)); 100 | 101 | } finally { 102 | if (appendStream != null) { 103 | appendStream.close(); 104 | } 105 | } 106 | } 107 | 108 | /* 109 | * Test to verify multiple appends on the same stream. 
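* The stream is reopened with fs.append() for each round, and each round writes its data in 20-byte chunks before closing the stream.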
110 | */ 111 | @Test 112 | public void testMultipleAppendsOnSameStream() throws Throwable { 113 | int baseDataSize = 50; 114 | byte[] baseDataBuffer = createBaseFileWithData(baseDataSize, testPath); 115 | int appendDataSize = 100; 116 | int targetAppendCount = 3; 117 | byte[] testData = new byte[baseDataSize + (appendDataSize * targetAppendCount)]; 118 | int testDataIndex = 0; 119 | System.arraycopy(baseDataBuffer, 0, testData, testDataIndex, baseDataSize); 120 | testDataIndex += baseDataSize; 121 | int appendCount = 0; 122 | 123 | FSDataOutputStream appendStream = null; 124 | 125 | try { 126 | 127 | while (appendCount < targetAppendCount) { 128 | 129 | appendStream = fs.append(testPath, 50); 130 | 131 | int singleAppendChunkSize = 20; 132 | int appendRunSize = 0; 133 | while (appendRunSize < appendDataSize) { 134 | 135 | byte[] appendDataBuffer = getTestData(singleAppendChunkSize); 136 | appendStream.write(appendDataBuffer); 137 | System.arraycopy(appendDataBuffer, 0, testData, 138 | testDataIndex + appendRunSize, singleAppendChunkSize); 139 | 140 | appendRunSize += singleAppendChunkSize; 141 | } 142 | 143 | appendStream.close(); 144 | testDataIndex += appendDataSize; 145 | appendCount++; 146 | } 147 | 148 | assertTrue(verifyFile(testData, testPath)); 149 | } finally { 150 | if (appendStream != null) { 151 | appendStream.close(); 152 | } 153 | } 154 | } 155 | 156 | /* 157 | * Test to verify appending a big file. 158 | */ 159 | @Test 160 | public void testAppendsBigFile() throws Throwable { 161 | int baseDataSize = 50; 162 | byte[] baseDataBuffer = createBaseFileWithData(baseDataSize, testPath); 163 | int appendDataSize = (int) (8 * Unit.MB); // larger than part size 164 | int targetAppendCount = 2; 165 | byte[] testData = new byte[baseDataSize + (appendDataSize * targetAppendCount)]; 166 | int testDataIndex = 0; 167 | System.arraycopy(baseDataBuffer, 0, testData, testDataIndex, baseDataSize); 168 | testDataIndex += baseDataSize; 169 | int appendCount = 0; 170 | 171 | FSDataOutputStream appendStream = null; 172 | 173 | try { 174 | 175 | while (appendCount < targetAppendCount) { 176 | appendStream = fs.append(testPath, 50); 177 | byte[] appendDataBuffer = getTestData(appendDataSize); 178 | appendStream.write(appendDataBuffer); 179 | System.arraycopy(appendDataBuffer, 0, testData, testDataIndex, appendDataSize); 180 | appendStream.close(); 181 | testDataIndex += appendDataSize; 182 | appendCount++; 183 | } 184 | 185 | assertTrue(verifyFile(testData, testPath)); 186 | } finally { 187 | if (appendStream != null) { 188 | appendStream.close(); 189 | } 190 | } 191 | } 192 | } 193 | -------------------------------------------------------------------------------- /src/test/java/org/apache/hadoop/fs/ITestCosNFileSystemCreate.java: -------------------------------------------------------------------------------- 1 | package org.apache.hadoop.fs; 2 | 3 | import org.apache.hadoop.fs.cosn.Unit; 4 | import org.junit.Before; 5 | import org.junit.Test; 6 | 7 | import java.io.IOException; 8 | import java.util.Arrays; 9 | 10 | public class ITestCosNFileSystemCreate extends CosNFileSystemTestBase { 11 | private Path testPath; 12 | 13 | @Before 14 | public void before() throws IOException { 15 | super.before(); 16 | testPath = methodPath(); 17 | } 18 | 19 | @Test 20 | public void testSingleCreate() throws Throwable { 21 | long[] cases = new long[] {Unit.MB, 10 * Unit.MB}; 22 | for (long c : cases) { 23 | byte[] baseData = createBaseFileWithData((int) c, testPath); 24 | FSDataOutputStream outputStream = 
fs.create(testPath); 25 | outputStream.write(baseData); outputStream.close(); 26 | assertTrue(verifyFile(baseData, testPath)); 27 | } 28 | } 29 | 30 | @Test 31 | public void testNotAllowOverwriteCreate() throws Throwable { 32 | createBaseFileWithData(10, testPath); 33 | 34 | boolean actualException = false; 35 | byte[] testData = getTestData(10); 36 | try { 37 | FSDataOutputStream outputStream = fs.create(testPath, false); 38 | outputStream.write(testData); 39 | } catch (FileAlreadyExistsException fe) { 40 | actualException = true; 41 | } 42 | assertTrue(actualException); 43 | } 44 | } 45 | -------------------------------------------------------------------------------- /src/test/java/org/apache/hadoop/fs/ITestCosNFileSystemDelete.java: -------------------------------------------------------------------------------- 1 | package org.apache.hadoop.fs; 2 | 3 | import org.junit.Before; 4 | import org.junit.Test; 5 | 6 | import java.io.FileNotFoundException; 7 | import java.io.IOException; 8 | 9 | public class ITestCosNFileSystemDelete extends CosNFileSystemTestBase { 10 | private Path testPath; 11 | 12 | @Before 13 | public void before() throws IOException { 14 | super.before(); 15 | testPath = methodPath(); 16 | } 17 | 18 | @Test 19 | public void testDeleteFile() throws Throwable { 20 | createBaseFileWithData(10, testPath); 21 | assertNotEquals(null, fs.getFileStatus(testPath)); 22 | fs.delete(testPath, false); 23 | assertTrue(verifyDelete(testPath)); 24 | } 25 | 26 | @Test 27 | public void testDeleteEmptyDirectory() throws Throwable { 28 | fs.mkdirs(testPath); 29 | assertTrue(fs.getFileStatus(testPath).isDirectory()); 30 | fs.delete(testPath, false); 31 | assertTrue(verifyDelete(testPath)); 32 | } 33 | 34 | @Test 35 | public void testDeleteNotEmptyDirectory() throws Throwable { 36 | fs.mkdirs(testPath); 37 | assertTrue(fs.getFileStatus(testPath).isDirectory()); 38 | 39 | Path child = new Path(testPath, "child"); 40 | createBaseFileWithData(10, child); 41 | Path child1 = new Path(testPath, "child1"); 42 | createBaseFileWithData(10, child1); 43 | Path child2 = new Path(testPath, "child2"); 44 | createBaseFileWithData(10, child2); 45 | Path child3 = new Path(testPath, "child3"); 46 | createBaseFileWithData(10, child3); 47 | Path child4 = new Path(testPath, "child4"); 48 | createBaseFileWithData(10, child4); 49 | assertEquals(5, fs.listStatus(testPath).length); 50 | 51 | boolean actualThrown = false; 52 | try { 53 | fs.delete(testPath, false); 54 | } catch (IOException e) { 55 | actualThrown = true; 56 | } 57 | assertTrue(actualThrown); 58 | assertFalse(verifyDelete(testPath)); 59 | 60 | fs.delete(testPath, true); 61 | assertTrue(verifyDelete(testPath)); 62 | } 63 | 64 | private boolean verifyDelete(Path path) { 65 | boolean actualThrown = false; 66 | try { 67 | fs.getFileStatus(path); 68 | } catch (IOException e) { 69 | actualThrown = true; 70 | } 71 | 72 | return actualThrown; 73 | } 74 | } 75 | -------------------------------------------------------------------------------- /src/test/java/org/apache/hadoop/fs/ITestCosNFileSystemGetFileStatus.java: -------------------------------------------------------------------------------- 1 | package org.apache.hadoop.fs; 2 | 3 | import org.junit.Before; 4 | import org.junit.Test; 5 | 6 | import java.io.IOException; 7 | 8 | public class ITestCosNFileSystemGetFileStatus extends CosNFileSystemTestBase { 9 | 10 | private Path testPath; 11 | 12 | @Before 13 | public void before() throws IOException { 14 | super.before(); 15 | testPath = methodPath(); 16 | } 17 | 18 | @Test
19 | public void testGetFileStatus() throws Throwable { 20 | createBaseFileWithData(10, testPath); 21 | FileStatus status = fs.getFileStatus(testPath); 22 | assertEquals(10, status.getLen()); 23 | assertTrue(status.isFile()); 24 | } 25 | 26 | @Test 27 | public void testGetDirStatus() throws Throwable { 28 | fs.mkdirs(testPath); 29 | Path child = new Path(testPath, "child"); 30 | createBaseFileWithData(10, child); 31 | FileStatus status = fs.getFileStatus(testPath); 32 | assertTrue(status.isDirectory()); 33 | } 34 | } 35 | -------------------------------------------------------------------------------- /src/test/java/org/apache/hadoop/fs/ITestCosNFileSystemListStatus.java: -------------------------------------------------------------------------------- 1 | package org.apache.hadoop.fs; 2 | 3 | import org.junit.Before; 4 | import org.junit.Test; 5 | 6 | import java.io.IOException; 7 | 8 | public class ITestCosNFileSystemListStatus extends CosNFileSystemTestBase { 9 | 10 | private Path testPath; 11 | 12 | @Before 13 | public void before() throws IOException { 14 | super.before(); 15 | testPath = methodPath(); 16 | } 17 | 18 | @Test 19 | public void testSingleFileListStatus() throws Throwable { 20 | createBaseFileWithData(10, testPath); 21 | FileStatus[] res = fs.listStatus(testPath); 22 | assertEquals(1, res.length); 23 | FileStatus fileStatus = fs.getFileStatus(testPath); 24 | // HCFS: the contents of a FileStatus of a child retrieved via listStatus() are equal to 25 | // those from a call of getFileStatus() to the same path. 26 | assertEquals(fileStatus, res[0]); 27 | } 28 | 29 | @Test 30 | public void testMultipleFileListStatus() throws Throwable { 31 | fs.mkdirs(testPath); 32 | Path child1 = new Path(testPath, "child1"); 33 | createBaseFileWithData(10, child1); 34 | Path child2 = new Path(testPath, "child2"); 35 | createBaseFileWithData(10, child2); 36 | FileStatus[] res = fs.listStatus(testPath); 37 | assertEquals(2, res.length); 38 | } 39 | } 40 | -------------------------------------------------------------------------------- /src/test/java/org/apache/hadoop/fs/ITestCosNFileSystemMkDirs.java: -------------------------------------------------------------------------------- 1 | package org.apache.hadoop.fs; 2 | 3 | import org.junit.Before; 4 | import org.junit.Test; 5 | 6 | import java.io.IOException; 7 | 8 | public class ITestCosNFileSystemMkDirs extends CosNFileSystemTestBase { 9 | 10 | private Path testPath; 11 | 12 | @Before 13 | public void before() throws IOException { 14 | super.before(); 15 | testPath = methodPath(); 16 | } 17 | 18 | @Test 19 | public void testMkdirSingle() throws Throwable { 20 | fs.mkdirs(testPath); 21 | assertTrue(fs.getFileStatus(testPath).isDirectory()); 22 | } 23 | 24 | /* 25 | * Test to verify creating nested subdirectories. 26 | */ 27 | @Test 28 | public void testMkdirChild() throws Throwable { 29 | Path child = new Path(testPath, "child"); 30 | fs.mkdirs(child); 31 | assertTrue(fs.getFileStatus(child).isDirectory()); 32 | assertTrue(fs.getFileStatus(testPath).isDirectory()); 33 | } 34 | 35 | /* 36 | * Test to verify that mkdirs fails when the path already exists as a file. 
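* mkdirs() is expected to throw FileAlreadyExistsException in this case.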
37 | */ 38 | @Test 39 | public void testMkdirExistFilename() throws Throwable { 40 | createBaseFileWithData(10, testPath); 41 | boolean thrown = false; 42 | try { 43 | fs.mkdirs(testPath); 44 | } catch (FileAlreadyExistsException e) { 45 | thrown = true; 46 | } 47 | assertTrue(thrown); 48 | } 49 | 50 | @Test 51 | public void testCreateRoot() throws Throwable { 52 | fs.mkdirs(new Path("/")); 53 | assertTrue(fs.getFileStatus(new Path("/")).isDirectory()); 54 | } 55 | } 56 | -------------------------------------------------------------------------------- /src/test/java/org/apache/hadoop/fs/ITestCosNFileSystemRename.java: -------------------------------------------------------------------------------- 1 | package org.apache.hadoop.fs; 2 | 3 | import org.junit.Before; 4 | import org.junit.Test; 5 | 6 | import java.io.IOException; 7 | 8 | public class ITestCosNFileSystemRename extends CosNFileSystemTestBase { 9 | 10 | private Path testPath; 11 | 12 | @Before 13 | public void before() throws IOException { 14 | super.before(); 15 | testPath = methodPath(); 16 | fs.mkdirs(testPath); 17 | } 18 | 19 | @Test 20 | public void testRenameFile() throws Throwable { 21 | Path src = new Path(testPath,"src"); 22 | createBaseFileWithData(10, src); 23 | Path dest = new Path(testPath, "dest"); 24 | assertRenameOutcome(fs, src, dest, true); 25 | assertTrue(fs.getFileStatus(dest).isFile()); 26 | assertFalse(fs.exists(src)); 27 | } 28 | 29 | @Test 30 | public void testRenameWithPreExistingDestination() throws Throwable { 31 | Path src = new Path(testPath,"src"); 32 | createBaseFileWithData(10, src); 33 | Path dest = new Path(testPath, "dest"); 34 | createBaseFileWithData(10, dest); 35 | boolean exceptionThrown = false; 36 | try { 37 | assertRenameOutcome(fs, src, dest, false); 38 | } catch (FileAlreadyExistsException e) { 39 | exceptionThrown = true; 40 | } 41 | assertTrue("Expected FileAlreadyExistsException to be thrown", exceptionThrown); 42 | } 43 | 44 | @Test 45 | public void testRenameFileUnderDir() throws Throwable { 46 | Path src = new Path(testPath,"src"); 47 | fs.mkdirs(src); 48 | String filename = "file1"; 49 | Path file1 = new Path(src, filename); 50 | createBaseFileWithData(10, file1); 51 | 52 | Path dest = new Path(testPath, "dest"); 53 | assertRenameOutcome(fs, src, dest, true); 54 | FileStatus[] fileStatus = fs.listStatus(dest); 55 | assertNotNull("Null file status", fileStatus); 56 | FileStatus status = fileStatus[0]; 57 | assertEquals("Wrong filename in " + status, 58 | filename, status.getPath().getName()); 59 | } 60 | 61 | @Test 62 | public void testRenameDirectory() throws Throwable { 63 | Path test1 = new Path(testPath,"test1"); 64 | fs.mkdirs(test1); 65 | fs.mkdirs(new Path(testPath, "test1/test2")); 66 | Path test3 = new Path(testPath,"test1/test2/test3"); 67 | fs.mkdirs(test3); 68 | Path file1 = new Path(test3, "file1"); 69 | Path file2 = new Path(test3, "file2"); 70 | createBaseFileWithData(10, file1); 71 | createBaseFileWithData(10, file2); 72 | 73 | assertRenameOutcome(fs, test1, 74 | new Path(testPath, "test10"), true); 75 | assertTrue(fs.exists(new Path(testPath, "test10/test2/test3"))); 76 | assertFalse(fs.exists(test1)); 77 | assertEquals(2, fs.listStatus(new Path(testPath, "test10/test2/test3")).length); 78 | } 79 | 80 | @Test 81 | public void testRenameFileToExistDirectory() throws Throwable { 82 | Path test1 = new Path(testPath,"test1"); 83 | fs.mkdirs(test1); 84 | Path file1 = new Path(testPath, "file1"); 85 | createBaseFileWithData(10, file1); 86 | fs.rename(file1, test1); 87 | 
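// Renaming a file onto an existing directory moves the file under that directory.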
assertTrue(fs.exists(new Path(testPath, "test1/file1"))); 88 | } 89 | 90 | @Test 91 | public void testRenameDestDirParentNotExist() throws Throwable { 92 | Path test1 = new Path(testPath,"test1"); 93 | fs.mkdirs(test1); 94 | Path test2 = new Path(testPath,"test2/test3"); 95 | boolean thrown = false; 96 | try { 97 | fs.rename(test1, test2); 98 | } catch (IOException e) { 99 | thrown = true; 100 | } 101 | assertTrue(thrown); 102 | } 103 | 104 | public static void assertRenameOutcome(FileSystem fs, 105 | Path source, 106 | Path dest, 107 | boolean expectedResult) throws IOException { 108 | boolean result = fs.rename(source, dest); 109 | if (expectedResult != result) { 110 | fail(String.format("Expected rename(%s, %s) to return %b," 111 | + " but result was %b", source, dest, expectedResult, result)); 112 | } 113 | } 114 | } 115 | -------------------------------------------------------------------------------- /src/test/java/org/apache/hadoop/fs/ITestCosNFileSystemSetAndGetXAttr.java: -------------------------------------------------------------------------------- 1 | package org.apache.hadoop.fs; 2 | 3 | import org.junit.Before; 4 | import org.junit.Test; 5 | 6 | import java.io.IOException; 7 | import java.util.Arrays; 8 | 9 | public class ITestCosNFileSystemSetAndGetXAttr extends CosNFileSystemTestBase { 10 | 11 | private Path testPath; 12 | 13 | @Before 14 | public void before() throws IOException { 15 | super.before(); 16 | testPath = methodPath(); 17 | } 18 | 19 | @Test 20 | public void testSetAndGetXAttr() throws Throwable { 21 | createBaseFileWithData(10, testPath); 22 | String attrName = "test"; 23 | byte[] attrValue = new byte[] { 1, 2, 3 }; 24 | fs.setXAttr(testPath, attrName, attrValue); 25 | assertEquals(Arrays.toString(attrValue), Arrays.toString(fs.getXAttr(testPath, attrName))); 26 | } 27 | } 28 | -------------------------------------------------------------------------------- /src/test/java/org/apache/hadoop/fs/ITestCosNFileSystemSymlink.java: -------------------------------------------------------------------------------- 1 | package org.apache.hadoop.fs; 2 | 3 | import org.junit.Before; 4 | import org.junit.Test; 5 | 6 | import java.io.IOException; 7 | import java.net.URISyntaxException; 8 | 9 | import static org.junit.Assert.assertEquals; 10 | import static org.junit.Assert.assertTrue; 11 | 12 | public class ITestCosNFileSystemSymlink extends CosNFileSystemTestBase { 13 | private final Path testFileSymlinkPath = new Path(unittestDirPath,"test-symlink"); 14 | private final Path testDirSymlinkPath = new Path(unittestDirPath,"test-dir-symlink"); 15 | 16 | @Before 17 | public void before() throws IOException { 18 | configuration.setBoolean("fs.cosn.support_symlink.enabled", true); 19 | super.before(); 20 | // NOTE: createSymlink must be able to succeed here, so delete the corresponding symlinks first. 21 | if (fs.exists(testFileSymlinkPath)) { 22 | fs.delete(testFileSymlinkPath, true); 23 | } 24 | if (fs.exists(testDirSymlinkPath)) { 25 | fs.delete(testDirSymlinkPath, true); 26 | } 27 | } 28 | 29 | @Test 30 | public void supportsSymlink() { 31 | assertTrue(fs.supportsSymlinks()); 32 | } 33 | 34 | @Test 35 | public void createSymlink() throws IOException { 36 | // Ensure that symlink support is enabled. 37 | assert fs.supportsSymlinks(); 38 | // Create a symlink pointing to a file. 39 | fs.createSymlink(testFilePath, testFileSymlinkPath, false); 40 | // Verify that the symlink exists. 41 | assert fs.getFileLinkStatus(testFileSymlinkPath).isSymlink(); 42 | 43 | // Create a symlink pointing to a directory. 44 | fs.createSymlink(testDirPath, testDirSymlinkPath, false); 45 | // Verify that the symlink exists. 46 | assert 
fs.getFileLinkStatus(testDirSymlinkPath).isSymlink(); 47 | assert fs.getFileStatus(testDirSymlinkPath).isSymlink(); 48 | } 49 | 50 | @Test 51 | public void getFileLinkStatus() throws IOException { 52 | // Ensure that symlink support is enabled. 53 | assert fs.supportsSymlinks(); 54 | // Create a symlink pointing to a file. 55 | fs.createSymlink(testFilePath, testFileSymlinkPath, false); 56 | // Verify that the symlink exists. 57 | assert fs.getFileLinkStatus(testFileSymlinkPath).isSymlink(); 58 | // Hadoop Compatible FileSystem semantics require the FileStatus of a symlink to be a symlink, not a file. 59 | assert fs.getFileStatus(testFileSymlinkPath).isSymlink(); 60 | 61 | // Create a symlink pointing to a directory. 62 | fs.createSymlink(testDirPath, testDirSymlinkPath, false); 63 | // Verify that the symlink exists. 64 | assert fs.getFileLinkStatus(testDirSymlinkPath).isSymlink(); 65 | // Hadoop Compatible FileSystem semantics require the FileStatus of a symlink to be a symlink, not a directory. 66 | assert fs.getFileStatus(testDirSymlinkPath).isSymlink(); 67 | } 68 | 69 | @Test 70 | public void getLinkTarget() throws IOException, URISyntaxException { 71 | // Ensure that symlink support is enabled. 72 | assert fs.supportsSymlinks(); 73 | // Create a symlink pointing to a file. 74 | fs.createSymlink(testFilePath, testFileSymlinkPath, false); 75 | // Verify that the link target resolves to the original file path. 76 | assertEquals(testFilePath, new Path(fs.getLinkTarget(testFileSymlinkPath).toUri().getPath())); 77 | 78 | // Create a symlink pointing to a directory. 79 | fs.createSymlink(testDirPath, testDirSymlinkPath, false); 80 | // Verify that the link target resolves to the original directory path. 81 | assertEquals(testDirPath, new Path(fs.getLinkTarget(testDirSymlinkPath).toUri().getPath())); 82 | } 83 | 84 | @Test 85 | public void getFileStatus() throws IOException { 86 | assert fs.supportsSymlinks(); 87 | fs.createSymlink(testFilePath, testFileSymlinkPath, false); 88 | assert fs.getFileStatus(testFileSymlinkPath).isSymlink(); // Expect the FileStatus of the symlink itself. 89 | // Disable symlink support. 90 | boolean supportSymlink = fs.getConf().getBoolean(CosNConfigKeys.COSN_SUPPORT_SYMLINK_ENABLED, CosNConfigKeys.DEFAULT_COSN_SUPPORT_SYMLINK_ENABLED); 91 | fs.getConf().setBoolean(CosNConfigKeys.COSN_SUPPORT_SYMLINK_ENABLED, false); 92 | assert fs.getFileStatus(testFileSymlinkPath).isFile(); // Expect the FileStatus of the underlying file. 93 | fs.getConf().setBoolean(CosNConfigKeys.COSN_SUPPORT_SYMLINK_ENABLED, supportSymlink); 94 | } 95 | } 96 | -------------------------------------------------------------------------------- /src/test/java/org/apache/hadoop/fs/ITestCosNFileSystemTruncate.java: -------------------------------------------------------------------------------- 1 | package org.apache.hadoop.fs; 2 | 3 | import org.apache.hadoop.fs.cosn.Unit; 4 | import org.junit.Before; 5 | import org.junit.Test; 6 | 7 | import java.io.IOException; 8 | import java.util.Arrays; 9 | 10 | public class ITestCosNFileSystemTruncate extends CosNFileSystemTestBase { 11 | 12 | private Path testPath; 13 | 14 | @Before 15 | public void before() throws IOException { 16 | super.before(); 17 | testPath = methodPath(); 18 | } 19 | 20 | @Test 21 | public void testTruncate() throws Throwable { 22 | long[] cases = new long[] {Unit.MB, 10 * Unit.MB}; 23 | int newLength = (int) Unit.MB - 1; 24 | for (long c : cases) { 25 | byte[] baseData = createBaseFileWithData((int) c, testPath); 26 | fs.truncate(testPath, newLength); 27 | assertTrue(verifyFile(Arrays.copyOf(baseData, newLength), testPath)); 28 | } 29 | } 30 | 31 | } 32 | -------------------------------------------------------------------------------- /src/test/java/org/apache/hadoop/fs/cosn/MemoryAllocatorTest.java: -------------------------------------------------------------------------------- 1 | package org.apache.hadoop.fs.cosn; 2 | 3 | import 
org.apache.hadoop.fs.cosn.MemoryAllocator.Memory; 4 | import org.junit.Test; 5 | 6 | import java.lang.reflect.Executable; 7 | import java.util.ArrayList; 8 | import java.util.List; 9 | import java.util.concurrent.TimeUnit; 10 | 11 | import static org.junit.Assert.assertEquals; 12 | import static org.junit.Assert.assertTrue; 13 | 14 | public class MemoryAllocatorTest { 15 | 16 | 17 | @Test 18 | public void test1() throws InterruptedException, CosNOutOfMemoryException { 19 | final MemoryAllocator memoryAllocator = MemoryAllocator.Factory.create(300 * 1024 * 1024); 20 | List<Memory> memory = new ArrayList<>(); 21 | for (int i = 0; i < 300; i++) { 22 | memory.add(memoryAllocator.allocate(1024 * 1024)); 23 | } 24 | 25 | long t1 = System.currentTimeMillis(); 26 | 27 | try { 28 | memoryAllocator.allocate(1024 * 1024, 1, TimeUnit.SECONDS); 29 | } catch (CosNOutOfMemoryException e) { 30 | // ignore 31 | } 32 | 33 | long t2 = System.currentTimeMillis(); 34 | assertTrue(t2 - t1 >= 1000 && t2 - t1 <= 1100); 35 | 36 | memory.remove(0).free(); 37 | memory.add(memoryAllocator.allocate(1024 * 1024, 1, TimeUnit.SECONDS)); 38 | 39 | 40 | memory.remove(0); 41 | try { 42 | memoryAllocator.allocate(1024 * 1024, 0, TimeUnit.SECONDS); 43 | } catch (CosNOutOfMemoryException e) { 44 | // ignore 45 | } 46 | 47 | System.gc(); 48 | System.runFinalization(); 49 | memory.add(memoryAllocator.allocate(1024 * 1024, 10, TimeUnit.MILLISECONDS)); 50 | 51 | assertEquals(memoryAllocator.getTotalBytes(), memoryAllocator.getAllocatedBytes()); 52 | 53 | memory.clear(); 54 | System.gc(); 55 | System.runFinalization(); 56 | Thread.sleep(100); 57 | assertEquals(0, memoryAllocator.getAllocatedBytes()); 58 | } 59 | 60 | @Test 61 | public void test2() { 62 | MemoryAllocator unboundedMemoryAllocator = MemoryAllocator.Factory.create(-1); 63 | assertEquals(-1, unboundedMemoryAllocator.getTotalBytes()); 64 | assertTrue(unboundedMemoryAllocator instanceof MemoryAllocator.UnboundedMemoryAllocator); 65 | 66 | 67 | MemoryAllocator boundedMemoryAllocator = MemoryAllocator.Factory.create(100); 68 | assertEquals(100, boundedMemoryAllocator.getTotalBytes()); 69 | assertTrue(boundedMemoryAllocator instanceof MemoryAllocator.BoundedMemoryAllocator); 70 | 71 | try { 72 | MemoryAllocator.Factory.create(0); 73 | } catch (IllegalArgumentException e) { // expected: the capacity must be positive or -1 for unbounded 74 | } 75 | } 76 | 77 | 78 | } -------------------------------------------------------------------------------- /src/test/resources/core-site.xml: -------------------------------------------------------------------------------- 1 | <?xml version="1.0" encoding="UTF-8"?> 2 | <?xml-stylesheet type="text/xsl" href="configuration.xsl"?> 3 | <!-- 16 | --> 17 | 18 | <configuration> 19 | 20 | <property> 21 | <name>fs.defaultFS</name> 22 | <value></value> 23 | </property> 24 | 25 | <property> 26 | <name>fs.cosn.impl</name> 27 | <value>org.apache.hadoop.fs.CosFileSystem</value> 28 | </property> 29 | 30 | <property> 31 | <name>fs.AbstractFileSystem.cosn.impl</name> 32 | <value>org.apache.hadoop.fs.CosN</value> 33 | </property> 34 | 35 | <property> 36 | <name>fs.cosn.userinfo.secretId</name> 37 | <value></value> 38 | </property> 39 | 40 | <property> 41 | <name>fs.cosn.userinfo.secretKey</name> 42 | <value></value> 43 | </property> 44 | 45 | <property> 46 | <name>fs.cosn.bucket.region</name> 47 | <value>ap-guangzhou</value> 48 | </property> 49 | 50 | <property> 51 | <name>fs.cosn.tmp.dir</name> 52 | <value></value> 53 | </property> 54 | </configuration> -------------------------------------------------------------------------------- /src/test/resources/log4j.properties: -------------------------------------------------------------------------------- 1 | # Licensed under the Apache License, Version 2.0 (the "License"); 2 | # you may not use this file except in compliance with the License. 
3 | # You may obtain a copy of the License at 4 | # 5 | # http://www.apache.org/licenses/LICENSE-2.0 6 | # 7 | # Unless required by applicable law or agreed to in writing, software 8 | # distributed under the License is distributed on an "AS IS" BASIS, 9 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 10 | # See the License for the specific language governing permissions and 11 | # limitations under the License. 12 | # log4j configuration used during build and unit tests 13 | 14 | log4j.rootLogger=INFO, stdout 15 | log4j.threshold=ALL 16 | log4j.appender.stdout=org.apache.log4j.ConsoleAppender 17 | log4j.appender.stdout.layout=org.apache.log4j.PatternLayout 18 | log4j.appender.stdout.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n 19 | --------------------------------------------------------------------------------