├── .gitmodules ├── LICENSE ├── README.md ├── libhdfs3 ├── .gitignore ├── CMake │ ├── CMakeTestCompileNestedException.cpp │ ├── CMakeTestCompileSteadyClock.cpp │ ├── CMakeTestCompileStrerror.cpp │ ├── CodeCoverage.cmake │ ├── FindBoost.cmake │ ├── FindCurl.cmake │ ├── FindGSasl.cmake │ ├── FindGoogleTest.cmake │ ├── FindKERBEROS.cmake │ ├── FindLibUUID.cmake │ ├── FindSSL.cmake │ ├── Functions.cmake │ ├── Options.cmake │ └── Platform.cmake ├── CMakeLists.txt ├── Makefile ├── Makefile.global.in ├── README.md ├── bootstrap ├── debian │ ├── .gitignore │ ├── build.sh │ ├── changelog.in │ ├── compat │ ├── control │ ├── copyright │ ├── libhdfs3-dev.dirs │ ├── libhdfs3-dev.install │ ├── libhdfs3-dev.lintian-overrides │ ├── libhdfs3.dirs │ ├── libhdfs3.install │ ├── libhdfs3.lintian-overrides │ ├── rules │ └── source │ │ └── format ├── mock │ ├── CMakeLists.txt │ ├── MockBufferedSocketReader.h │ ├── MockCryptoCodec.h │ ├── MockDatanode.h │ ├── MockFileSystemInter.h │ ├── MockHttpClient.h │ ├── MockKmsClientProvider.h │ ├── MockLeaseRenewer.h │ ├── MockNamenode.h │ ├── MockOperationCanceledCallback.h │ ├── MockPipeline.h │ ├── MockRpcChannel.h │ ├── MockRpcClient.h │ ├── MockRpcRemoteCall.h │ ├── MockSockCall.h │ ├── MockSocket.h │ ├── MockSystem.cpp │ ├── MockSystem.h │ ├── NamenodeStub.h │ ├── PipelineStub.h │ ├── TestDatanodeStub.h │ ├── TestRpcChannelStub.h │ └── TestUtil.h ├── rpms │ ├── .gitignore │ ├── build.sh │ └── libhdfs3.spec ├── src │ ├── .gitignore │ ├── CMakeLists.txt │ ├── client │ │ ├── BlockLocation.h │ │ ├── BlockReader.h │ │ ├── CryptoCodec.cpp │ │ ├── CryptoCodec.h │ │ ├── DataReader.cpp │ │ ├── DataReader.h │ │ ├── DataTransferProtocol.h │ │ ├── DataTransferProtocolSender.cpp │ │ ├── DataTransferProtocolSender.h │ │ ├── DirectoryIterator.cpp │ │ ├── DirectoryIterator.h │ │ ├── EncryptionZoneInfo.h │ │ ├── EncryptionZoneIterator.cpp │ │ ├── EncryptionZoneIterator.h │ │ ├── FileEncryptionInfo.h │ │ ├── FileStatus.h │ │ ├── FileSystem.cpp │ │ ├── 
FileSystem.h │ │ ├── FileSystemImpl.cpp │ │ ├── FileSystemImpl.h │ │ ├── FileSystemInter.h │ │ ├── FileSystemKey.cpp │ │ ├── FileSystemKey.h │ │ ├── FileSystemStats.h │ │ ├── Hdfs.cpp │ │ ├── HttpClient.cpp │ │ ├── HttpClient.h │ │ ├── InputStream.cpp │ │ ├── InputStream.h │ │ ├── InputStreamImpl.cpp │ │ ├── InputStreamImpl.h │ │ ├── InputStreamInter.h │ │ ├── KerberosName.cpp │ │ ├── KerberosName.h │ │ ├── KmsClientProvider.cpp │ │ ├── KmsClientProvider.h │ │ ├── LeaseRenewer.cpp │ │ ├── LeaseRenewer.h │ │ ├── LocalBlockReader.cpp │ │ ├── LocalBlockReader.h │ │ ├── OutputStream.cpp │ │ ├── OutputStream.h │ │ ├── OutputStreamImpl.cpp │ │ ├── OutputStreamImpl.h │ │ ├── OutputStreamInter.h │ │ ├── Packet.cpp │ │ ├── Packet.h │ │ ├── PacketHeader.cpp │ │ ├── PacketHeader.h │ │ ├── PacketPool.cpp │ │ ├── PacketPool.h │ │ ├── PeerCache.cpp │ │ ├── PeerCache.h │ │ ├── Permission.cpp │ │ ├── Permission.h │ │ ├── Pipeline.cpp │ │ ├── Pipeline.h │ │ ├── PipelineAck.h │ │ ├── ReadShortCircuitInfo.cpp │ │ ├── ReadShortCircuitInfo.h │ │ ├── RemoteBlockReader.cpp │ │ ├── RemoteBlockReader.h │ │ ├── Token.cpp │ │ ├── Token.h │ │ ├── TokenInternal.h │ │ ├── UserInfo.cpp │ │ ├── UserInfo.h │ │ └── hdfs.h │ ├── common │ │ ├── Atomic.h │ │ ├── BigEndian.h │ │ ├── CFileWrapper.cpp │ │ ├── Checksum.h │ │ ├── DateTime.h │ │ ├── Exception.cpp │ │ ├── Exception.h │ │ ├── ExceptionInternal.cpp │ │ ├── ExceptionInternal.h │ │ ├── FileWrapper.h │ │ ├── Function.h │ │ ├── HWCrc32c.cpp │ │ ├── HWCrc32c.h │ │ ├── Hash.cpp │ │ ├── Hash.h │ │ ├── Logger.cpp │ │ ├── Logger.h │ │ ├── LruMap.h │ │ ├── MappedFileWrapper.cpp │ │ ├── Memory.h │ │ ├── SWCrc32c.cpp │ │ ├── SWCrc32c.h │ │ ├── SessionConfig.cpp │ │ ├── SessionConfig.h │ │ ├── StackPrinter.cpp │ │ ├── StackPrinter.h │ │ ├── StringUtil.h │ │ ├── Thread.cpp │ │ ├── Thread.h │ │ ├── Unordered.h │ │ ├── WritableUtils.cpp │ │ ├── WritableUtils.h │ │ ├── WriteBuffer.cpp │ │ ├── WriteBuffer.h │ │ ├── XmlConfig.cpp │ │ └── XmlConfig.h │ ├── 
doxyfile.in │ ├── libhdfs3.pc.in │ ├── network │ │ ├── BufferedSocketReader.cpp │ │ ├── BufferedSocketReader.h │ │ ├── DomainSocket.cpp │ │ ├── DomainSocket.h │ │ ├── Socket.h │ │ ├── Syscall.h │ │ ├── TcpSocket.cpp │ │ └── TcpSocket.h │ ├── platform.h.in │ ├── proto │ │ ├── ClientDatanodeProtocol.proto │ │ ├── ClientNamenodeProtocol.proto │ │ ├── IpcConnectionContext.proto │ │ ├── ProtobufRpcEngine.proto │ │ ├── RpcHeader.proto │ │ ├── Security.proto │ │ ├── datatransfer.proto │ │ ├── encryption.proto │ │ └── hdfs.proto │ ├── rpc │ │ ├── RpcAuth.cpp │ │ ├── RpcAuth.h │ │ ├── RpcCall.h │ │ ├── RpcChannel.cpp │ │ ├── RpcChannel.h │ │ ├── RpcChannelKey.cpp │ │ ├── RpcChannelKey.h │ │ ├── RpcClient.cpp │ │ ├── RpcClient.h │ │ ├── RpcConfig.cpp │ │ ├── RpcConfig.h │ │ ├── RpcContentWrapper.cpp │ │ ├── RpcContentWrapper.h │ │ ├── RpcProtocolInfo.cpp │ │ ├── RpcProtocolInfo.h │ │ ├── RpcRemoteCall.cpp │ │ ├── RpcRemoteCall.h │ │ ├── RpcServerInfo.cpp │ │ ├── RpcServerInfo.h │ │ ├── SaslClient.cpp │ │ └── SaslClient.h │ └── server │ │ ├── BlockLocalPathInfo.h │ │ ├── Datanode.cpp │ │ ├── Datanode.h │ │ ├── DatanodeInfo.h │ │ ├── EncryptionKey.h │ │ ├── ExtendedBlock.h │ │ ├── LocatedBlock.h │ │ ├── LocatedBlocks.cpp │ │ ├── LocatedBlocks.h │ │ ├── Namenode.h │ │ ├── NamenodeImpl.cpp │ │ ├── NamenodeImpl.h │ │ ├── NamenodeInfo.cpp │ │ ├── NamenodeInfo.h │ │ ├── NamenodeProxy.cpp │ │ ├── NamenodeProxy.h │ │ └── RpcHelper.h └── test │ ├── CMakeLists.txt │ ├── data │ ├── checksum1.in │ ├── checksum2.in │ ├── function-secure.xml │ ├── function-secure.xml.sample │ ├── function-test.xml │ ├── function-test.xml.sample │ ├── hdfs-default.xml │ ├── invalid.xml │ ├── invalidha.xml │ ├── unit-config.xml │ └── validha.xml │ ├── function │ ├── CMakeLists.txt │ ├── FunctionTestMain.cpp │ ├── TestCInterface.cpp │ ├── TestFileSystem.cpp │ ├── TestInputStream.cpp │ ├── TestKmsClient.cpp │ └── TestOutputStream.cpp │ ├── secure │ ├── CMakeLists.txt │ ├── FunctionTestSecureMain.cpp │ ├── 
SecureFunctionTest.cpp │ └── TestSecureCInterface.cpp │ └── unit │ ├── CMakeLists.txt │ ├── TestBufferedSocketReader.cpp │ ├── TestChecksum.cpp │ ├── TestException.cpp │ ├── TestGetHANamenodes.cpp │ ├── TestLeaseRenewer.cpp │ ├── TestLocatedBlocks.cpp │ ├── TestLruMap.cpp │ ├── TestRpcChannel.cpp │ ├── TestRpcClient.cpp │ ├── TestSessionConfig.cpp │ ├── TestSocket.cpp │ ├── TestXmlConfig.cpp │ ├── UnitTestCryptoCodec.cpp │ ├── UnitTestInputStream.cpp │ ├── UnitTestMain.cpp │ ├── UnitTestOutputStream.cpp │ └── UnitTestUtils.h └── sync_with_upstream.sh /.gitmodules: -------------------------------------------------------------------------------- 1 | [submodule "incubator-hawq"] 2 | path = incubator-hawq 3 | url = https://github.com/apache/incubator-hawq.git 4 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | ## This repository is not undergoing development 2 | 3 | There is currently no one maintaining this large C++ codebase. 4 | 5 | For those wishing to access HDFS from python, if you find problems installing or 6 | using hdfs3/libhdfs3, you are recommended to try the 7 | [interface in pyarrow](https://arrow.apache.org/docs/python/filesystems.html#hadoop-file-system-hdfs). 
8 | -------------------------------------------------------------------------------- /libhdfs3/.gitignore: -------------------------------------------------------------------------------- 1 | build/ 2 | Makefile.global 3 | -------------------------------------------------------------------------------- /libhdfs3/CMake/CMakeTestCompileNestedException.cpp: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | 4 | int main() { 5 | try { 6 | throw 2; 7 | } catch (int) { 8 | std::throw_with_nested(std::runtime_error("test")); 9 | } 10 | } 11 | -------------------------------------------------------------------------------- /libhdfs3/CMake/CMakeTestCompileSteadyClock.cpp: -------------------------------------------------------------------------------- 1 | #include 2 | 3 | using std::chrono::steady_clock; 4 | 5 | void foo(const steady_clock &clock) { 6 | return; 7 | } 8 | -------------------------------------------------------------------------------- /libhdfs3/CMake/CMakeTestCompileStrerror.cpp: -------------------------------------------------------------------------------- 1 | #include 2 | 3 | int main() 4 | { 5 | // We can't test "char *p = strerror_r()" because that only causes a 6 | // compiler warning when strerror_r returns an integer. 7 | char *buf = 0; 8 | int i = strerror_r(0, buf, 100); 9 | return i; 10 | } 11 | -------------------------------------------------------------------------------- /libhdfs3/CMake/CodeCoverage.cmake: -------------------------------------------------------------------------------- 1 | # Check prereqs 2 | FIND_PROGRAM(GCOV_PATH gcov) 3 | FIND_PROGRAM(LCOV_PATH lcov) 4 | FIND_PROGRAM(GENHTML_PATH genhtml) 5 | 6 | IF(NOT GCOV_PATH) 7 | MESSAGE(FATAL_ERROR "gcov not found! 
Aborting...") 8 | ENDIF(NOT GCOV_PATH) 9 | 10 | IF(NOT CMAKE_BUILD_TYPE STREQUAL Debug) 11 | MESSAGE(WARNING "Code coverage results with an optimised (non-Debug) build may be misleading") 12 | ENDIF(NOT CMAKE_BUILD_TYPE STREQUAL Debug) 13 | 14 | #Setup compiler options 15 | ADD_DEFINITIONS(-fprofile-arcs -ftest-coverage) 16 | 17 | SET(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -fprofile-arcs ") 18 | SET(CMAKE_SHARED_LINKER_FLAGS "${CMAKE_SHARED_LINKER_FLAGS} -fprofile-arcs ") 19 | 20 | IF(NOT LCOV_PATH) 21 | MESSAGE(FATAL_ERROR "lcov not found! Aborting...") 22 | ENDIF(NOT LCOV_PATH) 23 | 24 | IF(NOT GENHTML_PATH) 25 | MESSAGE(FATAL_ERROR "genhtml not found! Aborting...") 26 | ENDIF(NOT GENHTML_PATH) 27 | 28 | #Setup target 29 | ADD_CUSTOM_TARGET(ShowCoverage 30 | #Capturing lcov counters and generating report 31 | COMMAND ${LCOV_PATH} --directory . --capture --output-file CodeCoverage.info 32 | COMMAND ${LCOV_PATH} --remove CodeCoverage.info '${CMAKE_CURRENT_BINARY_DIR}/*' 'test/*' 'mock/*' '/usr/*' '/opt/*' '*ext/rhel5_x86_64*' '*ext/osx*' --output-file CodeCoverage.info.cleaned 33 | COMMAND ${GENHTML_PATH} -o CodeCoverageReport CodeCoverage.info.cleaned 34 | ) 35 | 36 | 37 | ADD_CUSTOM_TARGET(ShowAllCoverage 38 | #Capturing lcov counters and generating report 39 | COMMAND ${LCOV_PATH} -a CodeCoverage.info.cleaned -a CodeCoverage.info.cleaned_withoutHA -o AllCodeCoverage.info 40 | COMMAND sed -e 's|/.*/src|${CMAKE_SOURCE_DIR}/src|' -ig AllCodeCoverage.info 41 | COMMAND ${GENHTML_PATH} -o AllCodeCoverageReport AllCodeCoverage.info 42 | ) 43 | 44 | ADD_CUSTOM_TARGET(ResetCoverage 45 | #Cleanup lcov 46 | COMMAND ${LCOV_PATH} --directory . 
--zerocounters 47 | ) 48 | 49 | -------------------------------------------------------------------------------- /libhdfs3/CMake/FindCurl.cmake: -------------------------------------------------------------------------------- 1 | # - Try to find the CURL library (curl) 2 | # 3 | # Once done this will define 4 | # 5 | # CURL_FOUND - System has gnutls 6 | # CURL_INCLUDE_DIR - The gnutls include directory 7 | # CURL_LIBRARIES - The libraries needed to use gnutls 8 | # CURL_DEFINITIONS - Compiler switches required for using gnutls 9 | 10 | 11 | IF (CURL_INCLUDE_DIR AND CURL_LIBRARIES) 12 | # in cache already 13 | SET(CURL_FIND_QUIETLY TRUE) 14 | ENDIF (CURL_INCLUDE_DIR AND CURL_LIBRARIES) 15 | 16 | FIND_PATH(CURL_INCLUDE_DIR curl/curl.h) 17 | 18 | FIND_LIBRARY(CURL_LIBRARIES curl) 19 | 20 | INCLUDE(FindPackageHandleStandardArgs) 21 | 22 | # handle the QUIETLY and REQUIRED arguments and set CURL_FOUND to TRUE if 23 | # all listed variables are TRUE 24 | FIND_PACKAGE_HANDLE_STANDARD_ARGS(CURL DEFAULT_MSG CURL_LIBRARIES CURL_INCLUDE_DIR) 25 | 26 | MARK_AS_ADVANCED(CURL_INCLUDE_DIR CURL_LIBRARIES) 27 | -------------------------------------------------------------------------------- /libhdfs3/CMake/FindGSasl.cmake: -------------------------------------------------------------------------------- 1 | # - Try to find the GNU sasl library (gsasl) 2 | # 3 | # Once done this will define 4 | # 5 | # GSASL_FOUND - System has gnutls 6 | # GSASL_INCLUDE_DIR - The gnutls include directory 7 | # GSASL_LIBRARIES - The libraries needed to use gnutls 8 | # GSASL_DEFINITIONS - Compiler switches required for using gnutls 9 | 10 | 11 | IF (GSASL_INCLUDE_DIR AND GSASL_LIBRARIES) 12 | # in cache already 13 | SET(GSasl_FIND_QUIETLY TRUE) 14 | ENDIF (GSASL_INCLUDE_DIR AND GSASL_LIBRARIES) 15 | 16 | FIND_PATH(GSASL_INCLUDE_DIR gsasl.h) 17 | 18 | FIND_LIBRARY(GSASL_LIBRARIES gsasl) 19 | 20 | INCLUDE(FindPackageHandleStandardArgs) 21 | 22 | # handle the QUIETLY and REQUIRED arguments and set 
GSASL_FOUND to TRUE if 23 | # all listed variables are TRUE 24 | FIND_PACKAGE_HANDLE_STANDARD_ARGS(GSASL DEFAULT_MSG GSASL_LIBRARIES GSASL_INCLUDE_DIR) 25 | 26 | MARK_AS_ADVANCED(GSASL_INCLUDE_DIR GSASL_LIBRARIES) -------------------------------------------------------------------------------- /libhdfs3/CMake/FindGoogleTest.cmake: -------------------------------------------------------------------------------- 1 | include(CheckCXXSourceRuns) 2 | 3 | find_path(GTest_INCLUDE_DIR gtest/gtest.h 4 | NO_DEFAULT_PATH 5 | PATHS 6 | "${PROJECT_SOURCE_DIR}/../thirdparty/googletest/googletest/include" 7 | "/usr/local/include" 8 | "/usr/include") 9 | 10 | find_path(GMock_INCLUDE_DIR gmock/gmock.h 11 | NO_DEFAULT_PATH 12 | PATHS 13 | "${PROJECT_SOURCE_DIR}/../thirdparty/googletest/googlemock/include" 14 | "/usr/local/include" 15 | "/usr/include") 16 | 17 | find_library(Gtest_LIBRARY 18 | NAMES libgtest.a 19 | HINTS 20 | "${PROJECT_SOURCE_DIR}/../thirdparty/googletest/build/googlemock/gtest" 21 | "/usr/local/lib" 22 | "/usr/lib") 23 | 24 | find_library(Gmock_LIBRARY 25 | NAMES libgmock.a 26 | HINTS 27 | "${PROJECT_SOURCE_DIR}/../thirdparty/googletest/build/googlemock" 28 | "/usr/local/lib" 29 | "/usr/lib") 30 | 31 | message(STATUS "Find GoogleTest include path: ${GTest_INCLUDE_DIR}") 32 | message(STATUS "Find GoogleMock include path: ${GMock_INCLUDE_DIR}") 33 | message(STATUS "Find Gtest library path: ${Gtest_LIBRARY}") 34 | message(STATUS "Find Gmock library path: ${Gmock_LIBRARY}") 35 | 36 | set(CMAKE_REQUIRED_INCLUDES ${GTest_INCLUDE_DIR} ${GMock_INCLUDE_DIR}) 37 | set(CMAKE_REQUIRED_LIBRARIES ${Gtest_LIBRARY} ${Gmock_LIBRARY} -lpthread) 38 | set(CMAKE_REQUIRED_FLAGS) 39 | check_cxx_source_runs(" 40 | #include 41 | #include 42 | int main(int argc, char *argv[]) 43 | { 44 | double pi = 3.14; 45 | EXPECT_EQ(pi, 3.14); 46 | return 0; 47 | } 48 | " GoogleTest_CHECK_FINE) 49 | message(STATUS "GoogleTest check: ${GoogleTest_CHECK_FINE}") 50 | 51 | 
include(FindPackageHandleStandardArgs) 52 | find_package_handle_standard_args( 53 | GoogleTest 54 | REQUIRED_VARS 55 | GTest_INCLUDE_DIR 56 | GMock_INCLUDE_DIR 57 | Gtest_LIBRARY 58 | Gmock_LIBRARY 59 | GoogleTest_CHECK_FINE) 60 | 61 | set(GoogleTest_INCLUDE_DIR ${GTest_INCLUDE_DIR} ${GMock_INCLUDE_DIR}) 62 | set(GoogleTest_LIBRARIES ${Gtest_LIBRARY} ${Gmock_LIBRARY}) 63 | mark_as_advanced( 64 | GoogleTest_INCLUDE_DIR 65 | GoogleTest_LIBRARIES) 66 | -------------------------------------------------------------------------------- /libhdfs3/CMake/FindKERBEROS.cmake: -------------------------------------------------------------------------------- 1 | # - Find kerberos 2 | # Find the native KERBEROS includes and library 3 | # 4 | # KERBEROS_INCLUDE_DIRS - where to find krb5.h, etc. 5 | # KERBEROS_LIBRARIES - List of libraries when using krb5. 6 | # KERBEROS_FOUND - True if krb5 found. 7 | 8 | IF (KERBEROS_INCLUDE_DIRS) 9 | # Already in cache, be silent 10 | SET(KERBEROS_FIND_QUIETLY TRUE) 11 | ENDIF (KERBEROS_INCLUDE_DIRS) 12 | 13 | FIND_PATH(KERBEROS_INCLUDE_DIRS krb5.h) 14 | 15 | SET(KERBEROS_NAMES krb5 k5crypto com_err) 16 | FIND_LIBRARY(KERBEROS_LIBRARIES NAMES ${KERBEROS_NAMES}) 17 | 18 | # handle the QUIETLY and REQUIRED arguments and set KERBEROS_FOUND to TRUE if 19 | # all listed variables are TRUE 20 | INCLUDE(FindPackageHandleStandardArgs) 21 | FIND_PACKAGE_HANDLE_STANDARD_ARGS(KERBEROS DEFAULT_MSG KERBEROS_LIBRARIES KERBEROS_INCLUDE_DIRS) 22 | 23 | MARK_AS_ADVANCED(KERBEROS_LIBRARIES KERBEROS_INCLUDE_DIRS) 24 | -------------------------------------------------------------------------------- /libhdfs3/CMake/FindLibUUID.cmake: -------------------------------------------------------------------------------- 1 | # - Find libuuid 2 | # Find the native LIBUUID includes and library 3 | # 4 | # LIBUUID_INCLUDE_DIRS - where to find uuid/uuid.h, etc. 5 | # LIBUUID_LIBRARIES - List of libraries when using uuid. 6 | # LIBUUID_FOUND - True if uuid found. 
7 | 8 | IF (LIBUUID_INCLUDE_DIRS) 9 | # Already in cache, be silent 10 | SET(LIBUUID_FIND_QUIETLY TRUE) 11 | ENDIF (LIBUUID_INCLUDE_DIRS) 12 | 13 | FIND_PATH(LIBUUID_INCLUDE_DIRS uuid/uuid.h) 14 | 15 | SET(LIBUUID_NAMES uuid) 16 | FIND_LIBRARY(LIBUUID_LIBRARIES NAMES ${LIBUUID_NAMES}) 17 | 18 | # handle the QUIETLY and REQUIRED arguments and set LIBUUID_FOUND to TRUE if 19 | # all listed variables are TRUE 20 | INCLUDE(FindPackageHandleStandardArgs) 21 | FIND_PACKAGE_HANDLE_STANDARD_ARGS(LIBUUID DEFAULT_MSG LIBUUID_LIBRARIES LIBUUID_INCLUDE_DIRS) 22 | 23 | MARK_AS_ADVANCED(LIBUUID_LIBRARIES LIBUUID_INCLUDE_DIRS) 24 | -------------------------------------------------------------------------------- /libhdfs3/CMake/FindSSL.cmake: -------------------------------------------------------------------------------- 1 | # - Try to find the Open ssl library (ssl) 2 | # 3 | # Once done this will define 4 | # 5 | # SSL_FOUND - System has gnutls 6 | # SSL_INCLUDE_DIR - The gnutls include directory 7 | # SSL_LIBRARIES - The libraries needed to use gnutls 8 | # SSL_DEFINITIONS - Compiler switches required for using gnutls 9 | 10 | 11 | IF (SSL_INCLUDE_DIR AND SSL_LIBRARIES) 12 | # in cache already 13 | SET(SSL_FIND_QUIETLY TRUE) 14 | ENDIF (SSL_INCLUDE_DIR AND SSL_LIBRARIES) 15 | 16 | FIND_PATH(SSL_INCLUDE_DIR openssl/opensslv.h) 17 | 18 | FIND_LIBRARY(SSL_LIBRARIES crypto) 19 | 20 | INCLUDE(FindPackageHandleStandardArgs) 21 | 22 | # handle the QUIETLY and REQUIRED arguments and set SSL_FOUND to TRUE if 23 | # all listed variables are TRUE 24 | FIND_PACKAGE_HANDLE_STANDARD_ARGS(SSL DEFAULT_MSG SSL_LIBRARIES SSL_INCLUDE_DIR) 25 | 26 | MARK_AS_ADVANCED(SSL_INCLUDE_DIR SSL_LIBRARIES) 27 | -------------------------------------------------------------------------------- /libhdfs3/CMake/Functions.cmake: -------------------------------------------------------------------------------- 1 | FUNCTION(AUTO_SOURCES RETURN_VALUE PATTERN SOURCE_SUBDIRS) 2 | 3 | IF ("${SOURCE_SUBDIRS}" STREQUAL 
"RECURSE") 4 | SET(PATH ".") 5 | IF (${ARGC} EQUAL 4) 6 | LIST(GET ARGV 3 PATH) 7 | ENDIF () 8 | ENDIF() 9 | 10 | IF ("${SOURCE_SUBDIRS}" STREQUAL "RECURSE") 11 | UNSET(${RETURN_VALUE}) 12 | FILE(GLOB SUBDIR_FILES "${PATH}/${PATTERN}") 13 | LIST(APPEND ${RETURN_VALUE} ${SUBDIR_FILES}) 14 | 15 | FILE(GLOB SUBDIRS RELATIVE ${PATH} ${PATH}/*) 16 | 17 | FOREACH(DIR ${SUBDIRS}) 18 | IF (IS_DIRECTORY ${PATH}/${DIR}) 19 | IF (NOT "${DIR}" STREQUAL "CMAKEFILES") 20 | FILE(GLOB_RECURSE SUBDIR_FILES "${PATH}/${DIR}/${PATTERN}") 21 | LIST(APPEND ${RETURN_VALUE} ${SUBDIR_FILES}) 22 | ENDIF() 23 | ENDIF() 24 | ENDFOREACH() 25 | ELSE () 26 | FILE(GLOB ${RETURN_VALUE} "${PATTERN}") 27 | 28 | FOREACH (PATH ${SOURCE_SUBDIRS}) 29 | FILE(GLOB SUBDIR_FILES "${PATH}/${PATTERN}") 30 | LIST(APPEND ${RETURN_VALUE} ${SUBDIR_FILES}) 31 | ENDFOREACH(PATH ${SOURCE_SUBDIRS}) 32 | ENDIF () 33 | 34 | IF (${FILTER_OUT}) 35 | LIST(REMOVE_ITEM ${RETURN_VALUE} ${FILTER_OUT}) 36 | ENDIF() 37 | 38 | SET(${RETURN_VALUE} ${${RETURN_VALUE}} PARENT_SCOPE) 39 | ENDFUNCTION(AUTO_SOURCES) 40 | 41 | FUNCTION(CONTAINS_STRING FILE SEARCH RETURN_VALUE) 42 | FILE(STRINGS ${FILE} FILE_CONTENTS REGEX ".*${SEARCH}.*") 43 | IF (FILE_CONTENTS) 44 | SET(${RETURN_VALUE} TRUE PARENT_SCOPE) 45 | ENDIF() 46 | ENDFUNCTION(CONTAINS_STRING) 47 | -------------------------------------------------------------------------------- /libhdfs3/CMake/Platform.cmake: -------------------------------------------------------------------------------- 1 | IF(CMAKE_SYSTEM_NAME STREQUAL "Linux") 2 | SET(OS_LINUX true CACHE INTERNAL "Linux operating system") 3 | ELSEIF(CMAKE_SYSTEM_NAME STREQUAL "Darwin") 4 | SET(OS_MACOSX true CACHE INTERNAL "Mac Darwin operating system") 5 | ELSE(CMAKE_SYSTEM_NAME STREQUAL "Linux") 6 | MESSAGE(FATAL_ERROR "Unsupported OS: \"${CMAKE_SYSTEM_NAME}\"") 7 | ENDIF(CMAKE_SYSTEM_NAME STREQUAL "Linux") 8 | 9 | IF(CMAKE_COMPILER_IS_GNUCXX) 10 | SET(GCC_COMPILER_VERSION ${CMAKE_CXX_COMPILER_VERSION}) 11 | 12 | IF (NOT 
GCC_COMPILER_VERSION) 13 | MESSAGE(FATAL_ERROR "Cannot get gcc version") 14 | ENDIF (NOT GCC_COMPILER_VERSION) 15 | 16 | STRING(REGEX MATCHALL "[0-9]+" GCC_COMPILER_VERSION ${GCC_COMPILER_VERSION}) 17 | 18 | LIST(GET GCC_COMPILER_VERSION 0 GCC_COMPILER_VERSION_MAJOR) 19 | LIST(GET GCC_COMPILER_VERSION 1 GCC_COMPILER_VERSION_MINOR) 20 | 21 | SET(GCC_COMPILER_VERSION_MAJOR ${GCC_COMPILER_VERSION_MAJOR} CACHE INTERNAL "gcc major version") 22 | SET(GCC_COMPILER_VERSION_MINOR ${GCC_COMPILER_VERSION_MINOR} CACHE INTERNAL "gcc minor version") 23 | 24 | IF(GCC_COMPILER_VERSION VERSION_GREATER 4.9 OR GCC_COMPILER_VERSION VERSION_EQUAL 4.9) 25 | ADD_DEFINITIONS(-fdiagnostics-color=always) 26 | ENDIF(GCC_COMPILER_VERSION VERSION_GREATER 4.9 OR GCC_COMPILER_VERSION VERSION_EQUAL 4.9) 27 | 28 | 29 | MESSAGE(STATUS "checking compiler: GCC (${GCC_COMPILER_VERSION_MAJOR}.${GCC_COMPILER_VERSION_MINOR}.${GCC_COMPILER_VERSION_PATCH})") 30 | ELSE(CMAKE_COMPILER_IS_GNUCXX) 31 | EXECUTE_PROCESS(COMMAND ${CMAKE_C_COMPILER} --version OUTPUT_VARIABLE COMPILER_OUTPUT) 32 | IF(COMPILER_OUTPUT MATCHES "clang") 33 | SET(CMAKE_COMPILER_IS_CLANG true CACHE INTERNAL "using clang as compiler") 34 | MESSAGE(STATUS "checking compiler: CLANG") 35 | ELSE(COMPILER_OUTPUT MATCHES "clang") 36 | MESSAGE(FATAL_ERROR "Unsupported compiler: \"${CMAKE_CXX_COMPILER}\"") 37 | ENDIF(COMPILER_OUTPUT MATCHES "clang") 38 | ENDIF(CMAKE_COMPILER_IS_GNUCXX) 39 | -------------------------------------------------------------------------------- /libhdfs3/CMakeLists.txt: -------------------------------------------------------------------------------- 1 | CMAKE_MINIMUM_REQUIRED(VERSION 2.8) 2 | 3 | PROJECT(libhdfs3) 4 | 5 | SET(CMAKE_VERBOSE_MAKEFILE ON CACHE STRING "Verbose build." FORCE) 6 | 7 | IF(${CMAKE_SOURCE_DIR} STREQUAL ${CMAKE_BINARY_DIR}) 8 | MESSAGE(FATAL_ERROR "cannot build the project in the source directory! 
Out-of-source build is enforced!") 9 | ENDIF() 10 | 11 | SET(CMAKE_MODULE_PATH "${CMAKE_CURRENT_SOURCE_DIR}/CMake" ${CMAKE_MODULE_PATH}) 12 | SET(DOXYFILE_PATH ${CMAKE_SOURCE_DIR}/docs) 13 | 14 | INCLUDE(Platform) 15 | INCLUDE(Functions) 16 | INCLUDE(Options) 17 | 18 | 19 | FIND_PACKAGE(LibXml2 REQUIRED) 20 | FIND_PACKAGE(Protobuf REQUIRED) 21 | FIND_PACKAGE(KERBEROS REQUIRED) 22 | FIND_PACKAGE(GSasl REQUIRED) 23 | FIND_PACKAGE(GoogleTest REQUIRED) 24 | FIND_PACKAGE(SSL REQUIRED) 25 | FIND_PACKAGE(CURL REQUIRED) 26 | INCLUDE_DIRECTORIES(${GoogleTest_INCLUDE_DIR}) 27 | LINK_LIBRARIES(${GoogleTest_LIBRARIES}) 28 | 29 | IF(OS_LINUX) 30 | FIND_PACKAGE(LibUUID REQUIRED) 31 | ENDIF(OS_LINUX) 32 | 33 | ADD_SUBDIRECTORY(mock) 34 | ADD_SUBDIRECTORY(src) 35 | ADD_SUBDIRECTORY(test) 36 | 37 | CONFIGURE_FILE(src/libhdfs3.pc.in ${CMAKE_SOURCE_DIR}/src/libhdfs3.pc @ONLY) 38 | CONFIGURE_FILE(debian/changelog.in ${CMAKE_SOURCE_DIR}/debian/changelog @ONLY) 39 | 40 | ADD_CUSTOM_TARGET(debian-package 41 | COMMAND dpkg-buildpackage -us -uc -b 42 | WORKING_DIRECTORY ${CMAKE_SOURCE_DIR} 43 | COMMENT "Create debian package..." 44 | ) 45 | 46 | ADD_CUSTOM_TARGET(rpm-package 47 | COMMAND rpmbuild -bb --define "_topdir ${CMAKE_SOURCE_DIR}/rpms" --define "version ${libhdfs3_VERSION_STRING}" ${CMAKE_SOURCE_DIR}/rpms/libhdfs3.spec 48 | WORKING_DIRECTORY ${CMAKE_SOURCE_DIR} 49 | COMMENT "Create rpm package..." 50 | ) 51 | 52 | ADD_CUSTOM_TARGET(doc 53 | COMMAND doxygen ${CMAKE_BINARY_DIR}/src/doxyfile 54 | WORKING_DIRECTORY ${DOXYFILE_PATH} 55 | COMMENT "Generate documents..." 
56 | ) 57 | 58 | ADD_CUSTOM_TARGET(style 59 | COMMAND astyle --style=attach --indent=spaces=4 --indent-preprocessor --break-blocks --pad-oper --pad-header --unpad-paren --delete-empty-lines --suffix=none --align-pointer=middle --lineend=linux --indent-col1-comments ${libhdfs3_SOURCES} 60 | COMMAND astyle --style=attach --indent=spaces=4 --indent-preprocessor --break-blocks --pad-oper --pad-header --unpad-paren --delete-empty-lines --suffix=none --align-pointer=middle --lineend=linux --indent-col1-comments ${unit_SOURCES} 61 | COMMAND astyle --style=attach --indent=spaces=4 --indent-preprocessor --break-blocks --pad-oper --pad-header --unpad-paren --delete-empty-lines --suffix=none --align-pointer=middle --lineend=linux --indent-col1-comments ${function_SOURCES} 62 | COMMAND astyle --style=attach --indent=spaces=4 --indent-preprocessor --break-blocks --pad-oper --pad-header --unpad-paren --delete-empty-lines --suffix=none --align-pointer=middle --lineend=linux --indent-col1-comments ${secure_SOURCES} 63 | WORKING_DIRECTORY ${CMAKE_SOURCE_DIR} 64 | COMMENT "format code style..." 65 | ) 66 | -------------------------------------------------------------------------------- /libhdfs3/Makefile: -------------------------------------------------------------------------------- 1 | # Licensed to the Apache Software Foundation (ASF) under one 2 | # or more contributor license agreements. See the NOTICE file 3 | # distributed with this work for additional information 4 | # regarding copyright ownership. The ASF licenses this file 5 | # to you under the Apache License, Version 2.0 (the 6 | # "License"); you may not use this file except in compliance 7 | # with the License. 
You may obtain a copy of the License at 8 | # 9 | # http://www.apache.org/licenses/LICENSE-2.0 10 | # 11 | # Unless required by applicable law or agreed to in writing, 12 | # software distributed under the License is distributed on an 13 | # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY 14 | # KIND, either express or implied. See the License for the 15 | # specific language governing permissions and limitations 16 | # under the License. 17 | # 18 | # -*-makefile-*- 19 | #------------------------------------------------------------------------------ 20 | # A makefile that integrate building this module with hawq 21 | #------------------------------------------------------------------------------ 22 | subdir = depends/libhdfs3 23 | top_builddir = ../../ 24 | include Makefile.global 25 | 26 | PRE_CFG_ARG = 27 | # get argument for running ../boostrap 28 | ifeq ($(enable_debug), yes) 29 | PRE_CFG_ARG += --enable-debug 30 | endif # enable_debug 31 | 32 | ifeq ($(enable_coverage), yes) 33 | PRE_CFG_ARG += --enable-coverage 34 | endif # enable_coverage 35 | 36 | ########################################################################## 37 | # 38 | .PHONY: build all install distclean maintainer-clean clean pre-config 39 | 40 | ifeq ($(with_libhdfs3), yes) 41 | 42 | # We will need to install it temporarily under build/install for hawq building. 43 | all: build 44 | cd $(top_builddir)/$(subdir)/build; mkdir -p install; \ 45 | $(MAKE) DESTDIR=$(abs_top_builddir)/$(subdir)/build/install install 46 | 47 | install: build 48 | cd $(top_builddir)/$(subdir)/build && $(MAKE) install 49 | 50 | distclean: 51 | rm -rf $(top_builddir)/$(subdir)/build 52 | 53 | maintainer-clean: distclean 54 | 55 | clean: 56 | if [ -d $(top_builddir)/$(subdir)/build ]; then \ 57 | cd $(top_builddir)/$(subdir)/build && $(MAKE) clean && rm -f libhdfs3_build_timestamp; \ 58 | fi 59 | 60 | build: pre-config 61 | cd $(top_builddir)/$(subdir)/build && $(MAKE) 62 | 63 | # trigger bootstrap only once. 
64 | pre-config: 65 | cd $(top_builddir)/$(subdir)/; \ 66 | mkdir -p build; \ 67 | cd build; \ 68 | if [ ! -f libhdfs3_build_timestamp ]; then \ 69 | $(abs_top_srcdir)/$(subdir)/bootstrap --prefix=$(prefix) $(PRE_CFG_ARG) && touch libhdfs3_build_timestamp; \ 70 | fi 71 | 72 | else 73 | 74 | all install distclean maintainer-clean clean pre-config: 75 | 76 | endif 77 | -------------------------------------------------------------------------------- /libhdfs3/Makefile.global.in: -------------------------------------------------------------------------------- 1 | # Licensed to the Apache Software Foundation (ASF) under one 2 | # or more contributor license agreements. See the NOTICE file 3 | # distributed with this work for additional information 4 | # regarding copyright ownership. The ASF licenses this file 5 | # to you under the Apache License, Version 2.0 (the 6 | # "License"); you may not use this file except in compliance 7 | # with the License. You may obtain a copy of the License at 8 | # 9 | # http://www.apache.org/licenses/LICENSE-2.0 10 | # 11 | # Unless required by applicable law or agreed to in writing, 12 | # software distributed under the License is distributed on an 13 | # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY 14 | # KIND, either express or implied. See the License for the 15 | # specific language governing permissions and limitations 16 | # under the License. 
17 | # 18 | # -*-makefile-*- 19 | #------------------------------------------------------------------------------ 20 | # A makefile that integrate building this module with hawq 21 | #------------------------------------------------------------------------------ 22 | 23 | prefix := @prefix@ 24 | enable_debug = @enable_debug@ 25 | enable_coverage = @enable_coverage@ 26 | with_libhdfs3 = @with_libhdfs3@ 27 | 28 | # Support for VPATH builds 29 | vpath_build = @vpath_build@ 30 | abs_top_srcdir = @abs_top_srcdir@ 31 | abs_top_builddir = @abs_top_builddir@ 32 | 33 | ifneq ($(vpath_build),yes) 34 | top_srcdir = $(top_builddir) 35 | srcdir = . 36 | else # vpath_build = yes 37 | top_srcdir = $(abs_top_srcdir) 38 | srcdir = $(top_srcdir)/$(subdir) 39 | VPATH = $(srcdir) 40 | endif 41 | -------------------------------------------------------------------------------- /libhdfs3/debian/.gitignore: -------------------------------------------------------------------------------- 1 | files 2 | libhdfs3.postinst.debhelper 3 | libhdfs3.postrm.debhelper 4 | libhdfs3.debhelper.log 5 | libhdfs3.substvars 6 | libhdfs3-1.postinst.debhelper 7 | libhdfs3-1.postrm.debhelper 8 | libhdfs3-1.substvars 9 | libhdfs3-1/ 10 | libhdfs3-dev.debhelper.log 11 | libhdfs3-dev.substvars 12 | libhdfs3-dev/ 13 | libhdfs3/ 14 | tmp/ 15 | changelog 16 | -------------------------------------------------------------------------------- /libhdfs3/debian/changelog.in: -------------------------------------------------------------------------------- 1 | libhdfs3 (@libhdfs3_VERSION_STRING@-1) unstable; urgency=low 2 | 3 | * Initial release 4 | 5 | -- Zhanwei Wang Fri, 06 Feb 2015 11:58:35 +0100 6 | -------------------------------------------------------------------------------- /libhdfs3/debian/compat: -------------------------------------------------------------------------------- 1 | 9 2 | -------------------------------------------------------------------------------- /libhdfs3/debian/control: 
-------------------------------------------------------------------------------- 1 | Source: libhdfs3 2 | Priority: optional 3 | Maintainer: Zhanwei Wang 4 | Build-Depends: debhelper (>= 9), cmake, libprotobuf-dev, protobuf-compiler, libxml2-dev, libkrb5-dev, uuid-dev, libgsasl7-dev 5 | Standards-Version: 3.9.5 6 | Section: libs 7 | 8 | Package: libhdfs3-dev 9 | Section: libdevel 10 | Architecture: any 11 | Depends: libhdfs3 (= ${binary:Version}), ${misc:Depends} 12 | Description: Native C/C++ HDFS Client - development files 13 | Libhdfs3, designed as an alternative implementation of libhdfs, 14 | is implemented based on native Hadoop RPC protocol and 15 | HDFS data transfer protocol. 16 | It gets rid of the drawbacks of JNI, and it has a lightweight, 17 | small memory footprint code base. In addition, it is easy to use and deploy. 18 | 19 | Package: libhdfs3 20 | Architecture: any 21 | Depends: ${shlibs:Depends}, ${misc:Depends} 22 | Description: Native C/C++ HDFS Client 23 | Libhdfs3, designed as an alternative implementation of libhdfs, 24 | is implemented based on native Hadoop RPC protocol and 25 | HDFS data transfer protocol. 26 | It gets rid of the drawbacks of JNI, and it has a lightweight, 27 | small memory footprint code base. In addition, it is easy to use and deploy. 28 | . 29 | -------------------------------------------------------------------------------- /libhdfs3/debian/copyright: -------------------------------------------------------------------------------- 1 | Format: http://www.debian.org/doc/packaging-manuals/copyright-format/1.0/ 2 | Upstream-Name: libhdfs3 3 | 4 | Files: * 5 | License: Apache-2.0 6 | 7 | License: Apache-2.0 8 | Licensed under the Apache License, Version 2.0 (the "License"); 9 | you may not use this file except in compliance with the License. 10 | You may obtain a copy of the License at 11 | . 12 | http://www.apache.org/licenses/LICENSE-2.0 13 | . 
14 | Unless required by applicable law or agreed to in writing, software 15 | distributed under the License is distributed on an "AS IS" BASIS, 16 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 17 | See the License for the specific language governing permissions and 18 | limitations under the License. 19 | . 20 | On Debian systems, the complete text of the Apache version 2.0 license 21 | can be found in "/usr/share/common-licenses/Apache-2.0". 22 | -------------------------------------------------------------------------------- /libhdfs3/debian/libhdfs3-dev.dirs: -------------------------------------------------------------------------------- 1 | usr/lib 2 | usr/include 3 | -------------------------------------------------------------------------------- /libhdfs3/debian/libhdfs3-dev.install: -------------------------------------------------------------------------------- 1 | usr/include/* 2 | usr/lib/lib*.a 3 | usr/lib/lib*.so 4 | usr/lib/pkgconfig/* 5 | -------------------------------------------------------------------------------- /libhdfs3/debian/libhdfs3-dev.lintian-overrides: -------------------------------------------------------------------------------- 1 | libhdfs3-dev: new-package-should-close-itp-bug 2 | -------------------------------------------------------------------------------- /libhdfs3/debian/libhdfs3.dirs: -------------------------------------------------------------------------------- 1 | usr/lib 2 | -------------------------------------------------------------------------------- /libhdfs3/debian/libhdfs3.install: -------------------------------------------------------------------------------- 1 | usr/lib/lib*.so.* 2 | -------------------------------------------------------------------------------- /libhdfs3/debian/libhdfs3.lintian-overrides: -------------------------------------------------------------------------------- 1 | libhdfs3-1: new-package-should-close-itp-bug 2 | 
-------------------------------------------------------------------------------- /libhdfs3/debian/rules: -------------------------------------------------------------------------------- 1 | #!/usr/bin/make -f 2 | # See debhelper(7) (uncomment to enable) 3 | # output every command that modifies files on the build system. 4 | #DH_VERBOSE = 1 5 | 6 | # see EXAMPLES in dpkg-buildflags(1) and read /usr/share/dpkg/* 7 | DPKG_EXPORT_BUILDFLAGS = 1 8 | include /usr/share/dpkg/default.mk 9 | 10 | # see FEATURE AREAS in dpkg-buildflags(1) 11 | #export DEB_BUILD_MAINT_OPTIONS = hardening=+all 12 | 13 | # main packaging script based on dh7 syntax 14 | %: 15 | dh $@ --parallel 16 | 17 | # debmake generated override targets 18 | # This is example for Cmake (See http://bugs.debian.org/641051 ) 19 | #override_dh_auto_configure: 20 | # dh_auto_configure -- \ 21 | # -DCMAKE_LIBRARY_PATH=$(DEB_HOST_MULTIARCH) 22 | 23 | # Tests needs a running Hadoop cluster 24 | override_dh_auto_test: 25 | -------------------------------------------------------------------------------- /libhdfs3/debian/source/format: -------------------------------------------------------------------------------- 1 | 3.0 (quilt) 2 | -------------------------------------------------------------------------------- /libhdfs3/mock/CMakeLists.txt: -------------------------------------------------------------------------------- 1 | CMAKE_MINIMUM_REQUIRED(VERSION 2.8) 2 | 3 | AUTO_SOURCES(files "*.cpp" "RECURSE" "${CMAKE_CURRENT_SOURCE_DIR}") 4 | LIST(APPEND libhdfs3_MOCK_SOURCES ${files}) 5 | 6 | SET(libhdfs3_MOCK_SOURCES ${libhdfs3_MOCK_SOURCES} PARENT_SCOPE) 7 | 8 | -------------------------------------------------------------------------------- /libhdfs3/mock/MockBufferedSocketReader.h: -------------------------------------------------------------------------------- 1 | /******************************************************************** 2 | * 2014 - 3 | * open source under Apache License Version 2.0 4 | 
********************************************************************/ 5 | /** 6 | * Licensed to the Apache Software Foundation (ASF) under one 7 | * or more contributor license agreements. See the NOTICE file 8 | * distributed with this work for additional information 9 | * regarding copyright ownership. The ASF licenses this file 10 | * to you under the Apache License, Version 2.0 (the 11 | * "License"); you may not use this file except in compliance 12 | * with the License. You may obtain a copy of the License at 13 | * 14 | * http://www.apache.org/licenses/LICENSE-2.0 15 | * 16 | * Unless required by applicable law or agreed to in writing, software 17 | * distributed under the License is distributed on an "AS IS" BASIS, 18 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 19 | * See the License for the specific language governing permissions and 20 | * limitations under the License. 21 | */ 22 | #ifndef _HDFS_LIBHDFS3_MOCK_MOCKBUFFEREDSOCKETREADER_H_ 23 | #define _HDFS_LIBHDFS3_MOCK_MOCKBUFFEREDSOCKETREADER_H_ 24 | 25 | #include "gmock/gmock.h" 26 | #include "network/BufferedSocketReader.h" 27 | 28 | namespace Hdfs { 29 | namespace Mock { 30 | 31 | class MockBufferedSocketReader: public Hdfs::Internal::BufferedSocketReader { 32 | public: 33 | MOCK_METHOD2(read, int32_t(char * b, int32_t s)); 34 | MOCK_METHOD3(readFully, void(char * b, int32_t s, int timeout)); 35 | MOCK_METHOD1(readBigEndianInt32, int32_t(int timeout)); 36 | MOCK_METHOD1(readVarint32, int32_t(int timeout)); 37 | MOCK_METHOD1(poll, bool(int timeout)); 38 | }; 39 | 40 | } 41 | } 42 | 43 | #endif /* _HDFS_LIBHDFS3_MOCK_MOCKBUFFEREDSOCKETREADER_H_ */ 44 | -------------------------------------------------------------------------------- /libhdfs3/mock/MockCryptoCodec.h: -------------------------------------------------------------------------------- 1 | /******************************************************************** 2 | * 2014 - 3 | * open source under Apache License 
Version 2.0 4 | ********************************************************************/ 5 | /** 6 | * Licensed to the Apache Software Foundation (ASF) under one 7 | * or more contributor license agreements. See the NOTICE file 8 | * distributed with this work for additional information 9 | * regarding copyright ownership. The ASF licenses this file 10 | * to you under the Apache License, Version 2.0 (the 11 | * "License"); you may not use this file except in compliance 12 | * with the License. You may obtain a copy of the License at 13 | * 14 | * http://www.apache.org/licenses/LICENSE-2.0 15 | * 16 | * Unless required by applicable law or agreed to in writing, software 17 | * distributed under the License is distributed on an "AS IS" BASIS, 18 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 19 | * See the License for the specific language governing permissions and 20 | * limitations under the License. 21 | */ 22 | #ifndef _HDFS_LIBHDFS3_MOCK_CRYPTOCODEC_H_ 23 | #define _HDFS_LIBHDFS3_MOCK_CRYPTOCODEC_H_ 24 | 25 | #include "gmock/gmock.h" 26 | 27 | #include "client/CryptoCodec.h" 28 | #include "client/KmsClientProvider.h" 29 | 30 | class MockCryptoCodec: public Hdfs::CryptoCodec { 31 | public: 32 | MockCryptoCodec(FileEncryptionInfo *encryptionInfo, shared_ptr kcp, int32_t bufSize) : CryptoCodec(encryptionInfo, kcp, bufSize) {} 33 | MOCK_METHOD2(encode, std::string(const char * buffer,int64_t size)); 34 | MOCK_METHOD2(decode, std::string(const char * buffer,int64_t size)); 35 | }; 36 | 37 | #endif /* _HDFS_LIBHDFS3_MOCK_CRYPTOCODEC_H_ */ 38 | -------------------------------------------------------------------------------- /libhdfs3/mock/MockDatanode.h: -------------------------------------------------------------------------------- 1 | /******************************************************************** 2 | * 2014 - 3 | * open source under Apache License Version 2.0 4 | ********************************************************************/ 
5 | /** 6 | * Licensed to the Apache Software Foundation (ASF) under one 7 | * or more contributor license agreements. See the NOTICE file 8 | * distributed with this work for additional information 9 | * regarding copyright ownership. The ASF licenses this file 10 | * to you under the Apache License, Version 2.0 (the 11 | * "License"); you may not use this file except in compliance 12 | * with the License. You may obtain a copy of the License at 13 | * 14 | * http://www.apache.org/licenses/LICENSE-2.0 15 | * 16 | * Unless required by applicable law or agreed to in writing, software 17 | * distributed under the License is distributed on an "AS IS" BASIS, 18 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 19 | * See the License for the specific language governing permissions and 20 | * limitations under the License. 21 | */ 22 | #ifndef _HDFS_LIBHDFS3_MOCK_MOCKDATANODE_H_ 23 | #define _HDFS_LIBHDFS3_MOCK_MOCKDATANODE_H_ 24 | 25 | #include "gmock/gmock.h" 26 | #include "server/Datanode.h" 27 | 28 | using namespace Hdfs::Internal; 29 | namespace Hdfs { 30 | 31 | namespace Mock { 32 | 33 | class MockDatanode: public Datanode { 34 | public: 35 | MOCK_METHOD1(getReplicaVisibleLength, int64_t (const Hdfs::Internal::ExtendedBlock & b)); 36 | MOCK_METHOD3(getBlockLocalPathInfo, void (const Hdfs::Internal::ExtendedBlock & block, 37 | const Hdfs::Internal::Token & token, Hdfs::Internal::BlockLocalPathInfo & info)); 38 | MOCK_METHOD0(sendPing, void ()); 39 | }; 40 | 41 | } 42 | } 43 | 44 | #endif /* _HDFS_LIBHDFS3_MOCK_MOCKDATANODE_H_ */ 45 | -------------------------------------------------------------------------------- /libhdfs3/mock/MockHttpClient.h: -------------------------------------------------------------------------------- 1 | /******************************************************************** 2 | * 2014 - 3 | * open source under Apache License Version 2.0 4 | ********************************************************************/ 5 | 
/** 6 | * Licensed to the Apache Software Foundation (ASF) under one 7 | * or more contributor license agreements. See the NOTICE file 8 | * distributed with this work for additional information 9 | * regarding copyright ownership. The ASF licenses this file 10 | * to you under the Apache License, Version 2.0 (the 11 | * "License"); you may not use this file except in compliance 12 | * with the License. You may obtain a copy of the License at 13 | * 14 | * http://www.apache.org/licenses/LICENSE-2.0 15 | * 16 | * Unless required by applicable law or agreed to in writing, software 17 | * distributed under the License is distributed on an "AS IS" BASIS, 18 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 19 | * See the License for the specific language governing permissions and 20 | * limitations under the License. 21 | */ 22 | #ifndef _HDFS_LIBHDFS3_MOCK_HTTPCLIENT_H_ 23 | #define _HDFS_LIBHDFS3_MOCK_HTTPCLIENT_H_ 24 | 25 | #include "gmock/gmock.h" 26 | 27 | #include "client/HttpClient.h" 28 | #include "client/KmsClientProvider.h" 29 | #include 30 | 31 | using boost::property_tree::ptree; 32 | 33 | class MockHttpClient: public Hdfs::HttpClient { 34 | public: 35 | MOCK_METHOD0(post, std::string()); 36 | MOCK_METHOD0(del, std::string()); 37 | MOCK_METHOD0(put, std::string()); 38 | MOCK_METHOD0(get, std::string()); 39 | 40 | std::string getPostResult(FileEncryptionInfo &encryptionInfo) { 41 | ptree map; 42 | map.put("name", encryptionInfo.getKeyName()); 43 | map.put("iv", encryptionInfo.getIv()); 44 | map.put("material", encryptionInfo.getKey()); 45 | 46 | std::string json = KmsClientProvider::toJson(map); 47 | return json; 48 | } 49 | 50 | }; 51 | 52 | #endif /* _HDFS_LIBHDFS3_MOCK_HTTPCLIENT_H_ */ 53 | -------------------------------------------------------------------------------- /libhdfs3/mock/MockKmsClientProvider.h: -------------------------------------------------------------------------------- 1 | 
/******************************************************************** 2 | * 2014 - 3 | * open source under Apache License Version 2.0 4 | ********************************************************************/ 5 | /** 6 | * Licensed to the Apache Software Foundation (ASF) under one 7 | * or more contributor license agreements. See the NOTICE file 8 | * distributed with this work for additional information 9 | * regarding copyright ownership. The ASF licenses this file 10 | * to you under the Apache License, Version 2.0 (the 11 | * "License"); you may not use this file except in compliance 12 | * with the License. You may obtain a copy of the License at 13 | * 14 | * http://www.apache.org/licenses/LICENSE-2.0 15 | * 16 | * Unless required by applicable law or agreed to in writing, software 17 | * distributed under the License is distributed on an "AS IS" BASIS, 18 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 19 | * See the License for the specific language governing permissions and 20 | * limitations under the License. 
21 | */ 22 | #ifndef _HDFS_LIBHDFS3_MOCK_KMSCLIENTPROVIDER_H_ 23 | #define _HDFS_LIBHDFS3_MOCK_KMSCLIENTPROVIDER_H_ 24 | 25 | #include "gmock/gmock.h" 26 | 27 | #include "client/KmsClientProvider.h" 28 | 29 | using namespace Hdfs::Internal; 30 | 31 | class MockKmsClientProvider: public Hdfs::KmsClientProvider { 32 | public: 33 | MockKmsClientProvider(shared_ptr auth, shared_ptr conf) : KmsClientProvider(auth, conf) {} 34 | MOCK_METHOD1(setHttpClient, void(shared_ptr hc)); 35 | MOCK_METHOD1(getKeyMetadata, ptree(const FileEncryptionInfo &encryptionInfo)); 36 | MOCK_METHOD1(deleteKey, void(const FileEncryptionInfo &encryptionInfo)); 37 | MOCK_METHOD1(decryptEncryptedKey, ptree(const FileEncryptionInfo &encryptionInfo)); 38 | MOCK_METHOD5(createKey, void(const std::string &keyName, const std::string &cipher, const int length, const std::string &material, const std::string &description)); 39 | 40 | ptree getEDKResult(FileEncryptionInfo &encryptionInfo) { 41 | ptree map; 42 | map.put("name", encryptionInfo.getKeyName()); 43 | map.put("iv", encryptionInfo.getIv()); 44 | map.put("material", KmsClientProvider::base64Encode(encryptionInfo.getKey())); 45 | return map; 46 | } 47 | 48 | }; 49 | 50 | #endif /* _HDFS_LIBHDFS3_MOCK_KMSCLIENTPROVIDER_H_ */ 51 | -------------------------------------------------------------------------------- /libhdfs3/mock/MockLeaseRenewer.h: -------------------------------------------------------------------------------- 1 | /******************************************************************** 2 | * 2014 - 3 | * open source under Apache License Version 2.0 4 | ********************************************************************/ 5 | /** 6 | * Licensed to the Apache Software Foundation (ASF) under one 7 | * or more contributor license agreements. See the NOTICE file 8 | * distributed with this work for additional information 9 | * regarding copyright ownership. 
The ASF licenses this file 10 | * to you under the Apache License, Version 2.0 (the 11 | * "License"); you may not use this file except in compliance 12 | * with the License. You may obtain a copy of the License at 13 | * 14 | * http://www.apache.org/licenses/LICENSE-2.0 15 | * 16 | * Unless required by applicable law or agreed to in writing, software 17 | * distributed under the License is distributed on an "AS IS" BASIS, 18 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 19 | * See the License for the specific language governing permissions and 20 | * limitations under the License. 21 | */ 22 | #ifndef _HDFS_LIBHDFS3_MOCK_MOCKLEASERENEWER_H_ 23 | #define _HDFS_LIBHDFS3_MOCK_MOCKLEASERENEWER_H_ 24 | 25 | #include "client/LeaseRenewer.h" 26 | #include "gmock/gmock.h" 27 | 28 | namespace Hdfs { 29 | namespace Mock { 30 | 31 | class MockLeaseRenewer: public Hdfs::Internal::LeaseRenewer { 32 | public: 33 | MOCK_METHOD1(StartRenew, void(shared_ptr)); 34 | MOCK_METHOD1(StopRenew, void(shared_ptr)); 35 | }; 36 | 37 | static inline shared_ptr MakeMockLeaseRenewer() { 38 | Hdfs::Internal::LeaseRenewer::GetLeaseRenewer(); 39 | shared_ptr old = Hdfs::Internal::LeaseRenewer::renewer; 40 | Hdfs::Internal::LeaseRenewer::renewer = shared_ptr(new MockLeaseRenewer); 41 | return old; 42 | } 43 | 44 | static inline MockLeaseRenewer & GetMockLeaseRenewer(){ 45 | assert(Hdfs::Internal::LeaseRenewer::renewer); 46 | return static_cast (*Hdfs::Internal::LeaseRenewer::renewer); 47 | } 48 | 49 | static inline void ResetMockLeaseRenewer(shared_ptr old){ 50 | Hdfs::Internal::LeaseRenewer::renewer = old; 51 | } 52 | 53 | } 54 | } 55 | 56 | #endif /* _HDFS_LIBHDFS3_MOCK_MOCKLEASERENEWER_H_ */ 57 | -------------------------------------------------------------------------------- /libhdfs3/mock/MockOperationCanceledCallback.h: -------------------------------------------------------------------------------- 1 | 
/******************************************************************** 2 | * 2014 - 3 | * open source under Apache License Version 2.0 4 | ********************************************************************/ 5 | /** 6 | * Licensed to the Apache Software Foundation (ASF) under one 7 | * or more contributor license agreements. See the NOTICE file 8 | * distributed with this work for additional information 9 | * regarding copyright ownership. The ASF licenses this file 10 | * to you under the Apache License, Version 2.0 (the 11 | * "License"); you may not use this file except in compliance 12 | * with the License. You may obtain a copy of the License at 13 | * 14 | * http://www.apache.org/licenses/LICENSE-2.0 15 | * 16 | * Unless required by applicable law or agreed to in writing, software 17 | * distributed under the License is distributed on an "AS IS" BASIS, 18 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 19 | * See the License for the specific language governing permissions and 20 | * limitations under the License. 
21 | */ 22 | #ifndef _HDFS_LIBHDFS3_MOCK_MOCKOPERATIONCANCELEDCALLBACK_H_ 23 | #define _HDFS_LIBHDFS3_MOCK_MOCKOPERATIONCANCELEDCALLBACK_H_ 24 | 25 | #include "gmock/gmock.h" 26 | 27 | namespace Hdfs { 28 | namespace Mock { 29 | 30 | class MockCancelObject { 31 | public: 32 | bool canceled() { 33 | return check(); 34 | } 35 | virtual bool check() = 0; 36 | virtual ~MockCancelObject() { 37 | } 38 | }; 39 | 40 | class MockOperationCanceledCallback: public MockCancelObject { 41 | public: 42 | MOCK_METHOD0(check, bool()); 43 | }; 44 | 45 | } 46 | } 47 | 48 | #endif /* _HDFS_LIBHDFS3_MOCK_MOCKOPERATIONCANCELEDCALLBACK_H_ */ 49 | -------------------------------------------------------------------------------- /libhdfs3/mock/MockPipeline.h: -------------------------------------------------------------------------------- 1 | /******************************************************************** 2 | * 2014 - 3 | * open source under Apache License Version 2.0 4 | ********************************************************************/ 5 | /** 6 | * Licensed to the Apache Software Foundation (ASF) under one 7 | * or more contributor license agreements. See the NOTICE file 8 | * distributed with this work for additional information 9 | * regarding copyright ownership. The ASF licenses this file 10 | * to you under the Apache License, Version 2.0 (the 11 | * "License"); you may not use this file except in compliance 12 | * with the License. You may obtain a copy of the License at 13 | * 14 | * http://www.apache.org/licenses/LICENSE-2.0 15 | * 16 | * Unless required by applicable law or agreed to in writing, software 17 | * distributed under the License is distributed on an "AS IS" BASIS, 18 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 19 | * See the License for the specific language governing permissions and 20 | * limitations under the License. 
21 | */ 22 | #ifndef _HDFS_LIBHDFS3_MOCK_MOCKPIPELINE_H_ 23 | #define _HDFS_LIBHDFS3_MOCK_MOCKPIPELINE_H_ 24 | 25 | #include "gmock/gmock.h" 26 | #include "client/Packet.h" 27 | #include "server/ExtendedBlock.h" 28 | #include "client/FileSystem.h" 29 | 30 | using namespace Hdfs::Internal; 31 | namespace Hdfs { 32 | 33 | namespace Mock { 34 | 35 | class MockPipeline: public Pipeline { 36 | public: 37 | MOCK_METHOD0(flush, void()); 38 | MOCK_METHOD1(close, shared_ptr (shared_ptr lastPacket)); 39 | MOCK_METHOD1(send, void (shared_ptr packet)); 40 | MOCK_METHOD1(setFilesystem, void (FileSystemInter * fs)); 41 | 42 | }; 43 | 44 | } 45 | } 46 | 47 | #endif /* _HDFS_LIBHDFS3_MOCK_MOCKPIPELINE_H_ */ 48 | -------------------------------------------------------------------------------- /libhdfs3/mock/MockRpcChannel.h: -------------------------------------------------------------------------------- 1 | /******************************************************************** 2 | * 2014 - 3 | * open source under Apache License Version 2.0 4 | ********************************************************************/ 5 | /** 6 | * Licensed to the Apache Software Foundation (ASF) under one 7 | * or more contributor license agreements. See the NOTICE file 8 | * distributed with this work for additional information 9 | * regarding copyright ownership. The ASF licenses this file 10 | * to you under the Apache License, Version 2.0 (the 11 | * "License"); you may not use this file except in compliance 12 | * with the License. You may obtain a copy of the License at 13 | * 14 | * http://www.apache.org/licenses/LICENSE-2.0 15 | * 16 | * Unless required by applicable law or agreed to in writing, software 17 | * distributed under the License is distributed on an "AS IS" BASIS, 18 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 19 | * See the License for the specific language governing permissions and 20 | * limitations under the License. 
21 | */ 22 | #ifndef _HDFS_LIBHDFS3_MOCK_MOCKRPCCHANNEL_H_ 23 | #define _HDFS_LIBHDFS3_MOCK_MOCKRPCCHANNEL_H_ 24 | 25 | #include "gmock/gmock.h" 26 | #include "rpc/RpcChannel.h" 27 | 28 | namespace Hdfs { 29 | namespace Mock { 30 | 31 | class MockRpcChannel: public Hdfs::Internal::RpcChannel { 32 | public: 33 | MOCK_METHOD0(close, void()); 34 | MOCK_METHOD1(invoke, void(const Hdfs::Internal::RpcCall &)); 35 | MOCK_METHOD0(checkIdle, bool()); 36 | MOCK_METHOD0(waitForExit, void()); 37 | MOCK_METHOD0(addRef, void()); 38 | }; 39 | 40 | } 41 | } 42 | 43 | #endif /* _HDFS_LIBHDFS3_MOCK_MOCKRPCCHANNEL_H_ */ 44 | -------------------------------------------------------------------------------- /libhdfs3/mock/MockRpcClient.h: -------------------------------------------------------------------------------- 1 | /******************************************************************** 2 | * 2014 - 3 | * open source under Apache License Version 2.0 4 | ********************************************************************/ 5 | /** 6 | * Licensed to the Apache Software Foundation (ASF) under one 7 | * or more contributor license agreements. See the NOTICE file 8 | * distributed with this work for additional information 9 | * regarding copyright ownership. The ASF licenses this file 10 | * to you under the Apache License, Version 2.0 (the 11 | * "License"); you may not use this file except in compliance 12 | * with the License. You may obtain a copy of the License at 13 | * 14 | * http://www.apache.org/licenses/LICENSE-2.0 15 | * 16 | * Unless required by applicable law or agreed to in writing, software 17 | * distributed under the License is distributed on an "AS IS" BASIS, 18 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 19 | * See the License for the specific language governing permissions and 20 | * limitations under the License. 
21 | */ 22 | #ifndef _HDFS_LIBHDFS3_MOCK_MOCKRPCCLIENT_H_ 23 | #define _HDFS_LIBHDFS3_MOCK_MOCKRPCCLIENT_H_ 24 | 25 | #include "gmock/gmock.h" 26 | #include "rpc/RpcClient.h" 27 | 28 | using namespace Hdfs::Internal; 29 | namespace Hdfs { 30 | namespace Mock { 31 | 32 | class MockRpcClient: public RpcClient { 33 | public: 34 | MOCK_METHOD0(isRunning, bool()); 35 | 36 | MOCK_METHOD4(getChannel, RpcChannel & (const RpcAuth &, 37 | const RpcProtocolInfo &, const RpcServerInfo &, 38 | const RpcConfig &)); 39 | 40 | MOCK_CONST_METHOD0(getClientId, std::string()); 41 | 42 | MOCK_METHOD0(getCallId, int32_t()); 43 | }; 44 | 45 | } 46 | } 47 | 48 | #endif /* _HDFS_LIBHDFS3_MOCK_MOCKRPCCLIENT_H_ */ 49 | -------------------------------------------------------------------------------- /libhdfs3/mock/MockRpcRemoteCall.h: -------------------------------------------------------------------------------- 1 | /******************************************************************** 2 | * 2014 - 3 | * open source under Apache License Version 2.0 4 | ********************************************************************/ 5 | /** 6 | * Licensed to the Apache Software Foundation (ASF) under one 7 | * or more contributor license agreements. See the NOTICE file 8 | * distributed with this work for additional information 9 | * regarding copyright ownership. The ASF licenses this file 10 | * to you under the Apache License, Version 2.0 (the 11 | * "License"); you may not use this file except in compliance 12 | * with the License. You may obtain a copy of the License at 13 | * 14 | * http://www.apache.org/licenses/LICENSE-2.0 15 | * 16 | * Unless required by applicable law or agreed to in writing, software 17 | * distributed under the License is distributed on an "AS IS" BASIS, 18 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 19 | * See the License for the specific language governing permissions and 20 | * limitations under the License. 
21 | */ 22 | #ifndef _HDFS_LIBHDFS3_MOCK_MOCKRPCREMOTECALL_H_ 23 | #define _HDFS_LIBHDFS3_MOCK_MOCKRPCREMOTECALL_H_ 24 | 25 | #include "gmock/gmock.h" 26 | #include "rpc/RpcRemoteCall.h" 27 | 28 | using namespace Hdfs::Internal; 29 | namespace Hdfs { 30 | namespace Mock { 31 | 32 | class MockRpcRemoteCall: public RpcRemoteCall { 33 | public: 34 | MockRpcRemoteCall(const RpcCall & c, int32_t id, const std::string & clientId) : 35 | RpcRemoteCall(c, id, clientId) { 36 | } 37 | 38 | MOCK_METHOD2(serialize, void(const RpcProtocolInfo&, WriteBuffer &)); 39 | MOCK_METHOD1(cancel, void(exception_ptr )); 40 | 41 | }; 42 | 43 | } 44 | } 45 | 46 | #endif /* _HDFS_LIBHDFS3_MOCK_MOCKRPCREMOTECALL_H_ */ 47 | -------------------------------------------------------------------------------- /libhdfs3/mock/MockSockCall.h: -------------------------------------------------------------------------------- 1 | /******************************************************************** 2 | * 2014 - 3 | * open source under Apache License Version 2.0 4 | ********************************************************************/ 5 | /** 6 | * Licensed to the Apache Software Foundation (ASF) under one 7 | * or more contributor license agreements. See the NOTICE file 8 | * distributed with this work for additional information 9 | * regarding copyright ownership. The ASF licenses this file 10 | * to you under the Apache License, Version 2.0 (the 11 | * "License"); you may not use this file except in compliance 12 | * with the License. You may obtain a copy of the License at 13 | * 14 | * http://www.apache.org/licenses/LICENSE-2.0 15 | * 16 | * Unless required by applicable law or agreed to in writing, software 17 | * distributed under the License is distributed on an "AS IS" BASIS, 18 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 19 | * See the License for the specific language governing permissions and 20 | * limitations under the License. 
21 | */ 22 | #ifndef _HDFS_LIBHDFS3_MOCK_MOCKSOCKCALL_H_ 23 | #define _HDFS_LIBHDFS3_MOCK_MOCKSOCKCALL_H_ 24 | 25 | #include "gmock/gmock.h" 26 | 27 | #include "MockSystem.h" 28 | 29 | namespace Hdfs { 30 | namespace Mock { 31 | 32 | class MockSockSysCall: public MockSockSysCallInterface { 33 | public: 34 | MOCK_METHOD4(recv , ssize_t (int sock, void * buffer, size_t size, int flag)); 35 | MOCK_METHOD4(send , ssize_t (int sock, const void * buffer, size_t size, 36 | int flag)); 37 | MOCK_METHOD3(recvmsg , ssize_t (int socket, struct msghdr *message, int flags)); 38 | MOCK_METHOD4(getaddrinfo , int (const char * __restrict host, 39 | const char * __restrict port, 40 | const struct addrinfo * __restrict hint, 41 | struct addrinfo ** __restrict addr)); 42 | MOCK_METHOD1(freeaddrinfo , void (struct addrinfo * addr)); 43 | MOCK_METHOD3(socket , int (int family, int type, int protocol)); 44 | MOCK_METHOD3(connect , int (int sock, const struct sockaddr * addr, 45 | socklen_t len)); 46 | MOCK_METHOD3(getpeername , int (int sock, struct sockaddr * __restrict peer, 47 | socklen_t * __restrict len)); 48 | MOCK_METHOD3(fcntl , int (int sock, int flag, int value)); 49 | MOCK_METHOD5(setsockopt , int (int sock, int level, int optname, const void *optval, 50 | socklen_t optlen)); 51 | MOCK_METHOD3(poll , int (struct pollfd * pfd, nfds_t size, int timeout)); 52 | MOCK_METHOD2(shutdown , int (int sock, int how)); 53 | MOCK_METHOD1(close , int (int sock)); 54 | }; 55 | 56 | } 57 | } 58 | 59 | #endif /* _HDFS_LIBHDFS3_MOCK_MOCKSOCKCALL_H_ */ 60 | -------------------------------------------------------------------------------- /libhdfs3/mock/MockSocket.h: -------------------------------------------------------------------------------- 1 | /******************************************************************** 2 | * 2014 - 3 | * open source under Apache License Version 2.0 4 | ********************************************************************/ 5 | /** 6 | * Licensed to the Apache 
Software Foundation (ASF) under one 7 | * or more contributor license agreements. See the NOTICE file 8 | * distributed with this work for additional information 9 | * regarding copyright ownership. The ASF licenses this file 10 | * to you under the Apache License, Version 2.0 (the 11 | * "License"); you may not use this file except in compliance 12 | * with the License. You may obtain a copy of the License at 13 | * 14 | * http://www.apache.org/licenses/LICENSE-2.0 15 | * 16 | * Unless required by applicable law or agreed to in writing, software 17 | * distributed under the License is distributed on an "AS IS" BASIS, 18 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 19 | * See the License for the specific language governing permissions and 20 | * limitations under the License. 21 | */ 22 | #ifndef _HDFS_LIBHDFS3_MOCK_MOCKSOCKET_H_ 23 | #define _HDFS_LIBHDFS3_MOCK_MOCKSOCKET_H_ 24 | 25 | #include "gmock/gmock.h" 26 | 27 | #include "network/Socket.h" 28 | 29 | class MockSocket: public Hdfs::Internal::Socket { 30 | public: 31 | 32 | MOCK_METHOD2(read, int32_t(char * buffer, int32_t size)); 33 | 34 | MOCK_METHOD3(readFully, void(char * buffer, int32_t size, int timeout)); 35 | 36 | MOCK_METHOD2(write, int32_t(const char * buffer, int32_t size)); 37 | 38 | MOCK_METHOD3(writeFully, void(const char * buffer, int32_t size, int timeout)); 39 | 40 | MOCK_METHOD3(connect, void(const char * host, int port, int timeout)); 41 | 42 | MOCK_METHOD3(connect, void(const char * host, const char * port, int timeout)); 43 | 44 | MOCK_METHOD4(connect, void(struct addrinfo * paddr, const char * host, const char * port, 45 | int timeout)); 46 | 47 | MOCK_METHOD3(poll, bool(bool read, bool write, int timeout)); 48 | 49 | MOCK_METHOD1(setBlockMode, void(bool enable)); 50 | 51 | MOCK_METHOD1(setNoDelay, void(bool enable)); 52 | 53 | MOCK_METHOD1(setLingerTimeout, void(int timeout)); 54 | 55 | MOCK_METHOD0(disableSigPipe, void()); 56 | 57 | MOCK_METHOD0(close, 
void()); 58 | }; 59 | 60 | #endif /* _HDFS_LIBHDFS3_MOCK_MOCKSOCKET_H_ */ 61 | -------------------------------------------------------------------------------- /libhdfs3/mock/NamenodeStub.h: -------------------------------------------------------------------------------- 1 | /******************************************************************** 2 | * 2014 - 3 | * open source under Apache License Version 2.0 4 | ********************************************************************/ 5 | /** 6 | * Licensed to the Apache Software Foundation (ASF) under one 7 | * or more contributor license agreements. See the NOTICE file 8 | * distributed with this work for additional information 9 | * regarding copyright ownership. The ASF licenses this file 10 | * to you under the Apache License, Version 2.0 (the 11 | * "License"); you may not use this file except in compliance 12 | * with the License. You may obtain a copy of the License at 13 | * 14 | * http://www.apache.org/licenses/LICENSE-2.0 15 | * 16 | * Unless required by applicable law or agreed to in writing, software 17 | * distributed under the License is distributed on an "AS IS" BASIS, 18 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 19 | * See the License for the specific language governing permissions and 20 | * limitations under the License. 
 21 | */
 22 | #ifndef _HDFS_LIBHDFS3_MOCK_NAMENODESTUB_H_
 23 | #define _HDFS_LIBHDFS3_MOCK_NAMENODESTUB_H_
 24 |
 25 | #include "MockNamenode.h"
 26 |
 27 | #include // NOTE(review): the original angle-bracket include was lost in text extraction (`#include <...>`) -- restore from upstream before compiling
 28 |
 29 | using namespace Hdfs;
 30 | using namespace Internal;
 31 |
 32 | namespace Hdfs {
 33 |
 34 | namespace Mock {
 35 |
 36 | class NamenodeStub { // Test hook: implementations hand out the MockNamenode a test should talk to.
 37 | public:
 38 |
 39 | virtual ~NamenodeStub() {
 40 | }
 41 |
 42 | virtual MockNamenode * getNamenode() = 0; // NOTE(review): ownership of the returned mock is not documented here -- see concrete stub implementations
 43 |
 44 | };
 45 |
 46 | }
 47 | }
 48 |
 49 | #endif /* _HDFS_LIBHDFS3_MOCK_NAMENODESTUB_H_ */
 50 |
-------------------------------------------------------------------------------- /libhdfs3/mock/PipelineStub.h: --------------------------------------------------------------------------------
 1 | /********************************************************************
 2 | * 2014 -
 3 | * open source under Apache License Version 2.0
 4 | ********************************************************************/
 5 | /**
 6 | * Licensed to the Apache Software Foundation (ASF) under one
 7 | * or more contributor license agreements. See the NOTICE file
 8 | * distributed with this work for additional information
 9 | * regarding copyright ownership. The ASF licenses this file
 10 | * to you under the Apache License, Version 2.0 (the
 11 | * "License"); you may not use this file except in compliance
 12 | * with the License. You may obtain a copy of the License at
 13 | *
 14 | * http://www.apache.org/licenses/LICENSE-2.0
 15 | *
 16 | * Unless required by applicable law or agreed to in writing, software
 17 | * distributed under the License is distributed on an "AS IS" BASIS,
 18 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 19 | * See the License for the specific language governing permissions and
 20 | * limitations under the License.
21 | */ 22 | #ifndef _HDFS_LIBHDFS3_MOCK_PIPELINESTUB_H_ 23 | #define _HDFS_LIBHDFS3_MOCK_PIPELINESTUB_H_ 24 | 25 | #include "MockPipeline.h" 26 | 27 | #include 28 | 29 | using namespace Hdfs; 30 | using namespace Internal; 31 | 32 | namespace Hdfs { 33 | 34 | namespace Mock { 35 | 36 | class PipelineStub { 37 | public: 38 | 39 | virtual ~PipelineStub() { 40 | } 41 | 42 | virtual shared_ptr getPipeline() = 0; 43 | 44 | }; 45 | 46 | } 47 | } 48 | 49 | #endif /* _HDFS_LIBHDFS3_MOCK_PIPELINESTUB_H_ */ 50 | -------------------------------------------------------------------------------- /libhdfs3/mock/TestDatanodeStub.h: -------------------------------------------------------------------------------- 1 | /******************************************************************** 2 | * 2014 - 3 | * open source under Apache License Version 2.0 4 | ********************************************************************/ 5 | /** 6 | * Licensed to the Apache Software Foundation (ASF) under one 7 | * or more contributor license agreements. See the NOTICE file 8 | * distributed with this work for additional information 9 | * regarding copyright ownership. The ASF licenses this file 10 | * to you under the Apache License, Version 2.0 (the 11 | * "License"); you may not use this file except in compliance 12 | * with the License. You may obtain a copy of the License at 13 | * 14 | * http://www.apache.org/licenses/LICENSE-2.0 15 | * 16 | * Unless required by applicable law or agreed to in writing, software 17 | * distributed under the License is distributed on an "AS IS" BASIS, 18 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 19 | * See the License for the specific language governing permissions and 20 | * limitations under the License. 
21 | */ 22 | #ifndef _HDFS_LIBHDFS3_MOCK_TESTDATANODESTUB_H_ 23 | #define _HDFS_LIBHDFS3_MOCK_TESTDATANODESTUB_H_ 24 | 25 | #include "MockDatanode.h" 26 | 27 | namespace Hdfs { 28 | 29 | namespace Mock { 30 | 31 | class TestDatanodeStub { 32 | public: 33 | virtual ~TestDatanodeStub() { 34 | } 35 | 36 | virtual shared_ptr getDatanode() = 0; 37 | }; 38 | 39 | } 40 | } 41 | 42 | #endif /* _HDFS_LIBHDFS3_MOCK_TESTDATANODESTUB_H_ */ 43 | -------------------------------------------------------------------------------- /libhdfs3/mock/TestRpcChannelStub.h: -------------------------------------------------------------------------------- 1 | /******************************************************************** 2 | * 2014 - 3 | * open source under Apache License Version 2.0 4 | ********************************************************************/ 5 | /** 6 | * Licensed to the Apache Software Foundation (ASF) under one 7 | * or more contributor license agreements. See the NOTICE file 8 | * distributed with this work for additional information 9 | * regarding copyright ownership. The ASF licenses this file 10 | * to you under the Apache License, Version 2.0 (the 11 | * "License"); you may not use this file except in compliance 12 | * with the License. You may obtain a copy of the License at 13 | * 14 | * http://www.apache.org/licenses/LICENSE-2.0 15 | * 16 | * Unless required by applicable law or agreed to in writing, software 17 | * distributed under the License is distributed on an "AS IS" BASIS, 18 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 19 | * See the License for the specific language governing permissions and 20 | * limitations under the License. 
 21 | */
 22 | #ifndef _HDFS_LIBHDFS3_MOCK_TESTRPCCHANNELSTUB_H_
 23 | #define _HDFS_LIBHDFS3_MOCK_TESTRPCCHANNELSTUB_H_
 24 |
 25 | #include "rpc/RpcChannel.h"
 26 |
 27 | namespace Hdfs {
 28 | namespace Internal {
 29 |
 30 | class RpcClient;
 31 |
 32 | }
 33 |
 34 | namespace Mock {
 35 |
 36 | class TestRpcChannelStub { // Test hook: implementations supply the RpcChannel a test-controlled RpcClient should use.
 37 | public:
 38 | virtual ~TestRpcChannelStub() {
 39 | }
 40 |
 41 | virtual Hdfs::Internal::RpcChannel* getChannel(
 42 | Hdfs::Internal::RpcChannelKey key,
 43 | Hdfs::Internal::RpcClient & c) = 0; // NOTE(review): ownership of the returned channel is not documented here -- see concrete stubs
 44 |
 45 | };
 46 |
 47 | }
 48 | }
 49 |
 50 | #endif /* _HDFS_LIBHDFS3_MOCK_TESTRPCCHANNELSTUB_H_ */
 51 |
-------------------------------------------------------------------------------- /libhdfs3/rpms/.gitignore: --------------------------------------------------------------------------------
 1 | BUILD
 2 | BUILDROOT
 3 | RPMS
 4 | SOURCES
 5 | SPECS
 6 | SRPMS
 7 |
 8 |
-------------------------------------------------------------------------------- /libhdfs3/src/.gitignore: --------------------------------------------------------------------------------
 1 | libhdfs3.pc
 2 |
 3 |
-------------------------------------------------------------------------------- /libhdfs3/src/client/BlockLocation.h: --------------------------------------------------------------------------------
 1 | /********************************************************************
 2 | * 2014 -
 3 | * open source under Apache License Version 2.0
 4 | ********************************************************************/
 5 | /**
 6 | * Licensed to the Apache Software Foundation (ASF) under one
 7 | * or more contributor license agreements. See the NOTICE file
 8 | * distributed with this work for additional information
 9 | * regarding copyright ownership. The ASF licenses this file
 10 | * to you under the Apache License, Version 2.0 (the
 11 | * "License"); you may not use this file except in compliance
 12 | * with the License.
You may obtain a copy of the License at 13 | * 14 | * http://www.apache.org/licenses/LICENSE-2.0 15 | * 16 | * Unless required by applicable law or agreed to in writing, software 17 | * distributed under the License is distributed on an "AS IS" BASIS, 18 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 19 | * See the License for the specific language governing permissions and 20 | * limitations under the License. 21 | */ 22 | #ifndef _HDFS_LIBHDFS3_CLIENT_BLOCKLOCATION_H_ 23 | #define _HDFS_LIBHDFS3_CLIENT_BLOCKLOCATION_H_ 24 | 25 | #include 26 | #include 27 | 28 | namespace Hdfs { 29 | 30 | class BlockLocation { 31 | public: 32 | bool isCorrupt() const { 33 | return corrupt; 34 | } 35 | 36 | void setCorrupt(bool corrupt) { 37 | this->corrupt = corrupt; 38 | } 39 | 40 | const std::vector & getHosts() const { 41 | return hosts; 42 | } 43 | 44 | void setHosts(const std::vector & hosts) { 45 | this->hosts = hosts; 46 | } 47 | 48 | int64_t getLength() const { 49 | return length; 50 | } 51 | 52 | void setLength(int64_t length) { 53 | this->length = length; 54 | } 55 | 56 | const std::vector & getNames() const { 57 | return names; 58 | } 59 | 60 | void setNames(const std::vector & names) { 61 | this->names = names; 62 | } 63 | 64 | int64_t getOffset() const { 65 | return offset; 66 | } 67 | 68 | void setOffset(int64_t offset) { 69 | this->offset = offset; 70 | } 71 | 72 | const std::vector & getTopologyPaths() const { 73 | return topologyPaths; 74 | } 75 | 76 | void setTopologyPaths(const std::vector & topologyPaths) { 77 | this->topologyPaths = topologyPaths; 78 | } 79 | 80 | private: 81 | bool corrupt; 82 | int64_t length; 83 | int64_t offset; // Offset of the block in the file 84 | std::vector hosts; // Datanode hostnames 85 | std::vector names; // Datanode IP:xferPort for accessing the block 86 | std::vector topologyPaths; // Full path name in network topology 87 | }; 88 | 89 | } 90 | 91 | #endif /* _HDFS_LIBHDFS3_CLIENT_BLOCKLOCATION_H_ 
*/ 92 | -------------------------------------------------------------------------------- /libhdfs3/src/client/BlockReader.h: -------------------------------------------------------------------------------- 1 | /******************************************************************** 2 | * 2014 - 3 | * open source under Apache License Version 2.0 4 | ********************************************************************/ 5 | /** 6 | * Licensed to the Apache Software Foundation (ASF) under one 7 | * or more contributor license agreements. See the NOTICE file 8 | * distributed with this work for additional information 9 | * regarding copyright ownership. The ASF licenses this file 10 | * to you under the Apache License, Version 2.0 (the 11 | * "License"); you may not use this file except in compliance 12 | * with the License. You may obtain a copy of the License at 13 | * 14 | * http://www.apache.org/licenses/LICENSE-2.0 15 | * 16 | * Unless required by applicable law or agreed to in writing, software 17 | * distributed under the License is distributed on an "AS IS" BASIS, 18 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 19 | * See the License for the specific language governing permissions and 20 | * limitations under the License. 21 | */ 22 | #ifndef _HDFS_LIBHDFS3_CLIENT_BLOCKREADER_H_ 23 | #define _HDFS_LIBHDFS3_CLIENT_BLOCKREADER_H_ 24 | 25 | #include 26 | 27 | namespace Hdfs { 28 | namespace Internal { 29 | 30 | class BlockReader { 31 | public: 32 | virtual ~BlockReader() { 33 | } 34 | 35 | /** 36 | * Get how many bytes can be read without blocking. 37 | * @return The number of bytes can be read without blocking. 38 | */ 39 | virtual int64_t available() = 0; 40 | 41 | /** 42 | * To read data from block. 43 | * @param buf the buffer used to filled. 44 | * @param size the number of bytes to be read. 45 | * @return return the number of bytes filled in the buffer, 46 | * it may less than size. Return 0 if reach the end of block. 
47 | */ 48 | virtual int32_t read(char * buf, int32_t size) = 0; 49 | 50 | /** 51 | * Move the cursor forward len bytes. 52 | * @param len The number of bytes to skip. 53 | */ 54 | virtual void skip(int64_t len) = 0; 55 | }; 56 | 57 | } 58 | } 59 | 60 | #endif /* _HDFS_LIBHDFS3_CLIENT_BLOCKREADER_H_ */ 61 | -------------------------------------------------------------------------------- /libhdfs3/src/client/DataReader.h: -------------------------------------------------------------------------------- 1 | #ifndef _HDFS_LIBHDFS3_SERVER_DATAREADER_H_ 2 | #define _HDFS_LIBHDFS3_SERVER_DATAREADER_H_ 3 | 4 | #include 5 | #include 6 | namespace Hdfs { 7 | namespace Internal { 8 | 9 | /** 10 | * Helps read data responses from the server 11 | */ 12 | class DataReader { 13 | public: 14 | DataReader(DataTransferProtocol *sender, 15 | shared_ptr reader, int readTimeout); 16 | std::vector& readResponse(const char* text, int &outsize); 17 | std::vector& readPacketHeader(const char* text, int size, int &outsize); 18 | std::string& getRest() { 19 | return rest; 20 | } 21 | 22 | void setRest(const char* data, int size); 23 | void reduceRest(int size); 24 | void getMissing(int size); 25 | 26 | private: 27 | std::string raw; 28 | std::string decrypted; 29 | std::string rest; 30 | std::vector buf; 31 | DataTransferProtocol *sender; 32 | shared_ptr reader; 33 | int readTimeout; 34 | }; 35 | 36 | } 37 | } 38 | 39 | #endif /* _HDFS_LIBHDFS3_SERVER_DATAREADER_H_ */ -------------------------------------------------------------------------------- /libhdfs3/src/client/DirectoryIterator.h: -------------------------------------------------------------------------------- 1 | /******************************************************************** 2 | * 2014 - 3 | * open source under Apache License Version 2.0 4 | ********************************************************************/ 5 | /** 6 | * Licensed to the Apache Software Foundation (ASF) under one 7 | * or more contributor license 
agreements. See the NOTICE file 8 | * distributed with this work for additional information 9 | * regarding copyright ownership. The ASF licenses this file 10 | * to you under the Apache License, Version 2.0 (the 11 | * "License"); you may not use this file except in compliance 12 | * with the License. You may obtain a copy of the License at 13 | * 14 | * http://www.apache.org/licenses/LICENSE-2.0 15 | * 16 | * Unless required by applicable law or agreed to in writing, software 17 | * distributed under the License is distributed on an "AS IS" BASIS, 18 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 19 | * See the License for the specific language governing permissions and 20 | * limitations under the License. 21 | */ 22 | #ifndef _HDFS_LIBHFDS3_CLIENT_DIRECTORY_ITERATOR_H_ 23 | #define _HDFS_LIBHFDS3_CLIENT_DIRECTORY_ITERATOR_H_ 24 | 25 | #include "FileStatus.h" 26 | #include 27 | 28 | namespace Hdfs { 29 | namespace Internal { 30 | class FileSystemImpl; 31 | } 32 | 33 | class DirectoryIterator { 34 | public: 35 | DirectoryIterator(); 36 | DirectoryIterator(Hdfs::Internal::FileSystemImpl * const fs, 37 | std::string path, bool needLocations); 38 | DirectoryIterator(const DirectoryIterator & it); 39 | DirectoryIterator & operator = (const DirectoryIterator & it); 40 | bool hasNext(); 41 | FileStatus getNext(); 42 | 43 | private: 44 | bool getListing(); 45 | 46 | private: 47 | bool needLocations; 48 | Hdfs::Internal::FileSystemImpl * filesystem; 49 | size_t next; 50 | std::string path; 51 | std::string startAfter; 52 | std::vector lists; 53 | }; 54 | 55 | } 56 | 57 | #endif /* _HDFS_LIBHFDS3_CLIENT_DIRECTORY_ITERATOR_H_ */ 58 | -------------------------------------------------------------------------------- /libhdfs3/src/client/EncryptionZoneInfo.h: -------------------------------------------------------------------------------- 1 | /** 2 | * Licensed to the Apache Software Foundation (ASF) under one 3 | * or more contributor license 
agreements. See the NOTICE file 4 | * distributed with this work for additional information 5 | * regarding copyright ownership. The ASF licenses this file 6 | * to you under the Apache License, Version 2.0 (the 7 | * "License"); you may not use this file except in compliance 8 | * with the License. You may obtain a copy of the License at 9 | * 10 | * http://www.apache.org/licenses/LICENSE-2.0 11 | * 12 | * Unless required by applicable law or agreed to in writing, software 13 | * distributed under the License is distributed on an "AS IS" BASIS, 14 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 15 | * See the License for the specific language governing permissions and 16 | * limitations under the License. 17 | */ 18 | #ifndef _HDFS_LIBHDFS3_CLIENT_ENCRYPTIONZONEINFO_H_ 19 | #define _HDFS_LIBHDFS3_CLIENT_ENCRYPTIONZONEINFO_H_ 20 | 21 | #include 22 | 23 | namespace Hdfs { 24 | 25 | class EncryptionZoneInfo { 26 | public: 27 | EncryptionZoneInfo() : 28 | suite(0), cryptoProtocolVersion(0), id(0) { 29 | } 30 | 31 | int getSuite() const { 32 | return suite; 33 | } 34 | 35 | void setSuite(int suite) { 36 | this->suite = suite; 37 | } 38 | 39 | int getCryptoProtocolVersion() const { 40 | return cryptoProtocolVersion; 41 | } 42 | 43 | void setCryptoProtocolVersion(int cryptoProtocolVersion) { 44 | this->cryptoProtocolVersion = cryptoProtocolVersion; 45 | } 46 | 47 | int getId() const { 48 | return id; 49 | } 50 | 51 | void setId(int id) { 52 | this->id = id; 53 | } 54 | 55 | const char * getPath() const{ 56 | return path.c_str(); 57 | } 58 | 59 | void setPath(const char * path){ 60 | this->path = path; 61 | } 62 | 63 | const char * getKeyName() const{ 64 | return keyName.c_str(); 65 | } 66 | 67 | void setKeyName(const char * keyName){ 68 | this->keyName = keyName; 69 | } 70 | 71 | private: 72 | int suite; 73 | int cryptoProtocolVersion; 74 | int64_t id; 75 | std::string path; 76 | std::string keyName; 77 | }; 78 | 79 | } 80 | #endif 81 | 
-------------------------------------------------------------------------------- /libhdfs3/src/client/EncryptionZoneIterator.cpp: -------------------------------------------------------------------------------- 1 | /******************************************************************** 2 | * 2014 - 3 | * open source under Apache License Version 2.0 4 | ********************************************************************/ 5 | /** 6 | * Licensed to the Apache Software Foundation (ASF) under one 7 | * or more contributor license agreements. See the NOTICE file 8 | * distributed with this work for additional information 9 | * regarding copyright ownership. The ASF licenses this file 10 | * to you under the Apache License, Version 2.0 (the 11 | * "License"); you may not use this file except in compliance 12 | * with the License. You may obtain a copy of the License at 13 | * 14 | * http://www.apache.org/licenses/LICENSE-2.0 15 | * 16 | * Unless required by applicable law or agreed to in writing, software 17 | * distributed under the License is distributed on an "AS IS" BASIS, 18 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 19 | * See the License for the specific language governing permissions and 20 | * limitations under the License. 
 21 | */
 22 | #include "EncryptionZoneIterator.h"
 23 | #include "Exception.h"
 24 | #include "ExceptionInternal.h"
 25 | #include "EncryptionZoneInfo.h"
 26 | #include "FileSystemImpl.h"
 27 |
 28 | namespace Hdfs {
 29 | EncryptionZoneIterator::EncryptionZoneIterator() :filesystem(NULL), id(0), next(0) { // empty iterator: with no filesystem, hasNext() is always false
 30 | }
 31 |
 32 | EncryptionZoneIterator::EncryptionZoneIterator(Hdfs::Internal::FileSystemImpl * const fs,
 33 | const int64_t id) :filesystem(fs), id(id), next(0) { // "id" is the pagination cursor: listing resumes after this zone id
 34 | }
 35 |
 36 | EncryptionZoneIterator::EncryptionZoneIterator(const EncryptionZoneIterator & it) :
 37 | filesystem(it.filesystem), id(it.id), next(it.next), lists(it.lists) { // shallow copy; the filesystem pointer is shared, not owned
 38 | }
 39 |
 40 | EncryptionZoneIterator & EncryptionZoneIterator::operator =(const EncryptionZoneIterator & it) {
 41 | if (this == &it) { // self-assignment guard
 42 | return *this;
 43 | }
 44 |
 45 | filesystem = it.filesystem;
 46 | id = it.id;
 47 | next = it.next;
 48 | lists = it.lists;
 49 | return *this;
 50 | }
 51 |
 52 | bool EncryptionZoneIterator::listEncryptionZones() { // fetch the next batch from the namenode; returns whether any zones remain to iterate
 53 | bool more;
 54 |
 55 | if (NULL == filesystem) {
 56 | return false;
 57 | }
 58 |
 59 | next = 0;
 60 | lists.clear();
 61 | more = filesystem->listEncryptionZones(id, lists);
 62 | if (!lists.empty()){
 63 | id = lists.back().getId(); // advance the cursor past the last zone received
 64 | }
 65 |
 66 | return more || !lists.empty(); // NOTE(review): if the server ever reported more==true with an empty batch, getNext() below would index an empty vector -- confirm the RPC cannot do that
 67 | }
 68 |
 69 | bool EncryptionZoneIterator::hasNext() {
 70 | if (next >= lists.size()) {
 71 | return listEncryptionZones(); // current batch exhausted; try to fetch another
 72 | }
 73 |
 74 | return true;
 75 | }
 76 |
 77 | Hdfs::EncryptionZoneInfo EncryptionZoneIterator::getNext() {
 78 | if (next >= lists.size()) {
 79 | if (!listEncryptionZones()) {
 80 | THROW(HdfsIOException, "End of the dir flow");
 81 | }
 82 | }
 83 | return lists[next++];
 84 | }
 85 |
 86 | }
 87 |
-------------------------------------------------------------------------------- /libhdfs3/src/client/EncryptionZoneIterator.h: --------------------------------------------------------------------------------
 1 | /********************************************************************
 2 | * 2014 -
 3 | * open
source under Apache License Version 2.0 4 | ********************************************************************/ 5 | /** 6 | * Licensed to the Apache Software Foundation (ASF) under one 7 | * or more contributor license agreements. See the NOTICE file 8 | * distributed with this work for additional information 9 | * regarding copyright ownership. The ASF licenses this file 10 | * to you under the Apache License, Version 2.0 (the 11 | * "License"); you may not use this file except in compliance 12 | * with the License. You may obtain a copy of the License at 13 | * 14 | * http://www.apache.org/licenses/LICENSE-2.0 15 | * 16 | * Unless required by applicable law or agreed to in writing, software 17 | * distributed under the License is distributed on an "AS IS" BASIS, 18 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 19 | * See the License for the specific language governing permissions and 20 | * limitations under the License. 21 | */ 22 | #ifndef _HDFS_LIBHFDS3_CLIENT_ENCRYPTIONZONE_ITERATOR_H_ 23 | #define _HDFS_LIBHFDS3_CLIENT_ENCRYPTIONZONE_ITERATOR_H_ 24 | 25 | #include "FileStatus.h" 26 | #include "EncryptionZoneInfo.h" 27 | #include 28 | 29 | namespace Hdfs { 30 | namespace Internal { 31 | class FileSystemImpl; 32 | } 33 | 34 | class EncryptionZoneIterator { 35 | public: 36 | EncryptionZoneIterator(); 37 | EncryptionZoneIterator(Hdfs::Internal::FileSystemImpl * const fs, 38 | const int64_t id); 39 | EncryptionZoneIterator(const EncryptionZoneIterator & it); 40 | EncryptionZoneIterator & operator = (const EncryptionZoneIterator & it); 41 | bool hasNext(); 42 | EncryptionZoneInfo getNext(); 43 | 44 | private: 45 | bool listEncryptionZones(); 46 | 47 | private: 48 | Hdfs::Internal::FileSystemImpl * filesystem; 49 | int64_t id; 50 | size_t next; 51 | std::vector lists; 52 | }; 53 | 54 | } 55 | 56 | #endif /* _HDFS_LIBHFDS3_CLIENT_ENCRYPTIONZONE_ITERATOR_H_ */ 57 | 
-------------------------------------------------------------------------------- /libhdfs3/src/client/FileEncryptionInfo.h: -------------------------------------------------------------------------------- 1 | /******************************************************************** 2 | * 2014 - 3 | * open source under Apache License Version 2.0 4 | ********************************************************************/ 5 | /** 6 | * Licensed to the Apache Software Foundation (ASF) under one 7 | * or more contributor license agreements. See the NOTICE file 8 | * distributed with this work for additional information 9 | * regarding copyright ownership. The ASF licenses this file 10 | * to you under the Apache License, Version 2.0 (the 11 | * "License"); you may not use this file except in compliance 12 | * with the License. You may obtain a copy of the License at 13 | * 14 | * http://www.apache.org/licenses/LICENSE-2.0 15 | * 16 | * Unless required by applicable law or agreed to in writing, software 17 | * distributed under the License is distributed on an "AS IS" BASIS, 18 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 19 | * See the License for the specific language governing permissions and 20 | * limitations under the License. 
21 | */ 22 | #ifndef _HDFS_LIBHDFS3_CLIENT_FILEENCRYPTIONINFO_H_ 23 | #define _HDFS_LIBHDFS3_CLIENT_FILEENCRYPTIONINFO_H_ 24 | 25 | #include 26 | 27 | namespace Hdfs { 28 | 29 | class FileEncryptionInfo { 30 | public: 31 | FileEncryptionInfo() : 32 | suite(0), cryptoProtocolVersion(0) { 33 | } 34 | 35 | int getSuite() const { 36 | return suite; 37 | } 38 | 39 | void setSuite(int suite) { 40 | this->suite = suite; 41 | } 42 | 43 | int getCryptoProtocolVersion() const { 44 | return cryptoProtocolVersion; 45 | } 46 | 47 | void setCryptoProtocolVersion(int cryptoProtocolVersion) { 48 | this->cryptoProtocolVersion = cryptoProtocolVersion; 49 | } 50 | 51 | const std::string & getKey() const{ 52 | return key; 53 | } 54 | 55 | void setKey(const std::string & key){ 56 | this->key = key; 57 | } 58 | 59 | const std::string & getKeyName() const{ 60 | return keyName; 61 | } 62 | 63 | void setKeyName(const std::string & keyName){ 64 | this->keyName = keyName; 65 | } 66 | 67 | const std::string & getIv() const{ 68 | return iv; 69 | } 70 | 71 | void setIv(const std::string & iv){ 72 | this->iv = iv; 73 | } 74 | 75 | const std::string & getEzKeyVersionName() const{ 76 | return ezKeyVersionName; 77 | } 78 | 79 | void setEzKeyVersionName(const std::string & ezKeyVersionName){ 80 | this->ezKeyVersionName = ezKeyVersionName; 81 | } 82 | 83 | private: 84 | int suite; 85 | int cryptoProtocolVersion; 86 | std::string key; 87 | std::string iv; 88 | std::string keyName; 89 | std::string ezKeyVersionName; 90 | }; 91 | 92 | } 93 | #endif 94 | -------------------------------------------------------------------------------- /libhdfs3/src/client/FileSystemStats.h: -------------------------------------------------------------------------------- 1 | /******************************************************************** 2 | * 2014 - 3 | * open source under Apache License Version 2.0 4 | ********************************************************************/ 5 | /** 6 | * Licensed to the Apache 
Software Foundation (ASF) under one 7 | * or more contributor license agreements. See the NOTICE file 8 | * distributed with this work for additional information 9 | * regarding copyright ownership. The ASF licenses this file 10 | * to you under the Apache License, Version 2.0 (the 11 | * "License"); you may not use this file except in compliance 12 | * with the License. You may obtain a copy of the License at 13 | * 14 | * http://www.apache.org/licenses/LICENSE-2.0 15 | * 16 | * Unless required by applicable law or agreed to in writing, software 17 | * distributed under the License is distributed on an "AS IS" BASIS, 18 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 19 | * See the License for the specific language governing permissions and 20 | * limitations under the License. 21 | */ 22 | #ifndef _HDFS_LIBHDFS3_CLIENT_FSSTATS_H_ 23 | #define _HDFS_LIBHDFS3_CLIENT_FSSTATS_H_ 24 | 25 | #include 26 | 27 | namespace Hdfs { 28 | 29 | /** 30 | * file system statistics 31 | */ 32 | class FileSystemStats { 33 | public: 34 | /** 35 | * To construct a FileSystemStats. 36 | */ 37 | FileSystemStats() : 38 | capacity(-1), used(-1), remaining(-1) { 39 | } 40 | 41 | /** 42 | * To construct a FileSystemStats with given values. 43 | * @param capacity the capacity of file system. 44 | * @param used the space which has been used. 45 | * @param remaining available space on file system. 46 | */ 47 | FileSystemStats(int64_t capacity, int64_t used, int64_t remaining) : 48 | capacity(capacity), used(used), remaining(remaining) { 49 | } 50 | 51 | /** 52 | * Return the capacity in bytes of the file system 53 | * @return capacity of file system. 54 | */ 55 | int64_t getCapacity() { 56 | return capacity; 57 | } 58 | 59 | /** 60 | * Return the number of bytes used on the file system 61 | * @return return used space. 
62 | */ 63 | int64_t getUsed() { 64 | return used; 65 | } 66 | 67 | /** 68 | * Return the number of remaining bytes on the file system 69 | * @return return available space. 70 | */ 71 | int64_t getRemaining() { 72 | return remaining; 73 | } 74 | 75 | private: 76 | int64_t capacity; 77 | int64_t used; 78 | int64_t remaining; 79 | 80 | }; 81 | 82 | } 83 | #endif /* _HDFS_LIBHDFS3_CLIENT_FSSTATS_H_ */ 84 | -------------------------------------------------------------------------------- /libhdfs3/src/client/KerberosName.h: -------------------------------------------------------------------------------- 1 | /******************************************************************** 2 | * 2014 - 3 | * open source under Apache License Version 2.0 4 | ********************************************************************/ 5 | /** 6 | * Licensed to the Apache Software Foundation (ASF) under one 7 | * or more contributor license agreements. See the NOTICE file 8 | * distributed with this work for additional information 9 | * regarding copyright ownership. The ASF licenses this file 10 | * to you under the Apache License, Version 2.0 (the 11 | * "License"); you may not use this file except in compliance 12 | * with the License. You may obtain a copy of the License at 13 | * 14 | * http://www.apache.org/licenses/LICENSE-2.0 15 | * 16 | * Unless required by applicable law or agreed to in writing, software 17 | * distributed under the License is distributed on an "AS IS" BASIS, 18 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 19 | * See the License for the specific language governing permissions and 20 | * limitations under the License. 
21 | */ 22 | #ifndef _HDFS_LIBHDFS3_CLIENT_KERBEROSNAME_H_ 23 | #define _HDFS_LIBHDFS3_CLIENT_KERBEROSNAME_H_ 24 | 25 | #include 26 | #include 27 | 28 | #include "Hash.h" 29 | 30 | namespace Hdfs { 31 | namespace Internal { 32 | 33 | class KerberosName { 34 | public: 35 | KerberosName(); 36 | KerberosName(const std::string & principal); 37 | 38 | std::string getPrincipal() const { 39 | std::stringstream ss; 40 | ss.imbue(std::locale::classic()); 41 | ss << name; 42 | 43 | if (!host.empty()) { 44 | ss << "/" << host; 45 | } 46 | 47 | if (!realm.empty()) { 48 | ss << '@' << realm; 49 | } 50 | 51 | return ss.str(); 52 | } 53 | 54 | const std::string & getHost() const { 55 | return host; 56 | } 57 | 58 | void setHost(const std::string & host) { 59 | this->host = host; 60 | } 61 | 62 | const std::string & getName() const { 63 | return name; 64 | } 65 | 66 | void setName(const std::string & name) { 67 | this->name = name; 68 | } 69 | 70 | const std::string & getRealm() const { 71 | return realm; 72 | } 73 | 74 | void setRealm(const std::string & realm) { 75 | this->realm = realm; 76 | } 77 | 78 | size_t hash_value() const; 79 | 80 | bool operator ==(const KerberosName & other) const { 81 | return name == other.name && host == other.host && realm == other.realm; 82 | } 83 | 84 | private: 85 | void parse(const std::string & principal); 86 | 87 | private: 88 | std::string name; 89 | std::string host; 90 | std::string realm; 91 | }; 92 | 93 | } 94 | } 95 | 96 | HDFS_HASH_DEFINE(::Hdfs::Internal::KerberosName); 97 | 98 | #endif /* _HDFS_LIBHDFS3_CLIENT_KERBEROSNAME_H_ */ 99 | -------------------------------------------------------------------------------- /libhdfs3/src/client/LeaseRenewer.h: -------------------------------------------------------------------------------- 1 | /******************************************************************** 2 | * 2014 - 3 | * open source under Apache License Version 2.0 4 | 
********************************************************************/ 5 | /** 6 | * Licensed to the Apache Software Foundation (ASF) under one 7 | * or more contributor license agreements. See the NOTICE file 8 | * distributed with this work for additional information 9 | * regarding copyright ownership. The ASF licenses this file 10 | * to you under the Apache License, Version 2.0 (the 11 | * "License"); you may not use this file except in compliance 12 | * with the License. You may obtain a copy of the License at 13 | * 14 | * http://www.apache.org/licenses/LICENSE-2.0 15 | * 16 | * Unless required by applicable law or agreed to in writing, software 17 | * distributed under the License is distributed on an "AS IS" BASIS, 18 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 19 | * See the License for the specific language governing permissions and 20 | * limitations under the License. 21 | */ 22 | #ifndef _HDFS_LIBHDFS3_CLIENT_LEASE_RENEW_H_ 23 | #define _HDFS_LIBHDFS3_CLIENT_LEASE_RENEW_H_ 24 | 25 | #include 26 | 27 | #include "Atomic.h" 28 | #include "Memory.h" 29 | #include "Thread.h" 30 | 31 | namespace Hdfs { 32 | namespace Internal { 33 | 34 | class FileSystemInter; 35 | 36 | class LeaseRenewer { 37 | public: 38 | virtual ~LeaseRenewer() { 39 | } 40 | 41 | virtual void StartRenew(shared_ptr filesystem) = 0; 42 | virtual void StopRenew(shared_ptr filesystem) = 0; 43 | 44 | public: 45 | static LeaseRenewer & GetLeaseRenewer(); 46 | static void CreateSinglten(); 47 | 48 | private: 49 | static once_flag once; 50 | static shared_ptr renewer; 51 | }; 52 | 53 | class LeaseRenewerImpl: public LeaseRenewer { 54 | public: 55 | LeaseRenewerImpl(); 56 | ~LeaseRenewerImpl(); 57 | int getInterval() const; 58 | void setInterval(int interval); 59 | void StartRenew(shared_ptr filesystem); 60 | void StopRenew(shared_ptr filesystem); 61 | 62 | private: 63 | void renewer(); 64 | 65 | private: 66 | atomic stop; 67 | condition_variable cond; 68 | int 
interval; 69 | mutex mut; 70 | std::map > maps; 71 | thread worker; 72 | }; 73 | 74 | } 75 | } 76 | #endif /* _HDFS_LIBHDFS3_CLIENT_LEASE_RENEW_H_ */ 77 | -------------------------------------------------------------------------------- /libhdfs3/src/client/OutputStream.cpp: -------------------------------------------------------------------------------- 1 | /******************************************************************** 2 | * 2014 - 3 | * open source under Apache License Version 2.0 4 | ********************************************************************/ 5 | /** 6 | * Licensed to the Apache Software Foundation (ASF) under one 7 | * or more contributor license agreements. See the NOTICE file 8 | * distributed with this work for additional information 9 | * regarding copyright ownership. The ASF licenses this file 10 | * to you under the Apache License, Version 2.0 (the 11 | * "License"); you may not use this file except in compliance 12 | * with the License. You may obtain a copy of the License at 13 | * 14 | * http://www.apache.org/licenses/LICENSE-2.0 15 | * 16 | * Unless required by applicable law or agreed to in writing, software 17 | * distributed under the License is distributed on an "AS IS" BASIS, 18 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 19 | * See the License for the specific language governing permissions and 20 | * limitations under the License. 
 */
#include "Atomic.h"
#include "FileSystemImpl.h"
#include "Memory.h"
#include "OutputStream.h"
#include "OutputStreamImpl.h"

using namespace Hdfs::Internal;

namespace Hdfs {

// OutputStream is the public facade over OutputStreamImpl: every method
// below delegates to the owned impl object.  The impl pointer is owned
// exclusively by this object and released in the destructor.
OutputStream::OutputStream() {
    impl = new Internal::OutputStreamImpl;
}

OutputStream::~OutputStream() {
    delete impl;
}

/**
 * Open a file for writing on the given file system.
 * @param fs the file system; must already be connected.
 * @param path the file path to open.
 * @param flag open flags (create/append/overwrite semantics are defined
 *        by OutputStreamImpl::open -- see OutputStreamImpl.cpp).
 * @param permission the permission to create the file with.
 * @param createParent whether missing parent directories are created.
 * @param replication the block replication factor.
 * @param blockSize the block size in bytes.
 * @throw HdfsIOException when the file system is not connected.
 */
void OutputStream::open(FileSystem & fs, const char * path, int flag,
                        const Permission permission, bool createParent, int replication,
                        int64_t blockSize) {
    if (!fs.impl) {
        THROW(HdfsIOException, "FileSystem: not connected.");
    }

    impl->open(fs.impl->filesystem, path, flag, permission, createParent, replication,
               blockSize);
}

/**
 * To append data to file.
 * @param buf the data used to append.
 * @param size the data size.
 */
void OutputStream::append(const char * buf, int64_t size) {
    impl->append(buf, size);
}

/**
 * Flush all data in buffer and waiting for ack.
 * Will block until get all acks.
 */
void OutputStream::flush() {
    impl->flush();
}

/**
 * return the current file length.
 * @return current file length.
 */
int64_t OutputStream::tell() {
    return impl->tell();
}

/**
 * the same as flush right now.
 */
void OutputStream::sync() {
    impl->sync();
}

/**
 * close the stream.
 */
void OutputStream::close() {
    impl->close();
}

}

/* ----------------------------------------------------------------------
 * libhdfs3/src/client/PacketHeader.h
 * ---------------------------------------------------------------------- */
/********************************************************************
 * 2014 -
 * open source under Apache License Version 2.0
 ********************************************************************/
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
 */
#ifndef _HDFS_LIBHDFS3_CLIENT_PACKETHEADER_H_
#define _HDFS_LIBHDFS3_CLIENT_PACKETHEADER_H_

#include "datatransfer.pb.h"

namespace Hdfs {
namespace Internal {

/**
 * Wrapper around the protobuf PacketHeaderProto: the header that
 * precedes each data packet in the HDFS data-transfer protocol.
 */
class PacketHeader {
public:
    PacketHeader();
    PacketHeader(int packetLen, int64_t offsetInBlock, int64_t seqno,
                 bool lastPacketInBlock, int dataLen);
    // True when this packet is the final one of its block.
    bool isLastPacketInBlock();
    // Validate this header against the last sequence number seen;
    // see PacketHeader.cpp for the exact checks performed.
    bool sanityCheck(int64_t lastSeqNo);
    int getDataLen();
    int getPacketLen();
    int64_t getOffsetInBlock();
    int64_t getSeqno();
    // Deserialize the header from a raw buffer.
    void readFields(const char * buf, size_t size);
    /**
     * Write the header into the buffer.
     * This requires that PKT_HEADER_LEN bytes are available.
     */
    void writeInBuffer(char * buf, size_t size);

public:
    // Return the header size; presumably GetPkgHeaderSize returns the
    // cached PkgHeaderSize computed by CalcPkgHeaderSize -- confirm in
    // PacketHeader.cpp.
    static int GetPkgHeaderSize();
    static int CalcPkgHeaderSize();

private:
    static int PkgHeaderSize;  // cached serialized header size
private:
    int32_t packetLen;         // total packet length field
    PacketHeaderProto proto;   // protobuf-backed header fields
};

}
}

#endif /* _HDFS_LIBHDFS3_CLIENT_PACKETHEADER_H_ */

/* ----------------------------------------------------------------------
 * libhdfs3/src/client/PacketPool.cpp
 * ---------------------------------------------------------------------- */
/********************************************************************
 * 2014 -
 * open source under Apache License Version 2.0
 ********************************************************************/
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.
 */
You may obtain a copy of the License at 13 | * 14 | * http://www.apache.org/licenses/LICENSE-2.0 15 | * 16 | * Unless required by applicable law or agreed to in writing, software 17 | * distributed under the License is distributed on an "AS IS" BASIS, 18 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 19 | * See the License for the specific language governing permissions and 20 | * limitations under the License. 21 | */ 22 | #include "Logger.h" 23 | #include "Packet.h" 24 | #include "PacketPool.h" 25 | 26 | namespace Hdfs { 27 | namespace Internal { 28 | 29 | PacketPool::PacketPool(int size) : 30 | maxSize(size) { 31 | } 32 | 33 | shared_ptr PacketPool::getPacket(int pktSize, int chunksPerPkt, 34 | int64_t offsetInBlock, int64_t seqno, int checksumSize) { 35 | if (packets.empty()) { 36 | return shared_ptr( 37 | new Packet(pktSize, chunksPerPkt, offsetInBlock, seqno, 38 | checksumSize)); 39 | } else { 40 | shared_ptr retval = packets.front(); 41 | packets.pop_front(); 42 | retval->reset(pktSize, chunksPerPkt, offsetInBlock, seqno, 43 | checksumSize); 44 | return retval; 45 | } 46 | } 47 | 48 | void PacketPool::relesePacket(shared_ptr packet) { 49 | if (static_cast(packets.size()) >= maxSize) { 50 | return; 51 | } 52 | 53 | packets.push_back(packet); 54 | } 55 | 56 | } 57 | } 58 | -------------------------------------------------------------------------------- /libhdfs3/src/client/PacketPool.h: -------------------------------------------------------------------------------- 1 | /******************************************************************** 2 | * 2014 - 3 | * open source under Apache License Version 2.0 4 | ********************************************************************/ 5 | /** 6 | * Licensed to the Apache Software Foundation (ASF) under one 7 | * or more contributor license agreements. See the NOTICE file 8 | * distributed with this work for additional information 9 | * regarding copyright ownership. 
The ASF licenses this file 10 | * to you under the Apache License, Version 2.0 (the 11 | * "License"); you may not use this file except in compliance 12 | * with the License. You may obtain a copy of the License at 13 | * 14 | * http://www.apache.org/licenses/LICENSE-2.0 15 | * 16 | * Unless required by applicable law or agreed to in writing, software 17 | * distributed under the License is distributed on an "AS IS" BASIS, 18 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 19 | * See the License for the specific language governing permissions and 20 | * limitations under the License. 21 | */ 22 | #ifndef _HDFS_LIBHDFS3_CLIENT_PACKETPOOL_H_ 23 | #define _HDFS_LIBHDFS3_CLIENT_PACKETPOOL_H_ 24 | #include "Memory.h" 25 | 26 | #include 27 | 28 | namespace Hdfs { 29 | namespace Internal { 30 | 31 | class Packet; 32 | 33 | /* 34 | * A simple packet pool implementation. 35 | * 36 | * Packet is created here if no packet is available. 37 | * And then add to Pipeline's packet queue to wait for the ack. 38 | * The Pipeline's packet queue size is not larger than the PacketPool's max size, 39 | * otherwise the write operation will be pending for the ack. 40 | * Once the ack is received, packet will reutrn back to the PacketPool to reuse. 
41 | */ 42 | class PacketPool { 43 | public: 44 | PacketPool(int size); 45 | shared_ptr getPacket(int pktSize, int chunksPerPkt, 46 | int64_t offsetInBlock, int64_t seqno, int checksumSize); 47 | void relesePacket(shared_ptr packet); 48 | 49 | void setMaxSize(int size) { 50 | this->maxSize = size; 51 | } 52 | 53 | int getMaxSize() const { 54 | return maxSize; 55 | } 56 | 57 | private: 58 | int maxSize; 59 | std::deque > packets; 60 | }; 61 | 62 | } 63 | } 64 | 65 | #endif /* _HDFS_LIBHDFS3_CLIENT_PACKETPOOL_H_ */ 66 | -------------------------------------------------------------------------------- /libhdfs3/src/client/PeerCache.h: -------------------------------------------------------------------------------- 1 | /******************************************************************** 2 | * 2014 - 3 | * open source under Apache License Version 2.0 4 | ********************************************************************/ 5 | /** 6 | * Licensed to the Apache Software Foundation (ASF) under one 7 | * or more contributor license agreements. See the NOTICE file 8 | * distributed with this work for additional information 9 | * regarding copyright ownership. The ASF licenses this file 10 | * to you under the Apache License, Version 2.0 (the 11 | * "License"); you may not use this file except in compliance 12 | * with the License. You may obtain a copy of the License at 13 | * 14 | * http://www.apache.org/licenses/LICENSE-2.0 15 | * 16 | * Unless required by applicable law or agreed to in writing, software 17 | * distributed under the License is distributed on an "AS IS" BASIS, 18 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 19 | * See the License for the specific language governing permissions and 20 | * limitations under the License. 
21 | */ 22 | #ifndef _HDFS_LIBHDFS3_CLIENT_PEERCACHE_H_ 23 | #define _HDFS_LIBHDFS3_CLIENT_PEERCACHE_H_ 24 | 25 | #include 26 | #include 27 | 28 | #include "common/DateTime.h" 29 | #include "common/LruMap.h" 30 | #include "common/Memory.h" 31 | #include "common/SessionConfig.h" 32 | #include "network/Socket.h" 33 | #include "server/DatanodeInfo.h" 34 | 35 | namespace Hdfs { 36 | namespace Internal { 37 | 38 | class PeerCache { 39 | public: 40 | explicit PeerCache(const SessionConfig& conf); 41 | 42 | shared_ptr getConnection(const DatanodeInfo& datanode); 43 | 44 | void addConnection(shared_ptr peer, const DatanodeInfo& datanode); 45 | 46 | typedef std::pair, steady_clock::time_point> value_type; 47 | 48 | private: 49 | std::string buildKey(const DatanodeInfo& datanode); 50 | 51 | private: 52 | const int cacheSize; 53 | int64_t expireTimeInterval; // milliseconds 54 | static LruMap Map; 55 | }; 56 | } 57 | } 58 | 59 | #endif /* _HDFS_LIBHDFS3_CLIENT_PEERCACHE_H_ */ 60 | -------------------------------------------------------------------------------- /libhdfs3/src/client/Permission.cpp: -------------------------------------------------------------------------------- 1 | /******************************************************************** 2 | * 2014 - 3 | * open source under Apache License Version 2.0 4 | ********************************************************************/ 5 | /** 6 | * Licensed to the Apache Software Foundation (ASF) under one 7 | * or more contributor license agreements. See the NOTICE file 8 | * distributed with this work for additional information 9 | * regarding copyright ownership. The ASF licenses this file 10 | * to you under the Apache License, Version 2.0 (the 11 | * "License"); you may not use this file except in compliance 12 | * with the License. 
You may obtain a copy of the License at 13 | * 14 | * http://www.apache.org/licenses/LICENSE-2.0 15 | * 16 | * Unless required by applicable law or agreed to in writing, software 17 | * distributed under the License is distributed on an "AS IS" BASIS, 18 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 19 | * See the License for the specific language governing permissions and 20 | * limitations under the License. 21 | */ 22 | #include "Permission.h" 23 | 24 | #include "Exception.h" 25 | #include "ExceptionInternal.h" 26 | 27 | namespace Hdfs { 28 | 29 | Permission::Permission(uint16_t mode) { 30 | uint16_t fileEncryptionBit = (1 << 13); 31 | bool isFileEncryption = (((mode & fileEncryptionBit) != 0) ? true : false); 32 | 33 | if (!isFileEncryption && mode >> 14) { 34 | THROW(InvalidParameter, 35 | "Invalid parameter: cannot convert %u to \"Permission\"", 36 | static_cast(mode)); 37 | } 38 | 39 | userAction = (Action)((mode >> 6) & 7); 40 | groupAction = (Action)((mode >> 3) & 7); 41 | otherAction = (Action)(mode & 7); 42 | stickyBit = (((mode >> 9) & 1) == 1); 43 | } 44 | 45 | } 46 | -------------------------------------------------------------------------------- /libhdfs3/src/client/PipelineAck.h: -------------------------------------------------------------------------------- 1 | /******************************************************************** 2 | * 2014 - 3 | * open source under Apache License Version 2.0 4 | ********************************************************************/ 5 | /** 6 | * Licensed to the Apache Software Foundation (ASF) under one 7 | * or more contributor license agreements. See the NOTICE file 8 | * distributed with this work for additional information 9 | * regarding copyright ownership. The ASF licenses this file 10 | * to you under the Apache License, Version 2.0 (the 11 | * "License"); you may not use this file except in compliance 12 | * with the License. 
You may obtain a copy of the License at 13 | * 14 | * http://www.apache.org/licenses/LICENSE-2.0 15 | * 16 | * Unless required by applicable law or agreed to in writing, software 17 | * distributed under the License is distributed on an "AS IS" BASIS, 18 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 19 | * See the License for the specific language governing permissions and 20 | * limitations under the License. 21 | */ 22 | #ifndef _HDFS_LIBHDFS3_CLIENT_PIPELINEACK_H_ 23 | #define _HDFS_LIBHDFS3_CLIENT_PIPELINEACK_H_ 24 | 25 | #include "datatransfer.pb.h" 26 | 27 | namespace Hdfs { 28 | namespace Internal { 29 | 30 | class PipelineAck { 31 | public: 32 | PipelineAck() : 33 | invalid(true) { 34 | } 35 | 36 | PipelineAck(const char * buf, int size) : 37 | invalid(false) { 38 | readFrom(buf, size); 39 | } 40 | 41 | bool isInvalid() { 42 | return invalid; 43 | } 44 | 45 | int getNumOfReplies() { 46 | return proto.status_size(); 47 | } 48 | 49 | int64_t getSeqno() { 50 | return proto.seqno(); 51 | } 52 | 53 | Status getReply(int i) { 54 | return proto.status(i); 55 | } 56 | 57 | bool isSuccess() { 58 | int size = proto.status_size(); 59 | 60 | for (int i = 0; i < size; ++i) { 61 | if (Status::DT_PROTO_SUCCESS != proto.status(i)) { 62 | return false; 63 | } 64 | } 65 | 66 | return true; 67 | } 68 | 69 | void readFrom(const char * buf, int size) { 70 | invalid = !proto.ParseFromArray(buf, size); 71 | } 72 | 73 | void reset() { 74 | proto.Clear(); 75 | invalid = true; 76 | } 77 | 78 | private: 79 | PipelineAckProto proto; 80 | bool invalid; 81 | }; 82 | 83 | } 84 | } 85 | 86 | #endif /* _HDFS_LIBHDFS3_CLIENT_PIPELINEACK_H_ */ 87 | -------------------------------------------------------------------------------- /libhdfs3/src/client/Token.h: -------------------------------------------------------------------------------- 1 | /******************************************************************** 2 | * 2014 - 3 | * open source under Apache License 
/*
 * Version 2.0
 ********************************************************************/
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#ifndef _HDFS_LIBHDFS3_CLIENT_TOKEN_H_
#define _HDFS_LIBHDFS3_CLIENT_TOKEN_H_

// NOTE(review): the original include target was stripped during
// extraction; restored from the std::string members below.
#include <string>

namespace Hdfs {
namespace Internal {

/**
 * A Hadoop delegation/block token: an (identifier, password, kind,
 * service) tuple with string serialization.
 */
class Token {
public:
    const std::string & getIdentifier() const {
        return identifier;
    }

    void setIdentifier(const std::string & identifier) {
        this->identifier = identifier;
    }

    const std::string & getKind() const {
        return kind;
    }

    void setKind(const std::string & kind) {
        this->kind = kind;
    }

    const std::string & getPassword() const {
        return password;
    }

    void setPassword(const std::string & password) {
        this->password = password;
    }

    const std::string & getService() const {
        return service;
    }

    void setService(const std::string & service) {
        this->service = service;
    }

    bool operator ==(const Token & other) const {
        return identifier == other.identifier && password == other.password
               && kind == other.kind && service == other.service;
    }

    // Serialize this token to a string; implemented in Token.cpp.
    std::string toString() const;

    // Populate this token from a serialized string; returns *this.
    Token & fromString(const std::string & str);

    // Hash over all four fields; implemented in Token.cpp.
    size_t hash_value() const;

private:
    std::string identifier;
    std::string password;
    std::string kind;
    std::string service;
};

}
}

#endif /* _HDFS_LIBHDFS3_CLIENT_TOKEN_H_ */

/* ----------------------------------------------------------------------
 * libhdfs3/src/client/TokenInternal.h
 * ---------------------------------------------------------------------- */
/********************************************************************
 * 2014 -
 * open source under Apache License Version 2.0
 ********************************************************************/
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
 */
#ifndef _HDFS_LIBHDFS3_CLIENT_TOKENINTERNAL_H_
#define _HDFS_LIBHDFS3_CLIENT_TOKENINTERNAL_H_

#include "Hash.h"
#include "Token.h"

// Wire Token::hash_value() into the hashing framework from Hash.h so
// Hdfs::Internal::Token can be used as a key in hash-based containers.
HDFS_HASH_DEFINE(::Hdfs::Internal::Token);

#endif /* _HDFS_LIBHDFS3_CLIENT_TOKENINTERNAL_H_ */

/* ----------------------------------------------------------------------
 * libhdfs3/src/client/UserInfo.cpp
 * ---------------------------------------------------------------------- */
/********************************************************************
 * 2014 -
 * open source under Apache License Version 2.0
 ********************************************************************/
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
21 | */ 22 | #include "UserInfo.h" 23 | 24 | #include 25 | #include 26 | #include 27 | 28 | #include 29 | 30 | #include "Exception.h" 31 | #include "ExceptionInternal.h" 32 | 33 | namespace Hdfs { 34 | namespace Internal { 35 | 36 | UserInfo UserInfo::LocalUser() { 37 | UserInfo retval; 38 | uid_t uid, euid; 39 | int bufsize; 40 | struct passwd pwd, epwd, *result = NULL; 41 | euid = geteuid(); 42 | uid = getuid(); 43 | 44 | if ((bufsize = sysconf(_SC_GETPW_R_SIZE_MAX)) == -1) { 45 | THROW(InvalidParameter, 46 | "Invalid input: \"sysconf\" function failed to get the configure with key \"_SC_GETPW_R_SIZE_MAX\"."); 47 | } 48 | 49 | std::vector buffer(bufsize); 50 | 51 | if (getpwuid_r(euid, &epwd, &buffer[0], bufsize, &result) != 0 || !result) { 52 | THROW(InvalidParameter, 53 | "Invalid input: effective user name cannot be found with UID %u.", 54 | euid); 55 | } 56 | 57 | retval.setEffectiveUser(epwd.pw_name); 58 | 59 | if (getpwuid_r(uid, &pwd, &buffer[0], bufsize, &result) != 0 || !result) { 60 | THROW(InvalidParameter, 61 | "Invalid input: real user name cannot be found with UID %u.", 62 | uid); 63 | } 64 | 65 | retval.setRealUser(pwd.pw_name); 66 | return retval; 67 | } 68 | 69 | size_t UserInfo::hash_value() const { 70 | size_t values[] = { StringHasher(realUser), krbUser.hash_value(), StringHasher(effectiveUser) }; 71 | return CombineHasher(values, sizeof(values) / sizeof(values[0])); 72 | } 73 | 74 | } 75 | } 76 | -------------------------------------------------------------------------------- /libhdfs3/src/common/Atomic.h: -------------------------------------------------------------------------------- 1 | /******************************************************************** 2 | * 2014 - 3 | * open source under Apache License Version 2.0 4 | ********************************************************************/ 5 | /** 6 | * Licensed to the Apache Software Foundation (ASF) under one 7 | * or more contributor license agreements. 
See the NOTICE file 8 | * distributed with this work for additional information 9 | * regarding copyright ownership. The ASF licenses this file 10 | * to you under the Apache License, Version 2.0 (the 11 | * "License"); you may not use this file except in compliance 12 | * with the License. You may obtain a copy of the License at 13 | * 14 | * http://www.apache.org/licenses/LICENSE-2.0 15 | * 16 | * Unless required by applicable law or agreed to in writing, software 17 | * distributed under the License is distributed on an "AS IS" BASIS, 18 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 19 | * See the License for the specific language governing permissions and 20 | * limitations under the License. 21 | */ 22 | #ifndef _HDFS_LIBHDFS3_COMMON_ATOMIC_H_ 23 | #define _HDFS_LIBHDFS3_COMMON_ATOMIC_H_ 24 | 25 | #include "platform.h" 26 | 27 | #if defined(NEED_BOOST) && defined(HAVE_BOOST_ATOMIC) 28 | 29 | #include 30 | 31 | namespace Hdfs { 32 | namespace Internal { 33 | 34 | using boost::atomic; 35 | 36 | } 37 | } 38 | 39 | #elif defined(HAVE_STD_ATOMIC) 40 | 41 | #include 42 | 43 | namespace Hdfs { 44 | namespace Internal { 45 | 46 | using std::atomic; 47 | 48 | } 49 | } 50 | #else 51 | #error "no atomic library is available" 52 | #endif 53 | 54 | #endif /* _HDFS_LIBHDFS3_COMMON_ATOMIC_H_ */ 55 | 56 | -------------------------------------------------------------------------------- /libhdfs3/src/common/BigEndian.h: -------------------------------------------------------------------------------- 1 | /******************************************************************** 2 | * 2014 - 3 | * open source under Apache License Version 2.0 4 | ********************************************************************/ 5 | /** 6 | * Licensed to the Apache Software Foundation (ASF) under one 7 | * or more contributor license agreements. See the NOTICE file 8 | * distributed with this work for additional information 9 | * regarding copyright ownership. 
/*
 * The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#ifndef _HDFS_LIBHDFS3_COMMON_BIGENDIAN_H_
#define _HDFS_LIBHDFS3_COMMON_BIGENDIAN_H_

// NOTE(review): the original include targets were stripped during
// extraction; restored from usage below (ntohs/ntohl/htons/htonl and
// memcpy).  <stdint.h> added explicitly for the fixed-width types.
#include <arpa/inet.h>
#include <string.h>

#include <stdint.h>

namespace Hdfs {
namespace Internal {

/** Read a big-endian 16-bit integer from an arbitrary byte buffer. */
static inline int16_t ReadBigEndian16FromArray(const char * buffer) {
    // memcpy instead of a reinterpret_cast dereference: the buffer may be
    // unaligned, and the cast would also violate strict aliasing.
    int16_t retval;
    memcpy(&retval, buffer, sizeof(retval));
    return ntohs(retval);
}

/** Read a big-endian 32-bit integer from an arbitrary byte buffer. */
static inline int32_t ReadBigEndian32FromArray(const char * buffer) {
    int32_t retval;
    memcpy(&retval, buffer, sizeof(retval));
    return ntohl(retval);
}

/**
 * Write value to buffer in big-endian order.
 * @return pointer just past the bytes written.
 */
static inline char * WriteBigEndian16ToArray(int16_t value, char * buffer) {
    int16_t bigValue = htons(value);
    memcpy(buffer, reinterpret_cast<const char *>(&bigValue), sizeof(int16_t));
    return buffer + sizeof(int16_t);
}

/**
 * Write value to buffer in big-endian order.
 * @return pointer just past the bytes written.
 */
static inline char * WriteBigEndian32ToArray(int32_t value, char * buffer) {
    int32_t bigValue = htonl(value);
    memcpy(buffer, reinterpret_cast<const char *>(&bigValue), sizeof(int32_t));
    return buffer + sizeof(int32_t);
}

}
}

#endif /* _HDFS_LIBHDFS3_COMMON_BIGENDIAN_H_ */

/* ----------------------------------------------------------------------
 * libhdfs3/src/common/Checksum.h
 * ---------------------------------------------------------------------- */
/********************************************************************
 * 2014 -
 */
/*
 * open source under Apache License Version 2.0
 ********************************************************************/
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#ifndef _HDFS_LIBHDFS3_COMMON_CHECKSUM_H_
#define _HDFS_LIBHDFS3_COMMON_CHECKSUM_H_

// NOTE(review): the original include target was stripped during
// extraction; restored for the uint32_t return type below.
#include <stdint.h>

#define CHECKSUM_TYPE_SIZE 1
#define CHECKSUM_BYTES_PER_CHECKSUM_SIZE 4
#define CHECKSUM_TYPE_CRC32C 2

namespace Hdfs {
namespace Internal {

/**
 * An abstract base CRC class.
 */
class Checksum {
public:
    /**
     * @return Returns the current checksum value.
     */
    virtual uint32_t getValue() = 0;

    /**
     * Resets the checksum to its initial value.
     */
    virtual void reset() = 0;

    /**
     * Updates the current checksum with the specified array of bytes.
     * @param b The buffer of data.
     * @param len The buffer length.
     */
    virtual void update(const void * b, int len) = 0;

    /**
     * Destroy the instance.
     */
    virtual ~Checksum() {
    }
};

}
}

#endif /* _HDFS_LIBHDFS3_COMMON_CHECKSUM_H_ */

/* ----------------------------------------------------------------------
 * libhdfs3/src/common/DateTime.h
 * ---------------------------------------------------------------------- */
/********************************************************************
 * 2014 -
 * open source under Apache License Version 2.0
 ********************************************************************/
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
21 | */ 22 | #ifndef _HDFS_LIBHDFS3_COMMON_DATETIME_H_ 23 | #define _HDFS_LIBHDFS3_COMMON_DATETIME_H_ 24 | 25 | #include "platform.h" 26 | 27 | #include 28 | #include 29 | 30 | #if defined(NEED_BOOST) && defined(HAVE_BOOST_CHRONO) 31 | 32 | #include 33 | 34 | namespace Hdfs { 35 | namespace Internal { 36 | 37 | using namespace boost::chrono; 38 | 39 | } 40 | } 41 | 42 | #elif defined(HAVE_STD_CHRONO) 43 | 44 | #include 45 | 46 | namespace Hdfs { 47 | namespace Internal { 48 | 49 | using namespace std::chrono; 50 | 51 | #ifndef HAVE_STEADY_CLOCK 52 | typedef std::chrono::monotonic_clock steady_clock; 53 | #endif 54 | 55 | } 56 | } 57 | #else 58 | #error "no chrono library is available" 59 | #endif 60 | 61 | namespace Hdfs { 62 | namespace Internal { 63 | 64 | template 65 | static int64_t ToMilliSeconds(TimeStamp const & s, TimeStamp const & e) { 66 | assert(e >= s); 67 | return duration_cast(e - s).count(); 68 | } 69 | 70 | } 71 | } 72 | 73 | #endif /* _HDFS_LIBHDFS3_COMMON_DATETIME_H_ */ 74 | -------------------------------------------------------------------------------- /libhdfs3/src/common/FileWrapper.h: -------------------------------------------------------------------------------- 1 | /******************************************************************** 2 | * 2014 - 3 | * open source under Apache License Version 2.0 4 | ********************************************************************/ 5 | /** 6 | * Licensed to the Apache Software Foundation (ASF) under one 7 | * or more contributor license agreements. See the NOTICE file 8 | * distributed with this work for additional information 9 | * regarding copyright ownership. The ASF licenses this file 10 | * to you under the Apache License, Version 2.0 (the 11 | * "License"); you may not use this file except in compliance 12 | * with the License. 
You may obtain a copy of the License at 13 | * 14 | * http://www.apache.org/licenses/LICENSE-2.0 15 | * 16 | * Unless required by applicable law or agreed to in writing, software 17 | * distributed under the License is distributed on an "AS IS" BASIS, 18 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 19 | * See the License for the specific language governing permissions and 20 | * limitations under the License. 21 | */ 22 | #ifndef _HDFS_LIBHDFS3_COMMON_FILEWRAPPER_H_ 23 | #define _HDFS_LIBHDFS3_COMMON_FILEWRAPPER_H_ 24 | 25 | #include 26 | #include 27 | #include 28 | #include 29 | #include 30 | 31 | namespace Hdfs { 32 | namespace Internal { 33 | 34 | class FileWrapper { 35 | public: 36 | virtual ~FileWrapper() { 37 | } 38 | 39 | virtual bool open(int fd, bool delegate) = 0; 40 | virtual bool open(const std::string & path) = 0; 41 | virtual void close() = 0; 42 | virtual const char * read(std::vector & buffer, int32_t size) = 0; 43 | virtual void copy(char * buffer, int32_t size) = 0; 44 | virtual void seek(int64_t position) = 0; 45 | }; 46 | 47 | class CFileWrapper: public FileWrapper { 48 | public: 49 | CFileWrapper(); 50 | ~CFileWrapper(); 51 | bool open(int fd, bool delegate); 52 | bool open(const std::string & path); 53 | void close(); 54 | const char * read(std::vector & buffer, int32_t size); 55 | void copy(char * buffer, int32_t size); 56 | void seek(int64_t offset); 57 | 58 | private: 59 | FILE * file; 60 | std::string path; 61 | }; 62 | 63 | class MappedFileWrapper: public FileWrapper { 64 | public: 65 | MappedFileWrapper(); 66 | ~MappedFileWrapper(); 67 | bool open(int fd, bool delegate); 68 | bool open(const std::string & path); 69 | void close(); 70 | const char * read(std::vector & buffer, int32_t size); 71 | void copy(char * buffer, int32_t size); 72 | void seek(int64_t offset); 73 | 74 | private: 75 | bool openInternal(int fd, bool delegate, size_t size); 76 | 77 | private: 78 | bool delegate; 79 | const char * begin; 
80 | const char * position; 81 | int fd; 82 | int64_t size; 83 | std::string path; 84 | }; 85 | 86 | } 87 | } 88 | 89 | #endif /* _HDFS_LIBHDFS3_COMMON_FILEWRAPPER_H_ */ 90 | -------------------------------------------------------------------------------- /libhdfs3/src/common/Function.h: -------------------------------------------------------------------------------- 1 | /******************************************************************** 2 | * 2014 - 3 | * open source under Apache License Version 2.0 4 | ********************************************************************/ 5 | /** 6 | * Licensed to the Apache Software Foundation (ASF) under one 7 | * or more contributor license agreements. See the NOTICE file 8 | * distributed with this work for additional information 9 | * regarding copyright ownership. The ASF licenses this file 10 | * to you under the Apache License, Version 2.0 (the 11 | * "License"); you may not use this file except in compliance 12 | * with the License. You may obtain a copy of the License at 13 | * 14 | * http://www.apache.org/licenses/LICENSE-2.0 15 | * 16 | * Unless required by applicable law or agreed to in writing, software 17 | * distributed under the License is distributed on an "AS IS" BASIS, 18 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 19 | * See the License for the specific language governing permissions and 20 | * limitations under the License. 
21 | */ 22 | #ifndef _HDFS_LIBHDFS3_COMMON_FUNCTION_H_ 23 | #define _HDFS_LIBHDFS3_COMMON_FUNCTION_H_ 24 | 25 | #include "platform.h" 26 | 27 | #ifdef NEED_BOOST 28 | #include 29 | #include 30 | 31 | namespace Hdfs { 32 | 33 | using boost::function; 34 | using boost::bind; 35 | using boost::reference_wrapper; 36 | 37 | } 38 | 39 | #else 40 | 41 | #include 42 | 43 | namespace Hdfs { 44 | 45 | using std::function; 46 | using std::bind; 47 | using std::reference_wrapper; 48 | using namespace std::placeholders; 49 | 50 | } 51 | 52 | #endif 53 | 54 | #endif /* _HDFS_LIBHDFS3_COMMON_FUNCTION_H_ */ 55 | -------------------------------------------------------------------------------- /libhdfs3/src/common/HWCrc32c.h: -------------------------------------------------------------------------------- 1 | /******************************************************************** 2 | * 2014 - 3 | * open source under Apache License Version 2.0 4 | ********************************************************************/ 5 | /** 6 | * Licensed to the Apache Software Foundation (ASF) under one 7 | * or more contributor license agreements. See the NOTICE file 8 | * distributed with this work for additional information 9 | * regarding copyright ownership. The ASF licenses this file 10 | * to you under the Apache License, Version 2.0 (the 11 | * "License"); you may not use this file except in compliance 12 | * with the License. You may obtain a copy of the License at 13 | * 14 | * http://www.apache.org/licenses/LICENSE-2.0 15 | * 16 | * Unless required by applicable law or agreed to in writing, software 17 | * distributed under the License is distributed on an "AS IS" BASIS, 18 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 19 | * See the License for the specific language governing permissions and 20 | * limitations under the License. 
21 | */ 22 | #ifndef _HDFS_LIBHDFS3_COMMON_HWCHECKSUM_H_ 23 | #define _HDFS_LIBHDFS3_COMMON_HWCHECKSUM_H_ 24 | 25 | #include "Checksum.h" 26 | 27 | namespace Hdfs { 28 | namespace Internal { 29 | 30 | /** 31 | * Calculate CRC with hardware support. 32 | */ 33 | class HWCrc32c: public Checksum { 34 | public: 35 | /** 36 | * Constructor. 37 | */ 38 | HWCrc32c() : 39 | crc(0xFFFFFFFF) { 40 | } 41 | 42 | uint32_t getValue() { 43 | return ~crc; 44 | } 45 | 46 | /** 47 | * @ref Checksum#reset() 48 | */ 49 | void reset() { 50 | crc = 0xFFFFFFFF; 51 | } 52 | 53 | /** 54 | * @ref Checksum#update(const void *, int) 55 | */ 56 | void update(const void * b, int len); 57 | 58 | /** 59 | * Destory an HWCrc32 instance. 60 | */ 61 | ~HWCrc32c() { 62 | } 63 | 64 | /** 65 | * To test if the hardware support this function. 66 | * @return true if the hardware support to calculate the CRC. 67 | */ 68 | static bool available(); 69 | 70 | private: 71 | void updateInt64(const char * b, int len); 72 | 73 | private: 74 | uint32_t crc; 75 | }; 76 | 77 | } 78 | } 79 | 80 | #endif /* _HDFS_LIBHDFS3_COMMON_HWCHECKSUM_H_ */ 81 | -------------------------------------------------------------------------------- /libhdfs3/src/common/Hash.cpp: -------------------------------------------------------------------------------- 1 | /******************************************************************** 2 | * 2014 - 3 | * open source under Apache License Version 2.0 4 | ********************************************************************/ 5 | /** 6 | * Licensed to the Apache Software Foundation (ASF) under one 7 | * or more contributor license agreements. See the NOTICE file 8 | * distributed with this work for additional information 9 | * regarding copyright ownership. The ASF licenses this file 10 | * to you under the Apache License, Version 2.0 (the 11 | * "License"); you may not use this file except in compliance 12 | * with the License. 
You may obtain a copy of the License at 13 | * 14 | * http://www.apache.org/licenses/LICENSE-2.0 15 | * 16 | * Unless required by applicable law or agreed to in writing, software 17 | * distributed under the License is distributed on an "AS IS" BASIS, 18 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 19 | * See the License for the specific language governing permissions and 20 | * limitations under the License. 21 | */ 22 | #include "Hash.h" 23 | 24 | #ifdef NEED_BOOST 25 | 26 | #include 27 | 28 | namespace Hdfs { 29 | namespace Internal { 30 | 31 | /** 32 | * A hash function object used to hash a boolean value. 33 | */ 34 | boost::hash BoolHasher; 35 | 36 | /** 37 | * A hash function object used to hash an int value. 38 | */ 39 | boost::hash Int32Hasher; 40 | 41 | /** 42 | * A hash function object used to hash an 64 bit int value. 43 | */ 44 | boost::hash Int64Hasher; 45 | 46 | /** 47 | * A hash function object used to hash a size_t value. 48 | */ 49 | boost::hash SizeHasher; 50 | 51 | /** 52 | * A hash function object used to hash a std::string object. 53 | */ 54 | boost::hash StringHasher; 55 | } 56 | } 57 | 58 | #else 59 | 60 | #include 61 | 62 | namespace Hdfs { 63 | namespace Internal { 64 | 65 | /** 66 | * A hash function object used to hash a boolean value. 67 | */ 68 | std::hash BoolHasher; 69 | 70 | /** 71 | * A hash function object used to hash an int value. 72 | */ 73 | std::hash Int32Hasher; 74 | 75 | /** 76 | * A hash function object used to hash an 64 bit int value. 77 | */ 78 | std::hash Int64Hasher; 79 | 80 | /** 81 | * A hash function object used to hash a size_t value. 82 | */ 83 | std::hash SizeHasher; 84 | 85 | /** 86 | * A hash function object used to hash a std::string object. 
87 | */ 88 | std::hash StringHasher; 89 | 90 | } 91 | } 92 | 93 | #endif 94 | -------------------------------------------------------------------------------- /libhdfs3/src/common/Logger.h: -------------------------------------------------------------------------------- 1 | /******************************************************************** 2 | * 2014 - 3 | * open source under Apache License Version 2.0 4 | ********************************************************************/ 5 | /** 6 | * Licensed to the Apache Software Foundation (ASF) under one 7 | * or more contributor license agreements. See the NOTICE file 8 | * distributed with this work for additional information 9 | * regarding copyright ownership. The ASF licenses this file 10 | * to you under the Apache License, Version 2.0 (the 11 | * "License"); you may not use this file except in compliance 12 | * with the License. You may obtain a copy of the License at 13 | * 14 | * http://www.apache.org/licenses/LICENSE-2.0 15 | * 16 | * Unless required by applicable law or agreed to in writing, software 17 | * distributed under the License is distributed on an "AS IS" BASIS, 18 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 19 | * See the License for the specific language governing permissions and 20 | * limitations under the License. 21 | */ 22 | #ifndef _HDFS_LIBHDFS3_COMMON_LOGGER_H_ 23 | #define _HDFS_LIBHDFS3_COMMON_LOGGER_H_ 24 | 25 | #define DEFAULT_LOG_LEVEL INFO 26 | 27 | namespace Hdfs { 28 | namespace Internal { 29 | 30 | extern const char * SeverityName[7]; 31 | 32 | enum LogSeverity { 33 | FATAL, LOG_ERROR, WARNING, INFO, DEBUG1, DEBUG2, DEBUG3 34 | }; 35 | 36 | class Logger; 37 | 38 | class Logger { 39 | public: 40 | Logger(); 41 | 42 | ~Logger(); 43 | 44 | void setOutputFd(int f); 45 | 46 | void setLogSeverity(LogSeverity l); 47 | 48 | void printf(LogSeverity s, const char * fmt, ...) 
__attribute__((format(printf, 3, 4))); 49 | 50 | private: 51 | int fd; 52 | LogSeverity severity; 53 | }; 54 | 55 | extern Logger RootLogger; 56 | 57 | } 58 | } 59 | 60 | #define LOG(s, fmt, ...) \ 61 | Hdfs::Internal::RootLogger.printf(s, fmt, ##__VA_ARGS__) 62 | 63 | #endif /* _HDFS_LIBHDFS3_COMMON_LOGGER_H_ */ 64 | -------------------------------------------------------------------------------- /libhdfs3/src/common/Memory.h: -------------------------------------------------------------------------------- 1 | /******************************************************************** 2 | * 2014 - 3 | * open source under Apache License Version 2.0 4 | ********************************************************************/ 5 | /** 6 | * Licensed to the Apache Software Foundation (ASF) under one 7 | * or more contributor license agreements. See the NOTICE file 8 | * distributed with this work for additional information 9 | * regarding copyright ownership. The ASF licenses this file 10 | * to you under the Apache License, Version 2.0 (the 11 | * "License"); you may not use this file except in compliance 12 | * with the License. You may obtain a copy of the License at 13 | * 14 | * http://www.apache.org/licenses/LICENSE-2.0 15 | * 16 | * Unless required by applicable law or agreed to in writing, software 17 | * distributed under the License is distributed on an "AS IS" BASIS, 18 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 19 | * See the License for the specific language governing permissions and 20 | * limitations under the License. 
21 | */ 22 | #ifndef _HDFS_LIBHDFS3_COMMON_MEMORY_H_ 23 | #define _HDFS_LIBHDFS3_COMMON_MEMORY_H_ 24 | 25 | #include "platform.h" 26 | 27 | #ifdef NEED_BOOST 28 | 29 | #include 30 | 31 | namespace Hdfs { 32 | namespace Internal { 33 | 34 | using boost::shared_ptr; 35 | 36 | } 37 | } 38 | 39 | #else 40 | 41 | #include 42 | 43 | namespace Hdfs { 44 | namespace Internal { 45 | 46 | using std::shared_ptr; 47 | 48 | } 49 | } 50 | #endif 51 | 52 | #endif /* _HDFS_LIBHDFS3_COMMON_MEMORY_H_ */ 53 | -------------------------------------------------------------------------------- /libhdfs3/src/common/SWCrc32c.h: -------------------------------------------------------------------------------- 1 | /******************************************************************** 2 | * 2014 - 3 | * open source under Apache License Version 2.0 4 | ********************************************************************/ 5 | /** 6 | * Licensed to the Apache Software Foundation (ASF) under one 7 | * or more contributor license agreements. See the NOTICE file 8 | * distributed with this work for additional information 9 | * regarding copyright ownership. The ASF licenses this file 10 | * to you under the Apache License, Version 2.0 (the 11 | * "License"); you may not use this file except in compliance 12 | * with the License. You may obtain a copy of the License at 13 | * 14 | * http://www.apache.org/licenses/LICENSE-2.0 15 | * 16 | * Unless required by applicable law or agreed to in writing, software 17 | * distributed under the License is distributed on an "AS IS" BASIS, 18 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 19 | * See the License for the specific language governing permissions and 20 | * limitations under the License. 
21 | */ 22 | #ifndef _HDFS_LIBHDFS3_COMMON_SWCRC32C_H_ 23 | #define _HDFS_LIBHDFS3_COMMON_SWCRC32C_H_ 24 | 25 | #include "platform.h" 26 | 27 | #include "Checksum.h" 28 | 29 | #ifdef NEED_BOOST 30 | 31 | #include 32 | 33 | namespace Hdfs { 34 | namespace Internal { 35 | 36 | typedef boost::crc_optimal<32, 0x1EDC6F41, 0xFFFFFFFF, 0xFFFFFFFF, true, true> crc_32c_type; 37 | 38 | class SWCrc32c: public Checksum { 39 | public: 40 | SWCrc32c() { 41 | } 42 | 43 | uint32_t getValue() { 44 | return crc.checksum(); 45 | } 46 | 47 | void reset() { 48 | crc.reset(); 49 | } 50 | 51 | void update(const void * b, int len) { 52 | crc.process_bytes(b, len); 53 | } 54 | 55 | ~SWCrc32c() { 56 | } 57 | 58 | private: 59 | crc_32c_type crc; 60 | }; 61 | 62 | } 63 | } 64 | 65 | #else 66 | namespace Hdfs { 67 | namespace Internal { 68 | 69 | class SWCrc32c: public Checksum { 70 | public: 71 | SWCrc32c() : 72 | crc(0xFFFFFFFF) { 73 | } 74 | 75 | uint32_t getValue() { 76 | return ~crc; 77 | } 78 | 79 | void reset() { 80 | crc = 0xFFFFFFFF; 81 | } 82 | 83 | void update(const void * b, int len); 84 | 85 | ~SWCrc32c() { 86 | } 87 | 88 | private: 89 | uint32_t crc; 90 | }; 91 | 92 | } 93 | } 94 | #endif 95 | 96 | #endif /* _HDFS_LIBHDFS3_COMMON_SWCRC32C_H_ */ 97 | -------------------------------------------------------------------------------- /libhdfs3/src/common/StackPrinter.h: -------------------------------------------------------------------------------- 1 | /******************************************************************** 2 | * 2014 - 3 | * open source under Apache License Version 2.0 4 | ********************************************************************/ 5 | /** 6 | * Licensed to the Apache Software Foundation (ASF) under one 7 | * or more contributor license agreements. See the NOTICE file 8 | * distributed with this work for additional information 9 | * regarding copyright ownership. 
The ASF licenses this file 10 | * to you under the Apache License, Version 2.0 (the 11 | * "License"); you may not use this file except in compliance 12 | * with the License. You may obtain a copy of the License at 13 | * 14 | * http://www.apache.org/licenses/LICENSE-2.0 15 | * 16 | * Unless required by applicable law or agreed to in writing, software 17 | * distributed under the License is distributed on an "AS IS" BASIS, 18 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 19 | * See the License for the specific language governing permissions and 20 | * limitations under the License. 21 | */ 22 | #ifndef _HDFS_LIBHDFS3_COMMON_STACK_PRINTER_H_ 23 | #define _HDFS_LIBHDFS3_COMMON_STACK_PRINTER_H_ 24 | 25 | #include "platform.h" 26 | 27 | #include 28 | 29 | #ifndef DEFAULT_STACK_PREFIX 30 | #define DEFAULT_STACK_PREFIX "\t@\t" 31 | #endif 32 | 33 | namespace Hdfs { 34 | namespace Internal { 35 | 36 | extern const std::string PrintStack(int skip, int maxDepth); 37 | 38 | } 39 | } 40 | 41 | #endif /* _HDFS_LIBHDFS3_COMMON_STACK_PRINTER_H_ */ 42 | -------------------------------------------------------------------------------- /libhdfs3/src/common/Thread.cpp: -------------------------------------------------------------------------------- 1 | /******************************************************************** 2 | * 2014 - 3 | * open source under Apache License Version 2.0 4 | ********************************************************************/ 5 | /** 6 | * Licensed to the Apache Software Foundation (ASF) under one 7 | * or more contributor license agreements. See the NOTICE file 8 | * distributed with this work for additional information 9 | * regarding copyright ownership. The ASF licenses this file 10 | * to you under the Apache License, Version 2.0 (the 11 | * "License"); you may not use this file except in compliance 12 | * with the License. 
You may obtain a copy of the License at 13 | * 14 | * http://www.apache.org/licenses/LICENSE-2.0 15 | * 16 | * Unless required by applicable law or agreed to in writing, software 17 | * distributed under the License is distributed on an "AS IS" BASIS, 18 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 19 | * See the License for the specific language governing permissions and 20 | * limitations under the License. 21 | */ 22 | 23 | #include 24 | 25 | #include "Thread.h" 26 | 27 | namespace Hdfs { 28 | namespace Internal { 29 | 30 | sigset_t ThreadBlockSignal() { 31 | sigset_t sigs; 32 | sigset_t oldMask; 33 | sigemptyset(&sigs); 34 | sigaddset(&sigs, SIGHUP); 35 | sigaddset(&sigs, SIGINT); 36 | sigaddset(&sigs, SIGTERM); 37 | sigaddset(&sigs, SIGUSR1); 38 | sigaddset(&sigs, SIGUSR2); 39 | pthread_sigmask(SIG_BLOCK, &sigs, &oldMask); 40 | return oldMask; 41 | } 42 | 43 | void ThreadUnBlockSignal(sigset_t sigs) { 44 | pthread_sigmask(SIG_SETMASK, &sigs, 0); 45 | } 46 | 47 | } 48 | } 49 | -------------------------------------------------------------------------------- /libhdfs3/src/common/Unordered.h: -------------------------------------------------------------------------------- 1 | /******************************************************************** 2 | * 2014 - 3 | * open source under Apache License Version 2.0 4 | ********************************************************************/ 5 | /** 6 | * Licensed to the Apache Software Foundation (ASF) under one 7 | * or more contributor license agreements. See the NOTICE file 8 | * distributed with this work for additional information 9 | * regarding copyright ownership. The ASF licenses this file 10 | * to you under the Apache License, Version 2.0 (the 11 | * "License"); you may not use this file except in compliance 12 | * with the License. 
You may obtain a copy of the License at 13 | * 14 | * http://www.apache.org/licenses/LICENSE-2.0 15 | * 16 | * Unless required by applicable law or agreed to in writing, software 17 | * distributed under the License is distributed on an "AS IS" BASIS, 18 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 19 | * See the License for the specific language governing permissions and 20 | * limitations under the License. 21 | */ 22 | #ifndef _HDFS_LIBHDFS3_COMMON_UNORDEREDMAP_H_ 23 | #define _HDFS_LIBHDFS3_COMMON_UNORDEREDMAP_H_ 24 | 25 | #include "platform.h" 26 | 27 | #ifdef NEED_BOOST 28 | 29 | #include 30 | #include 31 | 32 | namespace Hdfs { 33 | namespace Internal { 34 | 35 | using boost::unordered_map; 36 | using boost::unordered_set; 37 | 38 | } 39 | } 40 | 41 | #else 42 | 43 | #include 44 | #include 45 | 46 | namespace Hdfs { 47 | namespace Internal { 48 | 49 | using std::unordered_map; 50 | using std::unordered_set; 51 | 52 | } 53 | } 54 | #endif 55 | 56 | #endif /* _HDFS_LIBHDFS3_COMMON_UNORDEREDMAP_H_ */ 57 | -------------------------------------------------------------------------------- /libhdfs3/src/common/WritableUtils.h: -------------------------------------------------------------------------------- 1 | /******************************************************************** 2 | * 2014 - 3 | * open source under Apache License Version 2.0 4 | ********************************************************************/ 5 | /** 6 | * Licensed to the Apache Software Foundation (ASF) under one 7 | * or more contributor license agreements. See the NOTICE file 8 | * distributed with this work for additional information 9 | * regarding copyright ownership. The ASF licenses this file 10 | * to you under the Apache License, Version 2.0 (the 11 | * "License"); you may not use this file except in compliance 12 | * with the License. 
You may obtain a copy of the License at 13 | * 14 | * http://www.apache.org/licenses/LICENSE-2.0 15 | * 16 | * Unless required by applicable law or agreed to in writing, software 17 | * distributed under the License is distributed on an "AS IS" BASIS, 18 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 19 | * See the License for the specific language governing permissions and 20 | * limitations under the License. 21 | */ 22 | #ifndef _HDFS_LIBHDFS_3_UTIL_WritableUtils_H_ 23 | #define _HDFS_LIBHDFS_3_UTIL_WritableUtils_H_ 24 | 25 | #include 26 | 27 | namespace Hdfs { 28 | namespace Internal { 29 | 30 | class WritableUtils { 31 | public: 32 | WritableUtils(char * b, size_t l); 33 | 34 | int32_t ReadInt32(); 35 | 36 | int64_t ReadInt64(); 37 | 38 | void ReadRaw(char * buf, size_t size); 39 | 40 | std::string ReadText(); 41 | 42 | int readByte(); 43 | 44 | size_t WriteInt32(int32_t value); 45 | 46 | size_t WriteInt64(int64_t value); 47 | 48 | size_t WriteRaw(const char * buf, size_t size); 49 | 50 | size_t WriteText(const std::string & str); 51 | 52 | private: 53 | int decodeWritableUtilsSize(int value); 54 | 55 | void writeByte(int val); 56 | 57 | bool isNegativeWritableUtils(int value); 58 | 59 | int32_t ReadBigEndian32(); 60 | 61 | private: 62 | char * buffer; 63 | size_t len; 64 | size_t current; 65 | }; 66 | 67 | } 68 | } 69 | #endif /* _HDFS_LIBHDFS_3_UTIL_WritableUtils_H_ */ 70 | -------------------------------------------------------------------------------- /libhdfs3/src/common/WriteBuffer.cpp: -------------------------------------------------------------------------------- 1 | /******************************************************************** 2 | * 2014 - 3 | * open source under Apache License Version 2.0 4 | ********************************************************************/ 5 | /** 6 | * Licensed to the Apache Software Foundation (ASF) under one 7 | * or more contributor license agreements. 
See the NOTICE file 8 | * distributed with this work for additional information 9 | * regarding copyright ownership. The ASF licenses this file 10 | * to you under the Apache License, Version 2.0 (the 11 | * "License"); you may not use this file except in compliance 12 | * with the License. You may obtain a copy of the License at 13 | * 14 | * http://www.apache.org/licenses/LICENSE-2.0 15 | * 16 | * Unless required by applicable law or agreed to in writing, software 17 | * distributed under the License is distributed on an "AS IS" BASIS, 18 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 19 | * See the License for the specific language governing permissions and 20 | * limitations under the License. 21 | */ 22 | #include "WriteBuffer.h" 23 | 24 | #include 25 | 26 | using namespace google::protobuf::io; 27 | using google::protobuf::uint8; 28 | 29 | namespace Hdfs { 30 | namespace Internal { 31 | 32 | #define WRITEBUFFER_INIT_SIZE 64 33 | 34 | WriteBuffer::WriteBuffer() : 35 | size(0), buffer(WRITEBUFFER_INIT_SIZE) { 36 | } 37 | 38 | WriteBuffer::~WriteBuffer() { 39 | } 40 | 41 | void WriteBuffer::writeVarint32(int32_t value, size_t pos) { 42 | char buffer[5]; 43 | uint8 * end = CodedOutputStream::WriteVarint32ToArray(value, 44 | reinterpret_cast(buffer)); 45 | write(buffer, reinterpret_cast(end) - buffer, pos); 46 | } 47 | 48 | char * WriteBuffer::alloc(size_t offset, size_t s) { 49 | assert(offset <= size && size <= buffer.size()); 50 | 51 | if (offset > size) { 52 | return NULL; 53 | } 54 | 55 | size_t target = offset + s; 56 | 57 | if (target >= buffer.size()) { 58 | target = target > 2 * buffer.size() ? 
target : 2 * buffer.size(); 59 | buffer.resize(target); 60 | } 61 | 62 | size = offset + s; 63 | return &buffer[offset]; 64 | } 65 | 66 | void WriteBuffer::write(const void * bytes, size_t s, size_t pos) { 67 | assert(NULL != bytes); 68 | assert(pos <= size && pos < buffer.size()); 69 | char * p = alloc(size, s); 70 | memcpy(p, bytes, s); 71 | } 72 | 73 | } 74 | } 75 | -------------------------------------------------------------------------------- /libhdfs3/src/libhdfs3.pc.in: -------------------------------------------------------------------------------- 1 | # Licensed to the Apache Software Foundation (ASF) under one 2 | # or more contributor license agreements. See the NOTICE file 3 | # distributed with this work for additional information 4 | # regarding copyright ownership. The ASF licenses this file 5 | # to you under the Apache License, Version 2.0 (the 6 | # "License"); you may not use this file except in compliance 7 | # with the License. You may obtain a copy of the License at 8 | # 9 | # http://www.apache.org/licenses/LICENSE-2.0 10 | # 11 | # Unless required by applicable law or agreed to in writing, 12 | # software distributed under the License is distributed on an 13 | # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY 14 | # KIND, either express or implied. See the License for the 15 | # specific language governing permissions and limitations 16 | # under the License. 
17 | 18 | prefix=/usr 19 | exec_prefix=${prefix} 20 | libdir=${prefix}/lib 21 | includedir=${prefix}/include 22 | 23 | Name: libhdfs 24 | Description: Native C/C++ HDFS client 25 | Version: @libhdfs3_VERSION_STRING@ 26 | Libs: -L${libdir} -lhdfs3 27 | Cflags: -I${includedir} 28 | -------------------------------------------------------------------------------- /libhdfs3/src/network/Syscall.h: -------------------------------------------------------------------------------- 1 | /******************************************************************** 2 | * 2014 - 3 | * open source under Apache License Version 2.0 4 | ********************************************************************/ 5 | /** 6 | * Licensed to the Apache Software Foundation (ASF) under one 7 | * or more contributor license agreements. See the NOTICE file 8 | * distributed with this work for additional information 9 | * regarding copyright ownership. The ASF licenses this file 10 | * to you under the Apache License, Version 2.0 (the 11 | * "License"); you may not use this file except in compliance 12 | * with the License. You may obtain a copy of the License at 13 | * 14 | * http://www.apache.org/licenses/LICENSE-2.0 15 | * 16 | * Unless required by applicable law or agreed to in writing, software 17 | * distributed under the License is distributed on an "AS IS" BASIS, 18 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 19 | * See the License for the specific language governing permissions and 20 | * limitations under the License. 
21 | */ 22 | #ifndef _HDFS_LIBHDFS3_NETWORK_SYSCALL_H_ 23 | #define _HDFS_LIBHDFS3_NETWORK_SYSCALL_H_ 24 | 25 | #include 26 | #include 27 | #include 28 | #include 29 | #include 30 | 31 | namespace System { 32 | 33 | using ::recv; 34 | using ::send; 35 | using ::getaddrinfo; 36 | using ::freeaddrinfo; 37 | using ::socket; 38 | using ::connect; 39 | using ::getpeername; 40 | using ::fcntl; 41 | using ::setsockopt; 42 | using ::poll; 43 | using ::shutdown; 44 | using ::close; 45 | using ::recvmsg; 46 | 47 | } 48 | 49 | #ifdef MOCK 50 | 51 | #include "MockSystem.h" 52 | namespace HdfsSystem = MockSystem; 53 | 54 | #else 55 | 56 | namespace HdfsSystem = System; 57 | 58 | #endif 59 | 60 | #endif /* _HDFS_LIBHDFS3_NETWORK_SYSCALL_H_ */ 61 | -------------------------------------------------------------------------------- /libhdfs3/src/platform.h.in: -------------------------------------------------------------------------------- 1 | /* 2 | * Licensed to the Apache Software Foundation (ASF) under one 3 | * or more contributor license agreements. See the NOTICE file 4 | * distributed with this work for additional information 5 | * regarding copyright ownership. The ASF licenses this file 6 | * to you under the Apache License, Version 2.0 (the 7 | * "License"); you may not use this file except in compliance 8 | * with the License. You may obtain a copy of the License at 9 | * 10 | * http://www.apache.org/licenses/LICENSE-2.0 11 | * 12 | * Unless required by applicable law or agreed to in writing, 13 | * software distributed under the License is distributed on an 14 | * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY 15 | * KIND, either express or implied. See the License for the 16 | * specific language governing permissions and limitations 17 | * under the License. 
18 | */ 19 | 20 | #define THREAD_LOCAL __thread 21 | #define ATTRIBUTE_NORETURN __attribute__ ((noreturn)) 22 | #define ATTRIBUTE_NOINLINE __attribute__ ((noinline)) 23 | 24 | #define GCC_VERSION (__GNUC__ * 10000 \ 25 | + __GNUC_MINOR__ * 100 \ 26 | + __GNUC_PATCHLEVEL__) 27 | 28 | #cmakedefine LIBUNWIND_FOUND 29 | #cmakedefine HAVE_DLADDR 30 | #cmakedefine OS_LINUX 31 | #cmakedefine OS_MACOSX 32 | #cmakedefine ENABLE_FRAME_POINTER 33 | #cmakedefine HAVE_SYMBOLIZE 34 | #cmakedefine NEED_BOOST 35 | #cmakedefine STRERROR_R_RETURN_INT 36 | #cmakedefine HAVE_STEADY_CLOCK 37 | #cmakedefine HAVE_NESTED_EXCEPTION 38 | #cmakedefine HAVE_BOOST_CHRONO 39 | #cmakedefine HAVE_STD_CHRONO 40 | #cmakedefine HAVE_BOOST_ATOMIC 41 | #cmakedefine HAVE_STD_ATOMIC 42 | 43 | // defined by gcc 44 | #if defined(__ELF__) && defined(OS_LINUX) 45 | # define HAVE_SYMBOLIZE 46 | #elif defined(OS_MACOSX) && defined(HAVE_DLADDR) 47 | // Use dladdr to symbolize. 48 | # define HAVE_SYMBOLIZE 49 | #endif 50 | 51 | #define STACK_LENGTH 64 52 | -------------------------------------------------------------------------------- /libhdfs3/src/proto/IpcConnectionContext.proto: -------------------------------------------------------------------------------- 1 | /** 2 | * Licensed to the Apache Software Foundation (ASF) under one 3 | * or more contributor license agreements. See the NOTICE file 4 | * distributed with this work for additional information 5 | * regarding copyright ownership. The ASF licenses this file 6 | * to you under the Apache License, Version 2.0 (the 7 | * "License"); you may not use this file except in compliance 8 | * with the License. You may obtain a copy of the License at 9 | * 10 | * http://www.apache.org/licenses/LICENSE-2.0 11 | * 12 | * Unless required by applicable law or agreed to in writing, software 13 | * distributed under the License is distributed on an "AS IS" BASIS, 14 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
15 | * See the License for the specific language governing permissions and 16 | * limitations under the License. 17 | */ 18 | 19 | /** 20 | * These .proto interfaces are private and stable. 21 | * Please see http://wiki.apache.org/hadoop/Compatibility 22 | * for what changes are allowed for a *stable* .proto interface. 23 | */ 24 | 25 | option java_package = "org.apache.hadoop.ipc.protobuf"; 26 | option java_outer_classname = "IpcConnectionContextProtos"; 27 | option java_generate_equals_and_hash = true; 28 | 29 | package Hdfs.Internal; 30 | 31 | 32 | /** 33 | * Spec for UserInformationProto is specified in ProtoUtil#makeIpcConnectionContext 34 | */ 35 | message UserInformationProto { 36 | optional string effectiveUser = 1; 37 | optional string realUser = 2; 38 | } 39 | 40 | /** 41 | * The connection context is sent as part of the connection establishment. 42 | * It establishes the context for ALL Rpc calls within the connection. 43 | */ 44 | message IpcConnectionContextProto { 45 | // UserInfo beyond what is determined as part of security handshake 46 | // at connection time (kerberos, tokens etc). 47 | optional UserInformationProto userInfo = 2; 48 | 49 | // Protocol name for next rpc layer. 50 | // The client created a proxy with this protocol name 51 | optional string protocol = 3; 52 | } 53 | -------------------------------------------------------------------------------- /libhdfs3/src/proto/Security.proto: -------------------------------------------------------------------------------- 1 | /** 2 | * Licensed to the Apache Software Foundation (ASF) under one 3 | * or more contributor license agreements. See the NOTICE file 4 | * distributed with this work for additional information 5 | * regarding copyright ownership. The ASF licenses this file 6 | * to you under the Apache License, Version 2.0 (the 7 | * "License"); you may not use this file except in compliance 8 | * with the License. 
You may obtain a copy of the License at 9 | * 10 | * http://www.apache.org/licenses/LICENSE-2.0 11 | * 12 | * Unless required by applicable law or agreed to in writing, software 13 | * distributed under the License is distributed on an "AS IS" BASIS, 14 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 15 | * See the License for the specific language governing permissions and 16 | * limitations under the License. 17 | */ 18 | 19 | /** 20 | * These .proto interfaces are private and stable. 21 | * Please see http://wiki.apache.org/hadoop/Compatibility 22 | * for what changes are allowed for a *stable* .proto interface. 23 | */ 24 | 25 | option java_package = "org.apache.hadoop.security.proto"; 26 | option java_outer_classname = "SecurityProtos"; 27 | option java_generic_services = true; 28 | option java_generate_equals_and_hash = true; 29 | package Hdfs.Internal; 30 | 31 | /** 32 | * Security token identifier 33 | */ 34 | message TokenProto { 35 | required bytes identifier = 1; 36 | required bytes password = 2; 37 | required string kind = 3; 38 | required string service = 4; 39 | } 40 | 41 | message GetDelegationTokenRequestProto { 42 | required string renewer = 1; 43 | } 44 | 45 | message GetDelegationTokenResponseProto { 46 | optional TokenProto token = 1; 47 | } 48 | 49 | message RenewDelegationTokenRequestProto { 50 | required TokenProto token = 1; 51 | } 52 | 53 | message RenewDelegationTokenResponseProto { 54 | required uint64 newExpiryTime = 1; 55 | } 56 | 57 | message CancelDelegationTokenRequestProto { 58 | required TokenProto token = 1; 59 | } 60 | 61 | message CancelDelegationTokenResponseProto { // void response 62 | } 63 | 64 | -------------------------------------------------------------------------------- /libhdfs3/src/proto/encryption.proto: -------------------------------------------------------------------------------- 1 | /** 2 | * Licensed to the Apache Software Foundation (ASF) under one 3 | * or more contributor 
license agreements. See the NOTICE file 4 | * distributed with this work for additional information 5 | * regarding copyright ownership. The ASF licenses this file 6 | * to you under the Apache License, Version 2.0 (the 7 | * "License"); you may not use this file except in compliance 8 | * with the License. You may obtain a copy of the License at 9 | * 10 | * http://www.apache.org/licenses/LICENSE-2.0 11 | * 12 | * Unless required by applicable law or agreed to in writing, software 13 | * distributed under the License is distributed on an "AS IS" BASIS, 14 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 15 | * See the License for the specific language governing permissions and 16 | * limitations under the License. 17 | */ 18 | 19 | /** 20 | * These .proto interfaces are private and stable. 21 | * Please see http://wiki.apache.org/hadoop/Compatibility 22 | * for what changes are allowed for a *stable* .proto interface. 23 | */ 24 | 25 | /** This file contains protocol buffers that are used throughout HDFS -- i.e. 26 | * by the client, server, and data transfer protocols. 
27 | */ 28 | 29 | option java_package = "org.apache.hadoop.hdfs.protocol.proto"; 30 | option java_outer_classname = "EncryptionZonesProtos"; 31 | option java_generate_equals_and_hash = true; 32 | package Hdfs.Internal; 33 | 34 | import "hdfs.proto"; 35 | 36 | message CreateEncryptionZoneRequestProto { 37 | required string src = 1; 38 | optional string keyName = 2; 39 | } 40 | 41 | message CreateEncryptionZoneResponseProto { 42 | } 43 | 44 | message ListEncryptionZonesRequestProto { 45 | required int64 id = 1; 46 | } 47 | 48 | message EncryptionZoneProto { 49 | required int64 id = 1; 50 | required string path = 2; 51 | required CipherSuiteProto suite = 3; 52 | required CryptoProtocolVersionProto cryptoProtocolVersion = 4; 53 | required string keyName = 5; 54 | } 55 | 56 | message ListEncryptionZonesResponseProto { 57 | repeated EncryptionZoneProto zones = 1; 58 | required bool hasMore = 2; 59 | } 60 | 61 | message GetEZForPathRequestProto { 62 | required string src = 1; 63 | } 64 | 65 | message GetEZForPathResponseProto { 66 | optional EncryptionZoneProto zone = 1; 67 | } 68 | -------------------------------------------------------------------------------- /libhdfs3/src/rpc/RpcAuth.cpp: -------------------------------------------------------------------------------- 1 | /******************************************************************** 2 | * 2014 - 3 | * open source under Apache License Version 2.0 4 | ********************************************************************/ 5 | /** 6 | * Licensed to the Apache Software Foundation (ASF) under one 7 | * or more contributor license agreements. See the NOTICE file 8 | * distributed with this work for additional information 9 | * regarding copyright ownership. The ASF licenses this file 10 | * to you under the Apache License, Version 2.0 (the 11 | * "License"); you may not use this file except in compliance 12 | * with the License. 
You may obtain a copy of the License at 13 | * 14 | * http://www.apache.org/licenses/LICENSE-2.0 15 | * 16 | * Unless required by applicable law or agreed to in writing, software 17 | * distributed under the License is distributed on an "AS IS" BASIS, 18 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 19 | * See the License for the specific language governing permissions and 20 | * limitations under the License. 21 | */ 22 | #include "RpcAuth.h" 23 | 24 | #include "Exception.h" 25 | #include "ExceptionInternal.h" 26 | 27 | namespace Hdfs { 28 | namespace Internal { 29 | 30 | AuthMethod RpcAuth::ParseMethod(const std::string & str) { 31 | if (0 == strcasecmp(str.c_str(), "SIMPLE")) { 32 | return AuthMethod::SIMPLE; 33 | } else if (0 == strcasecmp(str.c_str(), "KERBEROS")) { 34 | return AuthMethod::KERBEROS; 35 | } else if (0 == strcasecmp(str.c_str(), "TOKEN")) { 36 | return AuthMethod::TOKEN; 37 | } else { 38 | THROW(InvalidParameter, "RpcAuth: Unknown auth mechanism type: %s", 39 | str.c_str()); 40 | } 41 | } 42 | 43 | size_t RpcAuth::hash_value() const { 44 | size_t values[] = { Int32Hasher(method), user.hash_value() }; 45 | return CombineHasher(values, sizeof(values) / sizeof(values[0])); 46 | } 47 | 48 | } 49 | } 50 | -------------------------------------------------------------------------------- /libhdfs3/src/rpc/RpcCall.h: -------------------------------------------------------------------------------- 1 | /******************************************************************** 2 | * 2014 - 3 | * open source under Apache License Version 2.0 4 | ********************************************************************/ 5 | /** 6 | * Licensed to the Apache Software Foundation (ASF) under one 7 | * or more contributor license agreements. See the NOTICE file 8 | * distributed with this work for additional information 9 | * regarding copyright ownership. 
The ASF licenses this file 10 | * to you under the Apache License, Version 2.0 (the 11 | * "License"); you may not use this file except in compliance 12 | * with the License. You may obtain a copy of the License at 13 | * 14 | * http://www.apache.org/licenses/LICENSE-2.0 15 | * 16 | * Unless required by applicable law or agreed to in writing, software 17 | * distributed under the License is distributed on an "AS IS" BASIS, 18 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 19 | * See the License for the specific language governing permissions and 20 | * limitations under the License. 21 | */ 22 | #ifndef _HDFS_LIBHDFS3_RPC_RPCCALL_H_ 23 | #define _HDFS_LIBHDFS3_RPC_RPCCALL_H_ 24 | 25 | #include "google/protobuf/message.h" 26 | 27 | namespace Hdfs { 28 | namespace Internal { 29 | 30 | class RpcCall { 31 | public: 32 | RpcCall(bool idemp, std::string n, google::protobuf::Message * req, 33 | google::protobuf::Message * resp) : 34 | idempotent(idemp), name(n), request(req), response(resp) { 35 | } 36 | 37 | bool isIdempotent() const { 38 | return idempotent; 39 | } 40 | 41 | const char * getName() const { 42 | return name.c_str(); 43 | } 44 | 45 | void setIdempotent(bool idempotent) { 46 | this->idempotent = idempotent; 47 | } 48 | 49 | void setName(const std::string & name) { 50 | this->name = name; 51 | } 52 | 53 | google::protobuf::Message * getRequest() { 54 | return request; 55 | } 56 | 57 | void setRequest(google::protobuf::Message * request) { 58 | this->request = request; 59 | } 60 | 61 | google::protobuf::Message * getResponse() { 62 | return response; 63 | } 64 | 65 | void setResponse(google::protobuf::Message * response) { 66 | this->response = response; 67 | } 68 | 69 | private: 70 | bool idempotent; 71 | std::string name; 72 | google::protobuf::Message * request; 73 | google::protobuf::Message * response; 74 | }; 75 | 76 | } 77 | } 78 | #endif /* _HDFS_LIBHDFS3_RPC_RPCCALL_H_ */ 79 | 
--------------------------------------------------------------------------------
/libhdfs3/src/rpc/RpcChannelKey.cpp:
--------------------------------------------------------------------------------
/********************************************************************
 * 2014 -
 * open source under Apache License Version 2.0
 ********************************************************************/
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include "RpcChannelKey.h"

// NOTE(review): the target of this #include was lost in text extraction --
// restore from upstream.
#include

namespace Hdfs {
namespace Internal {

// Builds the cache key identifying an RPC channel.  If the authenticated
// user holds a delegation token matching this protocol's token kind and the
// server's token service, a private copy of that token becomes part of the
// key as well, so channels with different tokens are never shared.
// NOTE(review): the template argument of shared_ptr below was eaten by text
// extraction; it should read shared_ptr<Token> (a Token is being copied in).
RpcChannelKey::RpcChannelKey(const RpcAuth & a, const RpcProtocolInfo & p,
                             const RpcServerInfo & s, const RpcConfig & c) :
    auth(a), conf(c), protocol(p), server(s) {
    const Token * temp = auth.getUser().selectToken(protocol.getTokenKind(),
                         server.getTokenService());

    if (temp) {
        token = shared_ptr (new Token(*temp));
    }
}

// Combines the hash of every key component; an absent token hashes as 0.
size_t RpcChannelKey::hash_value() const {
    size_t tokenHash = token ? token->hash_value() : 0;
    size_t values[] = { auth.hash_value(), protocol.hash_value(),
                        server.hash_value(), conf.hash_value(), tokenHash
                      };
    return CombineHasher(values, sizeof(values) / sizeof(values[0]));
}

}
}
--------------------------------------------------------------------------------
/libhdfs3/src/rpc/RpcChannelKey.h:
--------------------------------------------------------------------------------
/********************************************************************
 * 2014 -
 * open source under Apache License Version 2.0
 ********************************************************************/
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
21 | */ 22 | #ifndef _HDFS_LIBHDFS3_RPC_RPCCHANNELKEY_H_ 23 | #define _HDFS_LIBHDFS3_RPC_RPCCHANNELKEY_H_ 24 | 25 | #include "client/Token.h" 26 | #include "Hash.h" 27 | #include "RpcAuth.h" 28 | #include "RpcConfig.h" 29 | #include "RpcProtocolInfo.h" 30 | #include "RpcServerInfo.h" 31 | #include 32 | 33 | namespace Hdfs { 34 | namespace Internal { 35 | 36 | class RpcChannelKey { 37 | public: 38 | RpcChannelKey(const RpcAuth & a, const RpcProtocolInfo & p, 39 | const RpcServerInfo & s, const RpcConfig & c); 40 | 41 | public: 42 | size_t hash_value() const; 43 | 44 | const RpcAuth & getAuth() const { 45 | return auth; 46 | } 47 | 48 | const RpcConfig & getConf() const { 49 | return conf; 50 | } 51 | 52 | const RpcProtocolInfo & getProtocol() const { 53 | return protocol; 54 | } 55 | 56 | const RpcServerInfo & getServer() const { 57 | return server; 58 | } 59 | 60 | bool operator ==(const RpcChannelKey & other) const { 61 | return this->auth == other.auth && this->protocol == other.protocol 62 | && this->server == other.server && this->conf == other.conf 63 | && ((token == NULL && other.token == NULL) 64 | || (token && other.token && *token == *other.token)); 65 | } 66 | 67 | const Token & getToken() const { 68 | assert(token != NULL); 69 | return *token; 70 | } 71 | 72 | bool hasToken() { 73 | return token != NULL; 74 | } 75 | 76 | private: 77 | const RpcAuth auth; 78 | const RpcConfig conf; 79 | const RpcProtocolInfo protocol; 80 | const RpcServerInfo server; 81 | shared_ptr token; 82 | }; 83 | 84 | } 85 | } 86 | 87 | HDFS_HASH_DEFINE(::Hdfs::Internal::RpcChannelKey); 88 | 89 | #endif /* _HDFS_LIBHDFS3_RPC_RPCCHANNELKEY_H_ */ 90 | -------------------------------------------------------------------------------- /libhdfs3/src/rpc/RpcConfig.cpp: -------------------------------------------------------------------------------- 1 | /******************************************************************** 2 | * 2014 - 3 | * open source under Apache License Version 2.0 4 | 
********************************************************************/
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include "RpcConfig.h"

// NOTE(review): the target of this #include was lost in text extraction --
// restore from upstream.
#include

namespace Hdfs {
namespace Internal {

// Hashes every socket/RPC tuning knob so RpcConfig can take part in
// RpcChannelKey equality/hash: channels with different settings must
// not be shared.
size_t RpcConfig::hash_value() const {
    size_t values[] = { Int32Hasher(maxIdleTime), Int32Hasher(pingTimeout),
                        Int32Hasher(connectTimeout), Int32Hasher(readTimeout), Int32Hasher(
                            writeTimeout), Int32Hasher(maxRetryOnConnect), Int32Hasher(
                            lingerTimeout), Int32Hasher(rpcTimeout), BoolHasher(tcpNoDelay)
                      };
    return CombineHasher(values, sizeof(values) / sizeof(values[0]));
}

}
}
--------------------------------------------------------------------------------
/libhdfs3/src/rpc/RpcContentWrapper.cpp:
--------------------------------------------------------------------------------
/********************************************************************
 * 2014 -
 * open source under Apache License Version 2.0
 ********************************************************************/
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license
agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
// NOTE(review): the target of this #include was lost in text extraction;
// CodedOutputStream below suggests <google/protobuf/io/coded_stream.h> --
// verify against upstream.
#include

#include "RpcContentWrapper.h"

using namespace ::google::protobuf;
using namespace ::google::protobuf::io;

namespace Hdfs {
namespace Internal {

// Wraps an RPC header message plus an optional payload message.
// Neither message is owned by this object.
RpcContentWrapper::RpcContentWrapper(Message * header, Message * msg) :
    header(header), msg(msg) {
}

// Total serialized size: each message is preceded by its varint32-encoded
// length; msg may be NULL, in which case only the header is counted.
int RpcContentWrapper::getLength() {
    int headerLen, msgLen = 0;
    headerLen = header->ByteSize();
    msgLen = msg == NULL ? 0 : msg->ByteSize();
    return headerLen + CodedOutputStream::VarintSize32(headerLen)
           + (msg == NULL ?
              0 : msgLen + CodedOutputStream::VarintSize32(msgLen));
}

// Appends varint32(length) followed by the serialized body for the header,
// then for msg when it is non-NULL.  buffer.alloc(size) reserves the exact
// number of bytes SerializeToArray will write.
void RpcContentWrapper::writeTo(WriteBuffer & buffer) {
    int size = header->ByteSize();
    buffer.writeVarint32(size);
    header->SerializeToArray(buffer.alloc(size), size);

    if (msg != NULL) {
        size = msg->ByteSize();
        buffer.writeVarint32(size);
        msg->SerializeToArray(buffer.alloc(size), size);
    }
}

}
}

--------------------------------------------------------------------------------
/libhdfs3/src/rpc/RpcContentWrapper.h:
--------------------------------------------------------------------------------
/********************************************************************
 * 2014 -
 * open source under Apache License Version 2.0
 ********************************************************************/
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
*/
#ifndef _HDFS_LIBHDFS3_RPC_RPCCONTENTWRAPPER_H_
#define _HDFS_LIBHDFS3_RPC_RPCCONTENTWRAPPER_H_

// NOTE(review): the target of this #include was lost in text extraction;
// presumably <google/protobuf/message.h> -- verify against upstream.
#include

#include "WriteBuffer.h"

namespace Hdfs {
namespace Internal {

/**
 * Pairs an RPC header message with an optional payload message and knows
 * how to serialize both (each preceded by a varint32 length) into a
 * WriteBuffer.  The messages are borrowed, never owned.
 */
class RpcContentWrapper {
public:
    RpcContentWrapper(::google::protobuf::Message * header,
                      ::google::protobuf::Message * msg);

    // Serialized size of header (+ msg when non-NULL), varint lengths included.
    int getLength();
    // Appends varint32(length) + body for header, then msg when non-NULL.
    void writeTo(WriteBuffer & buffer);

public:
    ::google::protobuf::Message * header;  // not owned
    ::google::protobuf::Message * msg;     // not owned; may be NULL
};

}
}

#endif /* _HDFS_LIBHDFS3_RPC_RPCCONTENTWRAPPER_H_ */
--------------------------------------------------------------------------------
/libhdfs3/src/rpc/RpcProtocolInfo.cpp:
--------------------------------------------------------------------------------
/********************************************************************
 * 2014 -
 * open source under Apache License Version 2.0
 ********************************************************************/
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
*/
#include "RpcProtocolInfo.h"

namespace Hdfs {
namespace Internal {

// Hashes the protocol identity (version + protocol name + token kind) so
// RpcProtocolInfo can participate in hashed keys such as RpcChannelKey.
size_t RpcProtocolInfo::hash_value() const {
    size_t values[] = { Int32Hasher(version), StringHasher(protocol), StringHasher(tokenKind) };
    return CombineHasher(values, sizeof(values) / sizeof(values[0]));
}

}
}
--------------------------------------------------------------------------------
/libhdfs3/src/rpc/RpcProtocolInfo.h:
--------------------------------------------------------------------------------
/********************************************************************
 * 2014 -
 * open source under Apache License Version 2.0
 ********************************************************************/
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
*/
#ifndef _HDFS_LIBHDFS3_RPC_RPCPROTOCOLINFO_H_
#define _HDFS_LIBHDFS3_RPC_RPCPROTOCOLINFO_H_

#include "Hash.h"

// NOTE(review): the target of this #include was lost in text extraction;
// std::string is used below, so presumably <string> -- verify against upstream.
#include

namespace Hdfs {
namespace Internal {

/**
 * Identifies an RPC protocol: its version, fully-qualified protocol name,
 * and the kind of delegation token it uses.  Hashable and comparable so it
 * can form part of RpcChannelKey.
 */
class RpcProtocolInfo {
public:
    RpcProtocolInfo(int v, const std::string & p, const std::string & tokenKind) :
        version(v), protocol(p), tokenKind(tokenKind) {
    }

    size_t hash_value() const;

    bool operator ==(const RpcProtocolInfo & other) const {
        return version == other.version && protocol == other.protocol && tokenKind == other.tokenKind;
    }

    const std::string & getProtocol() const {
        return protocol;
    }

    void setProtocol(const std::string & protocol) {
        this->protocol = protocol;
    }

    int getVersion() const {
        return version;
    }

    void setVersion(int version) {
        this->version = version;
    }

    const std::string & getTokenKind() const {
        return tokenKind;
    }

    void setTokenKind(const std::string & tokenKind) {
        this->tokenKind = tokenKind;
    }

private:
    int version;            // protocol version number
    std::string protocol;   // fully-qualified protocol name
    std::string tokenKind;  // delegation-token kind for this protocol

};

}
}

HDFS_HASH_DEFINE(::Hdfs::Internal::RpcProtocolInfo);

#endif /* _HDFS_LIBHDFS3_RPC_RPCPROTOCOLINFO_H_ */
--------------------------------------------------------------------------------
/libhdfs3/src/rpc/RpcServerInfo.cpp:
--------------------------------------------------------------------------------
/********************************************************************
 * 2014 -
 * open source under Apache License Version 2.0
 ********************************************************************/
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.
See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include "RpcServerInfo.h"

// NOTE(review): the target of this #include was lost in text extraction --
// restore from upstream.
#include

namespace Hdfs {
namespace Internal {

// Hashes the server identity (host + port + token service) so RpcServerInfo
// can participate in hashed keys such as RpcChannelKey.  The port is stored
// and hashed as a string.
size_t RpcServerInfo::hash_value() const {
    size_t values[] = { StringHasher(host), StringHasher(port), StringHasher(tokenService) };
    return CombineHasher(values, sizeof(values) / sizeof(values[0]));
}

}
}
--------------------------------------------------------------------------------
/libhdfs3/src/rpc/RpcServerInfo.h:
--------------------------------------------------------------------------------
/********************************************************************
 * 2014 -
 * open source under Apache License Version 2.0
 ********************************************************************/
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.
You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#ifndef _HDFS_LIBHDFS3_RPC_RPCSERVERINFO_H_
#define _HDFS_LIBHDFS3_RPC_RPCSERVERINFO_H_

#include "Hash.h"

// NOTE(review): the targets of these two #includes were lost in text
// extraction; std::stringstream and std::string are used below, so
// presumably <sstream> and <string> -- verify against upstream.
#include
#include

namespace Hdfs {
namespace Internal {

/**
 * Identifies an RPC server endpoint: host, port (kept as a string) and the
 * delegation-token service name.  Hashable and comparable so it can form
 * part of RpcChannelKey.
 */
class RpcServerInfo {
public:

    RpcServerInfo(const std::string & tokenService, const std::string & h, const std::string & p) :
        host(h), port(p), tokenService(tokenService) {
    }

    // Builds from host + numeric port; tokenService is left empty.
    // The classic locale guarantees locale-independent digit formatting.
    RpcServerInfo(const std::string & h, uint32_t p) :
        host(h) {
        std::stringstream ss;
        ss.imbue(std::locale::classic());
        ss << p;
        port = ss.str();
    }

    size_t hash_value() const;

    bool operator ==(const RpcServerInfo & other) const {
        return this->host == other.host && this->port == other.port && tokenService == other.tokenService;
    }

    const std::string & getTokenService() const {
        return tokenService;
    }

    const std::string & getHost() const {
        return host;
    }

    const std::string & getPort() const {
        return port;
    }

    void setTokenService(const std::string & tokenService) {
        this->tokenService = tokenService;
    }

private:
    std::string host;          // server host name or address
    std::string port;          // port, stored as decimal string
    std::string tokenService;  // delegation-token service identifier

};

}
}

HDFS_HASH_DEFINE(::Hdfs::Internal::RpcServerInfo);

#endif /* _HDFS_LIBHDFS3_RPC_RPCSERVERINFO_H_ */
--------------------------------------------------------------------------------
/libhdfs3/src/server/BlockLocalPathInfo.h:
-------------------------------------------------------------------------------- 1 | /******************************************************************** 2 | * 2014 - 3 | * open source under Apache License Version 2.0 4 | ********************************************************************/ 5 | /** 6 | * Licensed to the Apache Software Foundation (ASF) under one 7 | * or more contributor license agreements. See the NOTICE file 8 | * distributed with this work for additional information 9 | * regarding copyright ownership. The ASF licenses this file 10 | * to you under the Apache License, Version 2.0 (the 11 | * "License"); you may not use this file except in compliance 12 | * with the License. You may obtain a copy of the License at 13 | * 14 | * http://www.apache.org/licenses/LICENSE-2.0 15 | * 16 | * Unless required by applicable law or agreed to in writing, software 17 | * distributed under the License is distributed on an "AS IS" BASIS, 18 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 19 | * See the License for the specific language governing permissions and 20 | * limitations under the License. 
*/
#ifndef _HDFS_LIBHDFS3_SERVER_BLOCKLOCALPATHINFO_H_
#define _HDFS_LIBHDFS3_SERVER_BLOCKLOCALPATHINFO_H_

#include "ExtendedBlock.h"

namespace Hdfs {
namespace Internal {

/**
 * Holds the local filesystem paths of a block replica (data file and its
 * checksum/meta file) together with the block identity, as reported by a
 * datanode for short-circuit local reads.
 */
class BlockLocalPathInfo {
public:
    const ExtendedBlock & getBlock() const {
        return block;
    }

    void setBlock(const ExtendedBlock & block) {
        this->block = block;
    }

    // Path to the block's data file; pointer valid while this object lives.
    const char * getLocalBlockPath() const {
        return localBlockPath.c_str();
    }

    void setLocalBlockPath(const char * localBlockPath) {
        this->localBlockPath = localBlockPath;
    }

    // Path to the block's checksum/meta file; same lifetime caveat as above.
    const char * getLocalMetaPath() const {
        return localMetaPath.c_str();
    }

    void setLocalMetaPath(const char * localMetaPath) {
        this->localMetaPath = localMetaPath;
    }

private:
    ExtendedBlock block;         // block identity
    std::string localBlockPath;  // local path of the data file
    std::string localMetaPath;   // local path of the meta/checksum file
};

}
}

#endif /* _HDFS_LIBHDFS3_SERVER_BLOCKLOCALPATHINFO_H_ */
--------------------------------------------------------------------------------
/libhdfs3/src/server/LocatedBlocks.cpp:
--------------------------------------------------------------------------------
/********************************************************************
 * 2014 -
 * open source under Apache License Version 2.0
 ********************************************************************/
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.
You may obtain a copy of the License at 13 | * 14 | * http://www.apache.org/licenses/LICENSE-2.0 15 | * 16 | * Unless required by applicable law or agreed to in writing, software 17 | * distributed under the License is distributed on an "AS IS" BASIS, 18 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 19 | * See the License for the specific language governing permissions and 20 | * limitations under the License. 21 | */ 22 | #include "Exception.h" 23 | #include "ExceptionInternal.h" 24 | #include "LocatedBlock.h" 25 | #include "LocatedBlocks.h" 26 | 27 | #include 28 | #include 29 | #include 30 | 31 | namespace Hdfs { 32 | namespace Internal { 33 | 34 | const LocatedBlock * LocatedBlocksImpl::findBlock(int64_t position) { 35 | if (position < fileLength) { 36 | LocatedBlock target(position); 37 | std::vector::iterator bound; 38 | 39 | if (blocks.empty() || position < blocks.begin()->getOffset()) { 40 | return NULL; 41 | } 42 | 43 | /* 44 | * bound is first block which start offset is larger than 45 | * or equal to position 46 | */ 47 | bound = std::lower_bound(blocks.begin(), blocks.end(), target, 48 | std::less()); 49 | assert(bound == blocks.end() || bound->getOffset() >= position); 50 | LocatedBlock * retval = NULL; 51 | 52 | if (bound == blocks.end()) { 53 | retval = &blocks.back(); 54 | } else if (bound->getOffset() > position) { 55 | assert(bound != blocks.begin()); 56 | --bound; 57 | retval = &(*bound); 58 | } else { 59 | retval = &(*bound); 60 | } 61 | 62 | if (position < retval->getOffset() 63 | || position >= retval->getOffset() + retval->getNumBytes()) { 64 | return NULL; 65 | } 66 | 67 | return retval; 68 | } else { 69 | return lastBlock.get(); 70 | } 71 | } 72 | 73 | } 74 | } 75 | -------------------------------------------------------------------------------- /libhdfs3/src/server/NamenodeInfo.cpp: -------------------------------------------------------------------------------- 1 | 
/******************************************************************** 2 | * 2014 - 3 | * open source under Apache License Version 2.0 4 | ********************************************************************/ 5 | /** 6 | * Licensed to the Apache Software Foundation (ASF) under one 7 | * or more contributor license agreements. See the NOTICE file 8 | * distributed with this work for additional information 9 | * regarding copyright ownership. The ASF licenses this file 10 | * to you under the Apache License, Version 2.0 (the 11 | * "License"); you may not use this file except in compliance 12 | * with the License. You may obtain a copy of the License at 13 | * 14 | * http://www.apache.org/licenses/LICENSE-2.0 15 | * 16 | * Unless required by applicable law or agreed to in writing, software 17 | * distributed under the License is distributed on an "AS IS" BASIS, 18 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 19 | * See the License for the specific language governing permissions and 20 | * limitations under the License. 21 | */ 22 | #include "NamenodeInfo.h" 23 | #include "StringUtil.h" 24 | #include "XmlConfig.h" 25 | 26 | #include 27 | #include 28 | 29 | using namespace Hdfs::Internal; 30 | 31 | namespace Hdfs { 32 | 33 | NamenodeInfo::NamenodeInfo() { 34 | } 35 | 36 | const char * DFS_NAMESERVICES = "dfs.nameservices"; 37 | const char * DFS_NAMENODE_HA = "dfs.ha.namenodes"; 38 | const char * DFS_NAMENODE_RPC_ADDRESS_KEY = "dfs.namenode.rpc-address"; 39 | const char * DFS_NAMENODE_HTTP_ADDRESS_KEY = "dfs.namenode.http-address"; 40 | 41 | std::vector NamenodeInfo::GetHANamenodeInfo( 42 | const std::string & service, const Config & conf) { 43 | std::vector retval; 44 | std::string strNameNodes = StringTrim( 45 | conf.getString(std::string(DFS_NAMENODE_HA) + "." 
+ service)); 46 | std::vector nns = StringSplit(strNameNodes, ","); 47 | retval.resize(nns.size()); 48 | 49 | for (size_t i = 0; i < nns.size(); ++i) { 50 | std::string dfsRpcAddress = StringTrim( 51 | std::string(DFS_NAMENODE_RPC_ADDRESS_KEY) + "." + service + "." 52 | + StringTrim(nns[i])); 53 | std::string dfsHttpAddress = StringTrim( 54 | std::string(DFS_NAMENODE_HTTP_ADDRESS_KEY) + "." + service + "." 55 | + StringTrim(nns[i])); 56 | retval[i].setRpcAddr(StringTrim(conf.getString(dfsRpcAddress, ""))); 57 | retval[i].setHttpAddr(StringTrim(conf.getString(dfsHttpAddress, ""))); 58 | } 59 | 60 | return retval; 61 | } 62 | } 63 | -------------------------------------------------------------------------------- /libhdfs3/src/server/NamenodeInfo.h: -------------------------------------------------------------------------------- 1 | /******************************************************************** 2 | * 2014 - 3 | * open source under Apache License Version 2.0 4 | ********************************************************************/ 5 | /** 6 | * Licensed to the Apache Software Foundation (ASF) under one 7 | * or more contributor license agreements. See the NOTICE file 8 | * distributed with this work for additional information 9 | * regarding copyright ownership. The ASF licenses this file 10 | * to you under the Apache License, Version 2.0 (the 11 | * "License"); you may not use this file except in compliance 12 | * with the License. You may obtain a copy of the License at 13 | * 14 | * http://www.apache.org/licenses/LICENSE-2.0 15 | * 16 | * Unless required by applicable law or agreed to in writing, software 17 | * distributed under the License is distributed on an "AS IS" BASIS, 18 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 19 | * See the License for the specific language governing permissions and 20 | * limitations under the License. 
21 | */ 22 | #ifndef _HDFS_LIBHDFS_SERVER_NAMENODEINFO_H_ 23 | #define _HDFS_LIBHDFS_SERVER_NAMENODEINFO_H_ 24 | 25 | #include "XmlConfig.h" 26 | 27 | #include 28 | #include 29 | 30 | namespace Hdfs { 31 | 32 | class NamenodeInfo { 33 | public: 34 | NamenodeInfo(); 35 | 36 | const std::string & getHttpAddr() const { 37 | return http_addr; 38 | } 39 | 40 | void setHttpAddr(const std::string & httpAddr) { 41 | http_addr = httpAddr; 42 | } 43 | 44 | const std::string & getRpcAddr() const { 45 | return rpc_addr; 46 | } 47 | 48 | void setRpcAddr(const std::string & rpcAddr) { 49 | rpc_addr = rpcAddr; 50 | } 51 | 52 | static std::vector GetHANamenodeInfo(const std::string & service, const Config & conf); 53 | 54 | private: 55 | std::string rpc_addr; 56 | std::string http_addr; 57 | }; 58 | 59 | } 60 | 61 | #endif /* _HDFS_LIBHDFS_SERVER_NAMENODEINFO_H_ */ 62 | -------------------------------------------------------------------------------- /libhdfs3/test/CMakeLists.txt: -------------------------------------------------------------------------------- 1 | CMAKE_MINIMUM_REQUIRED(VERSION 2.8) 2 | 3 | SET(TEST_WORKING_DIR ${CMAKE_CURRENT_SOURCE_DIR}/data/) 4 | ADD_DEFINITIONS(-DGTEST_LANG_CXX11=0 -fno-access-control -DDATA_DIR="${TEST_WORKING_DIR}/") 5 | 6 | ADD_SUBDIRECTORY(function) 7 | ADD_SUBDIRECTORY(unit) 8 | ADD_SUBDIRECTORY(secure) 9 | 10 | IF(TEST_RUNNER) 11 | SEPARATE_ARGUMENTS(TEST_RUNNER_LIST UNIX_COMMAND ${TEST_RUNNER}) 12 | ENDIF(TEST_RUNNER) 13 | 14 | ADD_CUSTOM_TARGET(unittest 15 | COMMAND ${TEST_RUNNER_LIST} ${CMAKE_CURRENT_BINARY_DIR}/unit/unit 16 | DEPENDS unit 17 | WORKING_DIRECTORY ${TEST_WORKING_DIR} 18 | COMMENT "Run Unit Test..." 19 | ) 20 | 21 | ADD_CUSTOM_TARGET(functiontest 22 | COMMAND ${TEST_RUNNER_LIST} ${CMAKE_CURRENT_BINARY_DIR}/function/function 23 | DEPENDS function 24 | WORKING_DIRECTORY ${TEST_WORKING_DIR} 25 | COMMENT "Run Function Test..." 
26 | ) 27 | 28 | ADD_CUSTOM_TARGET(securetest 29 | COMMAND ${TEST_RUNNER_LIST} ${CMAKE_CURRENT_BINARY_DIR}/secure/secure 30 | DEPENDS secure 31 | WORKING_DIRECTORY ${TEST_WORKING_DIR} 32 | COMMENT "Run Security Function Test..." 33 | ) 34 | 35 | ADD_CUSTOM_TARGET(test 36 | COMMAND ${CMAKE_MAKE_PROGRAM} unittest || true 37 | COMMAND ${CMAKE_MAKE_PROGRAM} functiontest || true 38 | COMMENT "Run All Test..." 39 | ) 40 | 41 | SET(unit_SOURCES ${unit_SOURCES} PARENT_SCOPE) 42 | SET(function_SOURCES ${function_SOURCES} PARENT_SCOPE) 43 | SET(secure_SOURCES ${secure_SOURCES} PARENT_SCOPE) 44 | -------------------------------------------------------------------------------- /libhdfs3/test/data/function-secure.xml: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | dfs.default.uri 5 | hdfs://localhost:8020 6 | 7 | 8 | 9 | hadoop.security.authentication 10 | kerberos 11 | 12 | 13 | 14 | dfs.nameservices 15 | gphd-cluster 16 | 17 | 18 | 19 | dfs.ha.namenodes.gphd-cluster 20 | nn1,nn2 21 | 22 | 23 | 24 | dfs.namenode.rpc-address.gphd-cluster.nn1 25 | smdw:8020 26 | 27 | 28 | 29 | dfs.namenode.rpc-address.gphd-cluster.nn2 30 | mdw:8020 31 | 32 | 33 | 34 | dfs.namenode.http-address.gphd-cluster.nn1 35 | smdw:50070 36 | 37 | 38 | 39 | dfs.namenode.http-address.gphd-cluster.nn2 40 | mdw:50070 41 | 42 | 43 | 44 | -------------------------------------------------------------------------------- /libhdfs3/test/data/function-secure.xml.sample: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | dfs.default.uri 5 | hdfs://localhost:8020 6 | 7 | 8 | 9 | hadoop.security.authentication 10 | kerberos 11 | 12 | 13 | 14 | dfs.nameservices 15 | gphd-cluster 16 | 17 | 18 | 19 | dfs.ha.namenodes.gphd-cluster 20 | nn1,nn2 21 | 22 | 23 | 24 | dfs.namenode.rpc-address.gphd-cluster.nn1 25 | smdw:8020 26 | 27 | 28 | 29 | dfs.namenode.rpc-address.gphd-cluster.nn2 30 | mdw:8020 31 | 32 | 33 | 34 | 
dfs.namenode.http-address.gphd-cluster.nn1 35 | smdw:50070 36 | 37 | 38 | 39 | dfs.namenode.http-address.gphd-cluster.nn2 40 | mdw:50070 41 | 42 | 43 | 44 | -------------------------------------------------------------------------------- /libhdfs3/test/data/function-test.xml.sample: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | dfs.default.uri 5 | hdfs://localhost:8020 6 | 7 | 8 | 9 | hadoop.security.authentication 10 | simple 11 | 12 | 13 | 14 | dfs.nameservices 15 | phdcluster 16 | 17 | 18 | 19 | dfs.default.replica 20 | 3 21 | 22 | 23 | 24 | dfs.client.log.severity 25 | INFO 26 | 27 | 28 | 29 | dfs.client.read.shortcircuit 30 | true 31 | 32 | 33 | 34 | input.localread.blockinfo.cachesize 35 | 10 36 | 37 | 38 | 39 | dfs.client.read.shortcircuit.streams.cache.size 40 | 10 41 | 42 | 43 | 44 | dfs.client.use.legacy.blockreader.local 45 | false 46 | 47 | 48 | 49 | output.replace-datanode-on-failure 50 | false 51 | 52 | 53 | 54 | input.localread.mappedfile 55 | true 56 | 57 | 58 | 59 | dfs.domain.socket.path 60 | /var/lib/hadoop-hdfs/hdfs_domain__PORT 61 | 62 | 63 | 64 | dfs.ha.namenodes.phdcluster 65 | nn1,nn2 66 | 67 | 68 | 69 | dfs.namenode.rpc-address.phdcluster.nn1 70 | mdw:8020 71 | 72 | 73 | 74 | dfs.namenode.rpc-address.phdcluster.nn2 75 | smdw:8020 76 | 77 | 78 | 79 | dfs.namenode.http-address.phdcluster.nn1 80 | mdw:50070 81 | 82 | 83 | 84 | dfs.namenode.http-address.phdcluster.nn2 85 | smdw:50070 86 | 87 | 88 | 89 | rpc.socekt.linger.timeout 90 | 20 91 | 92 | 93 | 94 | rpc.max.idle 95 | 100 96 | 97 | 98 | 99 | test.get.conf 100 | success 101 | 102 | 103 | 104 | test.get.confint32 105 | 10 106 | 107 | 108 | -------------------------------------------------------------------------------- /libhdfs3/test/data/invalid.xml: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | hadoop.hdfs.configuration.version 5 | 1 6 | version of this configuration file 
-------------------------------------------------------------------------------- /libhdfs3/test/data/invalidha.xml: -------------------------------------------------------------------------------- 1 | 2 | 3 | hadoop.security.authentication 4 | kerberos 5 | 6 | 7 | dfs.nameservices 8 | phdcluster 9 | 10 | 11 | 12 | dfs.ha.namenodes.phdcluster 13 | nn1,nn2 14 | 15 | 16 | 17 | dfs.namenode.rpc-address.phdcluster.nn1 18 | mdw:8020 19 | 20 | 21 | dfs.namenode.rpc-address.phdcluster.nn2 22 | smdw:8020 23 | 24 | 25 | 26 | dfs.client.failover.proxy.provider.phdcluster 27 | org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider 28 | 29 | 30 | -------------------------------------------------------------------------------- /libhdfs3/test/data/unit-config.xml: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | TestString 5 | TestString 6 | 7 | 8 | 9 | TestInt32 10 | 123456 11 | 12 | 13 | 14 | TestInt32Invalid 15 | 123a456 16 | 17 | 18 | 19 | TestInt32OverFlow 20 | 12345678901 21 | 22 | 23 | 24 | TestInt64 25 | 12345678901 26 | 27 | 28 | 29 | TestInt64Invalid 30 | 12345a678901 31 | 32 | 33 | 34 | TestInt64OverFlow 35 | 123456789011234567890112345678901 36 | 37 | 38 | 39 | TestDouble 40 | 123.456 41 | 42 | 43 | 44 | TestDoubleInvalid 45 | 123e1234a 46 | 47 | 48 | 49 | TestDoubleOverflow 50 | 123e1234 51 | 52 | 53 | 54 | TestDoubleUnderflow 55 | 123e-1234 56 | 57 | 58 | 59 | TestTrue1 60 | true 61 | 62 | 63 | 64 | TestTrue2 65 | 1 66 | 67 | 68 | 69 | TestFalse1 70 | faLse 71 | 72 | 73 | 74 | TestFalse2 75 | 0 76 | 77 | 78 | 79 | TestBoolInvalid 80 | atrue 81 | 82 | 83 | 84 | -------------------------------------------------------------------------------- /libhdfs3/test/data/validha.xml: -------------------------------------------------------------------------------- 1 | 2 | 3 | hadoop.security.authentication 4 | kerberos 5 | 6 | 7 | dfs.nameservices 8 | phdcluster 9 | 10 | 11 | 12 | dfs.ha.namenodes.phdcluster 13 | 
nn1 , nn2 14 | 15 | 16 | 17 | dfs.namenode.rpc-address.phdcluster.nn1 18 | mdw:8020 19 | 20 | 21 | 22 | dfs.namenode.rpc-address.phdcluster.nn2 23 | smdw:8020 24 | 25 | 26 | 27 | dfs.namenode.http-address.phdcluster.nn1 28 | mdw:50070 29 | 30 | 31 | 32 | dfs.client.failover.proxy.provider.phdcluster 33 | org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider 34 | 35 | 36 | -------------------------------------------------------------------------------- /libhdfs3/test/function/CMakeLists.txt: -------------------------------------------------------------------------------- 1 | CMAKE_MINIMUM_REQUIRED(VERSION 2.8) 2 | 3 | AUTO_SOURCES(function_SOURCES "*.cpp" "RECURSE" "${CMAKE_CURRENT_SOURCE_DIR}") 4 | 5 | INCLUDE_DIRECTORIES(${gmock_INCLUDE_DIR} ${gtest_INCLUDE_DIR} ${libhdfs3_ROOT_SOURCES_DIR}) 6 | 7 | IF(NEED_BOOST) 8 | INCLUDE_DIRECTORIES(${Boost_INCLUDE_DIR}) 9 | ENDIF(NEED_BOOST) 10 | 11 | INCLUDE_DIRECTORIES(${libhdfs3_ROOT_SOURCES_DIR}) 12 | INCLUDE_DIRECTORIES(${libhdfs3_COMMON_SOURCES_DIR}) 13 | INCLUDE_DIRECTORIES(${CMAKE_CURRENT_BINARY_DIR}) 14 | INCLUDE_DIRECTORIES(${PROTOBUF_INCLUDE_DIRS}) 15 | INCLUDE_DIRECTORIES(${libhdfs3_PLATFORM_HEADER_DIR}) 16 | INCLUDE_DIRECTORIES(${LIBXML2_INCLUDE_DIR}) 17 | INCLUDE_DIRECTORIES(${KERBEROS_INCLUDE_DIRS}) 18 | INCLUDE_DIRECTORIES(${GSASL_INCLUDE_DIR}) 19 | INCLUDE_DIRECTORIES(${SSL_INCLUDE_DIR}) 20 | INCLUDE_DIRECTORIES(${CURL_INCLUDE_DIR}) 21 | INCLUDE_DIRECTORIES(${CMAKE_SOURCE_DIR}/mock) 22 | 23 | PROTOBUF_GENERATE_CPP(libhdfs3_PROTO_SOURCES libhdfs3_PROTO_HEADERS ${libhdfs3_PROTO_FILES}) 24 | 25 | IF(ENABLE_DEBUG STREQUAL ON) 26 | SET(libhdfs3_SOURCES ${libhdfs3_SOURCES} ${libhdfs3_MOCK_SOURCES}) 27 | ENDIF(ENABLE_DEBUG STREQUAL ON) 28 | 29 | IF(NOT HDFS_SUPERUSER) 30 | SET(HDFS_SUPERUSER $ENV{USER}) 31 | ENDIF(NOT HDFS_SUPERUSER) 32 | 33 | ADD_DEFINITIONS(-DHDFS_SUPERUSER="${HDFS_SUPERUSER}") 34 | ADD_DEFINITIONS(-DUSER="$ENV{USER}") 35 | 36 | ADD_EXECUTABLE(function EXCLUDE_FROM_ALL 37 | 
${gtest_SOURCES} 38 | ${gmock_SOURCES} 39 | ${libhdfs3_SOURCES} 40 | ${libhdfs3_PROTO_SOURCES} 41 | ${libhdfs3_PROTO_HEADERS} 42 | ${function_SOURCES} 43 | ) 44 | 45 | TARGET_LINK_LIBRARIES(function pthread) 46 | 47 | IF(NEED_BOOST) 48 | INCLUDE_DIRECTORIES(${Boost_INCLUDE_DIR}) 49 | SET(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -L${Boost_LIBRARY_DIRS}") 50 | TARGET_LINK_LIBRARIES(function boost_thread) 51 | TARGET_LINK_LIBRARIES(function boost_chrono) 52 | TARGET_LINK_LIBRARIES(function boost_system) 53 | TARGET_LINK_LIBRARIES(function boost_atomic) 54 | TARGET_LINK_LIBRARIES(function boost_iostreams) 55 | ENDIF(NEED_BOOST) 56 | 57 | IF(NEED_GCCEH) 58 | TARGET_LINK_LIBRARIES(function gcc_eh) 59 | ENDIF(NEED_GCCEH) 60 | 61 | IF(OS_LINUX) 62 | TARGET_LINK_LIBRARIES(function ${LIBUUID_LIBRARIES}) 63 | INCLUDE_DIRECTORIES(${LIBUUID_INCLUDE_DIRS}) 64 | ENDIF(OS_LINUX) 65 | 66 | TARGET_LINK_LIBRARIES(function ${PROTOBUF_LIBRARIES}) 67 | TARGET_LINK_LIBRARIES(function ${LIBXML2_LIBRARIES}) 68 | TARGET_LINK_LIBRARIES(function ${KERBEROS_LIBRARIES}) 69 | TARGET_LINK_LIBRARIES(function ${GSASL_LIBRARIES}) 70 | TARGET_LINK_LIBRARIES(function ${GoogleTest_LIBRARIES}) 71 | TARGET_LINK_LIBRARIES(function ${SSL_LIBRARIES}) 72 | TARGET_LINK_LIBRARIES(function ${CURL_LIBRARIES}) 73 | 74 | SET(function_SOURCES ${function_SOURCES} PARENT_SCOPE) 75 | 76 | -------------------------------------------------------------------------------- /libhdfs3/test/function/FunctionTestMain.cpp: -------------------------------------------------------------------------------- 1 | /******************************************************************** 2 | * 2014 - 3 | * open source under Apache License Version 2.0 4 | ********************************************************************/ 5 | /** 6 | * Licensed to the Apache Software Foundation (ASF) under one 7 | * or more contributor license agreements. 
See the NOTICE file 8 | * distributed with this work for additional information 9 | * regarding copyright ownership. The ASF licenses this file 10 | * to you under the Apache License, Version 2.0 (the 11 | * "License"); you may not use this file except in compliance 12 | * with the License. You may obtain a copy of the License at 13 | * 14 | * http://www.apache.org/licenses/LICENSE-2.0 15 | * 16 | * Unless required by applicable law or agreed to in writing, software 17 | * distributed under the License is distributed on an "AS IS" BASIS, 18 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 19 | * See the License for the specific language governing permissions and 20 | * limitations under the License. 21 | */ 22 | #include "gtest/gtest.h" 23 | 24 | int main(int argc, char ** argv) { 25 | ::testing::InitGoogleTest(&argc, argv); 26 | #ifdef DATA_DIR 27 | if (0 != chdir(DATA_DIR)) { 28 | abort(); 29 | } 30 | #endif 31 | return RUN_ALL_TESTS(); 32 | } 33 | -------------------------------------------------------------------------------- /libhdfs3/test/secure/CMakeLists.txt: -------------------------------------------------------------------------------- 1 | CMAKE_MINIMUM_REQUIRED(VERSION 2.8) 2 | 3 | AUTO_SOURCES(secure_SOURCES "*.cpp" "RECURSE" "${CMAKE_CURRENT_SOURCE_DIR}") 4 | 5 | INCLUDE_DIRECTORIES(${gmock_INCLUDE_DIR} ${gtest_INCLUDE_DIR} ${libhdfs3_ROOT_SOURCES_DIR}) 6 | 7 | IF(NEED_BOOST) 8 | INCLUDE_DIRECTORIES(${Boost_INCLUDE_DIR}) 9 | ENDIF(NEED_BOOST) 10 | 11 | INCLUDE_DIRECTORIES(${libhdfs3_ROOT_SOURCES_DIR}) 12 | INCLUDE_DIRECTORIES(${libhdfs3_COMMON_SOURCES_DIR}) 13 | INCLUDE_DIRECTORIES(${CMAKE_CURRENT_BINARY_DIR}) 14 | INCLUDE_DIRECTORIES(${PROTOBUF_INCLUDE_DIRS}) 15 | INCLUDE_DIRECTORIES(${libhdfs3_PLATFORM_HEADER_DIR}) 16 | INCLUDE_DIRECTORIES(${LIBXML2_INCLUDE_DIR}) 17 | INCLUDE_DIRECTORIES(${KERBEROS_INCLUDE_DIRS}) 18 | INCLUDE_DIRECTORIES(${GSASL_INCLUDE_DIR}) 19 | INCLUDE_DIRECTORIES(${CMAKE_SOURCE_DIR}/mock) 20 | 21 | 
PROTOBUF_GENERATE_CPP(libhdfs3_PROTO_SOURCES libhdfs3_PROTO_HEADERS ${libhdfs3_PROTO_FILES})

IF(ENABLE_DEBUG STREQUAL ON)
    SET(libhdfs3_SOURCES ${libhdfs3_SOURCES} ${libhdfs3_MOCK_SOURCES})
ENDIF(ENABLE_DEBUG STREQUAL ON)

IF(NOT HDFS_SUPERUSER)
    SET(HDFS_SUPERUSER $ENV{USER})
ENDIF(NOT HDFS_SUPERUSER)

ADD_DEFINITIONS(-DHDFS_SUPERUSER="${HDFS_SUPERUSER}")
ADD_DEFINITIONS(-DUSER="$ENV{USER}")

ADD_EXECUTABLE(secure EXCLUDE_FROM_ALL
    ${gtest_SOURCES}
    ${gmock_SOURCES}
    ${libhdfs3_SOURCES}
    ${libhdfs3_PROTO_SOURCES}
    ${libhdfs3_PROTO_HEADERS}
    ${secure_SOURCES}
)

TARGET_LINK_LIBRARIES(secure pthread)

IF(NEED_BOOST)
    INCLUDE_DIRECTORIES(${Boost_INCLUDE_DIR})
    SET(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -L${Boost_LIBRARY_DIRS}")
    TARGET_LINK_LIBRARIES(secure boost_thread)
    TARGET_LINK_LIBRARIES(secure boost_chrono)
    TARGET_LINK_LIBRARIES(secure boost_system)
    TARGET_LINK_LIBRARIES(secure boost_atomic)
    TARGET_LINK_LIBRARIES(secure boost_iostreams)
ENDIF(NEED_BOOST)

IF(NEED_GCCEH)
    TARGET_LINK_LIBRARIES(secure gcc_eh)
ENDIF(NEED_GCCEH)

IF(OS_LINUX)
    # Use the libuuid detected by FindLibUUID.cmake, matching the function
    # and unit targets, instead of hard-coding the library name "uuid".
    TARGET_LINK_LIBRARIES(secure ${LIBUUID_LIBRARIES})
    INCLUDE_DIRECTORIES(${LIBUUID_INCLUDE_DIRS})
ENDIF(OS_LINUX)

TARGET_LINK_LIBRARIES(secure ${PROTOBUF_LIBRARIES})
TARGET_LINK_LIBRARIES(secure ${LIBXML2_LIBRARIES})
TARGET_LINK_LIBRARIES(secure ${KERBEROS_LIBRARIES})
TARGET_LINK_LIBRARIES(secure ${GSASL_LIBRARIES})
TARGET_LINK_LIBRARIES(secure ${GoogleTest_LIBRARIES})

SET(secure_SOURCES ${secure_SOURCES} PARENT_SCOPE)
Version 2.0 4 | ********************************************************************/ 5 | /** 6 | * Licensed to the Apache Software Foundation (ASF) under one 7 | * or more contributor license agreements. See the NOTICE file 8 | * distributed with this work for additional information 9 | * regarding copyright ownership. The ASF licenses this file 10 | * to you under the Apache License, Version 2.0 (the 11 | * "License"); you may not use this file except in compliance 12 | * with the License. You may obtain a copy of the License at 13 | * 14 | * http://www.apache.org/licenses/LICENSE-2.0 15 | * 16 | * Unless required by applicable law or agreed to in writing, software 17 | * distributed under the License is distributed on an "AS IS" BASIS, 18 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 19 | * See the License for the specific language governing permissions and 20 | * limitations under the License. 21 | */ 22 | #include "gtest/gtest.h" 23 | 24 | int main(int argc, char ** argv) { 25 | ::testing::InitGoogleTest(&argc, argv); 26 | #ifdef DATA_DIR 27 | if (0 != chdir(DATA_DIR)) { 28 | abort(); 29 | } 30 | #endif 31 | return RUN_ALL_TESTS(); 32 | } 33 | -------------------------------------------------------------------------------- /libhdfs3/test/unit/CMakeLists.txt: -------------------------------------------------------------------------------- 1 | CMAKE_MINIMUM_REQUIRED(VERSION 2.8) 2 | CMAKE_MINIMUM_REQUIRED(VERSION 2.8) 3 | 4 | AUTO_SOURCES(unit_SOURCES "*.cpp" "RECURSE" ${CMAKE_CURRENT_SOURCE_DIR}) 5 | 6 | INCLUDE_DIRECTORIES(${gmock_INCLUDE_DIR} ${gtest_INCLUDE_DIR} ${libhdfs3_ROOT_SOURCES_DIR}) 7 | 8 | IF(NEED_BOOST) 9 | INCLUDE_DIRECTORIES(${Boost_INCLUDE_DIR}) 10 | ENDIF(NEED_BOOST) 11 | 12 | INCLUDE_DIRECTORIES(${libhdfs3_ROOT_SOURCES_DIR}) 13 | INCLUDE_DIRECTORIES(${libhdfs3_COMMON_SOURCES_DIR}) 14 | INCLUDE_DIRECTORIES(${PROTOBUF_INCLUDE_DIRS}) 15 | INCLUDE_DIRECTORIES(${CMAKE_CURRENT_BINARY_DIR}) 16 | 
INCLUDE_DIRECTORIES(${libhdfs3_PLATFORM_HEADER_DIR}) 17 | INCLUDE_DIRECTORIES(${LIBXML2_INCLUDE_DIR}) 18 | INCLUDE_DIRECTORIES(${KERBEROS_INCLUDE_DIRS}) 19 | INCLUDE_DIRECTORIES(${GSASL_INCLUDE_DIR}) 20 | INCLUDE_DIRECTORIES(${CMAKE_SOURCE_DIR}/mock) 21 | INCLUDE_DIRECTORIES(${SSL_INCLUDE_DIR}) 22 | INCLUDE_DIRECTORIES(${CURL_INCLUDE_DIR}) 23 | 24 | ADD_DEFINITIONS(-DMOCK) 25 | 26 | PROTOBUF_GENERATE_CPP(libhdfs3_PROTO_SOURCES libhdfs3_PROTO_HEADERS ${libhdfs3_PROTO_FILES}) 27 | 28 | SET(libhdfs3_SOURCES ${libhdfs3_SOURCES} ${libhdfs3_MOCK_SOURCES}) 29 | 30 | ADD_EXECUTABLE(unit EXCLUDE_FROM_ALL 31 | ${gtest_SOURCES} 32 | ${gmock_SOURCES} 33 | ${libhdfs3_SOURCES} 34 | ${libhdfs3_PROTO_SOURCES} 35 | ${libhdfs3_PROTO_HEADERS} 36 | ${unit_SOURCES} 37 | ) 38 | 39 | TARGET_LINK_LIBRARIES(unit ${SSL_LIBRARIES}) 40 | TARGET_LINK_LIBRARIES(unit ${CURL_LIBRARIES}) 41 | TARGET_LINK_LIBRARIES(unit pthread) 42 | 43 | IF(NEED_BOOST) 44 | INCLUDE_DIRECTORIES(${Boost_INCLUDE_DIR}) 45 | SET(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -L${Boost_LIBRARY_DIRS}") 46 | TARGET_LINK_LIBRARIES(unit boost_thread) 47 | TARGET_LINK_LIBRARIES(unit boost_chrono) 48 | TARGET_LINK_LIBRARIES(unit boost_system) 49 | TARGET_LINK_LIBRARIES(unit boost_atomic) 50 | TARGET_LINK_LIBRARIES(unit boost_iostreams) 51 | ENDIF(NEED_BOOST) 52 | 53 | IF(NEED_GCCEH) 54 | TARGET_LINK_LIBRARIES(unit gcc_eh) 55 | ENDIF(NEED_GCCEH) 56 | 57 | IF(OS_LINUX) 58 | TARGET_LINK_LIBRARIES(unit ${LIBUUID_LIBRARIES}) 59 | INCLUDE_DIRECTORIES(${LIBUUID_INCLUDE_DIRS}) 60 | ENDIF(OS_LINUX) 61 | 62 | TARGET_LINK_LIBRARIES(unit ${PROTOBUF_LIBRARIES}) 63 | TARGET_LINK_LIBRARIES(unit ${LIBXML2_LIBRARIES}) 64 | TARGET_LINK_LIBRARIES(unit ${KERBEROS_LIBRARIES}) 65 | TARGET_LINK_LIBRARIES(unit ${GSASL_LIBRARIES}) 66 | TARGET_LINK_LIBRARIES(unit ${GoogleTest_LIBRARIES}) 67 | 68 | 69 | SET(unit_SOURCES ${unit_SOURCES} PARENT_SCOPE) 70 | 71 | -------------------------------------------------------------------------------- 
/libhdfs3/test/unit/TestGetHANamenodes.cpp: -------------------------------------------------------------------------------- 1 | /******************************************************************** 2 | * 2014 - 3 | * open source under Apache License Version 2.0 4 | ********************************************************************/ 5 | /** 6 | * Licensed to the Apache Software Foundation (ASF) under one 7 | * or more contributor license agreements. See the NOTICE file 8 | * distributed with this work for additional information 9 | * regarding copyright ownership. The ASF licenses this file 10 | * to you under the Apache License, Version 2.0 (the 11 | * "License"); you may not use this file except in compliance 12 | * with the License. You may obtain a copy of the License at 13 | * 14 | * http://www.apache.org/licenses/LICENSE-2.0 15 | * 16 | * Unless required by applicable law or agreed to in writing, software 17 | * distributed under the License is distributed on an "AS IS" BASIS, 18 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 19 | * See the License for the specific language governing permissions and 20 | * limitations under the License. 
#include "gtest/gtest.h"

#include "client/hdfs.h"

// NOTE(review): the suite name "TestGetHAANamenodes" has a doubled 'A';
// presumably "TestGetHANamenodes" was intended. Left unchanged because
// renaming would alter gtest filter strings.
TEST(TestGetHAANamenodes, TestInvalidInput) {
    int size;
    // Each invalid argument combination must yield NULL and set errno to
    // EINVAL: NULL/empty config path, an HA config missing http addresses,
    // a nonexistent config file, NULL/empty nameservice, and a NULL
    // output-size pointer.
    EXPECT_TRUE(NULL == hdfsGetHANamenodesWithConfig(NULL, "phdcluster", &size));
    EXPECT_TRUE(errno == EINVAL);
    EXPECT_TRUE(NULL == hdfsGetHANamenodesWithConfig("", "phdcluster", &size));
    EXPECT_TRUE(errno == EINVAL);
    EXPECT_TRUE(NULL == hdfsGetHANamenodesWithConfig("invalidha.xml", "phdcluster", &size));
    EXPECT_TRUE(errno == EINVAL);
    EXPECT_TRUE(NULL == hdfsGetHANamenodesWithConfig("notExist", "phdcluster", &size));
    EXPECT_TRUE(errno == EINVAL);
    EXPECT_TRUE(NULL == hdfsGetHANamenodesWithConfig("validha.xml", NULL, &size));
    EXPECT_TRUE(errno == EINVAL);
    EXPECT_TRUE(NULL == hdfsGetHANamenodesWithConfig("validha.xml", "", &size));
    EXPECT_TRUE(errno == EINVAL);
    EXPECT_TRUE(NULL == hdfsGetHANamenodesWithConfig("validha.xml", "phdcluster", NULL));
    EXPECT_TRUE(errno == EINVAL);
}

// validha.xml configures two namenodes (nn1/nn2) but only nn1 has an
// http-address; nn2's http_addr must therefore come back NULL.
TEST(TestGetHAANamenodes, GetHANamenodes) {
    int size;
    Namenode * namenodes = NULL;
    ASSERT_TRUE(NULL != (namenodes = hdfsGetHANamenodesWithConfig("validha.xml", "phdcluster", &size)));
    ASSERT_EQ(2, size);
    EXPECT_STREQ("mdw:8020", namenodes[0].rpc_addr);
    EXPECT_STREQ("mdw:50070", namenodes[0].http_addr);
    EXPECT_STREQ("smdw:8020", namenodes[1].rpc_addr);
    EXPECT_EQ((char *)NULL, namenodes[1].http_addr);
    // Freeing the returned array must not throw.
    EXPECT_NO_THROW(hdfsFreeNamenodeInformation(namenodes, size));
}
Foundation (ASF) under one 7 | * or more contributor license agreements. See the NOTICE file 8 | * distributed with this work for additional information 9 | * regarding copyright ownership. The ASF licenses this file 10 | * to you under the Apache License, Version 2.0 (the 11 | * "License"); you may not use this file except in compliance 12 | * with the License. You may obtain a copy of the License at 13 | * 14 | * http://www.apache.org/licenses/LICENSE-2.0 15 | * 16 | * Unless required by applicable law or agreed to in writing, software 17 | * distributed under the License is distributed on an "AS IS" BASIS, 18 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 19 | * See the License for the specific language governing permissions and 20 | * limitations under the License. 21 | */ 22 | #include "gtest/gtest.h" 23 | 24 | #include "client/LeaseRenewer.h" 25 | #include "DateTime.h" 26 | #include "MockFileSystemInter.h" 27 | 28 | using namespace Hdfs::Internal; 29 | using namespace testing; 30 | 31 | TEST(TestRenewer, Renew) { 32 | shared_ptr filesystem(new MockFileSystemInter()); 33 | LeaseRenewerImpl renewer; 34 | renewer.setInterval(1000); 35 | EXPECT_CALL(*filesystem, getClientName()).Times(2).WillRepeatedly(Return("MockFS")); 36 | EXPECT_CALL(*filesystem, registerOpenedOutputStream()).Times(1); 37 | EXPECT_CALL(*filesystem, unregisterOpenedOutputStream()).Times(1).WillOnce(Return(true)); 38 | EXPECT_CALL(*filesystem, renewLease()).Times(AtLeast(1)).WillRepeatedly(Return(true)); 39 | renewer.StartRenew(filesystem); 40 | sleep_for(seconds(2)); 41 | renewer.StopRenew(filesystem); 42 | } 43 | -------------------------------------------------------------------------------- /libhdfs3/test/unit/TestLruMap.cpp: -------------------------------------------------------------------------------- 1 | /******************************************************************** 2 | * 2014 - 3 | * open source under Apache License Version 2.0 4 | 
********************************************************************/ 5 | /** 6 | * Licensed to the Apache Software Foundation (ASF) under one 7 | * or more contributor license agreements. See the NOTICE file 8 | * distributed with this work for additional information 9 | * regarding copyright ownership. The ASF licenses this file 10 | * to you under the Apache License, Version 2.0 (the 11 | * "License"); you may not use this file except in compliance 12 | * with the License. You may obtain a copy of the License at 13 | * 14 | * http://www.apache.org/licenses/LICENSE-2.0 15 | * 16 | * Unless required by applicable law or agreed to in writing, software 17 | * distributed under the License is distributed on an "AS IS" BASIS, 18 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 19 | * See the License for the specific language governing permissions and 20 | * limitations under the License. 21 | */ 22 | #include "gtest/gtest.h" 23 | #include "LruMap.h" 24 | 25 | using namespace Hdfs::Internal; 26 | 27 | TEST(TestLruMap, TestInsertAndFind) { 28 | LruMap map(3); 29 | map.insert(1, 1); 30 | map.insert(2, 2); 31 | map.insert(3, 3); 32 | map.insert(4, 4); 33 | int value = 0; 34 | EXPECT_TRUE(map.find(2, &value)); 35 | EXPECT_TRUE(value == 2); 36 | EXPECT_TRUE(map.find(3, &value)); 37 | EXPECT_TRUE(value == 3); 38 | EXPECT_TRUE(map.find(4, &value)); 39 | EXPECT_TRUE(value == 4); 40 | EXPECT_FALSE(map.find(1, &value)); 41 | EXPECT_TRUE(map.find(2, &value)); 42 | EXPECT_TRUE(value == 2); 43 | map.insert(5, 5); 44 | EXPECT_FALSE(map.find(3, &value)); 45 | } 46 | 47 | TEST(TestLruMap, TestFindAndErase) { 48 | LruMap map(3); 49 | map.insert(1, 1); 50 | map.insert(2, 2); 51 | map.insert(3, 3); 52 | map.insert(4, 4); 53 | int value = 0; 54 | EXPECT_EQ(3u, map.size()); 55 | EXPECT_TRUE(map.findAndErase(2, &value)); 56 | EXPECT_TRUE(value == 2); 57 | EXPECT_EQ(2u, map.size()); 58 | } 59 | 
-------------------------------------------------------------------------------- /libhdfs3/test/unit/TestSessionConfig.cpp: -------------------------------------------------------------------------------- 1 | /******************************************************************** 2 | * 2014 - 3 | * open source under Apache License Version 2.0 4 | ********************************************************************/ 5 | /** 6 | * Licensed to the Apache Software Foundation (ASF) under one 7 | * or more contributor license agreements. See the NOTICE file 8 | * distributed with this work for additional information 9 | * regarding copyright ownership. The ASF licenses this file 10 | * to you under the Apache License, Version 2.0 (the 11 | * "License"); you may not use this file except in compliance 12 | * with the License. You may obtain a copy of the License at 13 | * 14 | * http://www.apache.org/licenses/LICENSE-2.0 15 | * 16 | * Unless required by applicable law or agreed to in writing, software 17 | * distributed under the License is distributed on an "AS IS" BASIS, 18 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 19 | * See the License for the specific language governing permissions and 20 | * limitations under the License. 
21 | */ 22 | #include "gtest/gtest.h" 23 | #include "SessionConfig.h" 24 | #include "XmlConfig.h" 25 | 26 | using namespace Hdfs; 27 | using namespace Hdfs::Internal; 28 | 29 | TEST(TestSessionConfig, TestSelect) { 30 | Config conf; 31 | SessionConfig session(conf); 32 | ASSERT_STREQ("hdfs://localhost:8020", session.getDefaultUri().c_str()); 33 | } 34 | -------------------------------------------------------------------------------- /libhdfs3/test/unit/UnitTestMain.cpp: -------------------------------------------------------------------------------- 1 | /******************************************************************** 2 | * 2014 - 3 | * open source under Apache License Version 2.0 4 | ********************************************************************/ 5 | /** 6 | * Licensed to the Apache Software Foundation (ASF) under one 7 | * or more contributor license agreements. See the NOTICE file 8 | * distributed with this work for additional information 9 | * regarding copyright ownership. The ASF licenses this file 10 | * to you under the Apache License, Version 2.0 (the 11 | * "License"); you may not use this file except in compliance 12 | * with the License. You may obtain a copy of the License at 13 | * 14 | * http://www.apache.org/licenses/LICENSE-2.0 15 | * 16 | * Unless required by applicable law or agreed to in writing, software 17 | * distributed under the License is distributed on an "AS IS" BASIS, 18 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 19 | * See the License for the specific language governing permissions and 20 | * limitations under the License. 
21 | */ 22 | #include "gtest/gtest.h" 23 | 24 | int main(int argc, char ** argv) { 25 | ::testing::InitGoogleTest(&argc, argv); 26 | #ifdef DATA_DIR 27 | if (0 != chdir(DATA_DIR)) { 28 | abort(); 29 | } 30 | #endif 31 | return RUN_ALL_TESTS(); 32 | } 33 | -------------------------------------------------------------------------------- /libhdfs3/test/unit/UnitTestUtils.h: -------------------------------------------------------------------------------- 1 | /******************************************************************** 2 | * 2014 - 3 | * open source under Apache License Version 2.0 4 | ********************************************************************/ 5 | /** 6 | * Licensed to the Apache Software Foundation (ASF) under one 7 | * or more contributor license agreements. See the NOTICE file 8 | * distributed with this work for additional information 9 | * regarding copyright ownership. The ASF licenses this file 10 | * to you under the Apache License, Version 2.0 (the 11 | * "License"); you may not use this file except in compliance 12 | * with the License. You may obtain a copy of the License at 13 | * 14 | * http://www.apache.org/licenses/LICENSE-2.0 15 | * 16 | * Unless required by applicable law or agreed to in writing, software 17 | * distributed under the License is distributed on an "AS IS" BASIS, 18 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 19 | * See the License for the specific language governing permissions and 20 | * limitations under the License. 
21 | */ 22 | #ifndef _HDFS_LIBHDFS3_TEST_UNIT_UNITTESTUTILS_H_ 23 | #define _HDFS_LIBHDFS3_TEST_UNIT_UNITTESTUTILS_H_ 24 | 25 | #include "DateTime.h" 26 | #include "ExceptionInternal.h" 27 | 28 | namespace Hdfs { 29 | namespace Internal { 30 | 31 | template 32 | void InvokeThrow(const char * str, bool * trigger) { 33 | if (!trigger || *trigger) { 34 | THROW(T, "%s", str); 35 | } 36 | } 37 | 38 | template 39 | R InvokeThrowAndReturn(const char * str, bool * trigger, R ret) { 40 | if (!trigger || *trigger) { 41 | THROW(T, "%s", str); 42 | } 43 | 44 | return ret; 45 | } 46 | 47 | template 48 | R InvokeWaitAndReturn(int timeout, R ret, int ec) { 49 | if (timeout > 0) { 50 | sleep_for(milliseconds(timeout)); 51 | } 52 | 53 | errno = ec; 54 | return ret; 55 | } 56 | 57 | static inline void InvokeDoNothing() { 58 | 59 | } 60 | 61 | } 62 | } 63 | #endif /* _HDFS_LIBHDFS3_TEST_UNIT_UNITTESTUTILS_H_ */ 64 | -------------------------------------------------------------------------------- /sync_with_upstream.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. See accompanying LICENSE file. 

# Abort on any failure and echo each command for traceability.
set -ex

# Refresh the incubator-hawq submodule and fast-forward it to upstream.
git submodule update
(
    cd incubator-hawq
    git pull --ff-only
)

# Mirror the upstream libhdfs3 sources into the local copy.
rsync -r incubator-hawq/depends/libhdfs3/ libhdfs3

# Stage the updated submodule pointer and the synced sources.
git add incubator-hawq
git add libhdfs3

echo "Please commit results"