├── .gitignore
├── LICENSE
├── README.md
├── holo-chatbot-webui
├── .gitignore
├── Pooling.py
├── README.md
├── args.py
├── docs
│ └── webui.jpg
├── examples
│ ├── PAI_QA_robot.py
│ ├── client_api.py
│ └── run.sh
├── html
│ └── footer.html
├── main.py
├── modules
│ ├── CustomLLM.py
│ ├── CustomPrompt.py
│ ├── EASAgent.py
│ ├── EmbeddingModel.py
│ ├── KeywordExtractor.py
│ ├── LLMService.py
│ ├── QuestionPrompt.py
│ ├── TextSplitter.py
│ ├── UI.py
│ └── VectorDB.py
├── requirements.txt
├── style.css
├── tokenizer.model
└── webui.py
├── holo-chatbot
├── .gitignore
├── LICENSE
├── README.md
├── chatbot.py
├── config
│ ├── config.yaml.example
│ └── prompt.txt
├── data
│ └── example_data.csv
├── dingding_server.py
├── generate_config.py
└── utils.py
├── holo-client-c
├── CMakeLists.txt
├── LICENSE
├── Readme.md
├── include
│ ├── defs.h
│ ├── holo_client.h
│ ├── holo_config.h
│ ├── logger.h
│ ├── record.h
│ ├── request.h
│ ├── table_schema.h
│ └── worker_pool.h
├── src
│ ├── action.c
│ ├── action.h
│ ├── batch.c
│ ├── batch.h
│ ├── connection_holder.c
│ ├── connection_holder.h
│ ├── direct_collector.c
│ ├── direct_collector.h
│ ├── exception.c
│ ├── exception.h
│ ├── future.c
│ ├── future.h
│ ├── get_collector.c
│ ├── get_collector.h
│ ├── holo_client.c
│ ├── holo_client_private.h
│ ├── holo_config.c
│ ├── holo_config_private.h
│ ├── ilist.h
│ ├── keywords.c
│ ├── keywords.h
│ ├── kwlist.h
│ ├── log4crc
│ ├── logger.c
│ ├── logger_log4c.c
│ ├── logger_log4c.h
│ ├── logger_private.h
│ ├── lp_map.c
│ ├── lp_map.h
│ ├── meta_cache.c
│ ├── meta_cache.h
│ ├── metrics.c
│ ├── metrics.h
│ ├── murmur3.c
│ ├── murmur3.h
│ ├── mutation_collector.c
│ ├── mutation_collector.h
│ ├── mutation_map.c
│ ├── mutation_map.h
│ ├── record.c
│ ├── record_private.h
│ ├── request.c
│ ├── request_private.h
│ ├── sql_builder.c
│ ├── sql_builder.h
│ ├── table_schema.c
│ ├── table_schema_private.h
│ ├── utils.c
│ ├── utils.h
│ ├── worker.c
│ ├── worker.h
│ ├── worker_pool.c
│ └── worker_pool_private.h
└── test
│ ├── Makefile
│ ├── log4crc
│ ├── testDataType.c
│ ├── testFunctions.c
│ ├── testHoloClient.c
│ ├── testMultiThread.c
│ ├── testPerformance.c
│ └── unit_test.c
├── holo-client-go
├── Readme.md
├── example.go
├── go.mod
├── holo-client
│ ├── holo_client.go
│ ├── holo_config.go
│ ├── include
│ │ ├── defs.h
│ │ ├── holo_client.h
│ │ ├── holo_config.h
│ │ ├── logger.h
│ │ ├── record.h
│ │ ├── request.h
│ │ ├── table_schema.h
│ │ └── worker_pool.h
│ ├── lib
│ │ └── libholo-client.so
│ └── request.go
└── log4crc
├── holo-client
├── .gitignore
├── LICENSE
├── README.md
├── pom.xml
├── src
│ ├── main
│ │ ├── java
│ │ │ ├── com
│ │ │ │ └── alibaba
│ │ │ │ │ └── hologres
│ │ │ │ │ └── client
│ │ │ │ │ ├── BinlogPartitionGroupReader.java
│ │ │ │ │ ├── BinlogShardGroupReader.java
│ │ │ │ │ ├── CheckAndPut.java
│ │ │ │ │ ├── Command.java
│ │ │ │ │ ├── EqualsFilter.java
│ │ │ │ │ ├── Exporter.java
│ │ │ │ │ ├── Filter.java
│ │ │ │ │ ├── Get.java
│ │ │ │ │ ├── HoloClient.java
│ │ │ │ │ ├── HoloConfig.java
│ │ │ │ │ ├── Importer.java
│ │ │ │ │ ├── Put.java
│ │ │ │ │ ├── RangeFilter.java
│ │ │ │ │ ├── RecordInputFormat.java
│ │ │ │ │ ├── RecordOutputFormat.java
│ │ │ │ │ ├── Scan.java
│ │ │ │ │ ├── SortKeys.java
│ │ │ │ │ ├── Subscribe.java
│ │ │ │ │ ├── Trace.java
│ │ │ │ │ ├── copy
│ │ │ │ │ ├── CopyInOutputStream.java
│ │ │ │ │ ├── CopyMode.java
│ │ │ │ │ ├── CopyUtil.java
│ │ │ │ │ ├── RecordBinaryOutputStream.java
│ │ │ │ │ ├── RecordOutputStream.java
│ │ │ │ │ ├── RecordTextOutputStream.java
│ │ │ │ │ └── WithCopyResult.java
│ │ │ │ │ ├── ddl
│ │ │ │ │ ├── DDLGenerator.java
│ │ │ │ │ ├── DDLGeneratorUtil.java
│ │ │ │ │ └── StatementKeywords.java
│ │ │ │ │ ├── exception
│ │ │ │ │ ├── ExceptionCode.java
│ │ │ │ │ ├── HoloClientException.java
│ │ │ │ │ ├── HoloClientWithDetailsException.java
│ │ │ │ │ └── InvalidIdentifierException.java
│ │ │ │ │ ├── function
│ │ │ │ │ └── FunctionWithSQLException.java
│ │ │ │ │ ├── impl
│ │ │ │ │ ├── Cache.java
│ │ │ │ │ ├── ConnectionHolder.java
│ │ │ │ │ ├── ExecutionPool.java
│ │ │ │ │ ├── MetaStore.java
│ │ │ │ │ ├── ObjectChan.java
│ │ │ │ │ ├── OneshotWorker.java
│ │ │ │ │ ├── PreparedStatementWithBatchInfo.java
│ │ │ │ │ ├── RecordReader.java
│ │ │ │ │ ├── UnnestUpsertStatementBuilder.java
│ │ │ │ │ ├── UpsertStatementBuilder.java
│ │ │ │ │ ├── Worker.java
│ │ │ │ │ ├── action
│ │ │ │ │ │ ├── AbstractAction.java
│ │ │ │ │ │ ├── CopyAction.java
│ │ │ │ │ │ ├── EmptyAction.java
│ │ │ │ │ │ ├── GetAction.java
│ │ │ │ │ │ ├── MetaAction.java
│ │ │ │ │ │ ├── PutAction.java
│ │ │ │ │ │ ├── ScanAction.java
│ │ │ │ │ │ └── SqlAction.java
│ │ │ │ │ ├── binlog
│ │ │ │ │ │ ├── ArrayBuffer.java
│ │ │ │ │ │ ├── BinlogEventType.java
│ │ │ │ │ │ ├── BinlogLevel.java
│ │ │ │ │ │ ├── BinlogOffset.java
│ │ │ │ │ │ ├── BinlogRecordCollector.java
│ │ │ │ │ │ ├── Committer.java
│ │ │ │ │ │ ├── HoloBinlogDecoder.java
│ │ │ │ │ │ ├── TableSchemaSupplier.java
│ │ │ │ │ │ ├── action
│ │ │ │ │ │ │ └── BinlogAction.java
│ │ │ │ │ │ └── handler
│ │ │ │ │ │ │ └── BinlogActionHandler.java
│ │ │ │ │ ├── collector
│ │ │ │ │ │ ├── ActionCollector.java
│ │ │ │ │ │ ├── BatchState.java
│ │ │ │ │ │ ├── CollectorStatistics.java
│ │ │ │ │ │ ├── DefaultResizePolicy.java
│ │ │ │ │ │ ├── RecordCollector.java
│ │ │ │ │ │ ├── ResizePolicy.java
│ │ │ │ │ │ ├── TableCollector.java
│ │ │ │ │ │ ├── TableShardCollector.java
│ │ │ │ │ │ └── shard
│ │ │ │ │ │ │ ├── DistributionKeyShardPolicy.java
│ │ │ │ │ │ │ └── ShardPolicy.java
│ │ │ │ │ ├── copy
│ │ │ │ │ │ ├── CopyContext.java
│ │ │ │ │ │ └── InternalPipedOutputStream.java
│ │ │ │ │ ├── handler
│ │ │ │ │ │ ├── ActionHandler.java
│ │ │ │ │ │ ├── CopyActionHandler.java
│ │ │ │ │ │ ├── EmptyActionHandler.java
│ │ │ │ │ │ ├── GetActionHandler.java
│ │ │ │ │ │ ├── MetaActionHandler.java
│ │ │ │ │ │ ├── PutActionHandler.java
│ │ │ │ │ │ ├── ScanActionHandler.java
│ │ │ │ │ │ ├── SqlActionHandler.java
│ │ │ │ │ │ └── jdbc
│ │ │ │ │ │ │ ├── JdbcBigDecimalColumnValues.java
│ │ │ │ │ │ │ ├── JdbcBooleanColumnValues.java
│ │ │ │ │ │ │ ├── JdbcByteaColumnValues.java
│ │ │ │ │ │ │ ├── JdbcColumnValues.java
│ │ │ │ │ │ │ ├── JdbcColumnValuesBuilder.java
│ │ │ │ │ │ │ ├── JdbcDateColumnValues.java
│ │ │ │ │ │ │ ├── JdbcDoubleColumnValues.java
│ │ │ │ │ │ │ ├── JdbcFloatColumnValues.java
│ │ │ │ │ │ │ ├── JdbcIntegerColumnValues.java
│ │ │ │ │ │ │ ├── JdbcLongColumnValues.java
│ │ │ │ │ │ │ ├── JdbcShortColumnValues.java
│ │ │ │ │ │ │ ├── JdbcStringColumnValues.java
│ │ │ │ │ │ │ ├── JdbcTimeColumnValues.java
│ │ │ │ │ │ │ └── JdbcTimestampColumnValues.java
│ │ │ │ │ └── util
│ │ │ │ │ │ ├── ConnectionUtil.java
│ │ │ │ │ │ ├── ExceptionUtil.java
│ │ │ │ │ │ └── ShardUtil.java
│ │ │ │ │ ├── model
│ │ │ │ │ ├── AutoPartitioning.java
│ │ │ │ │ ├── Column.java
│ │ │ │ │ ├── ExportContext.java
│ │ │ │ │ ├── HoloVersion.java
│ │ │ │ │ ├── ImportContext.java
│ │ │ │ │ ├── Partition.java
│ │ │ │ │ ├── Record.java
│ │ │ │ │ ├── RecordKey.java
│ │ │ │ │ ├── RecordScanner.java
│ │ │ │ │ ├── SSLMode.java
│ │ │ │ │ ├── TableName.java
│ │ │ │ │ ├── TableSchema.java
│ │ │ │ │ ├── WriteFailStrategy.java
│ │ │ │ │ ├── WriteMode.java
│ │ │ │ │ ├── binlog
│ │ │ │ │ │ ├── BinlogHeartBeatRecord.java
│ │ │ │ │ │ ├── BinlogPartitionSubscribeMode.java
│ │ │ │ │ │ └── BinlogRecord.java
│ │ │ │ │ └── checkandput
│ │ │ │ │ │ ├── CheckAndPutCondition.java
│ │ │ │ │ │ ├── CheckAndPutRecord.java
│ │ │ │ │ │ └── CheckCompareOp.java
│ │ │ │ │ ├── type
│ │ │ │ │ ├── PGroaringbitmap.java
│ │ │ │ │ └── PgDefaultBinaryObject.java
│ │ │ │ │ └── utils
│ │ │ │ │ ├── CommonUtil.java
│ │ │ │ │ ├── ConfLoader.java
│ │ │ │ │ ├── FutureUtil.java
│ │ │ │ │ ├── IdentifierUtil.java
│ │ │ │ │ ├── Metrics.java
│ │ │ │ │ ├── PartitionUtil.java
│ │ │ │ │ ├── RecordChecker.java
│ │ │ │ │ ├── Tuple.java
│ │ │ │ │ ├── Tuple3.java
│ │ │ │ │ └── Tuple4.java
│ │ │ └── org
│ │ │ │ └── postgresql
│ │ │ │ └── jdbc
│ │ │ │ ├── ArrayUtil.java
│ │ │ │ └── TimestampUtil.java
│ │ └── resources
│ │ │ ├── META-INF
│ │ │ └── services
│ │ │ │ └── java.sql.Driver
│ │ │ └── holo-client.properties
│ ├── saveVersion.sh
│ └── test
│ │ └── java
│ │ └── com
│ │ └── alibaba
│ │ └── hologres
│ │ └── client
│ │ ├── BinlogReaderTest.java
│ │ ├── BulkScanTest.java
│ │ ├── CopyTest.java
│ │ ├── HoloClientByteSizeTest.java
│ │ ├── HoloClientCheckAndPutTest.java
│ │ ├── HoloClientDefaultValueTest.java
│ │ ├── HoloClientFixedFeTest.java
│ │ ├── HoloClientGenericTest.java
│ │ ├── HoloClientGetTest.java
│ │ ├── HoloClientPartitionTest.java
│ │ ├── HoloClientPrefixScanTest.java
│ │ ├── HoloClientScanTest.java
│ │ ├── HoloClientShardTest.java
│ │ ├── HoloClientTest.java
│ │ ├── HoloClientTestBase.java
│ │ ├── HoloClientTypesTest.java
│ │ ├── exception
│ │ └── HoloClientExceptionTest.java
│ │ ├── impl
│ │ ├── ExecutionPoolTest.java
│ │ ├── TableCollectorTest.java
│ │ └── UpsertStatementBuilderTest.java
│ │ ├── model
│ │ ├── SSLModeTest.java
│ │ ├── TableNameTest.java
│ │ └── TableSchemaTest.java
│ │ ├── statefull
│ │ ├── HoloClientTypeConvertTest.java
│ │ └── connectionhandler
│ │ │ └── RetryTest.java
│ │ ├── test
│ │ ├── Demo.java
│ │ └── GeneratorDemo.java
│ │ └── utils
│ │ ├── ConnectionUtilTest.java
│ │ ├── DataTypeTestUtil.java
│ │ ├── IdentifierTest.java
│ │ ├── PartitionUtilTest.java
│ │ └── ShardUtilTest.java
├── testng.xml
└── tools
│ └── maven
│ ├── checkstyle.xml
│ ├── scalastyle-config.xml
│ ├── spotbugs-exclude.xml
│ ├── suppressions-core.xml
│ ├── suppressions-optimizer.xml
│ ├── suppressions-runtime.xml
│ └── suppressions.xml
├── holo-e2e-performance-tool
├── README.md
├── pom.xml
└── src
│ └── main
│ └── java
│ └── com
│ └── alibaba
│ └── hologres
│ └── performace
│ ├── CaseUtil.java
│ ├── client
│ ├── BinlogTest.java
│ ├── FixedCopyTest.java
│ ├── GetTest.java
│ ├── HoloClientExecutionPool.java
│ ├── InsertTest.java
│ ├── Main.java
│ ├── PrepareBinlogData.java
│ ├── PrepareGetData.java
│ ├── PrepareScanData.java
│ ├── PutTest.java
│ ├── Reporter.java
│ ├── ScanTest.java
│ ├── SqlUtil.java
│ └── Util.java
│ └── params
│ ├── IntRandomParamProvider.java
│ ├── LongRandomParamProvider.java
│ ├── ParamProvider.java
│ └── ParamsProvider.java
├── holo-llm-deepseek
├── README.md
├── config
│ └── config.json
├── data
│ └── example.csv
├── main.py
└── requirements.txt
├── holo-llm
├── README.md
├── config
│ └── config.json
├── data
│ └── example.csv
└── main.py
├── holo-shipper
├── .gitignore
├── README.md
├── dependency-reduced-pom.xml
├── pom.xml
└── src
│ ├── META-INF
│ └── MANIFEST.MF
│ └── main
│ ├── java
│ └── com
│ │ └── alibaba
│ │ └── hologres
│ │ └── shipper
│ │ ├── HoloDBShipper.java
│ │ ├── HoloShipper.java
│ │ ├── generic
│ │ ├── AbstractDB.java
│ │ ├── AbstractInstance.java
│ │ └── AbstractTable.java
│ │ ├── holo
│ │ ├── HoloDB.java
│ │ ├── HoloInstance.java
│ │ ├── HoloTable.java
│ │ └── HoloUtils.java
│ │ ├── localstorage
│ │ ├── LocalStorageDB.java
│ │ ├── LocalStorageInstance.java
│ │ └── LocalStorageTable.java
│ │ ├── oss
│ │ ├── OSSDB.java
│ │ ├── OSSInstance.java
│ │ ├── OSSTable.java
│ │ └── OSSUtils.java
│ │ └── utils
│ │ ├── CustomPipedInputStream.java
│ │ ├── ProcessBar.java
│ │ ├── SqlUtil.java
│ │ ├── TableInfo.java
│ │ └── TablesMeta.java
│ └── resources
│ └── logback.xml
├── holo-utils
└── find-incompatible-flink-jobs
│ ├── README.md
│ ├── pom.xml
│ └── src
│ └── main
│ ├── java
│ └── com
│ │ └── alibaba
│ │ └── hologres
│ │ ├── FindIncompatibleFlinkJobs.java
│ │ └── IncompatibleResult.java
│ └── resources
│ └── log4j.properties
├── hologres-connector-datax-writer
├── README.md
├── pom.xml
└── src
│ └── main
│ ├── assembly
│ └── package.xml
│ ├── java
│ └── com
│ │ └── alibaba
│ │ └── datax
│ │ └── plugin
│ │ └── writer
│ │ └── hologresjdbcwriter
│ │ ├── BaseWriter.java
│ │ ├── Constant.java
│ │ ├── HologresJdbcWriter.java
│ │ ├── Key.java
│ │ └── util
│ │ ├── ConfLoader.java
│ │ ├── OriginalConfPretreatmentUtil.java
│ │ └── WriterUtil.java
│ └── resources
│ ├── plugin.json
│ └── plugin_job_template.json
├── hologres-connector-examples
├── README.md
├── hologres-connector-flink-examples
│ ├── README.md
│ ├── pom.xml
│ └── src
│ │ └── main
│ │ ├── java
│ │ └── com
│ │ │ └── alibaba
│ │ │ └── ververica
│ │ │ └── connectors
│ │ │ └── hologres
│ │ │ └── example
│ │ │ ├── FlinkDSAndSQLToHoloExample.java
│ │ │ ├── FlinkDataStreamToHoloExample.java
│ │ │ ├── FlinkRoaringBitmapAggJob.java
│ │ │ ├── FlinkSQLSourceAndSinkExample.java
│ │ │ ├── FlinkSQLToHoloExample.java
│ │ │ ├── FlinkSQLToHoloRePartitionExample.java
│ │ │ ├── FlinkToHoloRePartitionExample.java
│ │ │ └── SourceItem.java
│ │ └── resources
│ │ ├── log4j.properties
│ │ ├── ods_app_example.csv
│ │ ├── repartition.sql
│ │ └── setting.properties
├── hologres-connector-flink-ordergen
│ ├── README.md
│ ├── pom.xml
│ └── src
│ │ └── main
│ │ ├── java
│ │ └── io
│ │ │ └── hologres
│ │ │ └── flink
│ │ │ └── ordergen
│ │ │ ├── City.java
│ │ │ ├── OrderGenSourceFunction.java
│ │ │ ├── OrderGenTableFactory.java
│ │ │ ├── OrderGenTableSource.java
│ │ │ ├── PrefectureCity.java
│ │ │ └── Province.java
│ │ └── resources
│ │ ├── META-INF
│ │ └── services
│ │ │ └── org.apache.flink.table.factories.Factory
│ │ ├── china_cities.json
│ │ └── log4j.properties
└── hologres-connector-spark-examples
│ ├── README.md
│ ├── pom.xml
│ └── src
│ └── main
│ ├── java
│ └── com
│ │ └── alibaba
│ │ └── hologres
│ │ └── spark
│ │ └── example
│ │ ├── SparkDataFrameToHoloExample.java
│ │ ├── SparkHoloTableCatalogExample.java
│ │ ├── SparkHoloToDataFrameExample.java
│ │ ├── SparkReadHoloToDataFrameExample.java
│ │ ├── SparkToHoloRepartitionExample.scala
│ │ ├── SparkWriteDataFrameToHoloExample.java
│ │ └── SparkWriteToHoloRepartitionExample.scala
│ └── resources
│ ├── customer.tbl
│ ├── log4j.properties
│ └── setting.properties
├── hologres-connector-flink-1.15
├── README.md
├── pom.xml
└── src
│ ├── main
│ ├── java
│ │ └── com
│ │ │ └── alibaba
│ │ │ └── ververica
│ │ │ └── connectors
│ │ │ └── hologres
│ │ │ └── source
│ │ │ └── lookup
│ │ │ └── Flink115HologresLookUpFunctionFactory.java
│ └── resources
│ │ └── META-INF
│ │ └── services
│ │ └── com.alibaba.ververica.connectors.hologres.source.HologresLookUpFunctionFactory
│ └── test
│ └── java
│ └── com
│ └── alibaba
│ └── ververica
│ └── connectors
│ └── hologres
│ ├── dim
│ ├── HologresDimTableITTest.java
│ └── HologresJDBCDimTableITTest.java
│ ├── sink
│ └── HologresSinkTableITTest.java
│ └── source
│ └── HologresSourceTableITTest.java
├── hologres-connector-flink-1.17
├── README.md
├── pom.xml
└── src
│ ├── main
│ ├── java
│ │ └── com
│ │ │ └── alibaba
│ │ │ └── ververica
│ │ │ └── connectors
│ │ │ └── hologres
│ │ │ └── source
│ │ │ └── lookup
│ │ │ └── Flink117HologresLookUpFunctionFactory.java
│ └── resources
│ │ └── META-INF
│ │ └── services
│ │ └── com.alibaba.ververica.connectors.hologres.source.HologresLookUpFunctionFactory
│ └── test
│ └── java
│ └── com
│ └── alibaba
│ └── ververica
│ └── connectors
│ └── hologres
│ ├── dim
│ ├── HologresDimTableITTest.java
│ └── HologresJDBCDimTableITTest.java
│ ├── sink
│ └── HologresSinkTableITTest.java
│ └── source
│ └── HologresSourceTableITTest.java
├── hologres-connector-flink-base
├── README.md
├── pom.xml
└── src
│ ├── main
│ ├── java
│ │ └── com
│ │ │ └── alibaba
│ │ │ └── ververica
│ │ │ └── connectors
│ │ │ └── hologres
│ │ │ ├── api
│ │ │ ├── HologresIOClient.java
│ │ │ ├── HologresReader.java
│ │ │ ├── HologresRecordConverter.java
│ │ │ ├── HologresTableSchema.java
│ │ │ ├── HologresWriter.java
│ │ │ └── table
│ │ │ │ ├── HologresRowDataConverter.java
│ │ │ │ ├── PrimaryKeyBuilder.java
│ │ │ │ ├── RowDataReader.java
│ │ │ │ └── RowDataWriter.java
│ │ │ ├── config
│ │ │ ├── HologresConfigs.java
│ │ │ ├── HologresConnectionParam.java
│ │ │ └── JDBCOptions.java
│ │ │ ├── factory
│ │ │ └── HologresTableFactory.java
│ │ │ ├── jdbc
│ │ │ ├── HologresJDBCClientProvider.java
│ │ │ ├── HologresJDBCConfigs.java
│ │ │ ├── HologresJDBCReader.java
│ │ │ ├── HologresJDBCRecordReader.java
│ │ │ ├── HologresJDBCRecordWriter.java
│ │ │ ├── HologresJDBCWriter.java
│ │ │ └── copy
│ │ │ │ └── HologresJDBCCopyWriter.java
│ │ │ ├── sink
│ │ │ ├── HologresDataStreamSinkProvider.java
│ │ │ ├── HologresDynamicTableSink.java
│ │ │ ├── HologresOutputFormat.java
│ │ │ ├── HologresRepartitionSinkBuilder.java
│ │ │ ├── HologresSinkFunction.java
│ │ │ ├── HologresStreamPartitioner.java
│ │ │ ├── HologresTableOutputFormat.java
│ │ │ ├── repartition
│ │ │ │ ├── HoloKeySelector.java
│ │ │ │ └── RowDataShardUtil.java
│ │ │ └── v2
│ │ │ │ ├── HologresSink.java
│ │ │ │ └── HologresSinkWriter.java
│ │ │ ├── source
│ │ │ ├── HologresLookUpFunctionFactory.java
│ │ │ ├── HologresTableSource.java
│ │ │ ├── bulkread
│ │ │ │ ├── HologresBulkReader.java
│ │ │ │ ├── HologresBulkreadInputFormat.java
│ │ │ │ └── HologresShardInputSplit.java
│ │ │ └── lookup
│ │ │ │ ├── AbstractHologresLookupFunction.java
│ │ │ │ ├── DimJoinFetcher.java
│ │ │ │ ├── HologresAsyncLookupFunction.java
│ │ │ │ └── HologresLookupFunction.java
│ │ │ └── utils
│ │ │ ├── FlinkUtil.java
│ │ │ ├── HologresUtils.java
│ │ │ ├── JDBCUtils.java
│ │ │ ├── PostgresTypeUtil.java
│ │ │ └── SchemaUtil.java
│ └── resources
│ │ └── META-INF
│ │ └── services
│ │ └── org.apache.flink.table.factories.Factory
│ └── test
│ └── java
│ └── com
│ └── alibaba
│ └── ververica
│ └── connectors
│ └── hologres
│ ├── HologresJDBCConfigTest.java
│ ├── HologresTestBase.java
│ ├── HologresTestUtils.java
│ └── JDBCTestUtils.java
├── hologres-connector-hive-2.x
├── README.md
├── pom.xml
└── src
│ ├── main
│ └── java
│ │ └── com
│ │ └── alibaba
│ │ └── hologres
│ │ └── hive
│ │ └── HoloStorageHandler.java
│ └── test
│ ├── java
│ └── com
│ │ └── alibaba
│ │ └── hologres
│ │ └── hive
│ │ ├── HologresHiveReadWriteTest.java
│ │ └── HologresHiveTestBase.java
│ └── resources
│ ├── log4j.properties
│ └── setting.properties
├── hologres-connector-hive-3.x
├── README.md
├── pom.xml
└── src
│ ├── main
│ └── java
│ │ └── com
│ │ └── alibaba
│ │ └── hologres
│ │ └── hive
│ │ └── HoloStorageHandler.java
│ └── test
│ ├── java
│ ├── com
│ │ └── alibaba
│ │ │ └── hologres
│ │ │ └── hive
│ │ │ ├── HologresHiveReadWriteTest.java
│ │ │ └── HologresHiveTestBase.java
│ └── test.md
│ └── resources
│ ├── log4j.properties
│ └── setting.properties
├── hologres-connector-hive-base
├── README.md
├── pom.xml
└── src
│ └── main
│ └── java
│ └── com
│ └── alibaba
│ └── hologres
│ └── hive
│ ├── BaseHoloStorageHandler.java
│ ├── HoloClientProvider.java
│ ├── HoloRecordWritable.java
│ ├── HoloSerDe.java
│ ├── conf
│ ├── HoloClientParam.java
│ ├── HoloStorageConfig.java
│ └── HoloStorageConfigManager.java
│ ├── exception
│ └── HiveHoloStorageException.java
│ ├── input
│ ├── HoloInputFormat.java
│ ├── HoloInputSplit.java
│ └── HoloRecordReader.java
│ ├── output
│ ├── HoloOutputFormat.java
│ ├── HoloRecordCopyWriter.java
│ └── HoloRecordWriter.java
│ └── utils
│ ├── DataTypeUtils.java
│ └── JDBCUtils.java
├── hologres-connector-kafka
├── README.md
├── pom.xml
└── src
│ └── main
│ ├── java
│ └── com
│ │ └── alibaba
│ │ └── hologres
│ │ └── kafka
│ │ ├── HoloSinkConnector.java
│ │ ├── conf
│ │ ├── HoloSinkConfig.java
│ │ └── HoloSinkConfigManager.java
│ │ ├── exception
│ │ └── KafkaHoloException.java
│ │ ├── model
│ │ ├── DirtyDataStrategy.java
│ │ └── InputFormat.java
│ │ ├── sink
│ │ ├── HoloSinkTask.java
│ │ ├── HoloSinkWriter.java
│ │ └── writer
│ │ │ ├── AbstractHoloWriter.java
│ │ │ ├── HoloCopyWriter.java
│ │ │ └── HoloSqlWriter.java
│ │ └── utils
│ │ ├── DirtyDataUtils.java
│ │ ├── JDBCUtils.java
│ │ └── MessageInfo.java
│ └── resources
│ ├── connect-distributed.properties
│ ├── connect-standalone.properties
│ ├── holo-sink.json
│ └── holo-sink.properties
├── hologres-connector-spark-3.x
├── README.md
├── pom.xml
└── src
│ ├── main
│ ├── resources
│ │ └── META-INF
│ │ │ └── services
│ │ │ └── org.apache.spark.sql.sources.DataSourceRegister
│ └── scala
│ │ └── com
│ │ └── alibaba
│ │ └── hologres
│ │ └── spark3
│ │ ├── HoloTable.scala
│ │ ├── HoloTableCatalog.scala
│ │ ├── SourceProvider.scala
│ │ ├── sink
│ │ ├── HoloDataWriter.scala
│ │ ├── HoloTable.scala
│ │ ├── HoloWriterBuilder.scala
│ │ ├── HoloWriterBuilderV1.scala
│ │ ├── SourceProvider.scala
│ │ └── copy
│ │ │ └── HoloDataCopyWriter.scala
│ │ └── source
│ │ ├── HoloInputPartition.scala
│ │ ├── HoloPartitionReader.scala
│ │ ├── HoloScanBuilder.scala
│ │ └── copy
│ │ └── HoloCopyPartitionReader.scala
│ └── test
│ ├── resources
│ ├── log4j.properties
│ └── setting.properties
│ └── scala
│ └── com
│ └── alibaba
│ └── hologres
│ └── spark3
│ ├── SparkHoloCopyWriteSuite.scala
│ ├── SparkHoloReadWriteSuite.scala
│ ├── SparkHoloSuiteBase.scala
│ ├── SparkHoloTableCatalogSuite.scala
│ └── SparkHoloWriteSuite.scala
├── hologres-connector-spark-base
├── README.md
├── pom.xml
└── src
│ ├── main
│ ├── java
│ │ └── com
│ │ │ └── alibaba
│ │ │ └── hologres
│ │ │ └── spark
│ │ │ └── exception
│ │ │ └── SparkHoloException.java
│ └── scala
│ │ └── com
│ │ └── alibaba
│ │ └── hologres
│ │ └── spark
│ │ ├── BaseSourceProvider.scala
│ │ ├── config
│ │ └── HologresConfigs.scala
│ │ ├── sink
│ │ ├── BaseHoloDataWriter.scala
│ │ ├── BaseSourceProvider.scala
│ │ ├── FieldWriter.scala
│ │ └── copy
│ │ │ ├── BaseHoloDataCopyWriter.scala
│ │ │ └── CopyContext.scala
│ │ ├── source
│ │ ├── BaseHoloPartitionReader.scala
│ │ ├── FieldReader.scala
│ │ └── copy
│ │ │ ├── BaseHoloCopyPartitionReader.scala
│ │ │ ├── CopyContext.scala
│ │ │ └── arrow
│ │ │ ├── SparkArrowArrayAccessor.scala
│ │ │ ├── SparkArrowDateDayAccessor.scala
│ │ │ ├── SparkArrowDateMilliAccessor.scala
│ │ │ ├── SparkArrowDecimalAccessor.scala
│ │ │ ├── SparkArrowStringAccessor.scala
│ │ │ ├── SparkArrowTimeStampMicroAccessor.scala
│ │ │ └── SparkArrowVectorAccessorUtil.scala
│ │ ├── table
│ │ ├── Column.scala
│ │ ├── ColumnType.scala
│ │ └── TableColumn.scala
│ │ └── utils
│ │ ├── DataTypeUtil.scala
│ │ ├── JDBCUtil.scala
│ │ ├── RepartitionUtil.scala
│ │ └── SparkHoloUtil.scala
│ └── test
│ ├── resources
│ └── log4j.properties
│ └── scala
│ └── com
│ └── alibaba
│ └── hologres
│ └── spark
│ ├── SparkHoloTestUtils.scala
│ └── WriteType.scala
├── pom.xml
└── tools
└── maven
├── checkstyle.xml
├── scalastyle-config.xml
├── spotbugs-exclude.xml
├── suppressions-core.xml
├── suppressions-optimizer.xml
├── suppressions-runtime.xml
└── suppressions.xml
/.gitignore:
--------------------------------------------------------------------------------
1 | .cache
2 | scalastyle-output.xml
3 | .classpath
4 | .idea
5 | .metadata
6 | .settings
7 | .project
8 | .version.properties
9 | filter.properties
10 | logs.zip
11 | target
12 | tmp
13 | *.class
14 | *.iml
15 | *.swp
16 | *.jar
17 | *.zip
18 | *.log
19 | *.pyc
20 | .DS_Store
21 | build-targetatlassian-ide-plugin.xml
22 | *.ipr
23 | *.iws
24 | tools/flink
25 | tools/flink-*
26 | tools/releasing/release
27 | tools/japicmp-output
28 | user-local.properties
29 | image-building-target/
--------------------------------------------------------------------------------
/holo-chatbot-webui/.gitignore:
--------------------------------------------------------------------------------
1 | __pycache__
2 |
--------------------------------------------------------------------------------
/holo-chatbot-webui/docs/webui.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/aliyun/alibabacloud-hologres-connectors/2e2c4ad044ab020af0e80deaaa080acd1b8c8f71/holo-chatbot-webui/docs/webui.jpg
--------------------------------------------------------------------------------
/holo-chatbot-webui/examples/run.sh:
--------------------------------------------------------------------------------
1 | SERVICE_PATH=`pwd`
2 |
3 | export PYTHONPATH=${PYTHONPATH}:${SERVICE_PATH}
4 |
5 | python examples/PAI_QA_robot.py \
6 | --config ./config.json \
7 | --prompt_engineering "Retrieval-Augmented Generation" \
8 | --embed_model "SGPT-125M-weightedmean-nli-bitfit" \
9 | --embed_dim 768 \
10 | --upload \
11 | --query "什么是流式计算?请详细向我解释" \
12 | |& tee output.log
13 |
--------------------------------------------------------------------------------
/holo-chatbot-webui/html/footer.html:
--------------------------------------------------------------------------------
1 |
2 | 请勿通过该工具生成违规内容,违反者将被追究法律责任。
3 |
4 | Do not generate illegal content through this tool, violators will be held legally responsible.
--------------------------------------------------------------------------------
/holo-chatbot-webui/modules/EASAgent.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Alibaba Cloud PAI.
2 | # SPDX-License-Identifier: Apache-2.0
3 | # deling.sc
4 |
5 | import requests
6 | import json
7 |
8 | class EASAgent:
9 | def __init__(self, cfg):
10 | self.url = cfg['EASCfg']['url']
11 | self.token = cfg['EASCfg']['token']
12 |
13 | def post_to_eas(self, query):
14 | headers = {
15 | "Authorization": self.token,
16 | 'Accept': "*/*",
17 | "Content-Type": "application/x-www-form-urlencoded;charset=utf-8"
18 | }
19 | # query_json = {"prompt": query}
20 | res = requests.post(
21 | url=self.url,
22 | # json=query_json,
23 | data=query.encode('utf8'),
24 | headers=headers,
25 | timeout=10000,
26 | )
27 | # return json.loads(res.text)['response']
28 | return res.text
29 |
--------------------------------------------------------------------------------
/holo-chatbot-webui/modules/EmbeddingModel.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Alibaba Cloud PAI.
2 | # SPDX-License-Identifier: Apache-2.0
3 | # deling.sc
4 |
5 | import os
6 | import torch
7 | from langchain.embeddings.huggingface import HuggingFaceEmbeddings
8 |
9 | class EmbeddingModel:
10 | def __init__(self, model_name):
11 | model_dir = "embedding_model"
12 | self.model_name_or_path = os.path.join(model_dir, model_name)
13 | self.embed = HuggingFaceEmbeddings(model_name=self.model_name_or_path,
14 | model_kwargs={'device': 'cpu'})
15 |
16 | # def embed_query(self, query):
17 | # return self.embed.embed_query(query)
18 |
19 | # def embed_documents(self, query):
20 | # return self.embed.embed_documents(query)
21 |
--------------------------------------------------------------------------------
/holo-chatbot-webui/modules/KeywordExtractor.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Alibaba Cloud PAI.
2 | # SPDX-License-Identifier: Apache-2.0
3 | # deling.sc
4 |
5 | import jieba.analyse
6 | from keybert import KeyBERT
7 |
8 | class KeywordExtractor:
9 | def __init__(self, args):
10 | pass
11 |
12 | def keywords_textrank(self, doc):
13 | # extract top keywords with jieba (TF-IDF based) and prepend them to the text
14 | keywords = jieba.analyse.extract_tags(doc, topK=5, withWeight=False, allowPOS=('n', 'ns', 'v', 'a', 'eng'))
15 | doc_with_keywords = ' '.join(keywords) + ' ' + doc
16 | print(doc_with_keywords)
17 | return [doc_with_keywords]
18 |
19 | def keywords_keybert(self, doc):
20 | kw_model = KeyBERT()
21 | # single-word keyphrases, no stop-word filtering
22 | return kw_model.extract_keywords(doc, keyphrase_ngram_range=(1, 1), stop_words=None)
--------------------------------------------------------------------------------
/holo-chatbot-webui/modules/QuestionPrompt.py:
--------------------------------------------------------------------------------
1 | from langchain.prompts.prompt import PromptTemplate
2 | from langchain.chains import LLMChain
3 |
4 | _template_en = """Given the following conversation and a follow up question, rephrase the follow up question to be a standalone question.
5 | Chat History:
6 | {chat_history}
7 | Follow Up Input: {question}
8 | Standalone question:"""
9 | CONDENSE_QUESTION_PROMPT_EN = PromptTemplate.from_template(_template_en)
10 |
11 | _template_ch = """请根据聊天记录和新问题,将新问题改写为一个独立问题。
12 | 不需要回答问题,一定要返回一个疑问句。
13 | 聊天记录:
14 | {chat_history}
15 | 新问题:{question}
16 | 独立问题:"""
17 | CONDENSE_QUESTION_PROMPT_CH = PromptTemplate.from_template(_template_ch)
18 |
19 | def get_standalone_question_en(llm):
20 | question_generator_chain = LLMChain(llm=llm, prompt=CONDENSE_QUESTION_PROMPT_EN)
21 | return question_generator_chain
22 |
23 | def get_standalone_question_ch(llm):
24 | question_generator_chain = LLMChain(llm=llm, prompt=CONDENSE_QUESTION_PROMPT_CH)
25 | return question_generator_chain
26 |
--------------------------------------------------------------------------------
/holo-chatbot-webui/requirements.txt:
--------------------------------------------------------------------------------
1 | cffi
2 | langchain
3 | pandas
4 | datasets
5 | sentence-transformers
6 | transformers
7 | accelerate
8 | tokenizers
9 | unstructured
10 | loguru
11 | hologres-vector
12 | uvicorn
13 | fastapi
14 | gradio
--------------------------------------------------------------------------------
/holo-chatbot-webui/style.css:
--------------------------------------------------------------------------------
1 | /* replace original footer with ours */
2 |
3 | footer {
4 | display: none !important;
5 | }
6 |
7 | #footer{
8 | text-align: center;
9 | }
10 |
11 | #footer div{
12 | display: inline-block;
13 | }
14 |
15 | #footer .versions{
16 | font-size: 85%;
17 | opacity: 0.85;
18 | }
--------------------------------------------------------------------------------
/holo-chatbot-webui/tokenizer.model:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/aliyun/alibabacloud-hologres-connectors/2e2c4ad044ab020af0e80deaaa080acd1b8c8f71/holo-chatbot-webui/tokenizer.model
--------------------------------------------------------------------------------
/holo-chatbot/.gitignore:
--------------------------------------------------------------------------------
1 | __pycache__
2 | .vscode
3 | config.yaml
4 |
--------------------------------------------------------------------------------
/holo-chatbot/LICENSE:
--------------------------------------------------------------------------------
1 | MIT License
2 |
3 | Copyright (c) 2023 Alibaba
4 |
5 | Permission is hereby granted, free of charge, to any person obtaining a copy
6 | of this software and associated documentation files (the "Software"), to deal
7 | in the Software without restriction, including without limitation the rights
8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | copies of the Software, and to permit persons to whom the Software is
10 | furnished to do so, subject to the following conditions:
11 |
12 | The above copyright notice and this permission notice shall be included in all
13 | copies or substantial portions of the Software.
14 |
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | SOFTWARE.
22 |
--------------------------------------------------------------------------------
/holo-chatbot/config/config.yaml.example:
--------------------------------------------------------------------------------
1 | DINGDING_SECRET: ''
2 | DINGDING_TOKEN: ''
3 | OPENAI_API_BASE: 'https://api.openai-proxy.com/v1'
4 | OPENAI_API_KEY: 'sk-xxxxxxxx'
5 | HOLO_ENDPOINT: 'xxxxxxx.hologres.aliyuncs.com'
6 | HOLO_PORT: '80'
7 | HOLO_DATABASE: 'db_name'
8 | HOLO_USER: 'BASIC$user1'
9 | HOLO_PASSWORD: 'xxxxxx'
10 |
--------------------------------------------------------------------------------
/holo-chatbot/config/prompt.txt:
--------------------------------------------------------------------------------
1 | 以下是人类与名为“Holo Chatbot”的 AI 之间的友好对话。
2 | 该AI旨在从给定的文档库中搜索最相关的文档对用户提出的问题进行解答。
3 | 该AI很健谈,并从其上下文中提取了许多具体细节。
4 | 如果AI不知道问题的答案,它会如实说它不知道。
--------------------------------------------------------------------------------
/holo-chatbot/dingding_server.py:
--------------------------------------------------------------------------------
1 | from bottle import route, request, response, run
2 | import json, time, hmac, hashlib, base64, urllib.parse, os, requests
3 |
4 | from chatbot import Chatbot
5 | from utils import export_env
6 |
7 | def _get_sign():
8 | timestamp = str(round(time.time() * 1000))
9 | secret = os.environ['DINGDING_SECRET']
10 | secret_enc = secret.encode('utf-8')
11 | string_to_sign = '{}\n{}'.format(timestamp, secret)
12 | string_to_sign_enc = string_to_sign.encode('utf-8')
13 | hmac_code = hmac.new(secret_enc, string_to_sign_enc,
14 | digestmod=hashlib.sha256).digest()
15 | sign = urllib.parse.quote_plus(base64.b64encode(hmac_code))
16 | return timestamp, sign
17 |
18 | def send_to_group(msg: str):
19 | timestamp, sign = _get_sign()
20 | token = os.environ["DINGDING_TOKEN"]
21 | url = f'https://oapi.dingtalk.com/robot/send?access_token={token}&timestamp={timestamp}&sign={sign}'
22 | headers = {'Content-Type': 'application/json;charset=utf-8'}
23 | data = {
24 | 'msgtype': 'text',
25 | 'text': {
26 | 'content': msg,
27 | },
28 | }
29 | requests.post(url, data=json.dumps(data), headers=headers)
30 |
31 | export_env()
32 | bot = Chatbot()
33 |
34 | @route('/chat', method='POST')
35 | def do_chat():
36 | if request.content_type != 'application/json; charset=utf-8':
37 | return json.dumps({'error': 'Only accept content type: "application/json; charset=utf-8"'})
38 |
39 | message = request.json['text']['content']
40 | answer = bot.query(message)  # ask the chatbot
41 |
42 | send_to_group(f'{request.json["senderNick"]},' + answer)
43 |
44 | run(host='localhost', port=8889)
--------------------------------------------------------------------------------
/holo-chatbot/generate_config.py:
--------------------------------------------------------------------------------
1 | import yaml
2 | import os
3 |
4 | print('欢迎使用holo chatbot设置向导,请按提示输入所需密钥信息。')
5 | print('您也可以手动编辑 config/config.yaml 来修改设置')
6 | print()
7 |
8 | keys = ['HOLO_ENDPOINT', 'HOLO_PORT', 'HOLO_USER',
9 | 'HOLO_PASSWORD', 'HOLO_DATABASE', 'DINGDING_TOKEN', 'DINGDING_SECRET']
10 |
11 | config_file = 'config/config.yaml'
12 |
13 | data = {}
14 |
15 | if os.path.exists(config_file):
16 | with open(config_file, 'r') as f:
17 | data = yaml.load(f, yaml.CLoader)
18 |
19 | for key in keys:
20 | value = input(f'请输入 {key} (按回车跳过): ')
21 | if key not in data or value != '':
22 | data[key] = value
23 |
24 |
25 | with open(config_file, 'w') as f:
26 | yaml.dump(data, f)
27 |
--------------------------------------------------------------------------------
/holo-chatbot/utils.py:
--------------------------------------------------------------------------------
1 | import os
2 | import yaml
3 |
4 | DIR_PATH = os.path.dirname(os.path.realpath(__file__))
5 |
6 |
7 | def export_env():
8 | config_fname = os.path.join(DIR_PATH, 'config', 'config.yaml')
9 | if not os.path.exists(config_fname):
10 | print('Please run `python generate_config.py` to generate config file first.')
11 | exit(0)
12 | with open(config_fname) as f:
13 | config = yaml.load(f, yaml.CLoader)
14 | for k in config:
15 | print(f'setting env variable {k}: {config[k]}')
16 | os.environ[k] = config[k]
17 | os.environ['TOKENIZERS_PARALLELISM'] = 'false'
18 |
--------------------------------------------------------------------------------
/holo-client-c/include/holo_client.h:
--------------------------------------------------------------------------------
1 | #ifndef _HOLO_CLIENT_H_
2 | #define _HOLO_CLIENT_H_
3 |
4 | #include "defs.h"
5 | #include "logger.h"
6 | #include "request.h"
7 | #include "worker_pool.h"
8 | #include "record.h"
9 |
10 | __HOLO_CLIENT_BEGIN_DECLS
11 |
12 | struct _HoloClient;
13 | typedef struct _HoloClient HoloClient;
14 |
15 | HoloClient* holo_client_new_client(HoloConfig);
16 | HoloClient* holo_client_new_client_with_workerpool(HoloConfig, HoloWorkerPool*);
17 |
18 | int holo_client_flush_client(HoloClient*);
19 | int holo_client_flush_client_with_errmsg(HoloClient*, char**);
20 | int holo_client_close_client(HoloClient*);
21 |
22 | //The schema name may be NULL, in which case it defaults to "public"
23 | HoloTableSchema* holo_client_get_tableschema(HoloClient*, const char*, const char*, bool);
24 | HoloTableSchema* holo_client_get_tableschema_with_errmsg(HoloClient*, const char*, const char*, bool, char**);
25 |
26 | int holo_client_submit(HoloClient*, HoloMutation);
27 | int holo_client_submit_with_errmsg(HoloClient*, HoloMutation, char**);
28 | int holo_client_submit_with_attachments(HoloClient*, HoloMutation, int64_t, int64_t);
29 | int holo_client_get(HoloClient*, HoloGet);
30 | HoloRecord* holo_client_get_record(const HoloGet);
31 | char* holo_client_get_record_val(const HoloRecord* record, int colIndex);
32 |
33 | //Given an error code returned by the API, get the corresponding error message (as a string)
34 | const char* holo_client_get_errmsg_with_errcode(int errCode);
35 |
36 | __HOLO_CLIENT_END_DECLS
37 |
38 | #endif
--------------------------------------------------------------------------------
/holo-client-c/include/logger.h:
--------------------------------------------------------------------------------
1 | #ifndef _LOGGER_H_
2 | #define _LOGGER_H_
3 |
4 | #include "defs.h"
5 |
6 | __HOLO_CLIENT_BEGIN_DECLS
7 |
8 | /**
9 | * Definition of the HoloLogger callback; the default logger is log4c.
10 | * You can provide your own HoloLogger implementation and register it with holo_client_setup_logger.
11 | * The first argument is the log level, the second is the message to be logged.
12 | * If you do not want any logging, set the logger to holo_client_log_do_nothing, which reduces overhead.
13 | */
14 | typedef void* (*HoloLogger)(const int, const char*);
15 |
16 | void* holo_client_log_do_nothing(const int logLevel, const char* msg);
17 |
18 | void holo_client_setup_logger(HoloLogger logger, int loglevel);
19 |
20 | void holo_client_logger_open();
21 | void holo_client_logger_close();
22 |
23 | __HOLO_CLIENT_END_DECLS
24 |
25 | #endif
--------------------------------------------------------------------------------
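The comment above describes how the default log4c logger can be replaced. Below is a minimal sketch (not part of the repository) of registering a custom logger that matches the HoloLogger typedef; it assumes HOLO_LOG_LEVEL_INFO is reachable through the public headers (logger.c further down uses it as the default level):

#include <stdio.h>
#include "logger.h"

/* custom HoloLogger: the first argument is the log level, the second the message */
static void* stderr_logger(const int logLevel, const char* msg) {
    fprintf(stderr, "[holo-client][level=%d] %s\n", logLevel, msg);
    return NULL;
}

int main(void) {
    /* route client logs to stderr; HOLO_LOG_LEVEL_INFO is assumed to be exported by the headers */
    holo_client_setup_logger(stderr_logger, HOLO_LOG_LEVEL_INFO);
    /* to silence logging entirely:
       holo_client_setup_logger(holo_client_log_do_nothing, HOLO_LOG_LEVEL_INFO); */
    return 0;
}
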
/holo-client-c/include/table_schema.h:
--------------------------------------------------------------------------------
1 | #ifndef _TABLE_SCHEMA_H_
2 | #define _TABLE_SCHEMA_H_
3 |
4 | //#include
5 | #include <stdbool.h>
6 | #include "defs.h"
7 |
8 | __HOLO_CLIENT_BEGIN_DECLS
9 |
10 | typedef struct _HoloTableName HoloTableName;
11 |
12 | typedef struct _HoloColumn {
13 | char *name;
14 | char *quoted;
15 | unsigned int type;
16 | bool nullable;
17 | bool isPrimaryKey;
18 | char *defaultValue;
19 | } HoloColumn;
20 |
21 | typedef struct _HoloTableSchema {
22 | unsigned int tableId;
23 | HoloTableName* tableName; //defined as a pointer so that the HoloTableName struct stays hidden
24 | int nColumns;
25 | HoloColumn *columns;
26 | int nDistributionKeys;
27 | int *distributionKeys; //column index
28 | //int *dictionaryEncoding;
29 | //int *bitmapIndexKey;
30 | //int *clusteringKey;
31 | //int *segmentKey;
32 | int nPrimaryKeys;
33 | int *primaryKeys;
34 | int partitionColumn;
35 | } HoloTableSchema;
36 |
37 | const char* holo_client_get_column_name(HoloTableSchema* schema, int colIndex);
38 | const char* holo_client_get_column_type_name(HoloTableSchema* schema, int colIndex);
39 |
40 | //Given the HoloColumn type field (an unsigned integer type OID), get the column's type name (as a string)
41 | const char* holo_client_get_type_name_with_type_oid(unsigned int typeOid);
42 |
43 | __HOLO_CLIENT_END_DECLS
44 |
45 | #endif
--------------------------------------------------------------------------------
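A small sketch (not part of the repository) that prints the columns of a HoloTableSchema using only the struct fields and accessors declared above; in practice the schema would come from holo_client_get_tableschema on a live client:

#include <stdio.h>
#include "table_schema.h"

/* print name, type name and primary-key flag for every column in the schema */
static void print_schema(HoloTableSchema* schema) {
    for (int i = 0; i < schema->nColumns; i++) {
        printf("column %d: %s %s%s\n", i,
               holo_client_get_column_name(schema, i),
               holo_client_get_column_type_name(schema, i),
               schema->columns[i].isPrimaryKey ? " [primary key]" : "");
    }
}
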
/holo-client-c/include/worker_pool.h:
--------------------------------------------------------------------------------
1 | #ifndef _WORKER_POOL_H_
2 | #define _WORKER_POOL_H_
3 |
4 | #include "defs.h"
5 | #include "holo_config.h"
6 |
7 | __HOLO_CLIENT_BEGIN_DECLS
8 |
9 | struct _HoloWorkerPool;
10 | typedef struct _HoloWorkerPool HoloWorkerPool;
11 |
12 | HoloWorkerPool* holo_client_new_worker_pool(HoloConfig, bool, int);
13 | int holo_client_start_worker_pool(HoloWorkerPool*);
14 | int holo_client_worker_pool_status(const HoloWorkerPool*);
15 | int holo_client_stop_worker_pool(HoloWorkerPool*);
16 | int holo_client_close_worker_pool(HoloWorkerPool*);
17 |
18 | __HOLO_CLIENT_END_DECLS
19 |
20 | #endif
--------------------------------------------------------------------------------
/holo-client-c/src/action.h:
--------------------------------------------------------------------------------
1 | #ifndef _ACTION_H_
2 | #define _ACTION_H_
3 |
4 | #include "request_private.h"
5 | #include "batch.h"
6 | #include "../include/request.h"
7 |
8 | typedef struct _Action{
9 | int type;
10 | } Action;
11 |
12 | typedef struct _ActionItem {
13 | dlist_node list_node;
14 | Action* action;
15 | } ActionItem;
16 |
17 | ActionItem* create_action_item(Action*);
18 |
19 | typedef enum _ActionStatus {
20 | SUCCESS,
21 | FAILURE_NEED_RETRY,
22 | FAILURE_NOT_NEED_RETRY,
23 | FAILURE_TO_BE_DETERMINED
24 | } ActionStatus;
25 |
26 | typedef struct _MetaAction {
27 | int type; //0
28 | Meta meta;
29 | } MetaAction;
30 |
31 | MetaAction* holo_client_new_meta_action(Meta);
32 | void holo_client_destroy_meta_action(MetaAction*);
33 |
34 | typedef struct _MutationAction {
35 | int type; //1
36 | dlist_head requests; //list of mutation requests
37 | int numRequests;
38 | Future* future;
39 | } MutationAction;
40 |
41 | MutationAction* holo_client_new_mutation_action();
42 | void mutation_action_add_mutation(MutationAction*, HoloMutation);
43 | void holo_client_destroy_mutation_action(MutationAction*);
44 |
45 | typedef struct _SqlAction {
46 | int type; //2
47 | Sql sql;
48 | } SqlAction;
49 |
50 | SqlAction* holo_client_new_sql_action(Sql);
51 | void holo_client_destroy_sql_action(SqlAction*);
52 |
53 | typedef struct _GetAction {
54 | int type; //3
55 | dlist_head requests; //list of get requests
56 | int numRequests;
57 | HoloTableSchema* schema;
58 | } GetAction;
59 | GetAction* holo_client_new_get_action();
60 | void holo_client_destroy_get_action(GetAction*);
61 | void get_action_add_request(GetAction*, HoloGet);
62 | void abort_get_action(GetAction*);
63 |
64 | #endif
--------------------------------------------------------------------------------
/holo-client-c/src/batch.h:
--------------------------------------------------------------------------------
1 | #ifndef _BATCH_H_
2 | #define _BATCH_H_
3 |
4 | #include "table_schema.h"
5 | #include "ilist.h"
6 | #include "stdbool.h"
7 | #include "record.h"
8 | #include "utils.h"
9 | #include "request_private.h"
10 | #include "holo_config.h"
11 |
12 | typedef struct _Batch{
13 | HoloTableSchema *schema;
14 | bool* valuesSet;
15 | int* valueFormats;
16 | int nValues;
17 | dlist_head recordList;
18 | int nRecords;
19 | HoloMutationMode mode;
20 | HoloWriteMode writeMode;
21 | bool isSupportUnnest;
22 | } Batch;
23 |
24 | typedef struct _BatchItem {
25 | dlist_node list_node;
26 | Batch* batch;
27 | } BatchItem;
28 |
29 | BatchItem* create_batch_item(Batch*);
30 |
31 | Batch* holo_client_new_batch_with_record(HoloRecord*);
32 | Batch* holo_client_new_batch_with_mutation_request(HoloMutation);
33 | Batch* holo_client_clone_batch_without_records(Batch*);
34 | void holo_client_destroy_batch(Batch*);
35 | bool batch_can_apply_normalized_record(Batch*, HoloRecord*);
36 | bool batch_try_apply_normalized_record(Batch*, HoloRecord*);
37 | bool batch_can_apply_update_record(Batch*, HoloRecord*);
38 | bool batch_try_apply_update_record(Batch*, HoloRecord*);
39 | bool batch_can_apply_mutation_request(Batch*, HoloMutation);
40 | bool batch_try_apply_mutation_request(Batch*, HoloMutation);
41 | bool batch_matches(Batch*, Batch*, int);
42 |
43 | #endif
--------------------------------------------------------------------------------
/holo-client-c/src/direct_collector.h:
--------------------------------------------------------------------------------
1 | #ifndef _DIRECT_COLLECTOR_H_
2 | #define _DIRECT_COLLECTOR_H_
3 | //ActionWatcher: submits actions as long as the ActionQueue is not empty and an idle worker is available
4 | //ActionQueue: all actions waiting to be executed
5 |
6 | #include <pthread.h>
7 | #include "request_private.h"
8 | #include "action.h"
9 | #include "ilist.h"
10 | #include "worker_pool_private.h"
11 |
12 |
13 | typedef struct _DirectCollector {
14 | dlist_head actionsToDo;
15 | int numActions;
16 | pthread_t* actionWatcherThread;
17 | pthread_mutex_t* mutex;
18 | pthread_cond_t* cond;
19 | int status; //0:ready 1:started 2:stopping 3:stopped 4:error
20 | } DirectCollector;
21 |
22 | DirectCollector* holo_client_new_direct_collector();
23 | int holo_client_start_watch_direct_collector(DirectCollector*, HoloWorkerPool*);
24 | void holo_client_add_meta_request_to_direct_collector(DirectCollector*, Meta);
25 | void holo_client_add_sql_request_to_direct_collector(DirectCollector*, Sql);
26 | int holo_client_stop_watch_direct_collector(DirectCollector*);
27 | void holo_client_destroy_direct_collector(DirectCollector*);
28 |
29 | #endif
--------------------------------------------------------------------------------
/holo-client-c/src/future.c:
--------------------------------------------------------------------------------
1 | #include "future.h"
2 | #include "utils.h"
3 |
4 | Future* create_future() {
5 | Future* future = MALLOC(1, Future);
6 | future->completed = false;
7 | future->retVal = NULL;
8 | future->errMsg = NULL;
9 | future->mutex = MALLOC(1, pthread_mutex_t);
10 | future->cond = MALLOC(1, pthread_cond_t);
11 | pthread_mutex_init(future->mutex, NULL);
12 | pthread_cond_init(future->cond, NULL);
13 | return future;
14 | }
15 | void destroy_future(Future* future) {
16 | pthread_mutex_destroy(future->mutex);
17 | pthread_cond_destroy(future->cond);
18 | FREE(future->mutex);
19 | FREE(future->cond);
20 | FREE(future->errMsg);
21 | FREE(future);
22 | future = NULL;
23 | }
24 | void complete_future(Future* future, void* value) {
25 | pthread_mutex_lock(future->mutex);
26 | future->retVal = value;
27 | future->completed = true;
28 | pthread_cond_signal(future->cond);
29 | pthread_mutex_unlock(future->mutex);
30 | }
31 | void* get_future_result(Future* future) {
32 | pthread_mutex_lock(future->mutex);
33 | while(!future->completed) {
34 | pthread_cond_wait(future->cond, future->mutex);
35 | }
36 | pthread_mutex_unlock(future->mutex);
37 | return future->retVal;
38 | }
--------------------------------------------------------------------------------
/holo-client-c/src/future.h:
--------------------------------------------------------------------------------
1 | #ifndef _FUTURE_H_
2 | #define _FUTURE_H_
3 |
4 | #include <pthread.h>
5 | #include <stdbool.h>
6 |
7 | typedef struct _Future {
8 | bool completed;
9 | void* retVal;
10 | char* errMsg;
11 | pthread_mutex_t* mutex;
12 | pthread_cond_t* cond;
13 | } Future;
14 |
15 | Future* create_future();
16 | void destroy_future(Future*);
17 | void complete_future(Future*, void*);
18 | void* get_future_result(Future*);
19 |
20 | #endif
--------------------------------------------------------------------------------
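Future is a one-shot synchronization cell: complete_future publishes a value under the mutex and signals the condition variable, and get_future_result blocks until completed becomes true. A minimal usage sketch (not part of the repository), with a producer thread completing the future:

#include <stdio.h>
#include <pthread.h>
#include "future.h"

/* producer thread: completes the future with a pointer to its result */
static void* producer(void* arg) {
    static int answer = 42;
    complete_future((Future*) arg, &answer);  /* wakes up any thread blocked in get_future_result */
    return NULL;
}

int main(void) {
    Future* f = create_future();

    pthread_t t;
    pthread_create(&t, NULL, producer, f);

    int* result = (int*) get_future_result(f);  /* blocks until complete_future has run */
    printf("future completed with %d\n", *result);

    pthread_join(t, NULL);
    destroy_future(f);  /* destroys mutex and cond, frees the future */
    return 0;
}
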
/holo-client-c/src/get_collector.h:
--------------------------------------------------------------------------------
1 | #ifndef _GET_COLLECTOR_H_
2 | #define _GET_COLLECTOR_H_
3 |
4 | #include <pthread.h>
5 | #include "worker_pool_private.h"
6 | #include "request_private.h"
7 |
8 | typedef struct _TableGetCollector {
9 | HoloTableSchema* schema;
10 | HoloWorkerPool* pool;
11 | int numRequests;
12 | int batchSize;
13 | pthread_mutex_t* mutex;
14 | HoloGet* requests;
15 | pthread_cond_t* signal;
16 | } TableGetCollector;
17 |
18 | TableGetCollector* holo_client_new_table_get_collector(HoloTableSchema* schema, HoloWorkerPool* pool, int batchSize, pthread_cond_t* signal);
19 | void holo_client_destroy_table_get_collector(TableGetCollector*);
20 | GetAction* do_flush_table_get_collector(TableGetCollector*);
21 | void flush_table_get_collector(TableGetCollector*);
22 | void table_get_collector_add_request(TableGetCollector*, HoloGet);
23 |
24 | typedef struct _TableGetCollectorItem {
25 | dlist_node list_node;
26 | TableGetCollector* tableGetCollector;
27 | } TableGetCollectorItem;
28 |
29 | typedef struct _GetCollector {
30 | dlist_head tableCollectors;
31 | int numTables;
32 | pthread_t* actionWatcherThread;
33 | pthread_mutex_t* mutex;
34 | pthread_cond_t* cond;
35 | int status; //0:ready 1:started 2:stopping 3:stopped 4:error
36 | HoloWorkerPool* pool;
37 | int batchSize;
38 | } GetCollector;
39 |
40 | GetCollector* holo_client_new_get_collector(HoloWorkerPool* pool, int batchSize);
41 | int holo_client_start_watch_get_collector(GetCollector*);
42 | void holo_client_add_request_to_get_collector(GetCollector*, HoloGet);
43 | int holo_client_stop_watch_get_collector(GetCollector*);
44 | void holo_client_destroy_get_collector(GetCollector*);
45 | void holo_client_do_flush_get_collector(GetCollector*);
46 |
47 | #endif
--------------------------------------------------------------------------------
/holo-client-c/src/holo_client_private.h:
--------------------------------------------------------------------------------
1 | #ifndef _HOLO_CLIENT_PRIVATE_H_
2 | #define _HOLO_CLIENT_PRIVATE_H_
3 |
4 | #include "../include/holo_client.h"
5 | #include "direct_collector.h"
6 | #include "mutation_collector.h"
7 | #include "get_collector.h"
8 | #include <stdbool.h>
9 |
10 | struct _HoloClient {
11 | HoloWorkerPool *workerPool;
12 | HoloWorkerPool *fixedPool;
13 | bool isEmbeddedPool;
14 | DirectCollector* directCollector;
15 | MutationCollector* mutationCollector;
16 | GetCollector* getCollector;
17 | HoloConfig config;
18 | };
19 |
20 | HoloTableSchema* holo_client_get_tableschema_by_tablename(HoloClient* client, HoloTableName name, bool withCache, char** errMsgAddr);
21 |
22 | void* holo_client_sql(HoloClient*, Sql);
23 |
24 | void holo_client_ensure_pool_open(HoloClient*); //open a new worker pool if the client doesn't have one yet
25 |
26 | #endif
--------------------------------------------------------------------------------
/holo-client-c/src/holo_config_private.h:
--------------------------------------------------------------------------------
1 | #ifndef _HOLO_CONFIG_PRIVATE_H_
2 | #define _HOLO_CONFIG_PRIVATE_H_
3 |
4 | #include "holo_config.h"
5 |
6 | bool holo_config_is_valid(HoloConfig*);
7 | void log_holo_config(HoloConfig*);
8 |
9 | #endif
--------------------------------------------------------------------------------
/holo-client-c/src/keywords.h:
--------------------------------------------------------------------------------
1 | #ifndef _KEYWORDS_H_
2 | #define _KEYWORDS_H_
3 |
4 | #include <stdint.h>
5 |
6 | /* Keyword categories --- should match lists in gram.y */
7 | #define UNRESERVED_KEYWORD 0
8 | #define COL_NAME_KEYWORD 1
9 | #define TYPE_FUNC_NAME_KEYWORD 2
10 | #define RESERVED_KEYWORD 3
11 |
12 | typedef struct ScanKeyword
13 | {
14 | const char *name; /* in lower case */
15 | int16_t value; /* grammar's token code */
16 | int16_t category; /* see codes above */
17 | } ScanKeyword;
18 |
19 | extern const ScanKeyword ScanKeywords[];
20 | extern const int NumScanKeywords;
21 |
22 | extern const ScanKeyword *ScanKeywordLookup(const char *text,
23 | const ScanKeyword *keywords,
24 | int num_keywords);
25 |
26 | #endif
--------------------------------------------------------------------------------
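A short sketch (not part of the repository) of using ScanKeywordLookup to check whether an identifier collides with a SQL keyword, e.g. to decide if it needs quoting in generated SQL; it assumes the usual PostgreSQL semantics of returning NULL when the text is not a keyword:

#include <stdio.h>
#include "keywords.h"

/* returns 1 if ident matches a keyword outside the unreserved category */
static int needs_quoting(const char* ident) {
    const ScanKeyword* kw = ScanKeywordLookup(ident, ScanKeywords, NumScanKeywords);
    return kw != NULL && kw->category != UNRESERVED_KEYWORD;
}

int main(void) {
    printf("select -> %d\n", needs_quoting("select"));  /* reserved keyword */
    printf("my_col -> %d\n", needs_quoting("my_col"));  /* ordinary identifier */
    return 0;
}
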
/holo-client-c/src/log4crc:
--------------------------------------------------------------------------------
[log4c XML configuration; the XML markup was not preserved in this listing]
--------------------------------------------------------------------------------
/holo-client-c/src/logger.c:
--------------------------------------------------------------------------------
1 | #include "logger_private.h"
2 | #include "stdio.h"
3 |
4 | HoloLogger holo_client_logger = holo_client_log_log4c;
5 | int holo_client_logger_level = HOLO_LOG_LEVEL_INFO;
6 |
7 | void* holo_client_log_do_nothing(const int logLevel, const char* msg) {
8 | return NULL;
9 | }
10 |
11 | void holo_client_setup_logger(HoloLogger logger, int loglevel) {
12 | holo_client_logger = logger;
13 | holo_client_logger_level = loglevel;
14 | }
15 |
16 | void holo_client_logger_open() {
17 | if (holo_client_logger == holo_client_log_log4c) {
18 | log4c_open();
19 | }
20 | }
21 |
22 | void holo_client_logger_close() {
23 | if (holo_client_logger == holo_client_log_log4c) {
24 | log4c_close();
25 | }
26 | }
--------------------------------------------------------------------------------
/holo-client-c/src/logger_log4c.h:
--------------------------------------------------------------------------------
1 | #ifndef _LOGGER_LOG4C_H_
2 | #define _LOGGER_LOG4C_H_
3 |
4 | #include "log4c.h"
5 |
6 | extern log4c_category_t *log_category;
7 |
8 | int log4c_open();
9 | int log4c_close();
10 |
11 | void* holo_client_log_log4c(const int logLevel, const char* msg);
12 |
13 | #endif
--------------------------------------------------------------------------------
/holo-client-c/src/lp_map.c:
--------------------------------------------------------------------------------
1 | #include "lp_map.h"
2 |
3 | LPMap* holo_client_new_lp_map(int size) {
4 | LPMap* map = MALLOC(1, LPMap);
5 | map->maxSize = size * 2;
6 | map->size = 0;
7 | map->values = MALLOC(map->maxSize, void*);
8 | for (int i = 0; i < map->maxSize; i++) {
9 | map->values[i] = NULL;
10 | }
11 | return map;
12 | }
13 |
14 | void holo_client_destroy_lp_map(LPMap* map) {
15 | FREE(map->values);
16 | FREE(map);
17 | map = NULL;
18 | }
19 |
20 | void holo_client_clear_lp_map(LPMap* map) {
21 | for (int i = 0; i < map->maxSize; i++) {
22 | map->values[i] = NULL;
23 | }
24 | }
25 |
26 | // void lp_map_add(LPMap* map, int index, void* value, ValueComparer equals) {
27 | // int M = map->maxSize;
28 | // for(int i = 0; i < M; i++) {
29 | // if(map->values[index] == NULL) {
30 | // map->values[index] = value;
31 | // map->size++;
32 | // return;
33 | // }
34 | // if(equals(value, map->values[index])) {
35 | // return;
36 | // }
37 | // index = (index + 1) % M;
38 | // }
39 | // }
40 |
--------------------------------------------------------------------------------
/holo-client-c/src/lp_map.h:
--------------------------------------------------------------------------------
1 | #ifndef _LP_MAP_H_
2 | #define _LP_MAP_H_
3 |
4 | #include "murmur3.h"
5 | #include "utils.h"
6 |
7 | typedef bool (*ValueComparer)(void*, void*);
8 |
9 | typedef struct _LPMap {
10 | int maxSize;
11 | int size;
12 | void** values;
13 | } LPMap;
14 |
15 | LPMap* holo_client_new_lp_map(int size);
16 |
17 | void holo_client_destroy_lp_map(LPMap* map);
18 |
19 | void holo_client_clear_lp_map(LPMap* map);
20 |
21 | // void lp_map_add(LPMap* map, int index, void* value, ValueComparer comparer);
22 |
23 | #endif
--------------------------------------------------------------------------------
/holo-client-c/src/meta_cache.h:
--------------------------------------------------------------------------------
1 | #ifndef _META_CACHE_H_
2 | #define _META_CACHE_H_
3 |
4 | #include "../include/table_schema.h"
5 | #include <pthread.h>
6 | #include <time.h>
7 | #include "ilist.h"
8 |
9 | typedef struct _MetaCache {
10 | dlist_head schemaList;
11 | dlist_head parentList;
12 | dlist_head garbageList;
13 | pthread_rwlock_t* rwlock;
14 | } MetaCache;
15 |
16 | typedef struct _SchemaItem {
17 | dlist_node list_node;
18 | HoloTableSchema* schema;
19 | time_t age;
20 | } SchemaItem;
21 |
22 | typedef struct _ParentItem {
23 | dlist_node list_node;
24 | HoloTableSchema** parent;
25 | dlist_head partitions;
26 | } ParentItem;
27 |
28 | typedef struct _PartitionItem {
29 | dlist_node list_node;
30 | char* value;
31 | HoloTableSchema** partition;
32 | } PartitionItem;
33 |
34 | MetaCache* holo_client_new_metacache();
35 | void holo_client_destroy_metacache(MetaCache*);
36 | void clear_all_contents(MetaCache*);
37 |
38 | HoloTableSchema* find_tableschema_in_metacache(MetaCache*, HoloTableName);
39 | void add_tableschema_to_metacache(MetaCache*, HoloTableSchema*);
40 | HoloTableSchema* meta_cache_find_partition(MetaCache*, HoloTableSchema*, char*);
41 | void meta_cache_add_partition(MetaCache*, HoloTableSchema*, HoloTableSchema*, char*);
42 | void add_tableschema_to_garbage_list(MetaCache*, HoloTableSchema*);
43 |
44 | #endif
--------------------------------------------------------------------------------
/holo-client-c/src/murmur3.h:
--------------------------------------------------------------------------------
1 | //-----------------------------------------------------------------------------
2 | // MurmurHash3 was written by Austin Appleby, and is placed in the
3 | // public domain. The author hereby disclaims copyright to this source
4 | // code.
5 |
6 | #ifndef _MURMURHASH3_H_
7 | #define _MURMURHASH3_H_
8 |
9 | #include <stdint.h>
10 |
11 | #ifdef __cplusplus
12 | extern "C" {
13 | #endif
14 |
15 | //-----------------------------------------------------------------------------
16 |
17 | void MurmurHash3_x86_32 (const void *key, int len, uint32_t seed, void *out);
18 |
19 | void MurmurHash3_x86_128(const void *key, int len, uint32_t seed, void *out);
20 |
21 | void MurmurHash3_x64_128(const void *key, int len, uint32_t seed, void *out);
22 |
23 | //-----------------------------------------------------------------------------
24 |
25 | #ifdef __cplusplus
26 | }
27 | #endif
28 |
29 | #endif // _MURMURHASH3_H_
30 |
--------------------------------------------------------------------------------
/holo-client-c/src/mutation_map.h:
--------------------------------------------------------------------------------
1 | #ifndef _MUTATION_MAP_H_
2 | #define _MUTATION_MAP_H_
3 |
4 | #include "request_private.h"
5 | #include "murmur3.h"
6 | #include "ilist.h"
7 | #include "utils.h"
8 |
9 | //linear probing
10 | typedef struct _MutationMap {
11 | int maxSize;
12 | int size;
13 | long byteSize;
14 | HoloMutation* mutations;
15 | } MutationMap;
16 |
17 | MutationMap* holo_client_new_mutation_map(int size);
18 |
19 | void holo_client_destroy_mutation_map(MutationMap* map);
20 |
21 | int mutation_hash_code(HoloMutation mutation, int size);
22 |
23 | void mutation_map_add(MutationMap* map, HoloMutation mutation, bool hasPK);
24 |
25 | HoloMutation mutation_map_find_origin(MutationMap* map, HoloMutation mutation);
26 |
27 | #endif
--------------------------------------------------------------------------------
/holo-client-c/src/record_private.h:
--------------------------------------------------------------------------------
1 | #ifndef _RECORD_PRIVATE_H_
2 | #define _RECORD_PRIVATE_H_
3 |
4 | #include "table_schema_private.h"
5 | #include "ilist.h"
6 | #include "../include/record.h"
7 |
8 | struct _HoloRecord{
9 | HoloTableSchema *schema;
10 | char **values;
11 | bool *valuesSet;
12 | int *valueLengths;
13 | int *valueFormats;
14 | int nValues;
15 | int byteSize;
16 | int64_t sequence;
17 | int64_t timestamp;
18 | };
19 |
20 | HoloRecord* holo_client_new_record(HoloTableSchema*);
21 | void holo_client_destroy_record(HoloRecord*);
22 | bool record_conflict(HoloRecord*, HoloRecord*);
23 | void* new_record_val(HoloRecord* , int);
24 | void revoke_record_val(void*, HoloRecord*, int);
25 | void destroy_record_val(HoloRecord*, int);
26 |
27 | typedef struct _RecordItem {
28 | dlist_node list_node;
29 | HoloRecord* record;
30 | } RecordItem;
31 |
32 | RecordItem* create_record_item(HoloRecord*);
33 |
34 | bool has_same_pk(HoloRecord*, HoloRecord*);
35 | int record_pk_hash_code(HoloRecord* record, int size);
36 |
37 | #endif
--------------------------------------------------------------------------------
/holo-client-c/src/sql_builder.h:
--------------------------------------------------------------------------------
1 | #ifndef _SQL_BUILDER_H_
2 | #define _SQL_BUILDER_H_
3 |
4 | #include "batch.h"
5 |
6 | typedef struct _SqlCache{
7 | char* command;
8 | Oid* paramTypes;
9 | int* paramFormats;
10 | int* paramLengths;
11 | } SqlCache;
12 |
13 | char* build_unnest_insert_sql_with_batch(Batch*);
14 | char* build_insert_sql_with_batch(Batch*, int);
15 | char* build_delete_sql_with_batch(Batch*, int);
16 | char* build_get_sql(HoloTableSchema*, int);
17 |
18 | #endif
--------------------------------------------------------------------------------
/holo-client-c/src/table_schema_private.h:
--------------------------------------------------------------------------------
1 | #ifndef _TABLE_SCHEMA_PRIVATE_H_
2 | #define _TABLE_SCHEMA_PRIVATE_H_
3 |
4 | #include "../include/table_schema.h"
5 |
6 | struct _HoloTableName {
7 | char *schemaName; //no "" e.g. = "schemaName"
8 | char *tableName; //no "" e.g. = "tableName"
9 | char *fullName; //has "" e.g. = "\"schemaName\".\"tableName\""
10 | };
11 |
12 | void holo_client_destroy_tablename(HoloTableName*);
13 |
14 | void holo_client_destroy_columns(HoloColumn*, int);
15 |
16 | HoloColumn* holo_client_new_columns(int);
17 |
18 | HoloTableSchema* holo_client_new_tableschema();
19 | void holo_client_destroy_tableschema(HoloTableSchema*);
20 |
21 | int get_colindex_by_colname(HoloTableSchema*, const char* );
22 |
23 | bool table_has_pk(HoloTableSchema*);
24 |
25 | #endif
--------------------------------------------------------------------------------
/holo-client-c/src/utils.h:
--------------------------------------------------------------------------------
1 | #ifndef _UTILS_H_
2 | #define _UTILS_H_
3 |
4 | #include <stdlib.h>
5 | #include <stdbool.h>
6 | #include <stdint.h>
7 | #include <string.h>
8 | #include <time.h>
9 |
10 | #define MALLOC(n, type) \
11 | ((type *)malloc((n) * sizeof(type)))
12 |
13 | #define FREE(ptr) \
14 | do { \
15 | if (ptr != NULL) { \
16 | free(ptr); \
17 | } \
18 | } while (0)
19 |
20 | #define SQL_STR_DOUBLE(ch, escape_backslash) \
21 | ((ch) == '\'' || ((ch) == '\\' && (escape_backslash)))
22 | #define ESCAPE_STRING_SYNTAX 'E'
23 |
24 | char* deep_copy_string(const char*);
25 | void deep_copy_string_to(const char*, char*, int);
26 | long long get_time_usec();
27 | struct timespec get_out_time(long long);
28 | struct timespec get_time_spec_from_ms(long long);
29 | char* itoa(int);
30 | int len_of_int(int);
31 | char* quote_table_name(const char*, const char*);
32 | char* quote_identifier(const char*);
33 | size_t quote_literal_internal(char *dst, const char *src, size_t len);
34 | char* quote_literal_cstr(const char*);
35 | int get_max_pow(int);
36 |
37 | long current_time_ms();
38 | void endian_swap(void*, int);
39 | void to_lower_case(char*, int len);
40 | bool compare_strings(const char *str1, const char *str2);
41 |
42 | char* int16toa(int16_t);
43 | char* int32toa(int32_t);
44 | char* int64toa(int64_t);
45 | char* btoa(bool);
46 | char* ftoa(float);
47 | char* dtoa(double);
48 |
49 | char* int32_array_toa(int32_t*, int);
50 | char* int64_array_toa(int64_t*, int);
51 | char* bool_array_toa(bool*, int);
52 | char* float_array_toa(float*, int);
53 | char* double_array_toa(double*, int);
54 | char* text_array_toa(char**, int);
55 | #endif
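
A minimal usage sketch for the MALLOC/FREE helpers above (not part of the source file; the function name is illustrative):

    #include "utils.h"

    /* Illustrative only: allocate an array of 8 ints with the typed MALLOC
     * macro, then release it. FREE only calls free() when the pointer is
     * non-NULL; note that it does not reset the pointer itself. */
    static void malloc_free_example(void) {
        int *buf = MALLOC(8, int);
        if (buf == NULL) {
            return;                 /* allocation failed */
        }
        for (int i = 0; i < 8; i++) {
            buf[i] = i;
        }
        FREE(buf);
    }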
--------------------------------------------------------------------------------
/holo-client-c/src/worker.h:
--------------------------------------------------------------------------------
1 | #ifndef _WORKER_H_
2 | #define _WORKER_H_
3 |
4 | #include <pthread.h>
5 | #include <stdbool.h>
6 | #include <stdlib.h>
7 | #include "../include/holo_client.h"
8 | #include "action.h"
9 | #include "connection_holder.h"
10 | #include "metrics.h"
11 | #include "lp_map.h"
12 |
13 | typedef struct _Worker Worker;
14 |
15 | typedef struct _Worker {
16 | ConnectionHolder *connHolder;
17 | HoloConfig config;
18 | int status; //0: initialized 1: started 2: stopping 3: stopped 4: error
19 | int index;
20 | Action* action; //action the worker is currently working on
21 | pthread_t* thread;
22 | pthread_mutex_t* mutex;
23 | pthread_cond_t* cond;
24 | MetricsInWorker* metrics;
25 | long lastUpdateTime;
26 | pthread_mutex_t* idleMutex;
27 | pthread_cond_t* idleCond;
28 | LPMap* map;
29 | } Worker;
30 |
31 | Worker* holo_client_new_worker(HoloConfig, int, bool);
32 | int holo_client_start_worker(Worker*);
33 | int holo_client_stop_worker(Worker*);
34 | void holo_client_close_worker(Worker*);
35 |
36 | bool holo_client_try_submit_action_to_worker(Worker*, Action*);
37 |
38 | ActionStatus handle_meta_action(ConnectionHolder* , Action*);
39 | ActionStatus handle_mutation_action(ConnectionHolder* , Action*);
40 | ActionStatus handle_sql_action(ConnectionHolder* , Action*);
41 | ActionStatus handle_get_action(ConnectionHolder*, Action*);
42 |
43 | void worker_abort_action(Worker*);
44 | #endif
--------------------------------------------------------------------------------
/holo-client-c/src/worker_pool_private.h:
--------------------------------------------------------------------------------
1 | #ifndef _WORKER_POOL_PRIVATE_H_
2 | #define _WORKER_POOL_PRIVATE_H_
3 |
4 | #include "worker.h"
5 | #include "meta_cache.h"
6 | #include "metrics.h"
7 | #include "../include/worker_pool.h"
8 |
9 | struct _HoloWorkerPool {
10 | Worker** workers;
11 | int numWorkers;
12 | MetaCache* metaCache;
13 | HoloConfig config;
14 | int status; //0:ready 1:started 2:stopped
15 | Metrics* metrics;
16 | pthread_mutex_t* idleMutex;
17 | pthread_cond_t* idleCond;
18 | };
19 |
20 | bool holo_client_submit_action_to_worker_pool(HoloWorkerPool*, Action*);
21 |
22 | #endif
--------------------------------------------------------------------------------
/holo-client-c/test/Makefile:
--------------------------------------------------------------------------------
1 | BIN=test
2 | CC=gcc
3 | GIT_VERSION=$(shell git tag --sort=-taggerdate | head -n 1)
4 | VERSION_FLAG=-DBUILD_VERSION=\"$(GIT_VERSION)\"
5 | CFLAGS=-lpthread -fsanitize=address -fno-omit-frame-pointer -std=c11 -D_XOPEN_SOURCE=600 -Wall -Werror
6 | STATIC_LIBS=-L../src/lib -llog4c -L/usr/local/Cellar/cunit/2.1-3/lib -lcunit #Link liblog4c.a
7 | SHARED_LIBS=-L/usr/local/opt/postgresql@11/lib -lpq #Link libpq.so
8 | INCS=-I/usr/local/opt/postgresql@11/include -I../src/include -I../include -I../src -I/usr/local/Cellar/cunit/2.1-3/include #Header file directory
9 | SRCS:= \
10 | unit_test.c \
11 | ../src/action.c \
12 | ../src/batch.c \
13 | ../src/connection_holder.c \
14 | ../src/direct_collector.c \
15 | ../src/future.c \
16 | ../src/get_collector.c \
17 | ../src/holo_client.c \
18 | ../src/holo_config.c \
19 | ../src/keywords.c \
20 | ../src/logger.c \
21 | ../src/logger_log4c.c \
22 | ../src/lp_map.c \
23 | ../src/meta_cache.c \
24 | ../src/murmur3.c \
25 | ../src/mutation_collector.c \
26 | ../src/mutation_map.c \
27 | ../src/record.c \
28 | ../src/request.c \
29 | ../src/sql_builder.c \
30 | ../src/table_schema.c \
31 | ../src/utils.c \
32 | ../src/worker_pool.c \
33 | ../src/worker.c \
34 | ../src/metrics.c \
35 | ../src/exception.c
36 |
37 | COBJS:=$(SRCS:.c=.o)
38 |
39 | all:$(BIN)
40 |
41 | $(COBJS) : %.o: %.c
42 | $(CC) -c $< -o $@ $(INCS) $(VERSION_FLAG) -std=c11 -D_XOPEN_SOURCE=600 -Wall -Werror -O0 -g3
43 |
44 | $(BIN):$(COBJS)
45 | $(CC) -o $(BIN) $(COBJS) $(STATIC_LIBS) $(SHARED_LIBS) $(CFLAGS)
46 |
47 | clean:
48 | rm $(BIN) $(COBJS)
--------------------------------------------------------------------------------
/holo-client-c/test/log4crc:
--------------------------------------------------------------------------------
--------------------------------------------------------------------------------
/holo-client-go/go.mod:
--------------------------------------------------------------------------------
1 | module holoclient
2 |
3 | go 1.16
4 |
--------------------------------------------------------------------------------
/holo-client-go/holo-client/include/holo_client.h:
--------------------------------------------------------------------------------
1 | #ifndef _HOLO_CLIENT_H_
2 | #define _HOLO_CLIENT_H_
3 |
4 | #include "defs.h"
5 | #include "logger.h"
6 | #include "request.h"
7 | #include "worker_pool.h"
8 | #include "record.h"
9 |
10 | __HOLO_CLIENT_BEGIN_DECLS
11 |
12 | struct _HoloClient;
13 | typedef struct _HoloClient HoloClient;
14 |
15 | HoloClient* holo_client_new_client(HoloConfig);
16 | HoloClient* holo_client_new_client_with_workerpool(HoloConfig, HoloWorkerPool*);
17 |
18 | int holo_client_flush_client(HoloClient*);
19 | int holo_client_flush_client_with_errmsg(HoloClient*, char**);
20 | int holo_client_close_client(HoloClient*);
21 |
22 | //The schema name may be NULL, in which case it defaults to "public"
23 | HoloTableSchema* holo_client_get_tableschema(HoloClient*, const char*, const char*, bool);
24 | HoloTableSchema* holo_client_get_tableschema_with_errmsg(HoloClient*, const char*, const char*, bool, char**);
25 |
26 | int holo_client_submit(HoloClient*, HoloMutation);
27 | int holo_client_submit_with_errmsg(HoloClient*, HoloMutation, char**);
28 | int holo_client_submit_with_attachments(HoloClient*, HoloMutation, int64_t, int64_t);
29 | int holo_client_get(HoloClient*, HoloGet);
30 | HoloRecord* holo_client_get_record(const HoloGet);
31 | char* holo_client_get_record_val(const HoloRecord* record, int colIndex);
32 |
33 | //Given an error code returned by these interfaces, the corresponding error message (as a string) can be obtained
34 | const char* holo_client_get_errmsg_with_errcode(int errCode);
35 |
36 | __HOLO_CLIENT_END_DECLS
37 |
38 | #endif
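
A minimal write-path sketch against the declarations above (not part of the source file). It assumes a HoloConfig has already been populated via the holo_config.h helpers and that mutations would be built through request.h, neither of which is reproduced here; it also assumes a zero return value means success.

    #include "holo_client.h"

    void flush_example(HoloConfig config) {
        HoloClient* client = holo_client_new_client(config);

        /* Passing NULL as the schema name defaults it to "public". */
        HoloTableSchema* schema =
            holo_client_get_tableschema(client, NULL, "my_table", true);
        (void)schema;

        /* ... build HoloMutation objects (request.h) and holo_client_submit() them ... */

        int ret = holo_client_flush_client(client);
        if (ret != 0) {
            /* Map the numeric error code back to a human-readable message. */
            const char* msg = holo_client_get_errmsg_with_errcode(ret);
            (void)msg;
        }
        holo_client_close_client(client);
    }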
--------------------------------------------------------------------------------
/holo-client-go/holo-client/include/logger.h:
--------------------------------------------------------------------------------
1 | #ifndef _LOGGER_H_
2 | #define _LOGGER_H_
3 |
4 | #include "defs.h"
5 |
6 | __HOLO_CLIENT_BEGIN_DECLS
7 |
8 | /**
9 |  * Definition of the HoloLogger callback; the default logger is log4c.
10 |  * You can provide your own implementation of HoloLogger and register it with holo_client_setup_logger.
11 |  * The first argument is the log level, the second is the message to be logged.
12 |  * If you do not want any logging, set the logger to holo_client_log_do_nothing, which reduces overhead.
13 | */
14 | typedef void* (*HoloLogger)(const int, const char*);
15 |
16 | void* holo_client_log_do_nothing(const int logLevel, const char* msg);
17 |
18 | void holo_client_setup_logger(HoloLogger logger, int loglevel);
19 |
20 | void holo_client_logger_open();
21 | void holo_client_logger_close();
22 |
23 | __HOLO_CLIENT_END_DECLS
24 |
25 | #endif
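
A sketch of a custom logger wired in through the API above (not part of the source file); the level threshold passed to holo_client_setup_logger is an arbitrary illustrative value.

    #include <stdio.h>
    #include "logger.h"

    /* A trivial HoloLogger implementation that writes to stderr; the void*
     * return value is unused here. */
    static void* stderr_logger(const int level, const char* msg) {
        fprintf(stderr, "[holo-client][level=%d] %s\n", level, msg);
        return NULL;
    }

    void install_logger(void) {
        holo_client_setup_logger(stderr_logger, 0);  /* 0 is a placeholder level */
    }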
--------------------------------------------------------------------------------
/holo-client-go/holo-client/include/table_schema.h:
--------------------------------------------------------------------------------
1 | #ifndef _TABLE_SCHEMA_H_
2 | #define _TABLE_SCHEMA_H_
3 |
4 | //#include
5 | #include <stdbool.h>
6 | #include "defs.h"
7 |
8 | __HOLO_CLIENT_BEGIN_DECLS
9 |
10 | typedef struct _HoloTableName HoloTableName;
11 |
12 | typedef struct _HoloColumn {
13 | char *name;
14 | char *quoted;
15 | unsigned int type;
16 | bool nullable;
17 | bool isPrimaryKey;
18 | char *defaultValue;
19 | } HoloColumn;
20 |
21 | typedef struct _HoloTableSchema {
22 | unsigned int tableId;
23 | HoloTableName* tableName; //defined as a pointer here to keep the HoloTableName struct hidden
24 | int nColumns;
25 | HoloColumn *columns;
26 | int nDistributionKeys;
27 | int *distributionKeys; //column index
28 | //int *dictionaryEncoding;
29 | //int *bitmapIndexKey;
30 | //int *clusteringKey;
31 | //int *segmentKey;
32 | int nPrimaryKeys;
33 | int *primaryKeys;
34 | int partitionColumn;
35 | } HoloTableSchema;
36 |
37 | const char* holo_client_get_column_name(HoloTableSchema* schema, int colIndex);
38 | const char* holo_client_get_column_type_name(HoloTableSchema* schema, int colIndex);
39 |
40 | //Given a HoloColumn's type field (an unsigned integer oid), the column's type name (as a string) can be obtained
41 | const char* holo_client_get_type_name_with_type_oid(unsigned int typeOid);
42 |
43 | __HOLO_CLIENT_END_DECLS
44 |
45 | #endif
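
A small sketch of the two lookup helpers above (not part of the source file), printing each column's name and type name for a schema obtained from holo_client_get_tableschema():

    #include <stdio.h>
    #include "table_schema.h"

    void print_columns(HoloTableSchema* schema) {
        for (int i = 0; i < schema->nColumns; i++) {
            /* Column name by index; type name resolved from the column's type oid. */
            const char* name = holo_client_get_column_name(schema, i);
            const char* type =
                holo_client_get_type_name_with_type_oid(schema->columns[i].type);
            printf("%s %s\n", name, type);
        }
    }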
--------------------------------------------------------------------------------
/holo-client-go/holo-client/include/worker_pool.h:
--------------------------------------------------------------------------------
1 | #ifndef _WORKER_POOL_H_
2 | #define _WORKER_POOL_H_
3 |
4 | #include "defs.h"
5 | #include "holo_config.h"
6 |
7 | __HOLO_CLIENT_BEGIN_DECLS
8 |
9 | struct _HoloWorkerPool;
10 | typedef struct _HoloWorkerPool HoloWorkerPool;
11 |
12 | HoloWorkerPool* holo_client_new_worker_pool(HoloConfig, bool, int);
13 | int holo_client_start_worker_pool(HoloWorkerPool*);
14 | int holo_client_worker_pool_status(const HoloWorkerPool*);
15 | int holo_client_stop_worker_pool(HoloWorkerPool*);
16 | int holo_client_close_worker_pool(HoloWorkerPool*);
17 |
18 | __HOLO_CLIENT_END_DECLS
19 |
20 | #endif
--------------------------------------------------------------------------------
/holo-client-go/holo-client/lib/libholo-client.so:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/aliyun/alibabacloud-hologres-connectors/2e2c4ad044ab020af0e80deaaa080acd1b8c8f71/holo-client-go/holo-client/lib/libholo-client.so
--------------------------------------------------------------------------------
/holo-client-go/log4crc:
--------------------------------------------------------------------------------
--------------------------------------------------------------------------------
/holo-client/.gitignore:
--------------------------------------------------------------------------------
1 | /target
2 | /.idea
3 | .classpath
4 | .project
5 | *.iml
6 | /.settings
7 | /.vscode
8 | .DS_Store
9 | dependency-reduced-pom.xml
10 | endpoint.properties
11 | *.swp
12 |
--------------------------------------------------------------------------------
/holo-client/LICENSE:
--------------------------------------------------------------------------------
1 | COPYRIGHT(c) 2017 Alibaba Group Holding Limited and/or its affiliates and licensors.
2 | All Rights Reserved.
3 |
4 | This material contains information which is proprietary and confidential to
5 | Alibaba Group Holding Limited. It is disclosed to the customer solely for the following
6 | purpose: namely, for the purpose of enabling the customer to evaluate the
7 | Alibaba products described herein or for the use of the information for the
8 | operation of the product delivered to the customer. The customer is not to
9 | reproduce, copy, divulge or sell all or any part thereof without the prior
10 | consent of an authorized representative of Alibaba.
--------------------------------------------------------------------------------
/holo-client/src/main/java/com/alibaba/hologres/client/EqualsFilter.java:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright (c) 2021. Alibaba Group Holding Limited
3 | */
4 |
5 | package com.alibaba.hologres.client;
6 |
7 | /**
8 | * EqualsFilter.
9 | */
10 | public class EqualsFilter implements Filter {
11 | int index;
12 | Object obj;
13 |
14 | public EqualsFilter(int index, Object obj) {
15 | this.index = index;
16 | this.obj = obj;
17 | }
18 |
19 | public int getIndex() {
20 | return index;
21 | }
22 |
23 | public Object getObj() {
24 | return obj;
25 | }
26 | }
27 |
--------------------------------------------------------------------------------
/holo-client/src/main/java/com/alibaba/hologres/client/Filter.java:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright (c) 2021. Alibaba Group Holding Limited
3 | */
4 |
5 | package com.alibaba.hologres.client;
6 |
7 | /**
8 | * Filter.
9 | */
10 | public interface Filter {
11 | }
12 |
--------------------------------------------------------------------------------
/holo-client/src/main/java/com/alibaba/hologres/client/RangeFilter.java:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright (c) 2021. Alibaba Group Holding Limited
3 | */
4 |
5 | package com.alibaba.hologres.client;
6 |
7 | /**
8 | * RangeFilter.
9 | */
10 | public class RangeFilter implements Filter {
11 | int index;
12 | Object start;
13 | Object stop;
14 |
15 | public RangeFilter(int index, Object start, Object stop) {
16 | this.index = index;
17 | this.start = start;
18 | this.stop = stop;
19 | }
20 |
21 | public int getIndex() {
22 | return index;
23 | }
24 |
25 | public Object getStart() {
26 | return start;
27 | }
28 |
29 | public Object getStop() {
30 | return stop;
31 | }
32 | }
33 |
--------------------------------------------------------------------------------
/holo-client/src/main/java/com/alibaba/hologres/client/SortKeys.java:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright (c) 2021. Alibaba Group Holding Limited
3 | */
4 |
5 | package com.alibaba.hologres.client;
6 |
7 | /**
8 | * Sort keys for a Scan operation.
9 | */
10 | public enum SortKeys {
11 | PRIMARY_KEY,
12 | CLUSTERING_KEY,
13 | NONE,
14 | }
15 |
--------------------------------------------------------------------------------
/holo-client/src/main/java/com/alibaba/hologres/client/Trace.java:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright (c) 2021. Alibaba Group Holding Limited
3 | */
4 |
5 | package com.alibaba.hologres.client;
6 |
7 | import java.util.concurrent.atomic.AtomicInteger;
8 |
9 | /**
10 | * Performance tracing for debugging.
11 | */
12 | public class Trace {
13 |
14 | long[] values = new long[20];
15 | String[] text = new String[20];
16 |
17 | AtomicInteger index = new AtomicInteger(1);
18 |
19 | public void begin() {
20 | values[0] = System.nanoTime();
21 | text[0] = "start";
22 | }
23 |
24 | public void step(String name) {
25 | long current = System.nanoTime();
26 | int index = this.index.getAndIncrement();
27 | if (index < values.length - 1) {
28 | values[index] = current;
29 | text[index] = name;
30 | }
31 | }
32 |
33 | public long getCost() {
34 | int index = this.index.get();
35 | return (values[index - 1] - values[0]) / 1000L;
36 | }
37 |
38 | @Override
39 | public String toString() {
40 | StringBuilder sb = new StringBuilder();
41 | int index = this.index.get();
42 | for (int i = 1; i < index && i < values.length; ++i) {
43 | sb.append(text[i]).append(":").append((values[i] - values[i - 1]) / 1000L).append(" us -> ");
44 | }
45 | sb.append("total:").append(getCost()).append(" us");
46 | return sb.toString();
47 | }
48 | }
49 |
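A usage sketch for Trace (not part of the source file); the step names and sleeps are placeholders for real work such as acquiring a connection and executing a statement.

    import com.alibaba.hologres.client.Trace;

    public class TraceExample {
        public static void main(String[] args) throws InterruptedException {
            Trace trace = new Trace();
            trace.begin();
            Thread.sleep(1);              // stand-in for "acquire connection"
            trace.step("getConnection");
            Thread.sleep(2);              // stand-in for "execute statement"
            trace.step("execute");
            // Prints each step's cost and the total, in microseconds.
            System.out.println(trace);
        }
    }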
--------------------------------------------------------------------------------
/holo-client/src/main/java/com/alibaba/hologres/client/copy/CopyInOutputStream.java:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright (c) 2022. Alibaba Group Holding Limited
3 | */
4 |
5 | package com.alibaba.hologres.client.copy;
6 |
7 | import org.postgresql.copy.CopyIn;
8 |
9 | import java.io.IOException;
10 | import java.io.OutputStream;
11 | import java.sql.SQLException;
12 |
13 | /**
14 | * Wraps a CopyIn as an OutputStream.
15 | */
16 | public class CopyInOutputStream extends OutputStream implements WithCopyResult {
17 | private final CopyIn copyIn;
18 | private long result = -1;
19 |
20 | public CopyInOutputStream(CopyIn copyIn) {
21 | this.copyIn = copyIn;
22 | }
23 |
24 | @Override
25 | public void write(int b) throws IOException {
26 | throw new UnsupportedOperationException("please use void write(byte b[], int off, int len)");
27 | }
28 |
29 | @Override
30 | public void write(byte[] b, int off, int len) throws IOException {
31 | if (b == null) {
32 | throw new NullPointerException();
33 | } else if ((off < 0) || (off > b.length) || (len < 0) ||
34 | ((off + len) > b.length) || ((off + len) < 0)) {
35 | throw new IndexOutOfBoundsException();
36 | } else if (len == 0) {
37 | return;
38 | }
39 | try {
40 | copyIn.writeToCopy(b, off, len);
41 | } catch (SQLException e) {
42 | throw new IOException(e);
43 | }
44 | }
45 |
46 | @Override
47 | public void flush() throws IOException {
48 | try {
49 | copyIn.flushCopy();
50 | } catch (SQLException e) {
51 | throw new IOException(e);
52 | }
53 | }
54 |
55 | @Override
56 | public long getResult() {
57 | return result;
58 | }
59 |
60 | @Override
61 | public void close() throws IOException {
62 | try {
63 | result = copyIn.endCopy();
64 | } catch (SQLException e) {
65 | throw new IOException(e);
66 | }
67 | }
68 | }
69 |
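A hedged sketch of driving CopyInOutputStream from PgJDBC's CopyManager (not part of the source file); the table name, COPY options and row payload are placeholders.

    import com.alibaba.hologres.client.copy.CopyInOutputStream;
    import org.postgresql.copy.CopyIn;
    import org.postgresql.jdbc.PgConnection;

    import java.nio.charset.StandardCharsets;

    public class CopyInExample {
        public static void copyOneRow(PgConnection conn) throws Exception {
            CopyIn copyIn = conn.getCopyAPI()
                    .copyIn("COPY my_table FROM STDIN WITH (FORMAT text)");
            CopyInOutputStream os = new CopyInOutputStream(copyIn);
            byte[] row = "1\tfoo\n".getBytes(StandardCharsets.UTF_8);
            // Only the (byte[], int, int) overload is supported by the wrapper.
            os.write(row, 0, row.length);
            os.flush();
            os.close();                            // endCopy() runs here
            System.out.println("rows copied: " + os.getResult());
        }
    }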
--------------------------------------------------------------------------------
/holo-client/src/main/java/com/alibaba/hologres/client/copy/CopyMode.java:
--------------------------------------------------------------------------------
1 | package com.alibaba.hologres.client.copy;
2 |
3 | /**
4 | * enum for write mode.
5 | * */
6 | public enum CopyMode {
7 | // fixed copy
8 | STREAM,
9 | // bulk copy
10 | BULK_LOAD,
11 | // bulk load supports on_conflict since Hologres 2.2.25; kept as a separate mode for compatibility with older versions
12 | BULK_LOAD_ON_CONFLICT
13 | }
14 |
--------------------------------------------------------------------------------
/holo-client/src/main/java/com/alibaba/hologres/client/copy/WithCopyResult.java:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright (c) 2022. Alibaba Group Holding Limited
3 | */
4 |
5 | package com.alibaba.hologres.client.copy;
6 |
7 | /**
8 | * Returns the number of rows copied.
9 | */
10 | public interface WithCopyResult {
11 | long getResult();
12 | }
13 |
--------------------------------------------------------------------------------
/holo-client/src/main/java/com/alibaba/hologres/client/exception/ExceptionCode.java:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright (c) 2020. Alibaba Group Holding Limited
3 | */
4 |
5 | package com.alibaba.hologres.client.exception;
6 |
7 | /**
8 | * enum for exception code.
9 | */
10 | public enum ExceptionCode {
11 | INVALID_Config(1),
12 | INVALID_REQUEST(2),
13 |
14 | GENERATOR_PARAMS_ERROR(51),
15 |
16 | /* retryable, not dirty data */
17 | CONNECTION_ERROR(100),
18 | READ_ONLY(103),
19 | META_NOT_MATCH(201),
20 | TIMEOUT(250),
21 | BUSY(251),
22 | TOO_MANY_CONNECTIONS(106),
23 |
24 | /* not retryable, not dirty data */
25 | AUTH_FAIL(101),
26 | ALREADY_CLOSE(102),
27 | PERMISSION_DENY(104),
28 | SYNTAX_ERROR(105),
29 | TOO_MANY_WAL_SENDERS(107),
30 | INTERNAL_ERROR(300),
31 | INTERRUPTED(301),
32 | NOT_SUPPORTED(302),
33 |
34 | /* not retryable, dirty data */
35 | TABLE_NOT_FOUND(200, true),
36 | CONSTRAINT_VIOLATION(202, true),
37 | DATA_TYPE_ERROR(203, true),
38 | DATA_VALUE_ERROR(204, true),
39 |
40 | UNKNOWN_ERROR(500);
41 |
42 | private final int code;
43 |
44 | private final boolean dirtyDataException;
45 |
46 | ExceptionCode(int code) {
47 | this(code, false);
48 | }
49 |
50 | ExceptionCode(int code, boolean dirtyDataException) {
51 | this.code = code;
52 | this.dirtyDataException = dirtyDataException;
53 | }
54 |
55 | public int getCode() {
56 | return this.code;
57 | }
58 |
59 | public boolean isDirtyDataException() {
60 | return dirtyDataException;
61 | }
62 | }
63 |
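An illustrative (not authoritative) policy sketch built on the flags above; the actual retry behaviour lives inside the client, this only shows how getCode() and isDirtyDataException() can be read.

    import com.alibaba.hologres.client.exception.ExceptionCode;

    public class ExceptionCodeExample {
        static String classify(ExceptionCode code) {
            if (code.isDirtyDataException()) {
                return "skip record (dirty data, code=" + code.getCode() + ")";
            }
            switch (code) {
                case CONNECTION_ERROR:
                case READ_ONLY:
                case META_NOT_MATCH:
                case TIMEOUT:
                case BUSY:
                case TOO_MANY_CONNECTIONS:
                    return "retry (code=" + code.getCode() + ")";
                default:
                    return "fail fast (code=" + code.getCode() + ")";
            }
        }

        public static void main(String[] args) {
            System.out.println(classify(ExceptionCode.TIMEOUT));              // retry
            System.out.println(classify(ExceptionCode.CONSTRAINT_VIOLATION)); // skip record
        }
    }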
--------------------------------------------------------------------------------
/holo-client/src/main/java/com/alibaba/hologres/client/exception/InvalidIdentifierException.java:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright (c) 2022. Alibaba Group Holding Limited
3 | */
4 |
5 | package com.alibaba.hologres.client.exception;
6 |
7 | /**
8 | * Exception thrown when a TableName is invalid.
9 | */
10 | public class InvalidIdentifierException extends RuntimeException {
11 | public InvalidIdentifierException() {
12 | }
13 |
14 | public InvalidIdentifierException(String message) {
15 | super(message);
16 | }
17 |
18 | public InvalidIdentifierException(String message, Throwable cause) {
19 | super(message, cause);
20 | }
21 |
22 | public InvalidIdentifierException(Throwable cause) {
23 | super(cause);
24 | }
25 | }
26 |
27 |
--------------------------------------------------------------------------------
/holo-client/src/main/java/com/alibaba/hologres/client/function/FunctionWithSQLException.java:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright (c) 2022. Alibaba Group Holding Limited
3 | */
4 |
5 | package com.alibaba.hologres.client.function;
6 |
7 | import java.sql.SQLException;
8 |
9 | /**
10 | * A function that may throw SQLException.
11 | *
12 | * @param <I> the input type
13 | * @param <O> the output type
14 | */
15 | public interface FunctionWithSQLException<I, O> {
16 | O apply(I input) throws SQLException;
17 | }
18 |
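A tiny sketch of implementing the interface above with a lambda-style method reference (not part of the source file); where the client accepts such a function is assumed here, the snippet only shows the interface's shape.

    import com.alibaba.hologres.client.function.FunctionWithSQLException;

    import java.sql.Connection;

    public class FunctionExample {
        public static void main(String[] args) {
            // Given a Connection, return its current schema; the SQLException
            // declared by apply() may propagate unwrapped.
            FunctionWithSQLException<Connection, String> currentSchema =
                    Connection::getSchema;
            System.out.println(currentSchema.getClass().getSimpleName());
        }
    }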
--------------------------------------------------------------------------------
/holo-client/src/main/java/com/alibaba/hologres/client/impl/MetaStore.java:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright (c) 2022. Alibaba Group Holding Limited
3 | */
4 |
5 | package com.alibaba.hologres.client.impl;
6 |
7 | import com.alibaba.hologres.client.model.Partition;
8 | import com.alibaba.hologres.client.model.TableName;
9 | import com.alibaba.hologres.client.model.TableSchema;
10 |
11 | /**
12 | * Metadata cache.
13 | */
14 | public class MetaStore {
15 | public final Cache<TableName, TableSchema> tableCache;
16 | public final Cache<TableName, Cache<String, Partition>> partitionCache;
17 |
18 | public MetaStore(long tableCacheTTL) {
19 | this.tableCache = new Cache<>(tableCacheTTL, null);
20 | this.partitionCache = new Cache<>((tableName) -> new Cache<>());
21 | }
22 | }
23 |
--------------------------------------------------------------------------------
/holo-client/src/main/java/com/alibaba/hologres/client/impl/ObjectChan.java:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright (c) 2021. Alibaba Group Holding Limited
3 | */
4 |
5 | package com.alibaba.hologres.client.impl;
6 |
7 | import java.util.concurrent.TimeUnit;
8 | import java.util.concurrent.locks.Condition;
9 | import java.util.concurrent.locks.ReentrantLock;
10 |
11 | /**
12 | * A queue with capacity one.
13 | *
14 | * @param <T> the element type
15 | */
16 | public class ObjectChan<T> {
17 |
18 | T value;
19 | private final ReentrantLock lock;
20 | private final Condition notEmpty;
21 |
22 | public ObjectChan() {
23 | lock = new ReentrantLock(false);
24 | notEmpty = lock.newCondition();
25 | }
26 |
27 | public boolean set(T t) {
28 | lock.lock();
29 | try {
30 | if (value == null) {
31 | value = t;
32 | notEmpty.signalAll();
33 | return true;
34 | } else {
35 | return false;
36 | }
37 | } finally {
38 | lock.unlock();
39 | }
40 | }
41 |
42 | public T get(long timeout, TimeUnit unit) throws InterruptedException {
43 | long nanos = unit.toNanos(timeout);
44 | final ReentrantLock lock = this.lock;
45 | lock.lockInterruptibly();
46 | try {
47 | while (value == null) {
48 | if (nanos <= 0) {
49 | return null;
50 | }
51 | nanos = notEmpty.awaitNanos(nanos);
52 | }
53 | return value;
54 | } finally {
55 | lock.unlock();
56 | }
57 | }
58 |
59 | public void clear() {
60 | lock.lock();
61 | try {
62 | value = null;
63 | } finally {
64 | lock.unlock();
65 | }
66 | }
67 | }
68 |
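A usage sketch for the single-slot channel above (not part of the source file):

    import com.alibaba.hologres.client.impl.ObjectChan;

    import java.util.concurrent.TimeUnit;

    public class ObjectChanExample {
        public static void main(String[] args) throws InterruptedException {
            ObjectChan<String> chan = new ObjectChan<>();

            System.out.println(chan.set("first"));   // true: slot was empty
            System.out.println(chan.set("second"));  // false: slot already occupied

            // get() blocks until a value is present or the timeout elapses.
            String value = chan.get(100, TimeUnit.MILLISECONDS);
            System.out.println(value);               // "first"

            chan.clear();                            // empty the slot again
            System.out.println(chan.set("second"));  // true
        }
    }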
--------------------------------------------------------------------------------
/holo-client/src/main/java/com/alibaba/hologres/client/impl/OneshotWorker.java:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright (c) 2022. Alibaba Group Holding Limited
3 | */
4 |
5 | package com.alibaba.hologres.client.impl;
6 |
7 | import com.alibaba.hologres.client.HoloConfig;
8 | import com.alibaba.hologres.client.impl.action.AbstractAction;
9 | import com.alibaba.hologres.client.impl.binlog.action.BinlogAction;
10 | import com.alibaba.hologres.client.impl.binlog.handler.BinlogActionHandler;
11 |
12 | import java.util.concurrent.TimeUnit;
13 | import java.util.concurrent.atomic.AtomicBoolean;
14 |
15 | /**
16 | * A worker that runs only once; there is no while loop.
17 | */
18 | public class OneshotWorker extends Worker {
19 |
20 | public OneshotWorker(HoloConfig config, AtomicBoolean started, String index, boolean isShadingEnv, boolean isFixed) {
21 | super(config, started, index, isShadingEnv, isFixed);
22 | handlerMap.put(BinlogAction.class, new BinlogActionHandler(started, config, isShadingEnv, isFixed));
23 | }
24 |
25 | @Override
26 | public void run() {
27 | LOGGER.info("worker:{} start", this);
28 | try {
29 | AbstractAction action = recordCollector.get(2000L, TimeUnit.MILLISECONDS);
30 | /*
31 | * Each iteration does two things:
32 | * 1. execute the action, if there is one
33 | * 2. release idle connections according to connectionMaxIdleMs
34 | * */
35 | if (null != action) {
36 | try {
37 | handle(action);
38 | } finally {
39 | recordCollector.clear();
40 | }
41 | } else {
42 |
43 | }
44 | } catch (Throwable e) {
45 | LOGGER.error("should not happen", e);
46 | }
47 | LOGGER.info("worker:{} stop", this);
48 | connectionHolder.close();
49 | }
50 | }
51 |
--------------------------------------------------------------------------------
/holo-client/src/main/java/com/alibaba/hologres/client/impl/PreparedStatementWithBatchInfo.java:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright (c) 2022. Alibaba Group Holding Limited
3 | */
4 |
5 | package com.alibaba.hologres.client.impl;
6 |
7 | import com.alibaba.hologres.client.Put;
8 | import com.alibaba.hologres.client.utils.Tuple;
9 |
10 | import java.sql.PreparedStatement;
11 |
12 | /**
13 | * A PreparedStatement together with whether it needs batchExecute.
14 | */
15 | public class PreparedStatementWithBatchInfo extends Tuple<PreparedStatement, Boolean> {
16 | long byteSize;
17 | int batchCount;
18 | Put.MutationType type;
19 | public PreparedStatementWithBatchInfo(PreparedStatement preparedStatement, Boolean isBatch, Put.MutationType type) {
20 | super(preparedStatement, isBatch);
21 | this.type = type;
22 | }
23 |
24 | public Put.MutationType getType() {
25 | return type;
26 | }
27 |
28 | public void setType(Put.MutationType type) {
29 | this.type = type;
30 | }
31 |
32 | public int getBatchCount() {
33 | return batchCount;
34 | }
35 |
36 | public void setBatchCount(int batchCount) {
37 | this.batchCount = batchCount;
38 | }
39 |
40 | public long getByteSize() {
41 | return byteSize;
42 | }
43 |
44 | public void setByteSize(long byteSize) {
45 | this.byteSize = byteSize;
46 | }
47 | }
48 |
--------------------------------------------------------------------------------
/holo-client/src/main/java/com/alibaba/hologres/client/impl/action/AbstractAction.java:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright (c) 2021. Alibaba Group Holding Limited
3 | */
4 |
5 | package com.alibaba.hologres.client.impl.action;
6 |
7 | import com.alibaba.hologres.client.exception.ExceptionCode;
8 | import com.alibaba.hologres.client.exception.HoloClientException;
9 |
10 | import java.util.concurrent.CompletableFuture;
11 | import java.util.concurrent.ExecutionException;
12 | import java.util.concurrent.Semaphore;
13 |
14 | /**
15 | * ca.
16 | *
17 | * @param <T> the result type
18 | */
19 | public abstract class AbstractAction<T> {
20 | CompletableFuture<T> future;
21 |
22 | Semaphore semaphore;
23 |
24 | public AbstractAction() {
25 | this.future = new CompletableFuture<>();
26 | }
27 |
28 | public CompletableFuture<T> getFuture() {
29 | return future;
30 | }
31 |
32 | public T getResult() throws HoloClientException {
33 | try {
34 | return future.get();
35 | } catch (InterruptedException e) {
36 | throw new HoloClientException(ExceptionCode.INTERNAL_ERROR, "interrupt", e);
37 | } catch (ExecutionException e) {
38 | Throwable cause = e.getCause();
39 | if (cause instanceof HoloClientException) {
40 | throw (HoloClientException) cause;
41 | } else {
42 | throw new HoloClientException(ExceptionCode.INTERNAL_ERROR, "", cause);
43 | }
44 | }
45 | }
46 |
47 | public Semaphore getSemaphore() {
48 | return semaphore;
49 | }
50 |
51 | public void setSemaphore(Semaphore semaphore) {
52 | this.semaphore = semaphore;
53 | }
54 | }
55 |
--------------------------------------------------------------------------------
/holo-client/src/main/java/com/alibaba/hologres/client/impl/action/EmptyAction.java:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright (c) 2021. Alibaba Group Holding Limited
3 | */
4 |
5 | package com.alibaba.hologres.client.impl.action;
6 |
7 | /**
8 | * An empty Action, used to proactively trigger the Worker's work loop.
9 | */
10 | public class EmptyAction extends AbstractAction<Void> {
11 | }
12 |
--------------------------------------------------------------------------------
/holo-client/src/main/java/com/alibaba/hologres/client/impl/action/GetAction.java:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright (c) 2021. Alibaba Group Holding Limited
3 | */
4 |
5 | package com.alibaba.hologres.client.impl.action;
6 |
7 | import com.alibaba.hologres.client.Get;
8 |
9 | import java.util.List;
10 |
11 | /**
12 | * ga.
13 | */
14 | public class GetAction extends AbstractAction<Void> {
15 |
16 | List<Get> getList;
17 |
18 | public GetAction(List<Get> getList) {
19 | this.getList = getList;
20 | }
21 |
22 | public List<Get> getGetList() {
23 | return getList;
24 | }
25 | }
26 |
--------------------------------------------------------------------------------
/holo-client/src/main/java/com/alibaba/hologres/client/impl/action/MetaAction.java:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright (c) 2021. Alibaba Group Holding Limited
3 | */
4 |
5 | package com.alibaba.hologres.client.impl.action;
6 |
7 | import com.alibaba.hologres.client.model.TableName;
8 | import com.alibaba.hologres.client.model.TableSchema;
9 |
10 | /**
11 | * ma.
12 | */
13 | public class MetaAction extends AbstractAction<TableSchema> {
14 |
15 | TableName tableName;
16 |
17 | /**
18 | * @param tableName the table name
19 | */
20 | public MetaAction(TableName tableName) {
21 | this.tableName = tableName;
22 | }
23 |
24 | public TableName getTableName() {
25 | return tableName;
26 | }
27 | }
28 |
--------------------------------------------------------------------------------
/holo-client/src/main/java/com/alibaba/hologres/client/impl/action/ScanAction.java:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright (c) 2021. Alibaba Group Holding Limited
3 | */
4 |
5 | package com.alibaba.hologres.client.impl.action;
6 |
7 | import com.alibaba.hologres.client.Scan;
8 | import com.alibaba.hologres.client.model.RecordScanner;
9 |
10 | /**
11 | * ga.
12 | */
13 | public class ScanAction extends AbstractAction<RecordScanner> {
14 |
15 | Scan scan;
16 |
17 | public ScanAction(Scan scan) {
18 | this.scan = scan;
19 | }
20 |
21 | public Scan getScan() {
22 | return scan;
23 | }
24 | }
25 |
--------------------------------------------------------------------------------
/holo-client/src/main/java/com/alibaba/hologres/client/impl/action/SqlAction.java:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright (c) 2021. Alibaba Group Holding Limited
3 | */
4 |
5 | package com.alibaba.hologres.client.impl.action;
6 |
7 | import com.alibaba.hologres.client.function.FunctionWithSQLException;
8 |
9 | import java.sql.Connection;
10 |
11 | /**
12 | * ga.
13 | */
14 | public class SqlAction<T> extends AbstractAction<T> {
15 |
16 | FunctionWithSQLException<Connection, T> handler;
17 |
18 | public SqlAction(FunctionWithSQLException<Connection, T> handler) {
19 | this.handler = handler;
20 | }
21 |
22 | public FunctionWithSQLException<Connection, T> getHandler() {
23 | return handler;
24 | }
25 | }
26 |
--------------------------------------------------------------------------------
/holo-client/src/main/java/com/alibaba/hologres/client/impl/binlog/BinlogEventType.java:
--------------------------------------------------------------------------------
1 | package com.alibaba.hologres.client.impl.binlog;
2 |
3 | import java.security.InvalidParameterException;
4 |
5 | /**
6 | * BinlogEventType
7 | * hg_binlog_event_type: INSERT, DELETE and UPDATE each produce corresponding binlog records;
8 | * an UPDATE produces two binlog records, one with the pre-update row and one with the post-update row.
9 | */
10 | public enum BinlogEventType {
11 | // A virtual event type used only by the client; it does not exist in the engine.
12 | HeartBeat(-1),
13 |
14 | // This binlog record deletes an existing row
15 | DELETE(2),
16 |
17 | // This binlog record is the pre-update image of an updated row
18 | BEFORE_UPDATE(3),
19 |
20 | // This binlog record inserts a new row
21 | INSERT(5),
22 |
23 | // This binlog record is the post-update image of an updated row
24 | AFTER_UPDATE(7),
25 |
26 | // Indicates that this shard's binlog has been fully consumed up to hg_binlog_timestamp_us
27 | HEARTBEAT_LOG_EVENT(27);
28 |
29 | private final long value;
30 |
31 | BinlogEventType(long value) {
32 | this.value = value;
33 | }
34 |
35 | public long getValue() {
36 | return value;
37 | }
38 |
39 | public static BinlogEventType of(long value) {
40 | switch ((int) value) {
41 | case 2:
42 | return DELETE;
43 | case 3:
44 | return BEFORE_UPDATE;
45 | case 5:
46 | return INSERT;
47 | case 7:
48 | return AFTER_UPDATE;
49 | case 27:
50 | return HEARTBEAT_LOG_EVENT;
51 | default:
52 | throw new InvalidParameterException("unknown binlog event type " + value);
53 | }
54 | }
55 | }
56 |
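A small sketch of mapping raw hg_binlog_event_type values back to the enum above (not part of the source file):

    import com.alibaba.hologres.client.impl.binlog.BinlogEventType;

    public class BinlogEventTypeExample {
        public static void main(String[] args) {
            // An UPDATE shows up as two consecutive records: the pre-update
            // image (3) followed by the post-update image (7).
            System.out.println(BinlogEventType.of(3)); // BEFORE_UPDATE
            System.out.println(BinlogEventType.of(7)); // AFTER_UPDATE
            System.out.println(BinlogEventType.of(5)); // INSERT
        }
    }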
--------------------------------------------------------------------------------
/holo-client/src/main/java/com/alibaba/hologres/client/impl/binlog/BinlogLevel.java:
--------------------------------------------------------------------------------
1 | package com.alibaba.hologres.client.impl.binlog;
2 |
3 |
4 | /**
5 | * BinlogLevel
6 | * The binlog level of a Hologres physical table, corresponding to the "binlog.level" table property.
7 | */
8 | public enum BinlogLevel {
9 | // binlog.level set to replica means binlog is enabled on the Hologres physical table
10 | REPLICA,
11 |
12 | // means the binlog.level property is not set
13 | NONE
14 | }
15 |
--------------------------------------------------------------------------------
/holo-client/src/main/java/com/alibaba/hologres/client/impl/binlog/Committer.java:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright (c) 2022. Alibaba Group Holding Limited
3 | */
4 |
5 | package com.alibaba.hologres.client.impl.binlog;
6 |
7 | import com.alibaba.hologres.client.utils.Tuple;
8 | import org.slf4j.Logger;
9 | import org.slf4j.LoggerFactory;
10 |
11 | import java.util.concurrent.BlockingQueue;
12 | import java.util.concurrent.CompletableFuture;
13 | import java.util.concurrent.TimeUnit;
14 | import java.util.concurrent.TimeoutException;
15 |
16 | /**
17 | * Used to commit LSNs.
18 | */
19 | @Deprecated
20 | public class Committer {
21 | public static final Logger LOGGER = LoggerFactory.getLogger(Committer.class);
22 |
23 | final BlockingQueue<Tuple<CompletableFuture<Void>, Long>> queue;
24 |
25 | long lastReadLsn = -1;
26 |
27 | public Committer(BlockingQueue<Tuple<CompletableFuture<Void>, Long>> queue) {
28 | this.queue = queue;
29 | }
30 |
31 | public void updateLastReadLsn(long lastReadLsn) {
32 | this.lastReadLsn = lastReadLsn;
33 | }
34 |
35 | public CompletableFuture<Void> commit(long timeout) throws InterruptedException, TimeoutException {
36 | return commit(lastReadLsn, timeout);
37 | }
38 |
39 | public CompletableFuture<Void> commit(long lsn, long timeout) throws InterruptedException, TimeoutException {
40 | CompletableFuture<Void> future = new CompletableFuture<>();
41 | if (lsn < 0) {
42 | LOGGER.info("last read lsn {} < 0, skip commit it", lsn);
43 | future.complete(null);
44 | return future;
45 | }
46 | boolean ret = queue.offer(new Tuple<>(future, lsn), timeout, TimeUnit.MILLISECONDS);
47 | if (!ret) {
48 | throw new TimeoutException();
49 | } else {
50 | return future;
51 | }
52 | }
53 |
54 | public long getLastReadLsn() {
55 | return lastReadLsn;
56 | }
57 | }
58 |
--------------------------------------------------------------------------------
/holo-client/src/main/java/com/alibaba/hologres/client/impl/binlog/TableSchemaSupplier.java:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright (c) 2022. Alibaba Group Holding Limited
3 | */
4 |
5 | package com.alibaba.hologres.client.impl.binlog;
6 |
7 | import com.alibaba.hologres.client.exception.HoloClientException;
8 | import com.alibaba.hologres.client.model.TableSchema;
9 |
10 | /**
11 | * Supplies a TableSchema.
12 | */
13 | public interface TableSchemaSupplier {
14 | TableSchema apply() throws HoloClientException;
15 | }
16 |
--------------------------------------------------------------------------------
/holo-client/src/main/java/com/alibaba/hologres/client/impl/collector/BatchState.java:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright (c) 2021. Alibaba Group Holding Limited
3 | */
4 |
5 | package com.alibaba.hologres.client.impl.collector;
6 |
7 | /**
8 | * The state of a batch.
9 | */
10 | public enum BatchState {
11 | /**
12 | * More records can still be accumulated.
13 | */
14 | NotEnough(0),
15 | /**
16 | * The number of buffered records has reached the target.
17 | */
18 | SizeEnough(1),
19 | /**
20 | * The buffered byte size has reached the target.
21 | */
22 | ByteSizeEnough(2),
23 | /**
24 | * The wait time has exceeded the target duration.
25 | */
26 | TimeWaitEnough(3),
27 | /**
28 | * The batch is unlikely to double before the target duration is reached, so it is submitted early.
29 | */
30 | TimeCondition(4),
31 | /**
32 | * The batch is unlikely to double before the target byte size is reached, so it is submitted early.
33 | * For example, if the target size is 2MB and the current 128 records already take 1.8MB, the batch can be submitted right away, reducing SQL fragmentation.
34 | */
35 | ByteSizeCondition(5),
36 | /**
37 | * The batch is unlikely to double before the total byte size target is reached, so it is submitted early.
38 | */
39 | TotalByteSizeCondition(6),
40 | /**
41 | * Forced submission.
42 | */
43 | Force(7),
44 | /**
45 | * Because batch execution failed, the records are re-submitted in batches of at most one record each.
46 | */
47 | RetryOneByOne(8);
48 |
49 | int index;
50 |
51 | BatchState(int index) {
52 | this.index = index;
53 | }
54 |
55 | public int getIndex() {
56 | return index;
57 | }
58 | }
59 |
--------------------------------------------------------------------------------
/holo-client/src/main/java/com/alibaba/hologres/client/impl/collector/CollectorStatistics.java:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright (c) 2021. Alibaba Group Holding Limited
3 | */
4 |
5 | package com.alibaba.hologres.client.impl.collector;
6 |
7 | import java.util.concurrent.atomic.AtomicInteger;
8 |
9 | /**
10 | * Statistics about batching.
11 | */
12 | public class CollectorStatistics {
13 |
14 | /**
15 | * Number of batches submitted because the record count or byte size target was reached.
16 | */
17 | AtomicInteger fullBatchCount = new AtomicInteger(0);
18 |
19 | /**
20 | * Number of batches submitted because of time (or because total memory ran low, which is not necessarily this table's fault).
21 | *
22 | */
23 | AtomicInteger notFullBatchCount = new AtomicInteger(0);
24 |
25 | /**
26 | * Start time of the current statistics round.
27 | */
28 | long nanoTime = System.nanoTime();
29 |
30 | public void add(BatchState state) {
31 | switch (state) {
32 | case SizeEnough:
33 | case ByteSizeEnough:
34 | case ByteSizeCondition:
35 | fullBatchCount.incrementAndGet();
36 | break;
37 | case NotEnough:
38 | break;
39 | default:
40 | notFullBatchCount.incrementAndGet();
41 | }
42 | }
43 |
44 | public int getFullBatchCount() {
45 | return fullBatchCount.get();
46 | }
47 |
48 | public int getNotFullBatchCount() {
49 | return notFullBatchCount.get();
50 | }
51 |
52 | public long getNanoTime() {
53 | return nanoTime;
54 | }
55 |
56 | public void clear() {
57 | fullBatchCount.set(0);
58 | notFullBatchCount.set(0);
59 | nanoTime = System.nanoTime();
60 | }
61 | }
62 |
--------------------------------------------------------------------------------
/holo-client/src/main/java/com/alibaba/hologres/client/impl/collector/ResizePolicy.java:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright (c) 2021. Alibaba Group Holding Limited
3 | */
4 |
5 | package com.alibaba.hologres.client.impl.collector;
6 |
7 | import com.alibaba.hologres.client.HoloConfig;
8 | import com.alibaba.hologres.client.model.TableName;
9 |
10 | /**
11 | * Resize policy.
12 | */
13 | public interface ResizePolicy {
14 |
15 | void init(HoloConfig config);
16 |
17 | /**
18 | * Calculates a new shard count for a table.
19 | * @param tableName the table name
20 | * @param stat collector statistics
21 | * @param currentSize the current shard count
22 | * @param workerCount the current number of workers
23 | * @param currentNano the current JVM nanosecond timestamp
24 | * @return the new shard count
25 | */
26 | int calculate(TableName tableName, CollectorStatistics stat, int currentSize, int workerCount, long currentNano);
27 | }
28 |
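An illustrative ResizePolicy implementation (not the library's own): grow the shard count when most batches fill up, shrink when most flushes are time-driven, and clamp the result to [1, workerCount]. The thresholds are arbitrary.

    import com.alibaba.hologres.client.HoloConfig;
    import com.alibaba.hologres.client.impl.collector.CollectorStatistics;
    import com.alibaba.hologres.client.impl.collector.ResizePolicy;
    import com.alibaba.hologres.client.model.TableName;

    public class SimpleResizePolicy implements ResizePolicy {

        @Override
        public void init(HoloConfig config) {
            // no tunables in this sketch
        }

        @Override
        public int calculate(TableName tableName, CollectorStatistics stat,
                             int currentSize, int workerCount, long currentNano) {
            int full = stat.getFullBatchCount();
            int notFull = stat.getNotFullBatchCount();
            int next = currentSize;
            if (full > notFull * 2) {
                next = currentSize * 2;   // batches fill quickly: add shards
            } else if (notFull > full * 2) {
                next = currentSize / 2;   // mostly time-driven flushes: shrink
            }
            return Math.max(1, Math.min(workerCount, next));
        }
    }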
--------------------------------------------------------------------------------
/holo-client/src/main/java/com/alibaba/hologres/client/impl/collector/shard/DistributionKeyShardPolicy.java:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright (c) 2021. Alibaba Group Holding Limited
3 | */
4 |
5 | package com.alibaba.hologres.client.impl.collector.shard;
6 |
7 | import com.alibaba.hologres.client.impl.util.ShardUtil;
8 | import com.alibaba.hologres.client.model.Record;
9 |
10 | import java.util.concurrent.ConcurrentSkipListMap;
11 |
12 | /**
13 | * Shards by distribution key; if there is no distribution key, a shard is chosen at random.
14 | */
15 | public class DistributionKeyShardPolicy implements ShardPolicy {
16 |
17 | private ConcurrentSkipListMap<Integer, Integer> splitRange = new ConcurrentSkipListMap<>();
18 |
19 | @Override
20 | public void init(int shardCount) {
21 | splitRange.clear();
22 | int[][] range = ShardUtil.split(shardCount);
23 | for (int i = 0; i < shardCount; ++i) {
24 | splitRange.put(range[i][0], i);
25 | }
26 | }
27 |
28 | @Override
29 | public int locate(Record record) {
30 | int raw = ShardUtil.hash(record, record.getSchema().getDistributionKeyIndex());
31 | int hash = Integer.remainderUnsigned(raw, ShardUtil.RANGE_END);
32 | return splitRange.floorEntry(hash).getValue();
33 | }
34 | }
35 |
--------------------------------------------------------------------------------
/holo-client/src/main/java/com/alibaba/hologres/client/impl/collector/shard/ShardPolicy.java:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright (c) 2021. Alibaba Group Holding Limited
3 | */
4 |
5 | package com.alibaba.hologres.client.impl.collector.shard;
6 |
7 | import com.alibaba.hologres.client.model.Record;
8 |
9 | /**
10 | * Shard policy.
11 | */
12 | public interface ShardPolicy {
13 |
14 | void init(int shardCount);
15 |
16 | int locate(Record record);
17 |
18 | }
19 |
--------------------------------------------------------------------------------
/holo-client/src/main/java/com/alibaba/hologres/client/impl/copy/CopyContext.java:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright (c) 2021. Alibaba Group Holding Limited
3 | */
4 |
5 | package com.alibaba.hologres.client.impl.copy;
6 |
7 | import org.postgresql.copy.CopyOperation;
8 | import org.postgresql.jdbc.PgConnection;
9 |
10 | import java.sql.SQLException;
11 |
12 | /**
13 | * Objects needed for a copy operation.
14 | */
15 | public class CopyContext {
16 |
17 | /**
18 | * Held so that conn.getTimestampUtils() can be used to serialize timestamp-related fields during CopyIn.
19 | */
20 | private PgConnection conn;
21 |
22 | /**
23 | * Held so that the caller can cancel the copy when needed.
24 | */
25 | private CopyOperation copyOperation;
26 |
27 | public CopyContext(PgConnection conn, CopyOperation copyOperation) {
28 | this.conn = conn;
29 | this.copyOperation = copyOperation;
30 | }
31 |
32 | public PgConnection getConn() {
33 | return conn;
34 | }
35 |
36 | public CopyOperation getCopyOperation() {
37 | return copyOperation;
38 | }
39 |
40 | public synchronized void cancel() throws SQLException {
41 | if (copyOperation.isActive()) {
42 | copyOperation.cancelCopy();
43 | }
44 | }
45 | }
46 |
--------------------------------------------------------------------------------
/holo-client/src/main/java/com/alibaba/hologres/client/impl/copy/InternalPipedOutputStream.java:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright (c) 2021. Alibaba Group Holding Limited
3 | */
4 |
5 | package com.alibaba.hologres.client.impl.copy;
6 |
7 | import java.io.IOException;
8 | import java.io.PipedInputStream;
9 | import java.io.PipedOutputStream;
10 |
11 | /**
12 | * The lifecycle of an InternalPipedOutputStream is managed inside HoloClient.
13 | * When a Worker performs a copy and the stream is an InternalPipedOutputStream, it calls close() after the copy completes; otherwise close() is not called.
14 | */
15 | public class InternalPipedOutputStream extends PipedOutputStream {
16 |
17 | public InternalPipedOutputStream(PipedInputStream snk) throws IOException {
18 | super(snk);
19 | }
20 |
21 | public InternalPipedOutputStream() {
22 | }
23 |
24 | }
25 |
--------------------------------------------------------------------------------
/holo-client/src/main/java/com/alibaba/hologres/client/impl/handler/EmptyActionHandler.java:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright (c) 2022. Alibaba Group Holding Limited
3 | */
4 |
5 | package com.alibaba.hologres.client.impl.handler;
6 |
7 | import com.alibaba.hologres.client.HoloConfig;
8 | import com.alibaba.hologres.client.impl.action.EmptyAction;
9 |
10 | /**
11 | * Handler for EmptyAction.
12 | */
13 | public class EmptyActionHandler extends ActionHandler<EmptyAction> {
14 |
15 | private static final String NAME = "empty";
16 |
17 | public EmptyActionHandler(HoloConfig config) {
18 | super(config);
19 | }
20 |
21 | @Override
22 | public void handle(EmptyAction action) {
23 |
24 | }
25 |
26 | @Override
27 | public String getCostMsMetricName() {
28 | return NAME + METRIC_COST_MS;
29 | }
30 | }
31 |
--------------------------------------------------------------------------------
/holo-client/src/main/java/com/alibaba/hologres/client/impl/handler/SqlActionHandler.java:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright (c) 2022. Alibaba Group Holding Limited
3 | */
4 |
5 | package com.alibaba.hologres.client.impl.handler;
6 |
7 | import com.alibaba.hologres.client.HoloConfig;
8 | import com.alibaba.hologres.client.exception.HoloClientException;
9 | import com.alibaba.hologres.client.impl.ConnectionHolder;
10 | import com.alibaba.hologres.client.impl.action.SqlAction;
11 |
12 | /**
13 | * Handler for SqlAction.
14 | */
15 | public class SqlActionHandler extends ActionHandler<SqlAction> {
16 |
17 | private static final String NAME = "sql";
18 |
19 | private final ConnectionHolder connectionHolder;
20 |
21 | public SqlActionHandler(ConnectionHolder connectionHolder, HoloConfig config) {
22 | super(config);
23 | this.connectionHolder = connectionHolder;
24 | }
25 |
26 | @Override
27 | public void handle(SqlAction action) {
28 | try {
29 | action.getFuture().complete(connectionHolder.retryExecute((conn) -> action.getHandler().apply(conn)));
30 | } catch (HoloClientException e) {
31 | action.getFuture().completeExceptionally(e);
32 | }
33 | }
34 |
35 | @Override
36 | public String getCostMsMetricName() {
37 | return NAME + METRIC_COST_MS;
38 | }
39 | }
40 |
--------------------------------------------------------------------------------
/holo-client/src/main/java/com/alibaba/hologres/client/impl/handler/jdbc/JdbcByteaColumnValues.java:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright (c) 2022. Alibaba Group Holding Limited
3 | */
4 |
5 | package com.alibaba.hologres.client.impl.handler.jdbc;
6 |
7 | import org.postgresql.jdbc.TimestampUtils;
8 |
9 | import java.nio.charset.Charset;
10 | import java.sql.SQLException;
11 |
12 | /**
13 | * Column values holder for bytea.
14 | */
15 | public class JdbcByteaColumnValues extends JdbcColumnValues {
16 | private static final Charset UTF8 = Charset.forName("utf-8");
17 |
18 | byte[][] array;
19 |
20 | public JdbcByteaColumnValues(TimestampUtils timestampUtils, int rowCount) {
21 | super(timestampUtils, rowCount);
22 | array = new byte[rowCount][];
23 | }
24 |
25 | @Override
26 | public void doSet(int row, Object obj) throws SQLException {
27 | array[row] = castToBytes(obj);
28 | }
29 |
30 | private static byte[] castToBytes(final Object in) throws SQLException {
31 | try {
32 | if (in instanceof byte[]) {
33 | return (byte[]) in;
34 | }
35 | return in.toString().getBytes(UTF8);
36 | } catch (final Exception e) {
37 | throw cannotCastException(in.getClass().getName(), "byte[]", e);
38 | }
39 | }
40 |
41 | @Override
42 | public Object[] getArray() {
43 | return array;
44 | }
45 | }
46 |
--------------------------------------------------------------------------------
/holo-client/src/main/java/com/alibaba/hologres/client/impl/handler/jdbc/JdbcDoubleColumnValues.java:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright (c) 2022. Alibaba Group Holding Limited
3 | */
4 |
5 | package com.alibaba.hologres.client.impl.handler.jdbc;
6 |
7 | import org.postgresql.jdbc.TimestampUtils;
8 |
9 | import java.sql.Clob;
10 | import java.sql.SQLException;
11 |
12 | /**
13 | * Column values holder for double.
14 | */
15 | public class JdbcDoubleColumnValues extends JdbcColumnValues {
16 |
17 | Double[] array;
18 |
19 | public JdbcDoubleColumnValues(TimestampUtils timestampUtils, int rowCount) {
20 | super(timestampUtils, rowCount);
21 | array = new Double[rowCount];
22 | }
23 |
24 | @Override
25 | public void doSet(int row, Object obj) throws SQLException {
26 | array[row] = castToDouble(obj);
27 | }
28 |
29 | private static double castToDouble(final Object in) throws SQLException {
30 | try {
31 | if (in instanceof String) {
32 | return Double.parseDouble((String) in);
33 | }
34 | if (in instanceof Number) {
35 | return ((Number) in).doubleValue();
36 | }
37 | if (in instanceof java.util.Date) {
38 | return ((java.util.Date) in).getTime();
39 | }
40 | if (in instanceof Boolean) {
41 | return (Boolean) in ? 1d : 0d;
42 | }
43 | if (in instanceof Clob) {
44 | return Double.parseDouble(asString((Clob) in));
45 | }
46 | if (in instanceof Character) {
47 | return Double.parseDouble(in.toString());
48 | }
49 | } catch (final Exception e) {
50 | throw cannotCastException(in.getClass().getName(), "double", e);
51 | }
52 | throw cannotCastException(in.getClass().getName(), "double");
53 | }
54 |
55 | @Override
56 | public Object[] getArray() {
57 | return array;
58 | }
59 | }
60 |
--------------------------------------------------------------------------------
/holo-client/src/main/java/com/alibaba/hologres/client/impl/handler/jdbc/JdbcFloatColumnValues.java:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright (c) 2022. Alibaba Group Holding Limited
3 | */
4 |
5 | package com.alibaba.hologres.client.impl.handler.jdbc;
6 |
7 | import org.postgresql.jdbc.TimestampUtils;
8 |
9 | import java.sql.Clob;
10 | import java.sql.SQLException;
11 |
12 | /**
13 | * Column values holder for float.
14 | */
15 | public class JdbcFloatColumnValues extends JdbcColumnValues {
16 |
17 | Float[] array;
18 |
19 | public JdbcFloatColumnValues(TimestampUtils timestampUtils, int rowCount) {
20 | super(timestampUtils, rowCount);
21 | array = new Float[rowCount];
22 | }
23 |
24 | @Override
25 | public void doSet(int row, Object obj) throws SQLException {
26 | array[row] = castToFloat(obj);
27 | }
28 |
29 | private static float castToFloat(final Object in) throws SQLException {
30 | try {
31 | if (in instanceof String) {
32 | return Float.parseFloat((String) in);
33 | }
34 | if (in instanceof Number) {
35 | return ((Number) in).floatValue();
36 | }
37 | if (in instanceof java.util.Date) {
38 | return ((java.util.Date) in).getTime();
39 | }
40 | if (in instanceof Boolean) {
41 | return (Boolean) in ? 1f : 0f;
42 | }
43 | if (in instanceof Clob) {
44 | return Float.parseFloat(asString((Clob) in));
45 | }
46 | if (in instanceof Character) {
47 | return Float.parseFloat(in.toString());
48 | }
49 | } catch (final Exception e) {
50 | throw cannotCastException(in.getClass().getName(), "float", e);
51 | }
52 | throw cannotCastException(in.getClass().getName(), "float");
53 | }
54 |
55 | @Override
56 | public Object[] getArray() {
57 | return array;
58 | }
59 | }
60 |
--------------------------------------------------------------------------------
/holo-client/src/main/java/com/alibaba/hologres/client/impl/handler/jdbc/JdbcIntegerColumnValues.java:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright (c) 2022. Alibaba Group Holding Limited
3 | */
4 |
5 | package com.alibaba.hologres.client.impl.handler.jdbc;
6 |
7 | import org.postgresql.jdbc.TimestampUtils;
8 |
9 | import java.sql.Clob;
10 | import java.sql.SQLException;
11 |
12 | /**
13 | * Column values class for int.
14 | */
15 | public class JdbcIntegerColumnValues extends JdbcColumnValues {
16 |
17 | Integer[] array;
18 |
19 | public JdbcIntegerColumnValues(TimestampUtils timestampUtils, int rowCount) {
20 | super(timestampUtils, rowCount);
21 | array = new Integer[rowCount];
22 | }
23 |
24 | @Override
25 | public void doSet(int row, Object obj) throws SQLException {
26 | array[row] = castToInt(obj);
27 | }
28 |
29 | private static int castToInt(final Object in) throws SQLException {
30 | try {
31 | if (in instanceof String) {
32 | return Integer.parseInt((String) in);
33 | }
34 | if (in instanceof Number) {
35 | return ((Number) in).intValue();
36 | }
37 | if (in instanceof java.util.Date) {
38 | return (int) ((java.util.Date) in).getTime();
39 | }
40 | if (in instanceof Boolean) {
41 | return (Boolean) in ? 1 : 0;
42 | }
43 | if (in instanceof Clob) {
44 | return Integer.parseInt(asString((Clob) in));
45 | }
46 | if (in instanceof Character) {
47 | return Integer.parseInt(in.toString());
48 | }
49 | } catch (final Exception e) {
50 | throw cannotCastException(in.getClass().getName(), "int", e);
51 | }
52 | throw cannotCastException(in.getClass().getName(), "int");
53 | }
54 |
55 | @Override
56 | public Object[] getArray() {
57 | return array;
58 | }
59 | }
60 |
--------------------------------------------------------------------------------
/holo-client/src/main/java/com/alibaba/hologres/client/impl/handler/jdbc/JdbcLongColumnValues.java:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright (c) 2022. Alibaba Group Holding Limited
3 | */
4 |
5 | package com.alibaba.hologres.client.impl.handler.jdbc;
6 |
7 | import org.postgresql.jdbc.TimestampUtils;
8 |
9 | import java.sql.Clob;
10 | import java.sql.SQLException;
11 |
12 | /**
13 | * Column values class for long.
14 | */
15 | public class JdbcLongColumnValues extends JdbcColumnValues {
16 |
17 | Long[] array;
18 |
19 | public JdbcLongColumnValues(TimestampUtils timestampUtils, int rowCount) {
20 | super(timestampUtils, rowCount);
21 | array = new Long[rowCount];
22 | }
23 |
24 | @Override
25 | public void doSet(int row, Object obj) throws SQLException {
26 | array[row] = castToLong(obj);
27 | }
28 |
29 | private static long castToLong(final Object in) throws SQLException {
30 | try {
31 | if (in instanceof String) {
32 | return Long.parseLong((String) in);
33 | }
34 | if (in instanceof Number) {
35 | return ((Number) in).longValue();
36 | }
37 | if (in instanceof java.util.Date) {
38 | return ((java.util.Date) in).getTime();
39 | }
40 | if (in instanceof Boolean) {
41 | return (Boolean) in ? 1L : 0L;
42 | }
43 | if (in instanceof Clob) {
44 | return Long.parseLong(asString((Clob) in));
45 | }
46 | if (in instanceof Character) {
47 | return Long.parseLong(in.toString());
48 | }
49 | } catch (final Exception e) {
50 | throw cannotCastException(in.getClass().getName(), "long", e);
51 | }
52 | throw cannotCastException(in.getClass().getName(), "long");
53 | }
54 |
55 | @Override
56 | public Object[] getArray() {
57 | return array;
58 | }
59 | }
60 |
--------------------------------------------------------------------------------
/holo-client/src/main/java/com/alibaba/hologres/client/impl/handler/jdbc/JdbcShortColumnValues.java:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright (c) 2022. Alibaba Group Holding Limited
3 | */
4 |
5 | package com.alibaba.hologres.client.impl.handler.jdbc;
6 |
7 | import org.postgresql.jdbc.TimestampUtils;
8 |
9 | import java.sql.Clob;
10 | import java.sql.SQLException;
11 |
12 | /**
13 | * Column values class for short.
14 | */
15 | public class JdbcShortColumnValues extends JdbcColumnValues {
16 |
17 | Short[] array;
18 |
19 | public JdbcShortColumnValues(TimestampUtils timestampUtils, int rowCount) {
20 | super(timestampUtils, rowCount);
21 | array = new Short[rowCount];
22 | }
23 |
24 | @Override
25 | public void doSet(int row, Object obj) throws SQLException {
26 | array[row] = castToShort(obj);
27 | }
28 |
29 | private static short castToShort(final Object in) throws SQLException {
30 | try {
31 | if (in instanceof String) {
32 | return Short.parseShort((String) in);
33 | }
34 | if (in instanceof Number) {
35 | return ((Number) in).shortValue();
36 | }
37 | if (in instanceof java.util.Date) {
38 | return (short) ((java.util.Date) in).getTime();
39 | }
40 | if (in instanceof Boolean) {
41 | return (Boolean) in ? (short) 1 : (short) 0;
42 | }
43 | if (in instanceof Clob) {
44 | return Short.parseShort(asString((Clob) in));
45 | }
46 | if (in instanceof Character) {
47 | return Short.parseShort(in.toString());
48 | }
49 | } catch (final Exception e) {
50 | throw cannotCastException(in.getClass().getName(), "short", e);
51 | }
52 | throw cannotCastException(in.getClass().getName(), "short");
53 | }
54 |
55 | @Override
56 | public Object[] getArray() {
57 | return array;
58 | }
59 | }
60 |
--------------------------------------------------------------------------------
/holo-client/src/main/java/com/alibaba/hologres/client/impl/handler/jdbc/JdbcStringColumnValues.java:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright (c) 2022. Alibaba Group Holding Limited
3 | */
4 |
5 | package com.alibaba.hologres.client.impl.handler.jdbc;
6 |
7 | import com.alibaba.hologres.client.HoloConfig;
8 | import org.postgresql.jdbc.TimestampUtils;
9 |
10 | import java.sql.Clob;
11 | import java.sql.SQLException;
12 |
13 | /**
14 | * Column values class for String.
15 | */
16 | public class JdbcStringColumnValues extends JdbcColumnValues {
17 |
18 | private final HoloConfig config;
19 | String[] array;
20 |
21 | public JdbcStringColumnValues(TimestampUtils timestampUtils, int rowCount, HoloConfig config) {
22 | super(timestampUtils, rowCount);
23 | array = new String[rowCount];
24 | this.config = config;
25 | }
26 |
27 | @Override
28 | public void doSet(int row, Object obj) throws SQLException {
29 | array[row] = removeU0000(castToString(obj));
30 | }
31 |
32 | private String removeU0000(final String in) {
33 | if (config.isRemoveU0000InTextColumnValue() && in != null && in.contains("\u0000")) {
34 | return in.replaceAll("\u0000", "");
35 | } else {
36 | return in;
37 | }
38 | }
39 |
40 | private static String castToString(final Object in) throws SQLException {
41 | try {
42 | if (in instanceof String) {
43 | return (String) in;
44 | }
45 | if (in instanceof Clob) {
46 | return asString((Clob) in);
47 | }
48 | // convert any unknown objects to string.
49 | return in.toString();
50 |
51 | } catch (final Exception e) {
52 | throw cannotCastException(in.getClass().getName(), "String", e);
53 | }
54 | }
55 |
56 | @Override
57 | public Object[] getArray() {
58 | return array;
59 | }
60 | }
61 |
--------------------------------------------------------------------------------
/holo-client/src/main/java/com/alibaba/hologres/client/impl/util/ExceptionUtil.java:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright (c) 2023. Alibaba Group Holding Limited
3 | */
4 |
5 | package com.alibaba.hologres.client.impl.util;
6 |
7 | import com.alibaba.hologres.client.exception.HoloClientException;
8 | import com.alibaba.hologres.client.exception.HoloClientWithDetailsException;
9 |
10 | /**
11 | * Utility class for exception handling.
12 | */
13 | public class ExceptionUtil {
14 | /**
15 | * When both a and b are HoloClientWithDetailsException, merge b into a; otherwise return one of the two exceptions.
16 | *
17 | * @param a
18 | * @param b
19 | * @return
20 | */
21 | public static HoloClientException merge(HoloClientException a, HoloClientException b) {
22 | if (a == null) {
23 | return b;
24 | } else if (b == null) {
25 | return a;
26 | } else if (a instanceof HoloClientWithDetailsException) {
27 | if (b instanceof HoloClientWithDetailsException) {
28 | ((HoloClientWithDetailsException) a).merge((HoloClientWithDetailsException) b);
29 | return a;
30 | } else {
31 | return b;
32 | }
33 | } else {
34 | return a;
35 | }
36 | }
37 | }
38 |
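A minimal usage sketch of ExceptionUtil.merge, folding the failures of several independent flushes into a single exception for the caller; the surrounding collector method is illustrative only and not part of the client:

import com.alibaba.hologres.client.exception.HoloClientException;
import com.alibaba.hologres.client.impl.util.ExceptionUtil;

import java.util.List;

public class ExceptionMergeSketch {
    // Folds a list of per-flush failures into one exception.
    // Detail-carrying HoloClientWithDetailsException instances are merged together,
    // while a plain HoloClientException takes precedence, matching ExceptionUtil.merge.
    public static HoloClientException collect(List<HoloClientException> failures) {
        HoloClientException merged = null;
        for (HoloClientException e : failures) {
            merged = ExceptionUtil.merge(merged, e);
        }
        return merged; // null when nothing failed
    }
}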
--------------------------------------------------------------------------------
/holo-client/src/main/java/com/alibaba/hologres/client/model/Partition.java:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright (c) 2022. Alibaba Group Holding Limited
3 | */
4 |
5 | package com.alibaba.hologres.client.model;
6 |
7 | import java.io.Serializable;
8 |
9 | /**
10 | * Partition model class.
11 | */
12 | public class Partition implements Serializable {
13 | String schemaName;
14 | String tableName;
15 | String parentSchemaName;
16 | String parentTableName;
17 | String partitionValue;
18 |
19 | public void setSchemaName(String schemaName) {
20 | this.schemaName = schemaName;
21 | }
22 |
23 | public void setTableName(String tableName) {
24 | this.tableName = tableName;
25 | }
26 |
27 | public void setParentSchemaName(String parentSchemaName) {
28 | this.parentSchemaName = parentSchemaName;
29 | }
30 |
31 | public void setParentTableName(String parentTableName) {
32 | this.parentTableName = parentTableName;
33 | }
34 |
35 | public void setPartitionValue(String partitionValue) {
36 | this.partitionValue = partitionValue;
37 | }
38 |
39 | public String getSchemaName() {
40 | return schemaName;
41 | }
42 |
43 | public String getTableName() {
44 | return tableName;
45 | }
46 |
47 | public String getParentSchemaName() {
48 | return parentSchemaName;
49 | }
50 |
51 | public String getParentTableName() {
52 | return parentTableName;
53 | }
54 |
55 | public String getPartitionValue() {
56 | return partitionValue;
57 | }
58 | }
59 |
60 |
--------------------------------------------------------------------------------
/holo-client/src/main/java/com/alibaba/hologres/client/model/SSLMode.java:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright (c) 2020. Alibaba Group Holding Limited
3 | */
4 |
5 | package com.alibaba.hologres.client.model;
6 |
7 | /**
8 | * enum for ssl mode.
9 | * see SSL_MODE in org.postgresql.PGProperty.
10 | */
11 | public enum SSLMode {
12 | // disable
13 | DISABLE("disable"),
14 | // require
15 | REQUIRE("require"),
16 | // verify-ca
17 | VERIFY_CA("verify-ca"),
18 | // verify-full
19 | VERIFY_FULL("verify-full");
20 |
21 | private final String pgPropertyValue;
22 |
23 | SSLMode(String pgPropertyValue) {
24 | this.pgPropertyValue = pgPropertyValue;
25 | }
26 |
27 | public static SSLMode fromPgPropertyValue(String pgPropertyValue) {
28 | for (SSLMode sslMode : SSLMode.values()) {
29 | if (sslMode.getPgPropertyValue().equalsIgnoreCase(pgPropertyValue)) {
30 | return sslMode;
31 | }
32 | }
33 | throw new IllegalArgumentException("invalid ssl mode: " + pgPropertyValue);
34 | }
35 |
36 | public String getPgPropertyValue() {
37 | return pgPropertyValue;
38 | }
39 | }
40 |
--------------------------------------------------------------------------------
/holo-client/src/main/java/com/alibaba/hologres/client/model/WriteFailStrategy.java:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright (c) 2020. Alibaba Group Holding Limited
3 | */
4 |
5 | package com.alibaba.hologres.client.model;
6 |
7 | /**
8 | * Strategy applied when a write fails.
9 | * TRY_ONE_BY_ONE: fall back from batched submission to row-by-row submission.
10 | * NONE: throw the exception and do nothing else.
11 | */
12 | public enum WriteFailStrategy {
13 | TRY_ONE_BY_ONE,
14 | NONE
15 | }
16 |
--------------------------------------------------------------------------------
/holo-client/src/main/java/com/alibaba/hologres/client/model/WriteMode.java:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright (c) 2020. Alibaba Group Holding Limited
3 | */
4 |
5 | package com.alibaba.hologres.client.model;
6 |
7 | /**
8 | * enum for write mode.
9 | * */
10 | public enum WriteMode {
11 | INSERT_OR_IGNORE,
12 | INSERT_OR_UPDATE,
13 | INSERT_OR_REPLACE
14 | }
15 |
--------------------------------------------------------------------------------
/holo-client/src/main/java/com/alibaba/hologres/client/model/binlog/BinlogHeartBeatRecord.java:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright (c) 2022. Alibaba Group Holding Limited
3 | */
4 |
5 | package com.alibaba.hologres.client.model.binlog;
6 |
7 | import com.alibaba.hologres.client.impl.binlog.BinlogEventType;
8 | import com.alibaba.hologres.client.model.TableSchema;
9 |
10 | /**
11 | * Used only to indicate the timestamp position of consumption.
12 | * When no new data is produced, the lsn does not change, but the timestamp can still advance, which reflects the consumption latency.
13 | */
14 | public class BinlogHeartBeatRecord extends BinlogRecord {
15 |
16 | public BinlogHeartBeatRecord(TableSchema schema, long lsn, BinlogEventType eventType, long timestamp) {
17 | super(schema, lsn, eventType, timestamp);
18 | }
19 |
20 | @Override
21 | public boolean isHeartBeat() {
22 | return true;
23 | }
24 | }
25 |
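The heartbeat semantics above imply that a consumer should use these records only to advance its timestamp/lag bookkeeping and never treat them as data. A minimal sketch, where a hypothetical Supplier of BinlogRecord instances stands in for the real reader API (which is not shown in this file):

import com.alibaba.hologres.client.model.binlog.BinlogRecord;

import java.util.function.Supplier;

public class BinlogHeartBeatSketch {
    // Drains records from a hypothetical source, skipping heartbeats but still tracking progress.
    public static long consume(Supplier<BinlogRecord> fetchNext) {
        long lastTimestamp = 0;
        BinlogRecord record;
        while ((record = fetchNext.get()) != null) {
            // Heartbeats and data records both move the timestamp forward, so record it first.
            lastTimestamp = record.getBinlogTimestamp();
            if (record.isHeartBeat()) {
                continue; // heartbeat only: no payload to process
            }
            // process the data record here, e.g. record.getBinlogLsn() and record.getValues()
        }
        return lastTimestamp;
    }
}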
--------------------------------------------------------------------------------
/holo-client/src/main/java/com/alibaba/hologres/client/model/binlog/BinlogPartitionSubscribeMode.java:
--------------------------------------------------------------------------------
1 | package com.alibaba.hologres.client.model.binlog;
2 |
3 | /**
4 | * enum for binlog partition mode.
5 | */
6 | public enum BinlogPartitionSubscribeMode {
7 | DISABLE,
8 | // Specify a set of partition values when the subscription starts; they cannot be modified afterwards.
9 | STATIC,
10 | // Specify a starting partition table when the subscription starts, then subscribe in chronological order (the table is partitioned by time).
11 | DYNAMIC
12 | }
13 |
--------------------------------------------------------------------------------
/holo-client/src/main/java/com/alibaba/hologres/client/model/binlog/BinlogRecord.java:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright (c) 2022. Alibaba Group Holding Limited
3 | */
4 |
5 | package com.alibaba.hologres.client.model.binlog;
6 |
7 | import com.alibaba.hologres.client.impl.binlog.BinlogEventType;
8 | import com.alibaba.hologres.client.model.Record;
9 | import com.alibaba.hologres.client.model.TableSchema;
10 |
11 | import java.io.Serializable;
12 | import java.util.Arrays;
13 |
14 | /**
15 | * Binlog Record, carrying binlog-related parameters.
16 | */
17 | public class BinlogRecord extends Record implements Serializable {
18 | private final long lsn;
19 | private final BinlogEventType eventType;
20 | private final long timestamp;
21 |
22 | public BinlogRecord(TableSchema schema, long lsn, BinlogEventType eventType, long timestamp) {
23 | super(schema);
24 | this.lsn = lsn;
25 | this.eventType = eventType;
26 | this.timestamp = timestamp;
27 | }
28 |
29 | public long getBinlogLsn() {
30 | return lsn;
31 | }
32 |
33 | public BinlogEventType getBinlogEventType() {
34 | return eventType;
35 | }
36 |
37 | public long getBinlogTimestamp() {
38 | return timestamp;
39 | }
40 |
41 | public boolean isHeartBeat() {
42 | return false;
43 | }
44 |
45 | @Override
46 | public String toString() {
47 | return "BinlogRecord{" +
48 | "schema=" + getSchema() +
49 | ", binlog lsn=" + lsn +
50 | ", binlog eventType=" + eventType +
51 | ", binlog timestamp=" + timestamp +
52 | ", values=" + Arrays.toString(getValues()) +
53 | ", bitSet=" + getBitSet() +
54 | '}';
55 | }
56 | }
57 |
--------------------------------------------------------------------------------
/holo-client/src/main/java/com/alibaba/hologres/client/model/checkandput/CheckAndPutRecord.java:
--------------------------------------------------------------------------------
1 | package com.alibaba.hologres.client.model.checkandput;
2 |
3 | import com.alibaba.hologres.client.model.Record;
4 | import com.alibaba.hologres.client.model.TableSchema;
5 |
6 | import java.io.Serializable;
7 |
8 | /**
9 | * CheckAndPut Record, carrying the column to check, the check condition, and related settings.
10 | */
11 | public class CheckAndPutRecord extends Record implements Serializable {
12 |
13 | CheckAndPutCondition checkAndPutCondition;
14 |
15 | public CheckAndPutRecord(TableSchema schema, String checkColumnName, CheckCompareOp checkOp, Object checkValue, Object nullValue) {
16 | super(schema);
17 | this.checkAndPutCondition = new CheckAndPutCondition(checkColumnName, checkOp, checkValue, nullValue);
18 | }
19 |
20 | public CheckAndPutRecord(Record record, CheckAndPutCondition checkAndPutCondition) {
21 | super(record.getSchema());
22 | this.merge(record);
23 | this.setType(record.getType());
24 | this.checkAndPutCondition = checkAndPutCondition;
25 | }
26 |
27 | public static CheckAndPutRecord build(TableSchema schema, String checkColumn, CheckCompareOp checkOp, Object checkValue, Object nullValue) {
28 | return new CheckAndPutRecord(schema, checkColumn, checkOp, checkValue, nullValue);
29 | }
30 |
31 | public CheckAndPutCondition getCheckAndPutCondition() {
32 | return checkAndPutCondition;
33 | }
34 |
35 | @Override
36 | public String toString() {
37 | return checkAndPutCondition.toString() + super.toString();
38 | }
39 | }
40 |
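A minimal sketch of building a CheckAndPutRecord through the static factory above. The table schema, the "version" column, and the reading of the condition (apply the put only when the stored value is LESS than the supplied checkValue) are assumptions for illustration; how the record is then submitted to the client is outside this file:

import com.alibaba.hologres.client.model.TableSchema;
import com.alibaba.hologres.client.model.checkandput.CheckAndPutRecord;
import com.alibaba.hologres.client.model.checkandput.CheckCompareOp;

public class CheckAndPutSketch {
    // Builds a record intended to be applied only when the stored "version" column is smaller than newVersion.
    public static CheckAndPutRecord newVersionedUpsert(TableSchema schema, long newVersion) {
        // The last argument is presumably the value substituted when the stored column is NULL; null leaves it unset.
        return CheckAndPutRecord.build(schema, "version", CheckCompareOp.LESS, newVersion, null);
    }
}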
--------------------------------------------------------------------------------
/holo-client/src/main/java/com/alibaba/hologres/client/model/checkandput/CheckCompareOp.java:
--------------------------------------------------------------------------------
1 | package com.alibaba.hologres.client.model.checkandput;
2 |
3 | /**
4 | * Comparison operators used for checkAndPut.
5 | */
6 | public enum CheckCompareOp {
7 | /**
8 | * less than.
9 | */
10 | LESS("<"),
11 | /**
12 | * less than or equal to.
13 | */
14 | LESS_OR_EQUAL("<="),
15 | /**
16 | * equals.
17 | */
18 | EQUAL("="),
19 | /**
20 | * not equal.
21 | */
22 | NOT_EQUAL("<>"),
23 | /**
24 | * is null.
25 | */
26 | IS_NULL("is null"),
27 | /**
28 | * is not null.
29 | */
30 | IS_NOT_NULL("is not null"),
31 | /**
32 | * greater than or equal to.
33 | */
34 | GREATER_OR_EQUAL(">="),
35 | /**
36 | * greater than.
37 | */
38 | GREATER(">");
39 |
40 | private final String operatorString;
41 | CheckCompareOp(String operatorString) {
42 | this.operatorString = operatorString;
43 | }
44 |
45 | public String getOperatorString() {
46 | return operatorString;
47 | }
48 | }
49 |
--------------------------------------------------------------------------------
/holo-client/src/main/java/com/alibaba/hologres/client/type/PGroaringbitmap.java:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright (c) 2022. Alibaba Group Holding Limited
3 | */
4 |
5 | package com.alibaba.hologres.client.type;
6 |
7 | /**
8 | * roaringbitmap object.
9 | */
10 | public class PGroaringbitmap extends PgDefaultBinaryObject {
11 | public PGroaringbitmap() {
12 | super("roaringbitmap");
13 | }
14 |
15 | String text = null;
16 |
17 | @Override
18 | public String getValue() {
19 | if (bytes == null) {
20 | return null;
21 | }
22 | if (text == null) {
23 | final StringBuilder sb = new StringBuilder("\\x");
24 | for (int i = 0; i < bytes.length; ++i) {
25 | int v = bytes[i] & 0xFF;
26 | String hv = Integer.toHexString(v);
27 | if (hv.length() < 2) {
28 | sb.append("0");
29 | }
30 | sb.append(hv);
31 | }
32 | text = sb.toString();
33 | }
34 | return text;
35 | }
36 | }
37 |
--------------------------------------------------------------------------------
/holo-client/src/main/java/com/alibaba/hologres/client/type/PgDefaultBinaryObject.java:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright (c) 2022. Alibaba Group Holding Limited
3 | */
4 |
5 | package com.alibaba.hologres.client.type;
6 |
7 | import org.postgresql.util.PGBinaryObject;
8 | import org.postgresql.util.PGobject;
9 |
10 | import java.sql.SQLException;
11 |
12 | /**
13 | * Generic PGBinaryObject implementation.
14 | */
15 | public class PgDefaultBinaryObject extends PGobject implements PGBinaryObject {
16 |
17 | int length = 0;
18 | byte[] bytes = null;
19 |
20 | public PgDefaultBinaryObject(String type) {
21 | this.type = type;
22 | }
23 |
24 | @Override
25 | public void setByteValue(byte[] bytes, int i) throws SQLException {
26 | length = bytes.length - i;
27 | if (this.bytes == null || this.bytes.length < length) {
28 | this.bytes = new byte[length];
29 | }
30 | System.arraycopy(bytes, i, this.bytes, 0, length);
31 | }
32 |
33 | @Override
34 | public int lengthInBytes() {
35 | return length;
36 | }
37 |
38 | @Override
39 | public void toBytes(byte[] bytes, int i) {
40 | System.arraycopy(this.bytes, 0, bytes, i, length);
41 | }
42 |
43 | @Override
44 | public Object clone() throws CloneNotSupportedException {
45 | PgDefaultBinaryObject ret = new PgDefaultBinaryObject(type);
46 | ret.length = length;
47 | ret.bytes = new byte[length];
48 | System.arraycopy(this.bytes, 0, ret.bytes, 0, length);
49 | return ret;
50 | }
51 |
52 | @Override
53 | public String getValue() {
54 | throw new UnsupportedOperationException("Unsupported type " + type + " input byte[]");
55 | }
56 | }
57 |
--------------------------------------------------------------------------------
/holo-client/src/main/java/com/alibaba/hologres/client/utils/FutureUtil.java:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright (c) 2022. Alibaba Group Holding Limited
3 | */
4 |
5 | package com.alibaba.hologres.client.utils;
6 |
7 | import com.alibaba.hologres.client.exception.ExceptionCode;
8 | import com.alibaba.hologres.client.exception.HoloClientException;
9 |
10 | import java.util.concurrent.CompletableFuture;
11 | import java.util.concurrent.ExecutionException;
12 |
13 | /**
14 | * Utility class for futures.
15 | */
16 | public class FutureUtil {
17 | /**
18 | * Wraps the exception handling around future.get().
19 | *
20 | * @param future
21 | * @param <T>
22 | * @return
23 | * @throws HoloClientException
24 | */
25 | public static <T> T get(CompletableFuture<T> future) throws HoloClientException {
26 | try {
27 | return future.get();
28 | } catch (InterruptedException e) {
29 | throw new RuntimeException(e);
30 | } catch (ExecutionException e) {
31 | if (e.getCause() instanceof HoloClientException) {
32 | throw (HoloClientException) e.getCause();
33 | } else if (e.getCause() != null) {
34 | throw new HoloClientException(ExceptionCode.INTERNAL_ERROR, "", e.getCause());
35 | } else {
36 | throw new HoloClientException(ExceptionCode.INTERNAL_ERROR, "", e);
37 | }
38 | }
39 | }
40 | }
41 |
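A minimal sketch of the call pattern this helper is meant for: instead of handling InterruptedException/ExecutionException at every call site, callers pass the client's future to FutureUtil.get and only deal with HoloClientException; the completed future below is a stand-in for one returned by the client:

import com.alibaba.hologres.client.exception.HoloClientException;
import com.alibaba.hologres.client.utils.FutureUtil;

import java.util.concurrent.CompletableFuture;

public class FutureUtilSketch {
    public static void main(String[] args) {
        // Stand-in for a CompletableFuture handed back by the client.
        CompletableFuture<Long> future = CompletableFuture.completedFuture(42L);
        try {
            // ExecutionException is unwrapped: a HoloClientException cause is rethrown as-is,
            // any other cause is wrapped as INTERNAL_ERROR.
            Long result = FutureUtil.get(future);
            System.out.println("result = " + result);
        } catch (HoloClientException e) {
            e.printStackTrace();
        }
    }
}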
--------------------------------------------------------------------------------
/holo-client/src/main/java/com/alibaba/hologres/client/utils/Tuple.java:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright (c) 2022. Alibaba Group Holding Limited
3 | */
4 |
5 | package com.alibaba.hologres.client.utils;
6 |
7 | import java.util.Objects;
8 |
9 | /**
10 | * A two-element tuple.
11 | * @param <L>
12 | * @param <R>
13 | */
14 | public class Tuple<L, R> {
15 | public L l;
16 | public R r;
17 |
18 | public Tuple(L l, R r) {
19 | this.l = l;
20 | this.r = r;
21 | }
22 |
23 | @Override
24 | public boolean equals(Object o) {
25 | if (this == o) {
26 | return true;
27 | }
28 | if (o == null || getClass() != o.getClass()) {
29 | return false;
30 | }
31 | Tuple<?, ?> tuple = (Tuple<?, ?>) o;
32 | return Objects.equals(l, tuple.l) &&
33 | Objects.equals(r, tuple.r);
34 | }
35 |
36 | @Override
37 | public int hashCode() {
38 | return Objects.hash(l, r);
39 | }
40 |
41 | @Override
42 | public String toString() {
43 | return "Tuple{" +
44 | "l=" + l +
45 | ", r=" + r +
46 | '}';
47 | }
48 | }
49 |
--------------------------------------------------------------------------------
/holo-client/src/main/java/com/alibaba/hologres/client/utils/Tuple3.java:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright (c) 2022. Alibaba Group Holding Limited
3 | */
4 |
5 | package com.alibaba.hologres.client.utils;
6 |
7 | import java.util.Objects;
8 |
9 | /**
10 | * A three-element tuple.
11 | *
12 | * @param <L>
13 | * @param <M>
14 | * @param <R>
15 | */
16 | public class Tuple3<L, M, R> {
17 | public L l;
18 | public M m;
19 | public R r;
20 |
21 | public Tuple3(L l, M m, R r) {
22 | this.l = l;
23 | this.m = m;
24 | this.r = r;
25 | }
26 |
27 | @Override
28 | public boolean equals(Object o) {
29 | if (this == o) {
30 | return true;
31 | }
32 | if (o == null || getClass() != o.getClass()) {
33 | return false;
34 | }
35 | Tuple3<?, ?, ?> tuple = (Tuple3<?, ?, ?>) o;
36 | return Objects.equals(l, tuple.l) &&
37 | Objects.equals(m, tuple.m) &&
38 | Objects.equals(r, tuple.r);
39 | }
40 |
41 | @Override
42 | public int hashCode() {
43 | return Objects.hash(l, m, r);
44 | }
45 | }
46 |
--------------------------------------------------------------------------------
/holo-client/src/main/java/com/alibaba/hologres/client/utils/Tuple4.java:
--------------------------------------------------------------------------------
1 | package com.alibaba.hologres.client.utils;
2 |
3 | import java.util.Objects;
4 |
5 | /**
6 | * A four-element tuple.
7 | *
8 | * @param <F0>
9 | * @param <F1>
10 | * @param <F2>
11 | * @param <F3>
12 | */
13 | public class Tuple4<F0, F1, F2, F3> {
14 | public F0 f0;
15 | public F1 f1;
16 | public F2 f2;
17 | public F3 f3;
18 |
19 | public Tuple4(F0 f0, F1 f1, F2 f2, F3 f3) {
20 | this.f0 = f0;
21 | this.f1 = f1;
22 | this.f2 = f2;
23 | this.f3 = f3;
24 | }
25 |
26 | @Override
27 | public boolean equals(Object o) {
28 | if (this == o) {
29 | return true;
30 | }
31 | if (o == null || getClass() != o.getClass()) {
32 | return false;
33 | }
34 | Tuple4<?, ?, ?, ?> tuple = (Tuple4<?, ?, ?, ?>) o;
35 | return Objects.equals(f0, tuple.f0) &&
36 | Objects.equals(f1, tuple.f1) &&
37 | Objects.equals(f2, tuple.f2) &&
38 | Objects.equals(f3, tuple.f3);
39 | }
40 |
41 | @Override
42 | public int hashCode() {
43 | return Objects.hash(f0, f1, f2, f3);
44 | }
45 | }
46 |
--------------------------------------------------------------------------------
/holo-client/src/main/resources/META-INF/services/java.sql.Driver:
--------------------------------------------------------------------------------
1 | com.alibaba.hologres.org.postgresql.Driver
--------------------------------------------------------------------------------
/holo-client/src/main/resources/holo-client.properties:
--------------------------------------------------------------------------------
1 | #
2 | # Copyright (c) 2021. Alibaba Group Holding Limited
3 | #
4 |
5 | version=@project.version@
6 |
--------------------------------------------------------------------------------
/holo-client/src/saveVersion.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 |
3 | # This file is used to generate the annotation of package info that
4 | # records the user, url, revision and timestamp.
5 |
6 | unset LANG
7 | unset LC_CTYPE
8 |
9 | version=$1
10 | outputDirectory=$2
11 |
12 | pushd .
13 |
14 | user=`whoami | sed -n -e 's/\\\/\\\\\\\\/p'`
15 | if [ "$user" == "" ]
16 | then
17 | user=`whoami`
18 | fi
19 | date=`date`
20 | cwd=`pwd`
21 | if [ -d .svn ]; then
22 | revision=`svn info | sed -n -e 's/Last Changed Rev: \(.*\)/\1/p'`
23 | url=`svn info | sed -n -e 's/^URL: \(.*\)/\1/p'`
24 | elif [ -d .git ]; then
25 | revision=`git log -1 --pretty=format:"%H"`
26 | hostname=`hostname`
27 | url="git://${hostname}${cwd}"
28 | else
29 | revision="Unknown"
30 | url="file://$cwd"
31 | fi
32 | which md5sum > /dev/null
33 | if [ "$?" != "0" ] ; then
34 | which md5 > /dev/null
35 | if [ "$?" != "0" ] ; then
36 | srcChecksum="Unknown"
37 | else
38 | srcChecksum=`find src/main/ | grep -e "\.java" -e "\.proto" | LC_ALL=C sort | xargs md5 | md5 | cut -d ' ' -f 1`
39 | fi
40 | else
41 | srcChecksum=`find src/main/ | grep -e "\.java" -e "\.proto" | LC_ALL=C sort | xargs md5sum | md5sum | cut -d ' ' -f 1`
42 | fi
43 | popd
44 |
45 | mkdir -p "$outputDirectory/com/alibaba/hologres/client"
46 | cat >"$outputDirectory/com/alibaba/hologres/client/Version.java" < columnList = new ArrayList<>();
26 | Column c1 = new Column();
27 | c1.setName("A1");
28 | c1.setTypeName("text");
29 | // c1.setAllowNull(true);
30 | c1.setPrimaryKey(true);
31 | columnList.add(c1);
32 | builder.setColumns(columnList);
33 | builder.setPartitionColumnName("a1");
34 |
35 | builder.setClusteringKey(new String[]{"A1"});
36 | builder.setBitmapIndexKey(new String[]{"A1"});
37 | TableSchema table = builder.build();
38 | System.out.println(DDLGenerator.sqlGenerator(table));
39 |
40 | }
41 | }
42 |
--------------------------------------------------------------------------------
/holo-client/src/test/java/com/alibaba/hologres/client/utils/IdentifierTest.java:
--------------------------------------------------------------------------------
1 | package com.alibaba.hologres.client.utils;
2 |
3 | import org.testng.Assert;
4 | import org.testng.annotations.Test;
5 |
6 | /**
7 | * Test Postgres Identifier quotation.
8 | * */
9 | public class IdentifierTest {
10 |
11 | @Test
12 | public void testLowercase() {
13 | Assert.assertEquals("abc", IdentifierUtil.quoteIdentifier("abc"));
14 | Assert.assertEquals("Abc", IdentifierUtil.quoteIdentifier("Abc"));
15 | Assert.assertEquals("\"Abc\"", IdentifierUtil.quoteIdentifier("Abc", true));
16 | Assert.assertEquals("\"$Abc\"", IdentifierUtil.quoteIdentifier("$Abc"));
17 | Assert.assertEquals("\"$abc\"", IdentifierUtil.quoteIdentifier("$abc"));
18 | Assert.assertEquals("\"abc$\"", IdentifierUtil.quoteIdentifier("abc$"));
19 | Assert.assertEquals("_abc", IdentifierUtil.quoteIdentifier("_abc"));
20 | Assert.assertEquals("\"9abc\"", IdentifierUtil.quoteIdentifier("9abc"));
21 | Assert.assertEquals("\"ab\"\"c\"", IdentifierUtil.quoteIdentifier("ab\"c"));
22 | }
23 | }
24 |
--------------------------------------------------------------------------------
/holo-client/testng.xml:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 |
7 |
8 |
9 |
10 |
11 |
12 |
13 |
14 |
15 |
16 |
17 |
--------------------------------------------------------------------------------
/holo-e2e-performance-tool/src/main/java/com/alibaba/hologres/performace/client/Main.java:
--------------------------------------------------------------------------------
1 | package com.alibaba.hologres.performace.client;
2 |
3 | import org.slf4j.Logger;
4 | import org.slf4j.LoggerFactory;
5 |
6 | public class Main {
7 | public static final Logger LOG = LoggerFactory.getLogger(Main.class);
8 |
9 | public static void main(String[] args) {
10 | if (args.length != 2) {
11 | LOG.error("invalid args\njava -jar xxxx.jar CONF_NAME METHOD\n METHOD = INSERT/FIXED_COPY/GET/SCAN");
12 | return;
13 | }
14 | try {
15 | switch (args[1]) {
16 | case "INSERT":
17 | new InsertTest().run(args[0]);
18 | break;
19 | case "GET":
20 | new GetTest().run(args[0]);
21 | break;
22 | case "PREPARE_GET_DATA":
23 | new PrepareGetData().run(args[0]);
24 | break;
25 | case "FIXED_COPY":
26 | new FixedCopyTest().run(args[0]);
27 | break;
28 | case "PREPARE_SCAN_DATA":
29 | new PrepareScanData().run(args[0]);
30 | break;
31 | case "SCAN":
32 | new ScanTest().run(args[0]);
33 | break;
34 | case "PREPARE_BINLOG_DATA":
35 | new PrepareBinlogData().run(args[0]);
36 | break;
37 | case "BINLOG":
38 | new BinlogTest().run(args[0]);
39 | break;
40 | default:
41 | throw new Exception("unknow method " + args[1]);
42 | }
43 | } catch (Exception e) {
44 | LOG.error("", e);
45 | }
46 | }
47 | }
48 |
--------------------------------------------------------------------------------
/holo-e2e-performance-tool/src/main/java/com/alibaba/hologres/performace/client/PrepareBinlogData.java:
--------------------------------------------------------------------------------
1 | package com.alibaba.hologres.performace.client;
2 |
3 | import com.alibaba.hologres.client.utils.ConfLoader;
4 |
5 |
6 | public class PrepareBinlogData extends FixedCopyTest{
7 | BinlogTestConf binlogTestConf = new BinlogTestConf();
8 | PrepareBinlogDataConf prepareBinlogDataConf = new PrepareBinlogDataConf();
9 |
10 | @Override
11 | void init() throws Exception {
12 | super.init();
13 | ConfLoader.load(confName, "binlog.", binlogTestConf);
14 | ConfLoader.load(confName, "prepareBinlogData.", prepareBinlogDataConf);
15 | conf.testByTime = false;
16 | conf.rowNumber = prepareBinlogDataConf.rowNumber;
17 | conf.tableName = binlogTestConf.tableName;
18 | conf.publicationName = binlogTestConf.publicationName;
19 | conf.slotName = binlogTestConf.slotName;
20 | conf.recreatePublicationAndSlot = prepareBinlogDataConf.recreatePublicationAndSlot;
21 | conf.binlogTTL = prepareBinlogDataConf.binlogTTL;
22 | conf.shardCount = prepareBinlogDataConf.shardCount;
23 | conf.hasPk = true;
24 | conf.enableBinlogConsumption = true;
25 | conf.enableBinlog = true;
26 | conf.deleteTableAfterDone = false;
27 | conf.dumpMemoryStat = false;
28 | }
29 |
30 | }
31 |
32 | class PrepareBinlogDataConf {
33 | public long rowNumber = 1000000;
34 | public int shardCount = 40;
35 | public boolean recreatePublicationAndSlot = true;
36 | public int binlogTTL = 19600; //6 hours
37 | }
38 |
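Given how init() loads each configuration section by prefix, the file passed to Main presumably groups keys as prefix plus field name. A hypothetical fragment (the exact key-to-field mapping depends on ConfLoader and is an assumption here):

# hypothetical fragment of the conf file used for PREPARE_BINLOG_DATA / BINLOG runs
binlog.tableName=holo_perf_binlog_table
binlog.publicationName=holo_perf_publication
binlog.slotName=holo_perf_slot
prepareBinlogData.rowNumber=1000000
prepareBinlogData.shardCount=40
prepareBinlogData.recreatePublicationAndSlot=true
prepareBinlogData.binlogTTL=19600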
--------------------------------------------------------------------------------
/holo-e2e-performance-tool/src/main/java/com/alibaba/hologres/performace/client/PrepareGetData.java:
--------------------------------------------------------------------------------
1 | package com.alibaba.hologres.performace.client;
2 |
3 | import com.alibaba.hologres.client.utils.ConfLoader;
4 |
5 |
6 | public class PrepareGetData extends FixedCopyTest{
7 | GetTestConf getTestConf = new GetTestConf();
8 | PrepareGetDataConf prepareGetDataConf = new PrepareGetDataConf();
9 |
10 | @Override
11 | void init() throws Exception {
12 | super.init();
13 | ConfLoader.load(confName, "get.", getTestConf);
14 | ConfLoader.load(confName, "prepareGetData.", prepareGetDataConf);
15 | conf.testByTime = false;
16 | conf.rowNumber = prepareGetDataConf.rowNumber;
17 | conf.tableName = getTestConf.tableName;
18 | conf.hasPk = true;
19 | conf.orientation = prepareGetDataConf.orientation;
20 | conf.deleteTableAfterDone = false;
21 | conf.dumpMemoryStat = false;
22 | }
23 |
24 | }
25 |
26 | class PrepareGetDataConf {
27 | public long rowNumber = 1000000;
28 | public String orientation = "row";
29 | }
30 |
--------------------------------------------------------------------------------
/holo-e2e-performance-tool/src/main/java/com/alibaba/hologres/performace/client/PrepareScanData.java:
--------------------------------------------------------------------------------
1 | package com.alibaba.hologres.performace.client;
2 |
3 | import com.alibaba.hologres.client.utils.ConfLoader;
4 |
5 |
6 | public class PrepareScanData extends FixedCopyTest{
7 | ScanTestConf scanTestConf = new ScanTestConf();
8 | PrepareScanDataConf prepareScanDataConf = new PrepareScanDataConf();
9 |
10 | @Override
11 | void init() throws Exception {
12 | super.init();
13 | ConfLoader.load(confName, "scan.", scanTestConf);
14 | ConfLoader.load(confName, "prepareScanData.", prepareScanDataConf);
15 | conf.prefixPk = true;
16 | conf.recordCountPerPrefix = prepareScanDataConf.recordCountPerPrefix;
17 | conf.testByTime = false;
18 | conf.rowNumber = prepareScanDataConf.rowNumber;
19 | conf.tableName = scanTestConf.tableName;
20 | conf.hasPk = true;
21 | conf.orientation = prepareScanDataConf.orientation;
22 | conf.deleteTableAfterDone = false;
23 | conf.dumpMemoryStat = false;
24 | }
25 | }
26 |
27 | class PrepareScanDataConf {
28 | public int recordCountPerPrefix = 100;
29 | public long rowNumber = 1000000;
30 | public String orientation = "row";
31 | }
32 |
--------------------------------------------------------------------------------
/holo-e2e-performance-tool/src/main/java/com/alibaba/hologres/performace/params/IntRandomParamProvider.java:
--------------------------------------------------------------------------------
1 | package com.alibaba.hologres.performace.params;
2 |
3 | import java.util.concurrent.ThreadLocalRandom;
4 |
5 | public class IntRandomParamProvider implements ParamProvider<Integer> {
6 | int min;
7 | int max;
8 |
9 | @Override
10 | public void init(String pattern) {
11 | String[] temp = pattern.split("-");
12 | min = Integer.parseInt(temp[0]);
13 | max = Integer.parseInt(temp[1]);
14 | }
15 |
16 | @Override
17 | public Integer next() {
18 | return ThreadLocalRandom.current().nextInt(min, max);
19 | }
20 | }
21 |
--------------------------------------------------------------------------------
/holo-e2e-performance-tool/src/main/java/com/alibaba/hologres/performace/params/LongRandomParamProvider.java:
--------------------------------------------------------------------------------
1 | package com.alibaba.hologres.performace.params;
2 |
3 | import java.util.concurrent.ThreadLocalRandom;
4 |
5 | class LongRandomParamProvider implements ParamProvider<Long> {
6 | long min;
7 | long max;
8 |
9 | @Override
10 | public void init(String pattern) {
11 | String[] temp = pattern.split("-");
12 | min = Long.parseLong(temp[0]);
13 | max = Long.parseLong(temp[1]);
14 | }
15 |
16 | @Override
17 | public Long next() {
18 | return ThreadLocalRandom.current().nextLong(min, max);
19 | }
20 | }
--------------------------------------------------------------------------------
/holo-e2e-performance-tool/src/main/java/com/alibaba/hologres/performace/params/ParamProvider.java:
--------------------------------------------------------------------------------
1 | package com.alibaba.hologres.performace.params;
2 |
3 | public interface ParamProvider<T> {
4 | void init(String pattern);
5 |
6 | T next();
7 | }
8 |
--------------------------------------------------------------------------------
/holo-e2e-performance-tool/src/main/java/com/alibaba/hologres/performace/params/ParamsProvider.java:
--------------------------------------------------------------------------------
1 | package com.alibaba.hologres.performace.params;
2 |
3 | import java.util.ArrayList;
4 | import java.util.List;
5 |
6 | public class ParamsProvider {
7 | List<ParamProvider> list;
8 |
9 | public ParamsProvider(String pattern) {
10 | String[] temp = pattern.split(",");
11 | list = new ArrayList<>();
12 | for (String str : temp) {
13 | switch (str.charAt(0)) {
14 | case 'I':
15 | IntRandomParamProvider p = new IntRandomParamProvider();
16 | p.init(str.substring(1));
17 | list.add(p);
18 | break;
19 | case 'L':
20 | LongRandomParamProvider lp = new LongRandomParamProvider();
21 | lp.init(str.substring(1));
22 | list.add(lp);
23 | break;
24 | default:
25 | throw new RuntimeException("unknown pattern " + str.charAt(0));
26 | }
27 | }
28 | }
29 |
30 | public Object get(int index) {
31 | return list.get(index).next();
32 | }
33 |
34 | public int size() {
35 | return list.size();
36 | }
37 | }
38 |
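A minimal sketch of the pattern grammar the constructor above parses: a comma-separated list in which a leading 'I' selects IntRandomParamProvider and a leading 'L' selects LongRandomParamProvider, each followed by a min-max range (max exclusive):

package com.alibaba.hologres.performace.params;

public class ParamsProviderSketch {
    public static void main(String[] args) {
        // Two providers: a random int in [1, 100) and a random long in [1, 1000000).
        ParamsProvider provider = new ParamsProvider("I1-100,L1-1000000");
        for (int i = 0; i < provider.size(); i++) {
            System.out.println("param " + i + " = " + provider.get(i));
        }
    }
}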
--------------------------------------------------------------------------------
/holo-llm-deepseek/config/config.json:
--------------------------------------------------------------------------------
1 | {
2 | "eas_config": {
3 | "url": "http://xxxxxx.pai-eas.aliyuncs.com/",
4 | "token": "xxxxxx",
5 | "stream_mode": 1,
6 | "temperature": 0.0,
7 | "top_p": 0.5,
8 | "top_k": 10,
9 | "max_tokens": 1200
10 | },
11 | "holo_config": {
12 | "HOLO_ENDPOINT": "xxxxxx.hologres.aliyuncs.com",
13 | "HOLO_PORT": 80,
14 | "HOLO_DATABASE": "xxxxxx",
15 | "HOLO_USER": "xxxxxx",
16 | "HOLO_PASSWORD": "xxxxxx"
17 | },
18 | "embedding": {
19 | "model_id": "iic/nlp_corom_sentence-embedding_chinese-base",
20 | "model_dimension": 768
21 | },
22 | "query_topk": 4,
23 | "prompt_template": "Use the following pieces of information to answer the user's question.\n If you don't know the answer, just say that you don't know, don't try to make up an answer.\n Context: {context}\nQuestion: {question}\n Only return the helpful answer with Chinese below and nothing else.\nHelpful answer:"
24 | }
--------------------------------------------------------------------------------
/holo-llm-deepseek/data/example.csv:
--------------------------------------------------------------------------------
1 | title,content
2 | "什么是 Hologres", "Hologres 是阿里云自研一体化实时湖仓平台,通过统一数据平台实现湖仓存储一体、多模式计算一体、分析服务一体、Data+AI一体,无缝对接主流BI工具,支持OLAP查询、即席分析、在线服务、向量计算多个场景,分析性能打破TPC-H世界记录,与MaxCompute、Flink、DataWorks深度融合,提供离在线一体化全栈数仓解决方案。"
3 | "什么是 Proxima", "Proxima是一款来自于阿里达摩院的实现向量近邻搜索的高性能软件库,相比于Faiss等开源的同类产品,Proxima在稳定性、性能等方面都更为出色,能够提供业内性能和效果卓越的基础方法模块,支持图像、视频、人脸等各种应用场景。"
4 | "什么是 Hologres External Database", "Hologres External Database功能支持在无需进行数据迁移的情况下,将外部数据源的元数据加载至Hologres。这一功能便于在一个系统中同时管理内部与外部数据,实现湖仓一体架构下的统一元数据管理。同时,通过实时分析与查询外部数据,实现数据的无缝对接。"
5 | "什么是 Hologres Serverless Computing", "Hologres支持Serverless Computing,您可以指定大SQL作业(例如CPU或内存开销大的SQL作业)在全托管的Serverless Computing资源池进行,无需预留固定规格的计算资源,且各作业还可使用独立的Serverless Computing资源,确保作业之间资源隔离,避免了资源竞争与相互干扰的情况。"
6 | "Hologres Serverless Computing 适用场景有哪些", "Serverless Computing功能的适用场景如下: 1. 适用于大SQL作业频繁出现OOM(Out of Memory,内存溢出)报错,期望提升作业成功率和实例稳定性的场景。2. 适用于当前实例在流量低峰期资源闲置较多,期望提升资源利用率、降低成本的场景。3. 适用于当前实例在流量高峰期资源紧张,且通过分时弹性增加一倍资源仍难以缓解资源压力,期望进一步提升资源量、解决流量峰值问题的场景。"
7 | "Hologres Serverless Computing 能加速 Fixed Plan 吗", "Serverless Computing计算资源由当前可用区所有用户共享,用户之间公平调度,查询请求可能会受当前Serverless Computing Pool的资源水位影响而增加排队时长;独享计算资源为当前实例预留,实例或计算组在running状态时不会回收,不会受其他用户实例影响。因此,在查询延时敏感的场景下,更适合使用独享计算资源。Serverless Computing无法支持在线服务场景,包括Flink实时写入、DataWorks数据集成实时写入、Holo Client实时写入、通过Fixed Plan的在线点查等, 如果您有通过Fixed Plan优化的各类场景需求,该部分场景需要通过独享计算资源运行。"
--------------------------------------------------------------------------------
/holo-llm/config/config.json:
--------------------------------------------------------------------------------
1 | {
2 | "eas_config": {
3 | "url": "http://xxxxxx.pai-eas.aliyuncs.com/",
4 | "token": "xxxxxx"
5 | },
6 | "holo_config": {
7 | "HOLO_ENDPOINT": "xxxxxx.hologres.aliyuncs.com",
8 | "HOLO_PORT": 80,
9 | "HOLO_DATABASE":"xxxxxx",
10 | "HOLO_USER": "xxxxxx",
11 | "HOLO_PASSWORD": "xxxxxx"
12 | },
13 | "embedding": {
14 | "model_id": "damo/nlp_corom_sentence-embedding_english-base",
15 | "model_dimension": 768
16 | },
17 | "query_topk": 4,
18 | "prompt_template": "Use the following pieces of information to answer the user's question.\n If you don't know the answer, just say that you don't know, don't try to make up an answer.\n Context: {context}\nQuestion: {question}\n Only return the helpful answer below and nothing else.\nHelpful answer:"
19 | }
--------------------------------------------------------------------------------
/holo-shipper/.gitignore:
--------------------------------------------------------------------------------
1 | /.idea
2 | *.iml
3 | *.DS_Store
4 | /target
--------------------------------------------------------------------------------
/holo-shipper/src/META-INF/MANIFEST.MF:
--------------------------------------------------------------------------------
1 | Manifest-Version: 1.0
2 | Main-Class: com.alibaba.hologres.shipper.HoloShipper
3 |
4 |
--------------------------------------------------------------------------------
/holo-shipper/src/main/java/com/alibaba/hologres/shipper/generic/AbstractDB.java:
--------------------------------------------------------------------------------
1 | package com.alibaba.hologres.shipper.generic;
2 |
3 | import com.alibaba.fastjson.JSONObject;
4 | import com.alibaba.hologres.shipper.utils.TablesMeta;
5 |
6 | import java.util.List;
7 | import java.util.Map;
8 |
9 | public abstract class AbstractDB {
10 | public abstract AbstractTable getTable(String tableName) throws Exception;
11 | public abstract boolean checkTableExistence(String tableName);
12 | //check if table tableName already exists
13 | public abstract void prepareRead();
14 | public abstract void prepareWrite();
15 | public abstract Map<String, String> getGUC();
16 | public abstract void setGUC(Map<String, String> gucMapping);
17 | public abstract String getExtension();
18 | public abstract void setExtension(String extInfo);
19 | public abstract void createSchemas(List<String> schemaList);
20 | public abstract TablesMeta getMetadata(JSONObject shipList, JSONObject blackList, boolean restoreOwner, boolean restorePriv, boolean restoreForeign, boolean restoreView);
21 | public abstract void recordMetadata(TablesMeta dbInfo);
22 | public abstract void restoreSPM(Map<String, List<String>> spmInfo);
23 | public abstract void restoreSLPM(Map<String, List<String>> slpmInfo);
24 | public static boolean filterListContains(JSONObject filterList, String schemaName, String tableName) {
25 | if(!filterList.containsKey(schemaName))
26 | return false;
27 | if(filterList.getJSONArray(schemaName).contains("*"))
28 | return true;
29 | if(filterList.getJSONArray(schemaName).contains(tableName))
30 | return true;
31 | return false;
32 | }
33 | }
--------------------------------------------------------------------------------
/holo-shipper/src/main/java/com/alibaba/hologres/shipper/generic/AbstractInstance.java:
--------------------------------------------------------------------------------
1 | package com.alibaba.hologres.shipper.generic;
2 |
3 | import java.util.List;
4 | import java.util.Map;
5 |
6 | public abstract class AbstractInstance {
7 | public abstract List<String> getDBList();
8 | public abstract AbstractDB getDB(String dbName);
9 | public abstract List<String> getAllRoles();
10 | public abstract void createRoles(List<String> roles, Map<String, Boolean> roleInfo);
11 | public abstract void showVersion();
12 | public abstract void createDB(String dbName);
13 | public abstract Map<String, Boolean> getRoleInfo(List<String> roleList); //true: role can login, false: role cannot login
14 | public void close() {}
15 | }
16 |
--------------------------------------------------------------------------------
/holo-shipper/src/main/java/com/alibaba/hologres/shipper/generic/AbstractTable.java:
--------------------------------------------------------------------------------
1 | package com.alibaba.hologres.shipper.generic;
2 |
3 | import java.io.Closeable;
4 | import java.io.PipedInputStream;
5 | import java.io.PipedOutputStream;
6 | import java.util.Map;
7 |
8 | public abstract class AbstractTable implements Closeable {
9 | public abstract String getTableDDL();
10 | //return table DDL as a string
11 | public abstract void setTableDDL(String DDLInfo) throws Exception;
12 | //set table DDL according to DDLInfo and user requirements
13 | public abstract void readTableData(PipedOutputStream os, int startShard, int endShard);
14 | public abstract void writeTableData(PipedInputStream is, int startShard, int endShard);
15 | public abstract String getPrivileges();
16 | public abstract void setPrivileges(String privInfo);
17 | public abstract Map getBatches(int numBatch, int dstShardCount, boolean disableShardCopy);
18 | public void close() {}
19 | public int getShardCount() {return 0;}
20 | }
21 |
--------------------------------------------------------------------------------
/holo-shipper/src/main/java/com/alibaba/hologres/shipper/utils/CustomPipedInputStream.java:
--------------------------------------------------------------------------------
1 | package com.alibaba.hologres.shipper.utils;
2 |
3 | import java.io.IOException;
4 | import java.io.PipedInputStream;
5 |
6 | public class CustomPipedInputStream extends PipedInputStream {
7 |
8 | private ProcessBar.Meter meter;
9 |
10 | public CustomPipedInputStream(ProcessBar.Meter meter) {
11 | this.meter = meter;
12 | }
13 | @Override
14 | public int read(byte b[]) throws IOException {
15 | meter.mark(b.length);
16 | return super.read(b);
17 | }
18 | }
19 |
--------------------------------------------------------------------------------
/holo-shipper/src/main/java/com/alibaba/hologres/shipper/utils/TableInfo.java:
--------------------------------------------------------------------------------
1 | package com.alibaba.hologres.shipper.utils;
2 |
3 | public class TableInfo {
4 | public String schemaName;
5 | public String tableName;
6 | public Boolean isPartitioned;
7 | public String parentSchema;
8 | public String parentTable;
9 | public Boolean isForeign;
10 | public Boolean isView;
11 | }
12 |
--------------------------------------------------------------------------------
/holo-shipper/src/main/resources/logback.xml:
--------------------------------------------------------------------------------
1 |
2 |
3 | holo-shipper.log
4 |
5 | %d{yyyy-MM-dd HH:mm:ss} %-5level %logger{36} - %msg%n
6 |
7 |
8 |
9 |
10 | holo-client.log
11 |
12 | %d{yyyy-MM-dd HH:mm:ss} %-5level %logger{36} - %msg%n
13 |
14 |
15 |
16 |
17 |
18 | %d{yyyy-MM-dd HH:mm:ss} [%thread] %-5level %logger{36} - %msg%n
19 |
20 |
21 |
22 |
23 |
24 |
25 |
26 |
27 |
28 |
29 |
30 |
31 |
32 |
33 |
34 |
35 |
--------------------------------------------------------------------------------
/holo-utils/find-incompatible-flink-jobs/src/main/java/com/alibaba/hologres/IncompatibleResult.java:
--------------------------------------------------------------------------------
1 | package com.alibaba.hologres;
2 |
3 | public class IncompatibleResult {
4 |
5 | public IncompatibleResult(String deploymentName, String deploymentVersion, String tableName) {
6 | this.deploymentName = deploymentName;
7 | this.deploymentVersion = deploymentVersion;
8 | this.tableName = tableName;
9 | }
10 | public String deploymentName;
11 | public String deploymentVersion;
12 | public String tableName;
13 | }
14 |
--------------------------------------------------------------------------------
/holo-utils/find-incompatible-flink-jobs/src/main/resources/log4j.properties:
--------------------------------------------------------------------------------
1 | #log4j.rootLogger=DEBUG, stdout
2 | #log4j.appender.stdout=org.apache.log4j.ConsoleAppender
3 | #log4j.appender.stdout.layout=org.apache.log4j.PatternLayout
4 | #log4j.appender.stdout.layout.ConversionPattern=%d %p [%c] - %m%n
--------------------------------------------------------------------------------
/hologres-connector-datax-writer/README.md:
--------------------------------------------------------------------------------
1 | # hologresjdbcwriter
2 |
3 | For a configuration template, see [plugin_job_template.json](./src/main/resources/plugin_job_template.json); for the "client" section, refer to the configuration notes of [HoloClient](../holo-client).
4 |
--------------------------------------------------------------------------------
/hologres-connector-datax-writer/src/main/assembly/package.xml:
--------------------------------------------------------------------------------
1 |
5 |
6 |
7 | dir
8 |
9 | false
10 |
11 |
12 | src/main/resources
13 |
14 | plugin.json
15 | plugin_job_template.json
16 |
17 | plugin/writer/hologresjdbcwriter
18 |
19 |
20 | target/
21 |
22 | hologresjdbcwriter-0.0.1-SNAPSHOT.jar
23 |
24 | plugin/writer/hologresjdbcwriter
25 |
26 |
27 |
28 |
29 |
30 | false
31 | plugin/writer/hologresjdbcwriter/libs
32 | runtime
33 |
34 |
35 |
36 |
--------------------------------------------------------------------------------
/hologres-connector-datax-writer/src/main/java/com/alibaba/datax/plugin/writer/hologresjdbcwriter/Constant.java:
--------------------------------------------------------------------------------
1 | package com.alibaba.datax.plugin.writer.hologresjdbcwriter;
2 |
3 | /**
4 | * Constants used as markers (MARK) when the plugin parses the user configuration.
5 | */
6 | public final class Constant {
7 | public static final int DEFAULT_BATCH_SIZE = 512;
8 |
9 | public static final int DEFAULT_BATCH_BYTE_SIZE = 50 * 1024 * 1024;
10 |
11 | public static String CONN_MARK = "connection";
12 |
13 | public static String TABLE_NUMBER_MARK = "tableNumber";
14 |
15 | }
16 |
--------------------------------------------------------------------------------
/hologres-connector-datax-writer/src/main/java/com/alibaba/datax/plugin/writer/hologresjdbcwriter/Key.java:
--------------------------------------------------------------------------------
1 | package com.alibaba.datax.plugin.writer.hologresjdbcwriter;
2 |
3 | public final class Key {
4 | public final static String JDBC_URL = "jdbcUrl";
5 |
6 | public final static String USERNAME = "username";
7 |
8 | public final static String PASSWORD = "password";
9 |
10 | public final static String TABLE = "table";
11 |
12 | public final static String COLUMN = "column";
13 |
14 | public final static String Array_Delimiter = "arrayDelimiter";
15 |
16 | public final static String WRITE_MODE = "writeMode";
17 |
18 | public final static String PRE_SQL = "preSql";
19 |
20 | public final static String POST_SQL = "postSql";
21 |
22 | //default value: 256
23 | public final static String BATCH_SIZE = "batchSize";
24 |
25 | //default value: 50m
26 | public final static String BATCH_BYTE_SIZE = "batchByteSize";
27 |
28 | public final static String EMPTY_AS_NULL = "emptyAsNull";
29 |
30 |
31 | }
--------------------------------------------------------------------------------
/hologres-connector-datax-writer/src/main/resources/plugin.json:
--------------------------------------------------------------------------------
1 | {
2 | "name": "hologresjdbcwriter",
3 | "class": "com.alibaba.datax.plugin.writer.hologresjdbcwriter.HologresJdbcWriter",
4 | "description": "useScene: prod. mechanism: Jdbc connection using the database, execute insert sql. warn: The more you know about the database, the less problems you encounter.",
5 | "developer": "alibaba"
6 | }
7 |
--------------------------------------------------------------------------------
/hologres-connector-datax-writer/src/main/resources/plugin_job_template.json:
--------------------------------------------------------------------------------
1 | {
2 | "name": "hologresjdbcwriter",
3 | "parameter": {
4 | "username": "",
5 | "password": "",
6 | "column": [],
7 | "connection": [
8 | {
9 | "jdbcUrl": "",
10 | "table": []
11 | }
12 | ],
13 | "preSql": [],
14 | "postSql": [],
15 | "batchByteSize": 52428800,
16 | "client": {
17 | "writeThreadSize": 3,
18 | "dynamicPartition" : true
19 | }
20 | }
21 | }
--------------------------------------------------------------------------------
/hologres-connector-examples/README.md:
--------------------------------------------------------------------------------
1 | # hologres-connectors-examples
2 |
3 | Examples for Hologres connectors
4 |
5 | # Modules
6 |
7 | * [hologres-connector-flink-examples](hologres-connector-flink-examples)
8 |
9 | Examples for the hologres flink connector
10 |
11 | * [hologres-connector-flink-ordergen](hologres-connector-flink-ordergen)
12 |
13 | An order data source table generator for the hologres flink connector;
14 | see the [Flink custom connector documentation](https://help.aliyun.com/document_detail/193520.html) for usage
15 | * [hologres-connector-spark-examples](hologres-connector-spark-examples)
16 |
17 | Examples for the hologres spark connector
--------------------------------------------------------------------------------
/hologres-connector-examples/hologres-connector-flink-examples/src/main/java/com/alibaba/ververica/connectors/hologres/example/SourceItem.java:
--------------------------------------------------------------------------------
1 | package com.alibaba.ververica.connectors.hologres.example;
2 |
3 | import java.math.BigDecimal;
4 | import java.sql.Timestamp;
5 |
6 | /**
7 | * SourceItem.
8 | */
9 | public class SourceItem {
10 | /**
11 | * for example: event type.
12 | */
13 | public enum EventType {
14 | INSERT,
15 | DELETE
16 | }
17 |
18 | public EventType eventType = EventType.INSERT;
19 | public long userId;
20 | public String userName;
21 | public BigDecimal price;
22 | public Timestamp saleTimestamp;
23 |
24 | public SourceItem() {
25 | }
26 |
27 | public SourceItem(long userId, String userName, BigDecimal price, Timestamp saleTimestamp) {
28 | this.userId = userId;
29 | this.userName = userName;
30 | this.price = price;
31 | this.saleTimestamp = saleTimestamp;
32 | }
33 | }
34 |
--------------------------------------------------------------------------------
/hologres-connector-examples/hologres-connector-flink-examples/src/main/resources/log4j.properties:
--------------------------------------------------------------------------------
1 | log4j.rootLogger=info,console
2 | log4j.appender.console=org.apache.log4j.ConsoleAppender
3 | log4j.appender.console.Threshold=INFO
4 | log4j.appender.console.ImmediateFlush=true
5 | log4j.appender.console.Target=System.out
6 | log4j.appender.console.layout=org.apache.log4j.PatternLayout
7 | log4j.appender.console.layout.ConversionPattern=[%-5p] %d(%r) --> [%t] %l: %m %x %n
--------------------------------------------------------------------------------
/hologres-connector-examples/hologres-connector-flink-examples/src/main/resources/ods_app_example.csv:
--------------------------------------------------------------------------------
1 | uid,country,prov,city,channel,operator,brand,ip,click_time,year,month,day,ymd
2 | 1ae58016,中国,广东,东莞,android,pros,a,192.168.1.1,2021-03-29 13:34:00,2021,3,29,20210329
3 | 2ae7788c,中国,广东,深圳,android,pros,a,192.168.1.8,2021-03-29 13:34:00,2021,3,29,20210329
4 | 1c90ab08,美国,新泽西州,阿布西肯,ios,cons,a,192.168.1.2,2021-03-29 13:34:00,2021,3,29,20210329
5 | e7847f80,中国,天津,天津,ios,pros,b,192.168.1.3,2021-03-29 13:34:00,2021,3,29,20210329
6 | 1ae58016,中国,广东,东莞,android,pros,a,192.168.1.1,2021-03-29 13:34:00,2021,3,29,20210329
7 | 1c90ab08,美国,新泽西州,阿布西肯,ios,cons,a,192.168.1.2,2021-03-29 13:34:01,2021,3,29,20210329
8 | e7847f80,中国,天津,天津,ios,pros,b,192.168.1.3,2021-03-29 13:34:01,2021,3,29,20210329
9 | 1ae58016,中国,广东,东莞,android,pros,a,192.168.1.1,2021-03-29 13:34:01,2021,3,29,20210329
10 | 1c90ab08,美国,新泽西州,阿布西肯,ios,cons,a,192.168.1.2,2021-03-29 13:34:01,2021,3,29,20210329
11 | e7847f80,中国,天津,天津,ios,pros,b,192.168.1.3,2021-03-29 13:34:02,2021,3,29,20210329
12 | 1ae58016,中国,广东,东莞,android,pros,a,192.168.1.1,2021-03-29 13:34:02,2021,3,29,20210329
13 | 1c90ab08,美国,新泽西州,阿布西肯,ios,cons,a,192.168.1.2,2021-03-29 13:34:02,2021,3,29,20210329
14 | e7847f80,中国,天津,天津,ios,pros,b,192.168.1.3,2021-03-29 13:34:02,2021,3,29,20210329
15 | 1ae58016,中国,广东,东莞,android,pros,a,192.168.1.1,2021-03-29 13:34:03,2021,3,29,20210329
16 | 1c90ab08,美国,新泽西州,阿布西肯,ios,cons,a,192.168.1.2,2021-03-29 13:34:03,2021,3,29,20210329
17 | e7847f80,中国,天津,天津,ios,pros,b,192.168.1.3,2021-03-29 13:34:03,2021,3,29,20210329
18 | e8855f8d,中国,天津,天津,ios,pros,b,192.168.1.5,2021-03-29 13:34:03,2021,3,29,20210329
19 |
--------------------------------------------------------------------------------
/hologres-connector-examples/hologres-connector-flink-examples/src/main/resources/repartition.sql:
--------------------------------------------------------------------------------
1 | --sourceDDL
2 | CREATE TEMPORARY TABLE source_table
3 | (
4 | c_custkey BIGINT
5 | ,c_name STRING
6 | ,c_address STRING
7 | ,c_nationkey INTEGER
8 | ,c_phone STRING
9 | ,c_acctbal NUMERIC(15, 2)
10 | ,c_mktsegment STRING
11 | ,c_comment STRING
12 | )
13 | WITH (
14 | 'connector' = 'datagen'
15 | ,'rows-per-second' = '10'
16 | ,'number-of-rows' = '100'
17 | );
18 |
19 | --sourceDql
20 | select *, cast('2024-04-21' as DATE) from source_table;
21 |
22 | --sinkDDL
23 | CREATE TABLE sink_table
24 | (
25 | c_custkey BIGINT
26 | ,c_name STRING
27 | ,c_address STRING
28 | ,c_nationkey INTEGER
29 | ,c_phone STRING
30 | ,c_acctbal NUMERIC(15, 2)
31 | ,c_mktsegment STRING
32 | ,c_comment STRING
33 | ,`date` DATE
34 | )
35 | with (
36 | 'connector' = 'hologres'
37 | ,'dbname' = 'test_db'
38 | ,'tablename' = 'test_sink_customer'
39 | ,'username' = ''
40 | ,'password' = ''
41 | ,'endpoint' = ''
42 | ,'jdbccopywritemode' = 'BULK_LOAD'
43 | ,'reshuffle-by-holo-distribution-key.enabled'='true'
44 | );
45 |
--------------------------------------------------------------------------------
/hologres-connector-examples/hologres-connector-flink-examples/src/main/resources/setting.properties:
--------------------------------------------------------------------------------
1 | # hologres connection settings and table names
2 | endpoint=
3 | username=
4 | password=
5 | database=
6 | dimTableName=uid_mapping
7 | dwsTableName=dws_app
8 | #time window size, unit is second
9 | windowSize=2
10 |
--------------------------------------------------------------------------------
/hologres-connector-examples/hologres-connector-flink-ordergen/README.md:
--------------------------------------------------------------------------------
1 | # Flink-connector-ordergen
2 |
3 | An order data source-table generator for the hologres flink connector
4 |
5 | ### Build
6 |
7 | Run ```mvn package -DskipTests```
8 |
9 | ### Usage
10 |
11 | See the [Flink custom connector documentation](https://help.aliyun.com/document_detail/193520.html)
12 |
13 |
--------------------------------------------------------------------------------
/hologres-connector-examples/hologres-connector-flink-ordergen/src/main/java/io/hologres/flink/ordergen/City.java:
--------------------------------------------------------------------------------
1 | package io.hologres.flink.ordergen;
2 |
3 | import java.io.Serializable;
4 |
5 | /**
6 | * City.
7 | */
8 | public class City implements Serializable {
9 | private String nameZh;
10 | private String name;
11 | private String code;
12 | private String longtitude;
13 | private String latitude;
14 |
15 | public City(String nameZh, String name, String code, String longitude, String latitude) {
16 | this.nameZh = nameZh;
17 | this.name = name;
18 | this.code = code;
19 | this.longtitude = longitude;
20 | this.latitude = latitude;
21 | }
22 |
23 | public String getLongtitude() {
24 | return longtitude;
25 | }
26 |
27 | public String getLatitude() {
28 | return latitude;
29 | }
30 | }
31 |
--------------------------------------------------------------------------------
/hologres-connector-examples/hologres-connector-flink-ordergen/src/main/java/io/hologres/flink/ordergen/OrderGenTableSource.java:
--------------------------------------------------------------------------------
1 | package io.hologres.flink.ordergen;
2 |
3 | import org.apache.flink.streaming.api.functions.source.SourceFunction;
4 | import org.apache.flink.table.api.TableSchema;
5 | import org.apache.flink.table.connector.ChangelogMode;
6 | import org.apache.flink.table.connector.source.DynamicTableSource;
7 | import org.apache.flink.table.connector.source.ScanTableSource;
8 | import org.apache.flink.table.connector.source.SourceFunctionProvider;
9 | import org.apache.flink.table.data.RowData;
10 | import org.apache.flink.types.RowKind;
11 |
12 | /**
13 | * OrderTableSource.
14 | */
15 | public class OrderGenTableSource implements ScanTableSource {
16 | private final TableSchema schema;
17 |
18 | public OrderGenTableSource(TableSchema schema) {
19 | this.schema = schema;
20 | }
21 |
22 | @Override
23 | public DynamicTableSource copy() {
24 | return new OrderGenTableSource(schema);
25 | }
26 |
27 | @Override
28 | public String asSummaryString() {
29 | return "OrderGen Table Source";
30 | }
31 |
32 | @Override
33 | public ChangelogMode getChangelogMode() {
34 | return ChangelogMode.newBuilder()
35 | .addContainedKind(RowKind.INSERT)
36 | .addContainedKind(RowKind.DELETE)
37 | .build();
38 | }
39 |
40 | @Override
41 | public ScanRuntimeProvider getScanRuntimeProvider(ScanContext scanContext) {
42 | final SourceFunction<RowData> sourceFunction = new OrderGenSourceFunction();
43 | return SourceFunctionProvider.of(sourceFunction, false);
44 | }
45 | }
46 |
--------------------------------------------------------------------------------
/hologres-connector-examples/hologres-connector-flink-ordergen/src/main/java/io/hologres/flink/ordergen/PrefectureCity.java:
--------------------------------------------------------------------------------
1 | package io.hologres.flink.ordergen;
2 |
3 | import java.io.Serializable;
4 | import java.util.List;
5 |
6 | /**
7 | * PrefectureCity.
8 | */
9 | public class PrefectureCity implements Serializable {
10 | private String prefectureNameZh;
11 | private String prefectureName;
12 | private List<City> cities;
13 |
14 | public PrefectureCity(String prefectureNameZh, String prefectureName, List<City> cities) {
15 | this.prefectureNameZh = prefectureNameZh;
16 | this.prefectureName = prefectureName;
17 | this.cities = cities;
18 | }
19 |
20 | public List<City> getCities() {
21 | return cities;
22 | }
23 |
24 | public void setCities(List<City> cities) {
25 | this.cities = cities;
26 | }
27 |
28 | public String getPrefectureNameZh() {
29 | return prefectureNameZh;
30 | }
31 | }
32 |
--------------------------------------------------------------------------------
/hologres-connector-examples/hologres-connector-flink-ordergen/src/main/java/io/hologres/flink/ordergen/Province.java:
--------------------------------------------------------------------------------
1 | package io.hologres.flink.ordergen;
2 |
3 | import java.io.Serializable;
4 | import java.util.List;
5 |
6 | /**
7 | * Province.
8 | */
9 | public class Province implements Serializable {
10 | private String provinceNameZh;
11 | private String provinceName;
12 | private List<PrefectureCity> prefectureCities;
13 |
14 | public Province(
15 | String provinceNameZh, String provinceName, List<PrefectureCity> prefectureCities) {
16 | this.provinceNameZh = provinceNameZh;
17 | this.provinceName = provinceName;
18 | this.prefectureCities = prefectureCities;
19 | }
20 |
21 | public List<PrefectureCity> getPrefectureCities() {
22 | return prefectureCities;
23 | }
24 |
25 | public void setPrefectureCities(List<PrefectureCity> prefectureCities) {
26 | this.prefectureCities = prefectureCities;
27 | }
28 |
29 | public String getProvinceNameZh() {
30 | return provinceNameZh;
31 | }
32 | }
33 |
--------------------------------------------------------------------------------
/hologres-connector-examples/hologres-connector-flink-ordergen/src/main/resources/META-INF/services/org.apache.flink.table.factories.Factory:
--------------------------------------------------------------------------------
1 | io.hologres.flink.ordergen.OrderGenTableFactory
--------------------------------------------------------------------------------
/hologres-connector-examples/hologres-connector-flink-ordergen/src/main/resources/log4j.properties:
--------------------------------------------------------------------------------
1 | ################################################################################
2 | # Licensed to the Apache Software Foundation (ASF) under one
3 | # or more contributor license agreements. See the NOTICE file
4 | # distributed with this work for additional information
5 | # regarding copyright ownership. The ASF licenses this file
6 | # to you under the Apache License, Version 2.0 (the
7 | # "License"); you may not use this file except in compliance
8 | # with the License. You may obtain a copy of the License at
9 | #
10 | # http://www.apache.org/licenses/LICENSE-2.0
11 | #
12 | # Unless required by applicable law or agreed to in writing, software
13 | # distributed under the License is distributed on an "AS IS" BASIS,
14 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
15 | # See the License for the specific language governing permissions and
16 | # limitations under the License.
17 | ################################################################################
18 | log4j.rootLogger=INFO, console
19 | log4j.appender.console=org.apache.log4j.ConsoleAppender
20 | log4j.appender.console.layout=org.apache.log4j.PatternLayout
21 | log4j.appender.console.layout.ConversionPattern=%d{HH:mm:ss,SSS} %-5p %-60c %x - %m%n
22 |
--------------------------------------------------------------------------------
/hologres-connector-examples/hologres-connector-spark-examples/README.md:
--------------------------------------------------------------------------------
1 | # Spark-connector-Examples
2 |
3 | The Examples module contains the following examples:
4 | * 1.SparkHoloTableCatalogExample
5 |
6 | An example that creates a Hologres Catalog through the Holo Spark connector for reading and writing
7 |
8 | * 2.SparkWriteDataFrameToHoloExample
9 |
10 | A Java application that writes data into Hologres through the Holo Spark connector.
11 | For an example implemented as a Scala script, see hologres-connector-spark-3.x/README.md
12 |
13 | * 3.SparkReadHoloToDataFrameExample
14 |
15 | A Java application that reads data from Hologres through the Holo Spark connector.
16 | For an example implemented as a Scala script, see hologres-connector-spark-3.x/README.md
17 |
18 | * 4.SparkToHoloRepartitionExample
19 |
20 | A Scala application that uses the Holo Spark connector to repartition data by the Hologres distribution key, enabling high-performance bulk loading into a Hologres table with a primary key
21 |
22 |
23 |
24 | ## Submitting the Spark job
25 | The Spark examples currently default to Spark 3.3; please test against a Spark 3.3 cluster
26 |
27 | ### Build
28 | Run ```mvn package -DskipTests``` in the root directory of this project (hologres-connector-spark-examples)
29 |
30 | Submit the job with spark-submit on the Spark cluster and specify the parameters; take SparkWriteDataFrameToHoloExample as an example:
31 | ```
32 | spark-submit --class com.alibaba.hologres.spark.example.SparkWriteDataFrameToHoloExample --jars target/hologres-connector-spark-examples-1.0.0-jar-with-dependencies.jar --endpoint ${ip:port} --username ${user_name} --password ${password} --database {database} --tablename sink_table
33 | ```
34 | ## Running and debugging in IDEA
35 |
36 | The above applies to submitting jobs to a Spark cluster. You can also run the code in an IDE such as IDEA; in the run configuration, enable
37 | "Add dependencies with 'provided' scope to classpath"
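
For orientation, the snippet below sketches what a DataFrame write through the connector boils down to. It is an illustration only: the `hologres` data source name and the option keys (`username`, `password`, `endpoint`, `database`, `table`) are assumptions taken from the hologres-connector-spark-3.x README referenced above, so check that README for the authoritative option list.

```java
// Illustrative sketch only: the "hologres" format name and the option keys below are
// assumptions based on hologres-connector-spark-3.x/README.md, not verified here.
import org.apache.spark.sql.Dataset;
import org.apache.spark.sql.Row;
import org.apache.spark.sql.SaveMode;
import org.apache.spark.sql.SparkSession;

public class WriteDataFrameToHoloSketch {
    public static void main(String[] args) {
        SparkSession spark =
                SparkSession.builder().appName("WriteDataFrameToHoloSketch").getOrCreate();

        // Any DataFrame whose schema matches the target Hologres table.
        Dataset<Row> df = spark.range(10).toDF("id");

        df.write()
                .format("hologres")                 // assumed data source short name
                .option("username", "<access-id>")
                .option("password", "<access-key>")
                .option("endpoint", "<host>:<port>")
                .option("database", "<database>")
                .option("table", "sink_table")
                .mode(SaveMode.Append)
                .save();

        spark.stop();
    }
}
```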
--------------------------------------------------------------------------------
/hologres-connector-examples/hologres-connector-spark-examples/src/main/resources/log4j.properties:
--------------------------------------------------------------------------------
1 | log4j.rootLogger=info,console
2 | log4j.appender.console=org.apache.log4j.ConsoleAppender
3 | log4j.appender.console.Threshold=INFO
4 | log4j.appender.console.ImmediateFlush=true
5 | log4j.appender.console.Target=System.out
6 | log4j.appender.console.layout=org.apache.log4j.PatternLayout
7 | log4j.appender.console.layout.ConversionPattern=[%-5p] %d(%r) --> [%t] %l: %m %x %n
--------------------------------------------------------------------------------
/hologres-connector-examples/hologres-connector-spark-examples/src/main/resources/setting.properties:
--------------------------------------------------------------------------------
1 | USERNAME=
2 | PASSWORD=
3 | JDBCURL=
4 | TABLE=customer_holo_table
5 | FILEPATH=hologres-connector-spark-examples/src/main/resources/customer.tbl
--------------------------------------------------------------------------------
/hologres-connector-flink-1.15/src/main/resources/META-INF/services/com.alibaba.ververica.connectors.hologres.source.HologresLookUpFunctionFactory:
--------------------------------------------------------------------------------
1 | # Licensed to the Apache Software Foundation (ASF) under one or more
2 | # contributor license agreements. See the NOTICE file distributed with
3 | # this work for additional information regarding copyright ownership.
4 | # The ASF licenses this file to You under the Apache License, Version 2.0
5 | # (the "License"); you may not use this file except in compliance with
6 | # the License. You may obtain a copy of the License at
7 | #
8 | # http://www.apache.org/licenses/LICENSE-2.0
9 | #
10 | # Unless required by applicable law or agreed to in writing, software
11 | # distributed under the License is distributed on an "AS IS" BASIS,
12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | # See the License for the specific language governing permissions and
14 | # limitations under the License.
15 |
16 | com.alibaba.ververica.connectors.hologres.source.lookup.Flink115HologresLookUpFunctionFactory
17 |
--------------------------------------------------------------------------------
/hologres-connector-flink-1.17/src/main/resources/META-INF/services/com.alibaba.ververica.connectors.hologres.source.HologresLookUpFunctionFactory:
--------------------------------------------------------------------------------
1 | # Licensed to the Apache Software Foundation (ASF) under one or more
2 | # contributor license agreements. See the NOTICE file distributed with
3 | # this work for additional information regarding copyright ownership.
4 | # The ASF licenses this file to You under the Apache License, Version 2.0
5 | # (the "License"); you may not use this file except in compliance with
6 | # the License. You may obtain a copy of the License at
7 | #
8 | # http://www.apache.org/licenses/LICENSE-2.0
9 | #
10 | # Unless required by applicable law or agreed to in writing, software
11 | # distributed under the License is distributed on an "AS IS" BASIS,
12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | # See the License for the specific language governing permissions and
14 | # limitations under the License.
15 |
16 | com.alibaba.ververica.connectors.hologres.source.lookup.Flink117HologresLookUpFunctionFactory
17 |
--------------------------------------------------------------------------------
/hologres-connector-flink-base/README.md:
--------------------------------------------------------------------------------
1 |
2 | ## Common core code of the Hologres Flink Connector
3 |
4 | It cannot be used directly; build and install it locally as a dependency of ***hologres-connector-flink-1.15*** and ***hologres-connector-flink-1.17***
5 |
6 | ### Building hologres-connector-flink-base
7 |
8 | - Package against the Flink version of your environment, using -P to select the version profile
9 |
10 | - Supported versions are listed in the table below
11 |
12 | | Parameter | Supported versions |
13 | |:---:|:------------------------:|
14 | | Flink version | flink1.15, flink1.17 |
15 |
16 | Use the -P parameter to select the Flink version at build time; for flink1.15, for example:
17 |
18 | ```
19 | mvn install -pl hologres-connector-flink-base clean package -DskipTests -Pflink-1.15
20 | ```
21 |
--------------------------------------------------------------------------------
/hologres-connector-flink-base/src/main/java/com/alibaba/ververica/connectors/hologres/api/HologresIOClient.java:
--------------------------------------------------------------------------------
1 | package com.alibaba.ververica.connectors.hologres.api;
2 |
3 | import org.apache.flink.table.types.logical.LogicalType;
4 |
5 | import com.alibaba.ververica.connectors.hologres.config.HologresConnectionParam;
6 |
7 | import javax.annotation.Nullable;
8 |
9 | import java.io.IOException;
10 | import java.io.Serializable;
11 |
12 | /** Abstract base for different Hologres IO clients. */
13 | public abstract class HologresIOClient<T> implements Serializable {
14 | protected final HologresConnectionParam param;
15 | protected final String[] fieldNames;
16 | protected final LogicalType[] logicalTypes;
17 |
18 | public HologresIOClient(
19 | HologresConnectionParam param, String[] fieldNames, LogicalType[] logicalTypes) {
20 | this.param = param;
21 | this.fieldNames = fieldNames;
22 | this.logicalTypes = logicalTypes;
23 | }
24 |
25 | public void open() throws IOException {
26 | open(null, null);
27 | }
28 |
29 | public abstract void open(@Nullable Integer taskNumber, @Nullable Integer numTasks)
30 | throws IOException;
31 |
32 | public abstract void close() throws IOException;
33 | }
34 |
--------------------------------------------------------------------------------
/hologres-connector-flink-base/src/main/java/com/alibaba/ververica/connectors/hologres/api/HologresReader.java:
--------------------------------------------------------------------------------
1 | package com.alibaba.ververica.connectors.hologres.api;
2 |
3 | import org.apache.flink.table.types.logical.LogicalType;
4 |
5 | import com.alibaba.ververica.connectors.hologres.config.HologresConnectionParam;
6 |
7 | import java.io.IOException;
8 | import java.util.List;
9 | import java.util.concurrent.CompletableFuture;
10 |
11 | /** Abstract base for different Hologres IO readers. */
12 | public abstract class HologresReader<T> extends HologresIOClient<T> {
13 | protected final String[] primaryKeys;
14 |
15 | public HologresReader(
16 | HologresConnectionParam param,
17 | String[] fieldNames,
18 | LogicalType[] logicalTypes,
19 | String[] primaryKeys) {
20 | super(param, fieldNames, logicalTypes);
21 | this.primaryKeys = primaryKeys;
22 | }
23 |
24 | public abstract CompletableFuture<T> asyncGet(T record) throws IOException;
25 |
26 | public abstract CompletableFuture<List<T>> asyncGetMany(T record) throws IOException;
27 |
28 | public abstract T get(T record) throws IOException;
29 |
30 | public abstract List<T> getMany(T record) throws IOException;
31 | }
32 |
--------------------------------------------------------------------------------
/hologres-connector-flink-base/src/main/java/com/alibaba/ververica/connectors/hologres/api/HologresRecordConverter.java:
--------------------------------------------------------------------------------
1 | package com.alibaba.ververica.connectors.hologres.api;
2 |
3 | import java.io.Serializable;
4 |
5 | /** HologresRecordConverter. */
6 | public interface HologresRecordConverter<IN, OUT> extends Serializable {
7 | OUT convertFrom(IN record);
8 |
9 | IN convertTo(OUT record);
10 |
11 | OUT convertToPrimaryKey(IN record);
12 | }
13 |
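
To make the IN/OUT roles of this interface concrete, here is a toy implementation; the `User` POJO and the CSV-style string encoding are hypothetical and exist only for the sketch, while real converters in this module map between Flink `RowData` and the representation used by a concrete reader or writer.

```java
// Hypothetical illustration of HologresRecordConverter: IN is an application-side POJO,
// OUT is whatever representation a concrete reader/writer works with (a CSV-like String
// here, purely to keep the sketch self-contained).
import com.alibaba.ververica.connectors.hologres.api.HologresRecordConverter;

public class UserToStringConverter
        implements HologresRecordConverter<UserToStringConverter.User, String> {

    /** Hypothetical application-side record. */
    public static class User {
        public final long id;
        public final String name;

        public User(long id, String name) {
            this.id = id;
            this.name = name;
        }
    }

    @Override
    public String convertFrom(User record) {
        return record.id + "," + record.name;
    }

    @Override
    public User convertTo(String record) {
        String[] parts = record.split(",", 2);
        return new User(Long.parseLong(parts[0]), parts[1]);
    }

    @Override
    public String convertToPrimaryKey(User record) {
        // Only the key columns matter for point lookups and deletes.
        return Long.toString(record.id);
    }
}
```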
--------------------------------------------------------------------------------
/hologres-connector-flink-base/src/main/java/com/alibaba/ververica/connectors/hologres/api/HologresWriter.java:
--------------------------------------------------------------------------------
1 | package com.alibaba.ververica.connectors.hologres.api;
2 |
3 | import org.apache.flink.table.types.logical.LogicalType;
4 |
5 | import com.alibaba.ververica.connectors.hologres.config.HologresConnectionParam;
6 |
7 | import java.io.IOException;
8 |
9 | /** Abstract base for different Hologres IO writers. */
10 | public abstract class HologresWriter<T> extends HologresIOClient<T> {
11 | public HologresWriter(
12 | HologresConnectionParam param, String[] fieldNames, LogicalType[] logicalTypes) {
13 | super(param, fieldNames, logicalTypes);
14 | }
15 |
16 | public abstract long writeAddRecord(T record) throws IOException;
17 |
18 | public abstract long writeDeleteRecord(T record) throws IOException;
19 |
20 | public abstract void flush() throws IOException;
21 | }
22 |
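
As a rough sketch of the writer contract (no real I/O), a subclass might look like the following; the meaning of the returned `long` is assumed here to be a size estimate, and the real writers in this module implement these hooks on top of holo-client or JDBC COPY.

```java
// Sketch of the HologresWriter contract; it performs no real I/O and only tracks a counter.
import com.alibaba.ververica.connectors.hologres.api.HologresWriter;
import com.alibaba.ververica.connectors.hologres.config.HologresConnectionParam;
import org.apache.flink.table.data.RowData;
import org.apache.flink.table.types.logical.LogicalType;

import javax.annotation.Nullable;
import java.io.IOException;

public class CountingHologresWriter extends HologresWriter<RowData> {
    private long buffered;

    public CountingHologresWriter(
            HologresConnectionParam param, String[] fieldNames, LogicalType[] logicalTypes) {
        super(param, fieldNames, logicalTypes);
    }

    @Override
    public void open(@Nullable Integer taskNumber, @Nullable Integer numTasks) throws IOException {
        // A real writer would create its holo-client or JDBC resources here.
    }

    @Override
    public long writeAddRecord(RowData record) throws IOException {
        // The returned long is assumed to be a size estimate; here it is just a placeholder.
        buffered++;
        return 1L;
    }

    @Override
    public long writeDeleteRecord(RowData record) throws IOException {
        buffered++;
        return 1L;
    }

    @Override
    public void flush() throws IOException {
        // A real writer would push buffered mutations to Hologres here.
        buffered = 0;
    }

    @Override
    public void close() throws IOException {}
}
```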
--------------------------------------------------------------------------------
/hologres-connector-flink-base/src/main/java/com/alibaba/ververica/connectors/hologres/sink/HologresSinkFunction.java:
--------------------------------------------------------------------------------
1 | package com.alibaba.ververica.connectors.hologres.sink;
2 |
3 | import org.apache.flink.annotation.VisibleForTesting;
4 | import org.apache.flink.table.data.RowData;
5 |
6 | import com.alibaba.ververica.connectors.common.sink.OutputFormatSinkFunction;
7 | import com.alibaba.ververica.connectors.hologres.api.HologresIOClient;
8 | import com.alibaba.ververica.connectors.hologres.api.HologresWriter;
9 | import com.alibaba.ververica.connectors.hologres.config.HologresConnectionParam;
10 |
11 | /** Sink Function. */
12 | public class HologresSinkFunction extends OutputFormatSinkFunction<RowData> {
13 | private static final long serialVersionUID = 1L;
14 | private HologresWriter<RowData> hologresIOClient;
15 |
16 | public HologresSinkFunction(
17 | HologresConnectionParam connectionParam, HologresWriter<RowData> hologresIOClient) {
18 | super(new HologresTableOutputFormat(connectionParam, hologresIOClient));
19 | this.hologresIOClient = hologresIOClient;
20 | }
21 |
22 | @VisibleForTesting
23 | protected HologresIOClient<RowData> getHologresIOClient() {
24 | return hologresIOClient;
25 | }
26 | }
27 |
--------------------------------------------------------------------------------
/hologres-connector-flink-base/src/main/java/com/alibaba/ververica/connectors/hologres/source/HologresLookUpFunctionFactory.java:
--------------------------------------------------------------------------------
1 | package com.alibaba.ververica.connectors.hologres.source;
2 |
3 | import org.apache.flink.api.common.functions.FlatMapFunction;
4 | import org.apache.flink.streaming.api.functions.async.AsyncFunction;
5 | import org.apache.flink.table.api.TableSchema;
6 | import org.apache.flink.table.data.RowData;
7 |
8 | import com.alibaba.ververica.connectors.common.dim.cache.CacheStrategy;
9 | import com.alibaba.ververica.connectors.hologres.api.HologresReader;
10 |
11 | /** A utility interface for creating Hologres lookup functions. */
12 | public interface HologresLookUpFunctionFactory {
13 | AsyncFunction<RowData, RowData> createAsyncFunction(
14 | String sqlTableName,
15 | TableSchema tableSchema,
16 | String[] index,
17 | CacheStrategy cacheStrategy,
18 | HologresReader<RowData> hologresReader,
19 | boolean hasPrimaryKey);
20 |
21 | FlatMapFunction<RowData, RowData> createFunction(
22 | String sqlTableName,
23 | TableSchema tableSchema,
24 | String[] index,
25 | CacheStrategy cacheStrategy,
26 | HologresReader<RowData> hologresReader,
27 | boolean hasPrimaryKey);
28 | }
29 |
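
Version-specific implementations of this interface are registered through the META-INF/services files shown earlier (Flink115HologresLookUpFunctionFactory, Flink117HologresLookUpFunctionFactory). The sketch below shows the standard `java.util.ServiceLoader` lookup that such registration files are written for; whether the connector resolves the factory exactly this way is an assumption.

```java
// Sketch: resolving the version-specific factory registered under
// META-INF/services/com.alibaba.ververica.connectors.hologres.source.HologresLookUpFunctionFactory.
// Whether the connector uses ServiceLoader directly is an assumption; this is simply the
// standard service-provider lookup that such files are written for.
import com.alibaba.ververica.connectors.hologres.source.HologresLookUpFunctionFactory;

import java.util.Iterator;
import java.util.NoSuchElementException;
import java.util.ServiceLoader;

public final class LookupFactoryResolver {
    private LookupFactoryResolver() {}

    public static HologresLookUpFunctionFactory discover(ClassLoader classLoader) {
        Iterator<HologresLookUpFunctionFactory> factories =
                ServiceLoader.load(HologresLookUpFunctionFactory.class, classLoader).iterator();
        if (!factories.hasNext()) {
            throw new NoSuchElementException(
                    "No HologresLookUpFunctionFactory registered on the classpath");
        }
        // With hologres-connector-flink-1.15 on the classpath this yields
        // Flink115HologresLookUpFunctionFactory; with 1.17, Flink117HologresLookUpFunctionFactory.
        return factories.next();
    }
}
```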
--------------------------------------------------------------------------------
/hologres-connector-flink-base/src/main/java/com/alibaba/ververica/connectors/hologres/source/bulkread/HologresShardInputSplit.java:
--------------------------------------------------------------------------------
1 | /*
2 | * Licensed to the Apache Software Foundation (ASF) under one
3 | * or more contributor license agreements. See the NOTICE file
4 | * distributed with this work for additional information
5 | * regarding copyright ownership. The ASF licenses this file
6 | * to you under the Apache License, Version 2.0 (the
7 | * "License"); you may not use this file except in compliance
8 | * with the License. You may obtain a copy of the License at
9 | *
10 | * http://www.apache.org/licenses/LICENSE-2.0
11 | *
12 | * Unless required by applicable law or agreed to in writing, software
13 | * distributed under the License is distributed on an "AS IS" BASIS,
14 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
15 | * See the License for the specific language governing permissions and
16 | * limitations under the License.
17 | */
18 |
19 | package com.alibaba.ververica.connectors.hologres.source.bulkread;
20 |
21 | import org.apache.flink.core.io.InputSplit;
22 |
23 | /** An input split that represents a Holo shard, where split number equals shard id. */
24 | public class HologresShardInputSplit implements InputSplit {
25 |
26 | private final int shardId; // shard id == split number
27 |
28 | public HologresShardInputSplit(int splitNumber) {
29 | this.shardId = splitNumber;
30 | }
31 |
32 | @Override
33 | public int getSplitNumber() {
34 | return shardId;
35 | }
36 | }
37 |
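
Since the split number doubles as the shard id, an InputFormat can enumerate splits by iterating over the table's shard count, roughly as in this sketch (`shardCount` would come from the Hologres table's metadata):

```java
// Sketch: one HologresShardInputSplit per shard; shardCount would be read from the
// Hologres table's shard metadata by the real InputFormat.
import com.alibaba.ververica.connectors.hologres.source.bulkread.HologresShardInputSplit;

final class ShardSplitSketch {
    static HologresShardInputSplit[] createSplits(int shardCount) {
        HologresShardInputSplit[] splits = new HologresShardInputSplit[shardCount];
        for (int shardId = 0; shardId < shardCount; shardId++) {
            splits[shardId] = new HologresShardInputSplit(shardId);
        }
        return splits;
    }
}
```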
--------------------------------------------------------------------------------
/hologres-connector-flink-base/src/main/java/com/alibaba/ververica/connectors/hologres/utils/FlinkUtil.java:
--------------------------------------------------------------------------------
1 | package com.alibaba.ververica.connectors.hologres.utils;
2 |
3 | import org.apache.flink.api.common.typeinfo.TypeInformation;
4 | import org.apache.flink.table.api.TableSchema;
5 | import org.apache.flink.table.data.RowData;
6 | import org.apache.flink.table.types.logical.RowType;
7 |
8 | import java.lang.reflect.InvocationTargetException;
9 | import java.lang.reflect.Method;
10 |
11 | /** Flink compatibility util. */
12 | public class FlinkUtil {
13 | @SuppressWarnings("unchecked")
14 | public static TypeInformation<RowData> getRowTypeInfo(TableSchema tableSchema) {
15 | try {
16 | Class<?> rowDataTypeInfoClass = getTypeInfoClass();
17 | Method method = rowDataTypeInfoClass.getMethod("of", RowType.class);
18 | Object result =
19 | method.invoke(null, (RowType) tableSchema.toRowDataType().getLogicalType());
20 | return (TypeInformation<RowData>) result;
21 | } catch (ClassNotFoundException
22 | | NoSuchMethodException
23 | | IllegalAccessException
24 | | InvocationTargetException e) {
25 | throw new RuntimeException(e);
26 | }
27 | }
28 |
29 | private static Class<?> getTypeInfoClass() throws ClassNotFoundException {
30 | try {
31 | // Flink 1.11
32 | return Class.forName("org.apache.flink.table.runtime.typeutils.RowDataTypeInfo");
33 | } catch (ClassNotFoundException e) {
34 | // Flink 1.12+
35 | return Class.forName("org.apache.flink.table.runtime.typeutils.InternalTypeInfo");
36 | }
37 | }
38 | }
39 |
--------------------------------------------------------------------------------
/hologres-connector-flink-base/src/main/java/com/alibaba/ververica/connectors/hologres/utils/SchemaUtil.java:
--------------------------------------------------------------------------------
1 | package com.alibaba.ververica.connectors.hologres.utils;
2 |
3 | import org.apache.flink.table.api.TableSchema;
4 | import org.apache.flink.table.types.logical.LogicalType;
5 | import org.apache.flink.table.types.logical.RowType;
6 |
7 | /** Flink compatibility util. */
8 | public class SchemaUtil {
9 | public static LogicalType[] getLogicalTypes(TableSchema tableSchema) {
10 | LogicalType[] logicalTypes = new LogicalType[tableSchema.getFieldCount()];
11 | final RowType rowType = (RowType) tableSchema.toPhysicalRowDataType().getLogicalType();
12 | for (int i = 0; i < tableSchema.getFieldCount(); i++) {
13 | logicalTypes[i] = rowType.getTypeAt(i);
14 | }
15 | return logicalTypes;
16 | }
17 |
18 | public static LogicalType[] getLogicalTypes(TableSchema tableSchema, String[] fieldNames) {
19 | LogicalType[] fieldTypes = new LogicalType[fieldNames.length];
20 | for (int idx = 0; idx < fieldNames.length; idx++) {
21 | fieldTypes[idx] = tableSchema.getFieldDataType(fieldNames[idx]).get().getLogicalType();
22 | }
23 | return fieldTypes;
24 | }
25 | }
26 |
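
The two utilities above are typically used together when turning a Flink `TableSchema` into runtime type information; the following sketch only exercises their public signatures with a hypothetical two-column schema.

```java
// Sketch: deriving column LogicalTypes and RowData TypeInformation from a TableSchema.
// The two-column schema is hypothetical; only the utility signatures come from this module.
import com.alibaba.ververica.connectors.hologres.utils.FlinkUtil;
import com.alibaba.ververica.connectors.hologres.utils.SchemaUtil;
import org.apache.flink.api.common.typeinfo.TypeInformation;
import org.apache.flink.table.api.DataTypes;
import org.apache.flink.table.api.TableSchema;
import org.apache.flink.table.data.RowData;
import org.apache.flink.table.types.logical.LogicalType;

public class SchemaUtilSketch {
    public static void main(String[] args) {
        TableSchema schema =
                TableSchema.builder()
                        .field("user_id", DataTypes.BIGINT())
                        .field("user_name", DataTypes.STRING())
                        .build();

        // All physical column types, in declaration order.
        LogicalType[] allTypes = SchemaUtil.getLogicalTypes(schema);

        // Types of a projected subset of columns.
        LogicalType[] projected = SchemaUtil.getLogicalTypes(schema, new String[] {"user_id"});

        // Reflection-based RowData type information that works across Flink versions.
        TypeInformation<RowData> typeInfo = FlinkUtil.getRowTypeInfo(schema);

        System.out.println(allTypes.length + " / " + projected.length + " / " + typeInfo);
    }
}
```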
--------------------------------------------------------------------------------
/hologres-connector-flink-base/src/main/resources/META-INF/services/org.apache.flink.table.factories.Factory:
--------------------------------------------------------------------------------
1 | # Licensed to the Apache Software Foundation (ASF) under one or more
2 | # contributor license agreements. See the NOTICE file distributed with
3 | # this work for additional information regarding copyright ownership.
4 | # The ASF licenses this file to You under the Apache License, Version 2.0
5 | # (the "License"); you may not use this file except in compliance with
6 | # the License. You may obtain a copy of the License at
7 | #
8 | # http://www.apache.org/licenses/LICENSE-2.0
9 | #
10 | # Unless required by applicable law or agreed to in writing, software
11 | # distributed under the License is distributed on an "AS IS" BASIS,
12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | # See the License for the specific language governing permissions and
14 | # limitations under the License.
15 |
16 | com.alibaba.ververica.connectors.hologres.factory.HologresTableFactory
17 |
--------------------------------------------------------------------------------
/hologres-connector-hive-2.x/src/main/java/com/alibaba/hologres/hive/HoloStorageHandler.java:
--------------------------------------------------------------------------------
1 | /*
2 | *
3 | * Licensed under the Apache License, Version 2.0 (the "License");
4 | * you may not use this file except in compliance with the License.
5 | * You may obtain a copy of the License at
6 | *
7 | * http://www.apache.org/licenses/LICENSE-2.0
8 | *
9 | * Unless required by applicable law or agreed to in writing, software
10 | * distributed under the License is distributed on an "AS IS" BASIS,
11 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | * See the License for the specific language governing permissions and
13 | * limitations under the License.
14 | */
15 |
16 | package com.alibaba.hologres.hive;
17 |
18 | /** Hive2 HoloStorageHandler. */
19 | public class HoloStorageHandler extends BaseHoloStorageHandler {}
20 |
--------------------------------------------------------------------------------
/hologres-connector-hive-2.x/src/test/resources/log4j.properties:
--------------------------------------------------------------------------------
1 | # Set everything to be logged to the file target/unit-tests.log
2 | log4j.rootCategory=INFO, file
3 | log4j.appender.file=org.apache.log4j.FileAppender
4 | log4j.appender.file.append=true
5 | log4j.appender.file.file=target/unit-tests.log
6 | log4j.appender.file.layout=org.apache.log4j.PatternLayout
7 | log4j.appender.file.layout.ConversionPattern=%d{yy/MM/dd HH:mm:ss.SSS} %t %p %c{1}: %m%n
8 |
--------------------------------------------------------------------------------
/hologres-connector-hive-2.x/src/test/resources/setting.properties:
--------------------------------------------------------------------------------
1 | HIVE_USERNAME=
2 | HIVE_PASSWORD=
3 | HIVE_JDBCURL=jdbc:hive2://ip:10000/default
4 | HOLO_USERNAME=
5 | HOLO_PASSWORD=
6 | HOLO_JDBCURL=jdbc:hologres://ip:port/test_db
7 |
--------------------------------------------------------------------------------
/hologres-connector-hive-3.x/src/main/java/com/alibaba/hologres/hive/HoloStorageHandler.java:
--------------------------------------------------------------------------------
1 | /*
2 | *
3 | * Licensed under the Apache License, Version 2.0 (the "License");
4 | * you may not use this file except in compliance with the License.
5 | * You may obtain a copy of the License at
6 | *
7 | * http://www.apache.org/licenses/LICENSE-2.0
8 | *
9 | * Unless required by applicable law or agreed to in writing, software
10 | * distributed under the License is distributed on an "AS IS" BASIS,
11 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | * See the License for the specific language governing permissions and
13 | * limitations under the License.
14 | */
15 |
16 | package com.alibaba.hologres.hive;
17 |
18 | import org.apache.hadoop.hive.ql.plan.TableDesc;
19 |
20 | import java.util.Map;
21 |
22 | /** Hive3 HoloStorageHandler. */
23 | public class HoloStorageHandler extends BaseHoloStorageHandler {
24 | /** Credentials are already included in the properties. */
25 | @Override
26 | public void configureInputJobCredentials(
27 | TableDesc tableDesc, Map<String, String> jobCredentials) {}
28 | }
29 |
--------------------------------------------------------------------------------
/hologres-connector-hive-3.x/src/test/java/test.md:
--------------------------------------------------------------------------------
1 | Data type tests
2 |
3 | ```sql
4 | create table hive_customer_type(a int, b int8,c bool, d real, e float8, f varchar(5), g json, h jsonb,i timestamptz,j date, k numeric(18,5), l bytea, m int[], n bigint[], o real[], p double precision[], q bool[], r text[]);
5 |
6 | CREATE EXTERNAL TABLE customer_to_holo_type
7 | (
8 | a int,
9 | b bigint,
10 | c boolean,
11 | d float,
12 | e double,
13 | f string,
14 | g string,
15 | h string,
16 | i timestamp,
17 | j date,
18 | k decimal(18,5),
19 | l binary,
20 | m ARRAY<int>,
21 | n ARRAY<bigint>,
22 | o ARRAY<float>,
23 | p ARRAY<double>,
24 | q ARRAY<boolean>,
25 | r ARRAY<string>
26 | )
27 | STORED BY 'com.alibaba.hologres.hive.HoloStorageHandler'
28 | TBLPROPERTIES (
29 | "hive.sql.jdbc.driver" = "org.postgresql.Driver",
30 | "hive.sql.jdbc.url" = "jdbc:postgresql://host:port/db",
31 | "hive.sql.username" = "",
32 | "hive.sql.password" = "",
33 | "hive.sql.table" = "hive_customer_type",
34 | "hive.sql.write_mode" = "INSERT_OR_UPDATE",
35 | "hive.sql.copy_write_mode" = "true",
36 | "hive.sql.copy_write_max_connections_number" = "20",
37 | "hive.sql.dirty_data_check" = "true"
38 | );
39 |
40 | insert into customer_to_holo_type select 111, 222, 'false', 1.23, 2.34,'ccc','{\"a\":\"b\"}','{\"a\":\"b\"}', '2021-05-21 16:00:45.123', '2021-05-21','85.23', '\x030405', array(1,2,3), array(1L,2L,3L), null, null, array(true,false), array('a','b','c');
41 | ```
--------------------------------------------------------------------------------
/hologres-connector-hive-3.x/src/test/resources/log4j.properties:
--------------------------------------------------------------------------------
1 | # Set everything to be logged to the file target/unit-tests.log
2 | log4j.rootCategory=INFO, file
3 | log4j.appender.file=org.apache.log4j.FileAppender
4 | log4j.appender.file.append=true
5 | log4j.appender.file.file=target/unit-tests.log
6 | log4j.appender.file.layout=org.apache.log4j.PatternLayout
7 | log4j.appender.file.layout.ConversionPattern=%d{yy/MM/dd HH:mm:ss.SSS} %t %p %c{1}: %m%n
8 |
--------------------------------------------------------------------------------
/hologres-connector-hive-3.x/src/test/resources/setting.properties:
--------------------------------------------------------------------------------
1 | HIVE_USERNAME=
2 | HIVE_PASSWORD=
3 | HIVE_JDBCURL=jdbc:hive2://ip:10000/default
4 | HOLO_USERNAME=
5 | HOLO_PASSWORD=
6 | HOLO_JDBCURL=jdbc:hologres://ip:port/test_db
7 |
--------------------------------------------------------------------------------
/hologres-connector-hive-base/README.md:
--------------------------------------------------------------------------------
1 |
2 | ## Common core code of the Hologres Hive Connector
3 |
4 | It cannot be used directly; build and install it locally as a dependency of ***hologres-connector-hive-2.x*** and ***hologres-connector-hive-3.x***
5 |
6 | ### Building hologres-connector-hive-base
7 |
8 | - Package against the Hive version of your environment, using -P to select the version profile
9 |
10 | - Supported versions are listed in the table below
11 |
12 | | Parameter | Supported versions |
13 | |:---:|:---:|
14 | | Hive version | hive-2, hive-3 |
15 |
16 | For example, if your Hive environment is hive-2.3.8, you can run:
17 |
18 | ```
19 | mvn install -pl hologres-connector-hive-base clean package -DskipTests -Phive-2
20 | ```
21 |
--------------------------------------------------------------------------------
/hologres-connector-hive-base/src/main/java/com/alibaba/hologres/hive/HoloRecordWritable.java:
--------------------------------------------------------------------------------
1 | package com.alibaba.hologres.hive;
2 |
3 | import com.alibaba.hologres.client.Put;
4 | import com.alibaba.hologres.hive.exception.HiveHoloStorageException;
5 | import org.apache.hadoop.io.Writable;
6 |
7 | import java.io.DataInput;
8 | import java.io.DataOutput;
9 | import java.io.IOException;
10 | import java.util.Arrays;
11 |
12 | /** HoloRecordWritable. */
13 | public class HoloRecordWritable implements Writable {
14 |
15 | private Object[] columnValues;
16 | private String[] columnNames;
17 |
18 | public void clear() {
19 | Arrays.fill(columnValues, null);
20 | }
21 |
22 | public HoloRecordWritable(int numColumns, String[] columnNames) {
23 | this.columnValues = new Object[numColumns];
24 | this.columnNames = columnNames;
25 | }
26 |
27 | public void set(int i, Object columnObject) {
28 | columnValues[i] = columnObject;
29 | }
30 |
31 | public void write(Put put) throws HiveHoloStorageException {
32 | if (columnValues == null) {
33 | throw new HiveHoloStorageException("No data available to be written");
34 | }
35 | for (int i = 0; i < columnValues.length; i++) {
36 | Object value = columnValues[i];
37 | put.setObject(columnNames[i], value);
38 | }
39 | }
40 |
41 | @Override
42 | public void write(DataOutput dataOutput) throws IOException {}
43 |
44 | @Override
45 | public void readFields(DataInput dataInput) throws IOException {}
46 |
47 | public Object[] getColumnValues() {
48 | return columnValues;
49 | }
50 |
51 | public String[] getColumnNames() {
52 | return columnNames;
53 | }
54 | }
55 |
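
`write(Put)` only copies the buffered column values into a holo-client `Put`; the sketch below shows that hand-off end to end. The `HoloClient`/`HoloConfig`/`Put` calls follow the holo-client module's API as far as it is visible here, so treat the exact package and method names as assumptions.

```java
// Sketch of the data path: fill a HoloRecordWritable column by column, copy its values
// into a holo-client Put, and hand the Put to HoloClient. Package and method names of
// the holo-client classes are assumptions based on the holo-client module in this repo.
import com.alibaba.hologres.client.HoloClient;
import com.alibaba.hologres.client.HoloConfig;
import com.alibaba.hologres.client.Put;
import com.alibaba.hologres.client.model.TableSchema;
import com.alibaba.hologres.hive.HoloRecordWritable;

public class HoloRecordWritableSketch {
    public static void main(String[] args) throws Exception {
        HoloConfig config = new HoloConfig();
        config.setJdbcUrl("jdbc:postgresql://<host>:<port>/<db>"); // placeholder endpoint
        config.setUsername("<access-id>");
        config.setPassword("<access-key>");

        HoloClient client = new HoloClient(config);
        try {
            TableSchema schema = client.getTableSchema("hive_customer_type");

            String[] columnNames = {"a", "b"};
            HoloRecordWritable writable = new HoloRecordWritable(columnNames.length, columnNames);
            writable.set(0, 111);
            writable.set(1, 222L);

            Put put = new Put(schema);
            writable.write(put); // copies the buffered values into the Put
            client.put(put);     // buffered, asynchronous write
            client.flush();
        } finally {
            client.close();
        }
    }
}
```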
--------------------------------------------------------------------------------
/hologres-connector-hive-base/src/main/java/com/alibaba/hologres/hive/conf/HoloStorageConfigManager.java:
--------------------------------------------------------------------------------
1 | package com.alibaba.hologres.hive.conf;
2 |
3 | import org.slf4j.Logger;
4 | import org.slf4j.LoggerFactory;
5 |
6 | import java.util.Map;
7 | import java.util.Map.Entry;
8 | import java.util.Properties;
9 |
10 | /** Main configuration handler class. */
11 | public class HoloStorageConfigManager {
12 |
13 | private static final Logger LOGGER = LoggerFactory.getLogger(HoloStorageConfigManager.class);
14 |
15 | public static void copyConfigurationToJob(Properties props, Map<String, String> jobProps) {
16 | for (Entry