├── README.md
├── .gitignore
├── aliyun-connectors-common
│   ├── src
│   │   ├── main
│   │   │   └── java
│   │   │       └── com/alibaba/flink/connectors/common
│   │   │           ├── Constants.java
│   │   │           ├── source
│   │   │           │   ├── WatermarkProvider.java
│   │   │           │   ├── AbstractLocatableParallelSource.java
│   │   │           │   ├── AbstractParallelSource.java
│   │   │           │   ├── AbstractDynamicParallelSource.java
│   │   │           │   └── SourceUtils.java
│   │   │           ├── sink
│   │   │           │   ├── HasRetryTimeout.java
│   │   │           │   ├── Syncable.java
│   │   │           │   ├── TupleRichOutputFormat.java
│   │   │           │   ├── OutputFormatSinkFunction.java
│   │   │           │   └── TupleOutputFormatSinkFunction.java
│   │   │           ├── reader
│   │   │           │   ├── Interruptible.java
│   │   │           │   ├── RecordReader.java
│   │   │           │   ├── SequenceReader.java
│   │   │           │   └── AbstractPartitionNumsListener.java
│   │   │           ├── errorcode
│   │   │           │   └── ConnectorErrors.java
│   │   │           ├── sts
│   │   │           │   ├── StsConstants.java
│   │   │           │   ├── DecodeUtil.java
│   │   │           │   ├── StsParamValidateUtil.java
│   │   │           │   ├── StsServiceRequest.java
│   │   │           │   └── AbstractClientProvider.java
│   │   │           ├── exception
│   │   │           │   ├── NotEnoughParamsException.java
│   │   │           │   ├── ErrorUtils.java
│   │   │           │   └── ConnectorException.java
│   │   │           ├── converter
│   │   │           │   └── RecordConverter.java
│   │   │           ├── resolver
│   │   │           │   └── RecordResolver.java
│   │   │           ├── util
│   │   │           │   ├── ConnectionPool.java
│   │   │           │   ├── DateUtil.java
│   │   │           │   ├── BlinkStringUtil.java
│   │   │           │   └── RetryUtils.java
│   │   │           ├── metrics
│   │   │           │   └── SumAndCount.java
│   │   │           ├── conf
│   │   │           │   └── BlinkOptions.java
│   │   │           └── MetricUtils.java
│   │   └── test
│   │       └── java
│   │           └── com/alibaba/flink/connectors/common
│   │               └── PreAssignedInputSplitAssignerTest.java
│   └── pom.xml
├── NOTICE
├── datahub-connector
│   ├── src
│   │   ├── main
│   │   │   ├── resources
│   │   │   │   └── META-INF/services
│   │   │   │       └── org.apache.flink.table.factories.TableFactory
│   │   │   └── java
│   │   │       └── com/alibaba/flink/connectors/datahub
│   │   │           ├── datastream
│   │   │           │   ├── sink
│   │   │           │   │   ├── DatahubRecordResolver.java
│   │   │           │   │   └── DatahubSinkFunction.java
│   │   │           │   ├── source
│   │   │           │   │   └── DatahubShardInputSplit.java
│   │   │           │   ├── util
│   │   │           │   │   └── DatahubClientProvider.java
│   │   │           │   └── example
│   │   │           │       ├── DatahubSourceFunctionExample.java
│   │   │           │       └── DatahubSinkFunctionExample.java
│   │   │           └── table
│   │   │               ├── DatahubRowRecordResolver.java
│   │   │               ├── DatahubDescriptorValidator.java
│   │   │               ├── DatahubTableSink.java
│   │   │               └── DatahubTableFactory.java
│   │   └── test
│   │       └── java
│   │           └── com/alibaba/flink/connectors/datahub/table
│   │               └── DatahubTableFactoryTest.java
│   └── README.md
├── sls-connector
│   ├── src
│   │   ├── main
│   │   │   └── java
│   │   │       └── com/alibaba/flink/connectors/sls/datastream
│   │   │           ├── sink
│   │   │           │   ├── DefaultSlsSinkFunction.java
│   │   │           │   ├── SlsRecordResolver.java
│   │   │           │   ├── DefaultSlsRecordResolver.java
│   │   │           │   ├── SlsSinkFunction.java
│   │   │           │   ├── SlsRecord.java
│   │   │           │   └── LogProducerProvider.java
│   │   │           └── source
│   │   │               ├── SlsInputSplit.java
│   │   │               ├── SlsClientProvider.java
│   │   │               └── SlsSourceFunction.java
│   │   └── test
│   │       └── java
│   │           └── com/alibaba/flink/connectors/sls/datastream/sink
│   │               ├── LogProducerProviderTest.java
│   │               └── SlsOutputFormatTest.java
│   └── pom.xml
├── cloudhbase-connector
│   ├── src
│   │   ├── main
│   │   │   └── java
│   │   │       └── com/alibaba/flink/connectors/cloudhbase
│   │   │           ├── sink
│   │   │           │   └── CloudHBaseRecordResolver.java
│   │   │           └── example
│   │   │               └── CloudHBaseSinkFunctionExample.java
│   │   └── test
│   │       └── java
│   │           └── com/alibaba/flink/connectors/cloudhbase/sink
│   │               └── CloudHBaseSinkFunctionTest.java
│   └── pom.xml
├── pom.xml
├── tools
│   └── maven
│       ├── suppressions.xml
│       └── scalastyle-config.xml
└── sls-shaded-sdk
    └── pom.xml

--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
Aliyun product data connectors for Flink.

--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
.cache
scalastyle-output.xml
.classpath
.idea
.metadata
.settings
.project
.version.properties
filter.properties
logs.zip
target
tmp
*.class
*.iml
*.swp
*.jar
*.log
.DS_Store
node_modules
*.ipr
*.iws

--------------------------------------------------------------------------------
/aliyun-connectors-common/src/main/java/com/alibaba/flink/connectors/common/Constants.java:
--------------------------------------------------------------------------------
package com.alibaba.flink.connectors.common;

/**
 * Constants.
 */
public class Constants {
    public static final String SERVICE_TYPE = "blink";
    public static final String CONFIG_PREFIX = "yarn.app." + SERVICE_TYPE + ".";
}

--------------------------------------------------------------------------------
/aliyun-connectors-common/src/main/java/com/alibaba/flink/connectors/common/source/WatermarkProvider.java:
--------------------------------------------------------------------------------
package com.alibaba.flink.connectors.common.source;

/**
 * WatermarkProvider.
 */
public interface WatermarkProvider {
    /**
     * Gets current partition watermark.
     * @return the current watermark of this partition.
     */
    long getWatermark();
}

--------------------------------------------------------------------------------
/NOTICE:
--------------------------------------------------------------------------------
Copyright 2019 Alibaba Group

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
--------------------------------------------------------------------------------
/datahub-connector/src/main/resources/META-INF/services/org.apache.flink.table.factories.TableFactory:
--------------------------------------------------------------------------------
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

com.alibaba.flink.connectors.datahub.table.DatahubTableFactory

--------------------------------------------------------------------------------
/datahub-connector/README.md:
--------------------------------------------------------------------------------
## Examples

### Datahub Table Sink

```sql
CREATE TABLE mysink(i INTEGER, b BIGINT, f FLOAT, d DOUBLE, d2 DECIMAL, b2 BOOLEAN, s VARCHAR)
with (
    'connector.type'='datahub',                      # required
    'connector.project'='...',                       # required
    'connector.topic'='...',                         # required
    'connector.access_id'='...',                     # required
    'connector.access_key'='...',                    # required
    'connector.endpoint'='...',                      # required
    'connector.buffer_size'='...',                   # optional, number of messages to buffer before sending out, default value 5000
    'connector.batch_size'='...',                    # optional, number of messages to send in a batch, default value 50
    'connector.batch_write_timeout_in_mills'='...',  # optional, timeout to flush data if the buffer is not full, default value 20000
    'connector.retry_timeout_in_mills'='...',        # optional, retry timeout for buffered data, default value 1000
    'connector.max_retry_times'='...'                # optional, default value 20
);
```
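
### Datahub DataStream Sink

For the DataStream API, a minimal sketch using `DatahubSinkFunction` is shown below. The endpoint, project, topic and credential values are placeholders, and the sketch assumes the target topic accepts the `RecordEntry` values you emit (a real job would populate each entry's record data to match the topic schema):

```java
import com.alibaba.flink.connectors.datahub.datastream.sink.DatahubRecordResolver;
import com.alibaba.flink.connectors.datahub.datastream.sink.DatahubSinkFunction;
import com.aliyun.datahub.client.model.RecordEntry;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;

public class DatahubSinkExample {
    public static void main(String[] args) throws Exception {
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();

        // The noop resolver passes RecordEntry elements through unchanged; a custom
        // DatahubRecordResolver<T> would convert your own event type instead.
        env.fromElements(new RecordEntry())
            .addSink(new DatahubSinkFunction<>(
                "https://dh-cn-hangzhou.aliyuncs.com",   // endpoint (placeholder)
                "my-project", "my-topic",                // placeholders
                "<access-id>", "<access-key>",
                new DatahubRecordResolver.NoopDatahubRecordResolver()));

        env.execute("datahub-sink-example");
    }
}
```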
--------------------------------------------------------------------------------
/aliyun-connectors-common/src/main/java/com/alibaba/flink/connectors/common/sink/HasRetryTimeout.java:
--------------------------------------------------------------------------------
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package com.alibaba.flink.connectors.common.sink;

/**
 * Created by biao.liub@alibaba-inc.com on 2016/11/7.
 */
public interface HasRetryTimeout {
    long getRetryTimeout();
}

--------------------------------------------------------------------------------
/aliyun-connectors-common/src/main/java/com/alibaba/flink/connectors/common/reader/Interruptible.java:
--------------------------------------------------------------------------------
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package com.alibaba.flink.connectors.common.reader;

/**
 * Created by biao.liub@alibaba-inc.com on 2016/10/27.
 */
public interface Interruptible {
    /**
     * Interrupt.
     */
    void interrupt();
}

--------------------------------------------------------------------------------
/aliyun-connectors-common/src/main/java/com/alibaba/flink/connectors/common/errorcode/ConnectorErrors.java:
--------------------------------------------------------------------------------
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package com.alibaba.flink.connectors.common.errorcode;

/**
 * ConnectorErrors.
 */
public class ConnectorErrors {
    public static final ConnectorErrorCode INST = (ConnectorErrorCode) ErrorFactory.createProxy(ConnectorErrorCode.class);
}
--------------------------------------------------------------------------------
/aliyun-connectors-common/src/main/java/com/alibaba/flink/connectors/common/sink/Syncable.java:
--------------------------------------------------------------------------------
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package com.alibaba.flink.connectors.common.sink;

import java.io.IOException;

/**
 * Created by sleepy on 15/12/15.
 * Will be invoked in stream mode when a checkpoint is taken.
 */
public interface Syncable {
    void sync() throws IOException;
}

--------------------------------------------------------------------------------
/aliyun-connectors-common/src/main/java/com/alibaba/flink/connectors/common/sts/StsConstants.java:
--------------------------------------------------------------------------------
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package com.alibaba.flink.connectors.common.sts;

import com.aliyuncs.http.ProtocolType;

/**
 * StsConstants.
 */
public class StsConstants {
    // https
    public static final ProtocolType PROTOCOL_TYPE = ProtocolType.HTTPS;
    // duration
    public static final Long DURATION = 129600L;

    public static final String STS_SECRET_KEY = "xxx";

    public static final String STS_ROLE_RESPONSE_KEY = "roleResponseKey";
}
--------------------------------------------------------------------------------
/aliyun-connectors-common/src/main/java/com/alibaba/flink/connectors/common/exception/NotEnoughParamsException.java:
--------------------------------------------------------------------------------
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package com.alibaba.flink.connectors.common.exception;

/**
 * NotEnoughParamsException.
 */
public class NotEnoughParamsException extends ConnectorException {

    private static final long serialVersionUID = -927337157640571386L;

    public NotEnoughParamsException(String message) {
        super(message);
    }

    public NotEnoughParamsException(String message, Throwable cause) {
        super(message, cause);
    }
}

--------------------------------------------------------------------------------
/sls-connector/src/main/java/com/alibaba/flink/connectors/sls/datastream/sink/DefaultSlsSinkFunction.java:
--------------------------------------------------------------------------------
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package com.alibaba.flink.connectors.sls.datastream.sink;

/**
 * DefaultSlsSinkFunction accepts input of SlsRecord.
 */
public class DefaultSlsSinkFunction extends SlsSinkFunction<SlsRecord> {
    public DefaultSlsSinkFunction(String endPoint, String accessKeyId, String accessKeySecret,
                                  String projectName, String logstoreName) {
        super(endPoint, accessKeyId, accessKeySecret, projectName, logstoreName, new DefaultSlsRecordResolver());
    }
}
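As a usage reference, here is a minimal job wiring `DefaultSlsSinkFunction` into a stream. This is a sketch only; the endpoint, credential, project and logstore values are placeholders:

```java
import com.alibaba.flink.connectors.sls.datastream.sink.DefaultSlsSinkFunction;
import com.alibaba.flink.connectors.sls.datastream.sink.SlsRecord;
import com.aliyun.openservices.log.common.LogItem;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;

public class SlsSinkExample {
    public static void main(String[] args) throws Exception {
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();

        LogItem item = new LogItem();             // one SLS log line
        item.PushBack("level", "INFO");           // key/value pairs of the log
        item.PushBack("msg", "hello from Flink");

        env.fromElements(new SlsRecord(item))
            .addSink(new DefaultSlsSinkFunction(
                "cn-hangzhou.log.aliyuncs.com",   // endpoint (placeholder)
                "<access-key-id>", "<access-key-secret>",
                "my-project", "my-logstore"));

        env.execute("sls-sink-example");
    }
}
```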
--------------------------------------------------------------------------------
/aliyun-connectors-common/src/main/java/com/alibaba/flink/connectors/common/exception/ErrorUtils.java:
--------------------------------------------------------------------------------
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package com.alibaba.flink.connectors.common.exception;

/**
 * Utils for dealing with exceptions.
 */
public class ErrorUtils {

    public static ConnectorException getException(String s, Throwable t) {
        return new ConnectorException(s, t);
    }

    public static ConnectorException getException(String message) {
        return new ConnectorException(message);
    }

    public static void throwException(String s, Throwable t) {
        throw getException(s, t);
    }

    public static void throwException(String message) {
        throw getException(message);
    }
}

--------------------------------------------------------------------------------
/aliyun-connectors-common/src/main/java/com/alibaba/flink/connectors/common/converter/RecordConverter.java:
--------------------------------------------------------------------------------
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package com.alibaba.flink.connectors.common.converter;

import java.io.Serializable;

/**
 * Interface used in a SinkFunction or SourceFunction to convert records to/from
 * Flink.
 * @param <IN> the input record type.
 * @param <R> the converted record type.
 */
public interface RecordConverter<IN, R> extends Serializable {
    RecordConverter NOOP_CONVERTER = new NoopConverter();

    R convert(IN message);

    /**
     * Inner identical Converter.
     * @param <T> the record type passed through unchanged.
     */
    final class NoopConverter<T> implements RecordConverter<T, T> {

        @Override
        public T convert(T record) {
            return record;
        }
    }

}
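To make the contract concrete, a tiny illustrative implementation follows; the class name and the CSV use case are made up for the example:

```java
import com.alibaba.flink.connectors.common.converter.RecordConverter;

/**
 * Example converter that turns a CSV line into its field count.
 */
public class FieldCountConverter implements RecordConverter<String, Integer> {

    @Override
    public Integer convert(String message) {
        // The -1 limit keeps trailing empty fields, so "a,," counts as three fields.
        return message.split(",", -1).length;
    }
}
```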
--------------------------------------------------------------------------------
/aliyun-connectors-common/src/main/java/com/alibaba/flink/connectors/common/resolver/RecordResolver.java:
--------------------------------------------------------------------------------
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package com.alibaba.flink.connectors.common.resolver;

import org.apache.flink.configuration.Configuration;

import java.io.Serializable;

/**
 * The RecordResolver interface is used by a SinkFunction to extract the necessary
 * information from a custom input type, for example the user callback that builds
 * the Kafka ProducerRecord to send.
 */
public interface RecordResolver extends Serializable {

    /**
     * Initialization method for the resolver.
     */
    default void open(Configuration configuration) {
    }

    /**
     * Tear-down method for the resolver.
     */
    default void close() {
    }
}

--------------------------------------------------------------------------------
/sls-connector/src/main/java/com/alibaba/flink/connectors/sls/datastream/sink/SlsRecordResolver.java:
--------------------------------------------------------------------------------
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package com.alibaba.flink.connectors.sls.datastream.sink;

import com.alibaba.flink.connectors.common.resolver.RecordResolver;
import com.aliyun.openservices.log.common.LogItem;

/**
 * Interface for serializing a record to SLS.
 * @param <T> the record type.
 */
public interface SlsRecordResolver<T> extends RecordResolver {

    /**
     * Gets topic of log.
     */
    String getTopic(T record);

    /**
     * Gets source meta of log.
     */
    String getSource(T record);

    /**
     * Gets partition key of log.
     */
    String getPartitionKey(T record);

    /**
     * Gets content of log.
     */
    LogItem getLogItem(T record);
}

--------------------------------------------------------------------------------
/sls-connector/src/main/java/com/alibaba/flink/connectors/sls/datastream/sink/DefaultSlsRecordResolver.java:
--------------------------------------------------------------------------------
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package com.alibaba.flink.connectors.sls.datastream.sink;

import com.aliyun.openservices.log.common.LogItem;

/**
 * Default serialization schema which unwraps a SlsRecord.
 */
public class DefaultSlsRecordResolver implements SlsRecordResolver<SlsRecord> {
    @Override
    public String getTopic(SlsRecord record) {
        return record.getTopic();
    }

    @Override
    public String getSource(SlsRecord record) {
        return record.getSource();
    }

    @Override
    public String getPartitionKey(SlsRecord record) {
        return record.getPartitionKey();
    }

    @Override
    public LogItem getLogItem(SlsRecord record) {
        return record.getContent();
    }
}
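For a custom input type, implementing `SlsRecordResolver<T>` looks like the following sketch; the `AccessLog` event type and its fields are made up for the example:

```java
import com.alibaba.flink.connectors.sls.datastream.sink.SlsRecordResolver;
import com.aliyun.openservices.log.common.LogItem;

/** Illustrative resolver for a hypothetical access-log event. */
public class AccessLogResolver implements SlsRecordResolver<AccessLogResolver.AccessLog> {

    /** Minimal event POJO used by the example. */
    public static class AccessLog implements java.io.Serializable {
        public String host;
        public String userId;
        public String url;
        public int status;
    }

    @Override
    public String getTopic(AccessLog record) {
        return "access-log";               // constant topic for all records
    }

    @Override
    public String getSource(AccessLog record) {
        return record.host;                // machine that produced the event
    }

    @Override
    public String getPartitionKey(AccessLog record) {
        return record.userId;              // keeps one user's logs on one shard
    }

    @Override
    public LogItem getLogItem(AccessLog record) {
        LogItem item = new LogItem();
        item.PushBack("url", record.url);
        item.PushBack("status", String.valueOf(record.status));
        return item;
    }
}
```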
--------------------------------------------------------------------------------
/sls-connector/src/main/java/com/alibaba/flink/connectors/sls/datastream/source/SlsInputSplit.java:
--------------------------------------------------------------------------------
/*
 *
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 */

package com.alibaba.flink.connectors.sls.datastream.source;

import org.apache.flink.core.io.InputSplit;

/**
 * SlsInputSplit represents a shard of the SLS log store.
 */
public class SlsInputSplit implements InputSplit, java.io.Serializable {
    private static final long serialVersionUID = 1L;

    /** The id of the shard this split reads from. */
    private final int shardId;

    public SlsInputSplit(int shardId) {
        this.shardId = shardId;
    }

    @Override
    public int getSplitNumber() {
        return shardId;
    }

    public int getShardId() {
        return shardId;
    }

    @Override
    public String toString() {
        return "SlsInputSplit{" +
            "shardId=" + shardId +
            '}';
    }
}

--------------------------------------------------------------------------------
/cloudhbase-connector/src/main/java/com/alibaba/flink/connectors/cloudhbase/sink/CloudHBaseRecordResolver.java:
--------------------------------------------------------------------------------
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package com.alibaba.flink.connectors.cloudhbase.sink;

import com.alibaba.flink.connectors.common.resolver.RecordResolver;
import org.apache.hadoop.hbase.client.Mutation;

/**
 * Interface for implementing CloudHBase's record resolver.
 *
 * @param <RECORD> the type of record to convert.
 */
public interface CloudHBaseRecordResolver<RECORD> extends RecordResolver {

    /**
     * Returns the row key of the record in the form of string.
     *
     * @param record the record whose row key is returned.
     */
    String getRowKey(RECORD record);

    /**
     * Returns the mutation for the record.
     *
     * @param record the record whose mutation is returned.
     */
    Mutation getMutation(RECORD record);
}
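Below is an illustrative `CloudHBaseRecordResolver` implementation that writes one value column per record; the record type, column family and qualifier are made up for the sketch:

```java
import com.alibaba.flink.connectors.cloudhbase.sink.CloudHBaseRecordResolver;
import org.apache.hadoop.hbase.client.Mutation;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.util.Bytes;

/** Illustrative resolver mapping a key/value pair to a single-column Put. */
public class KeyValueResolver implements CloudHBaseRecordResolver<KeyValueResolver.KeyValue> {

    /** Minimal record POJO used by the example. */
    public static class KeyValue implements java.io.Serializable {
        public String key;
        public String value;
    }

    private static final byte[] FAMILY = Bytes.toBytes("cf");
    private static final byte[] QUALIFIER = Bytes.toBytes("v");

    @Override
    public String getRowKey(KeyValue record) {
        return record.key;
    }

    @Override
    public Mutation getMutation(KeyValue record) {
        // One Put per record; a delete-capable resolver could return a Delete instead.
        Put put = new Put(Bytes.toBytes(record.key));
        put.addColumn(FAMILY, QUALIFIER, Bytes.toBytes(record.value));
        return put;
    }
}
```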
--------------------------------------------------------------------------------
/sls-connector/src/main/java/com/alibaba/flink/connectors/sls/datastream/sink/SlsSinkFunction.java:
--------------------------------------------------------------------------------
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package com.alibaba.flink.connectors.sls.datastream.sink;

import com.alibaba.flink.connectors.common.sink.OutputFormatSinkFunction;

/**
 * SlsSinkFunction.
 */
public class SlsSinkFunction<T> extends OutputFormatSinkFunction<T> {

    public SlsSinkFunction(SlsOutputFormat<T> slsOutputFormat) {
        super(slsOutputFormat);
    }

    public SlsSinkFunction(
            String endPoint, String accessKeyId, String accessKeySecret,
            String projectName, String logstoreName,
            SlsRecordResolver<T> serializationSchema) {
        this(new SlsOutputFormat.Builder<T>()
            .setEndPoint(endPoint).setAccessKey(accessKeySecret).setAccessKeyId(accessKeyId)
            .setProjectName(projectName).setLogstore(logstoreName)
            .setSerializationSchema(serializationSchema).build());
    }
}
--------------------------------------------------------------------------------
/datahub-connector/src/main/java/com/alibaba/flink/connectors/datahub/datastream/sink/DatahubRecordResolver.java:
--------------------------------------------------------------------------------
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package com.alibaba.flink.connectors.datahub.datastream.sink;

import com.alibaba.flink.connectors.common.resolver.RecordResolver;
import com.aliyun.datahub.client.model.RecordEntry;

/**
 * DatahubRecordResolver.
 * @param <T> the input record type.
 */
public interface DatahubRecordResolver<T> extends RecordResolver {
    DatahubRecordResolver NOOP_DATAHUB_RECORD_RESOLVER = new NoopDatahubRecordResolver();

    /**
     * Default implementation.
     */
    default void open() {
        // do nothing
    }

    RecordEntry getRecordEntry(T record);

    /**
     * NoopDatahubRecordResolver.
     */
    class NoopDatahubRecordResolver implements DatahubRecordResolver<RecordEntry> {
        @Override
        public RecordEntry getRecordEntry(RecordEntry record) {
            return record;
        }
    }
}

--------------------------------------------------------------------------------
/aliyun-connectors-common/pom.xml:
--------------------------------------------------------------------------------
<?xml version="1.0" encoding="UTF-8"?>
<project xmlns="http://maven.apache.org/POM/4.0.0"
         xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
    <modelVersion>4.0.0</modelVersion>

    <parent>
        <groupId>com.alibaba.flink</groupId>
        <artifactId>aliyun-flink-connectors-parent</artifactId>
        <version>0.1-SNAPSHOT</version>
        <relativePath>..</relativePath>
    </parent>

    <groupId>com.alibaba.flink</groupId>
    <artifactId>aliyun-connectors-common</artifactId>

    <dependencies>
        <dependency>
            <groupId>org.apache.flink</groupId>
            <artifactId>flink-streaming-java_${scala.binary.version}</artifactId>
            <version>${flink.version}</version>
            <scope>provided</scope>
        </dependency>
        <dependency>
            <groupId>org.apache.flink</groupId>
            <artifactId>flink-streaming-scala_${scala.binary.version}</artifactId>
            <version>${flink.version}</version>
            <scope>provided</scope>
        </dependency>
        <dependency>
            <groupId>org.apache.flink</groupId>
            <artifactId>flink-metrics-dropwizard</artifactId>
            <version>${flink.version}</version>
            <scope>provided</scope>
        </dependency>
        <dependency>
            <groupId>com.aliyun</groupId>
            <artifactId>aliyun-java-sdk-sts</artifactId>
            <version>2.1.6</version>
        </dependency>
        <dependency>
            <groupId>com.aliyun</groupId>
            <artifactId>aliyun-java-sdk-core</artifactId>
            <version>2.3.6</version>
        </dependency>
        <dependency>
            <groupId>commons-codec</groupId>
            <artifactId>commons-codec</artifactId>
        </dependency>
    </dependencies>
</project>
--------------------------------------------------------------------------------
/datahub-connector/src/main/java/com/alibaba/flink/connectors/datahub/datastream/sink/DatahubSinkFunction.java:
--------------------------------------------------------------------------------
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package com.alibaba.flink.connectors.datahub.datastream.sink;

import com.alibaba.flink.connectors.common.sink.OutputFormatSinkFunction;

/**
 * DatahubSinkFunction.
 * @param <T> the input record type.
 */
public class DatahubSinkFunction<T> extends OutputFormatSinkFunction<T> {
    public DatahubSinkFunction(DatahubOutputFormat<T> datahubOutputFormat) {
        super(datahubOutputFormat);
    }

    public DatahubSinkFunction(
            String endPoint, String projectName, String topicName,
            String accessKeyId, String accessKeySecret,
            DatahubRecordResolver<T> serializationSchema) {
        this(new DatahubOutputFormat.Builder<T>()
            .setEndPoint(endPoint)
            .setAccessKey(accessKeySecret)
            .setAccessId(accessKeyId)
            .setProjectName(projectName)
            .setTopicName(topicName)
            .setRecordResolver(serializationSchema)
            .build());
    }
}

--------------------------------------------------------------------------------
/sls-connector/src/test/java/com/alibaba/flink/connectors/sls/datastream/sink/LogProducerProviderTest.java:
--------------------------------------------------------------------------------
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package com.alibaba.flink.connectors.sls.datastream.sink;

import com.aliyun.openservices.aliyun.log.producer.LogProducer;
import com.aliyun.openservices.aliyun.log.producer.errors.ProducerException;
import org.junit.Test;
import org.mockito.Mockito;
import org.powermock.reflect.Whitebox;

/**
 * Tests for log producer provider.
 */
public class LogProducerProviderTest {

    @Test
    public void testClose() throws ProducerException, InterruptedException {
        LogProducerProvider producerProvider = new LogProducerProvider(
            "test-project", "test-endpoint", "test-ak", "test-secret", 1, 100);

        LogProducer producer = Mockito.mock(LogProducer.class);
        Whitebox.setInternalState(producerProvider, "client", producer);

        Mockito.doThrow(new InterruptedException()).doNothing().when(producer).close();
        producerProvider.closeClient();

        Mockito.verify(producer, Mockito.times(2)).close();
    }
}
--------------------------------------------------------------------------------
/datahub-connector/src/main/java/com/alibaba/flink/connectors/datahub/datastream/source/DatahubShardInputSplit.java:
--------------------------------------------------------------------------------
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package com.alibaba.flink.connectors.datahub.datastream.source;

import org.apache.flink.core.io.InputSplit;

import java.util.Date;

/**
 * DatahubShardInputSplit.
 */
public class DatahubShardInputSplit implements InputSplit {
    private static final long serialVersionUID = 4128235102572836877L;

    private final int splitId;
    private String shardId;
    private Date startTime;

    public DatahubShardInputSplit(int splitId, String shardId, Date startTime) {
        this.splitId = splitId;
        this.shardId = shardId;
        this.startTime = startTime;
    }

    @Override
    public int getSplitNumber() {
        return splitId;
    }

    public String getShardId() {
        return shardId;
    }

    public Date getStartTime() {
        return startTime;
    }

    @Override
    public String toString() {
        return String.format("splitId:%s, shardId:%s, startTime:%s", splitId, shardId, startTime);
    }
}
--------------------------------------------------------------------------------
/sls-connector/src/main/java/com/alibaba/flink/connectors/sls/datastream/sink/SlsRecord.java:
--------------------------------------------------------------------------------
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package com.alibaba.flink.connectors.sls.datastream.sink;

import com.aliyun.openservices.log.common.LogItem;

import java.io.Serializable;

/**
 * SlsRecord wraps the content and metadata of a record to write to SLS.
 */
public class SlsRecord implements Serializable {

    private final String topic;
    private final String source;
    private final String partitionKey;
    private final LogItem content;

    public SlsRecord(LogItem content, String topic, String source, String partitionKey) {
        this.content = content;
        this.topic = topic;
        this.source = source;
        this.partitionKey = partitionKey;
    }

    public SlsRecord(LogItem content) {
        this(content, "", "", null);
    }

    public String getTopic() {
        return topic;
    }

    public String getSource() {
        return source;
    }

    public String getPartitionKey() {
        return partitionKey;
    }

    public LogItem getContent() {
        return content;
    }

}
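The four-argument constructor carries the routing metadata that an `SlsRecordResolver` later reads back. A short sketch (all values are placeholders):

```java
import com.alibaba.flink.connectors.sls.datastream.sink.SlsRecord;
import com.aliyun.openservices.log.common.LogItem;

public class SlsRecordDemo {
    public static void main(String[] args) {
        LogItem item = new LogItem();
        item.PushBack("event", "login");

        // Topic and source tag the log; the partition key (here a user id) pins all
        // records with the same key to the same logstore shard.
        SlsRecord keyed = new SlsRecord(item, "auth", "web-frontend-01", "user-42");

        // The single-argument constructor uses an empty topic/source and a null
        // partition key, letting SLS balance records across shards.
        SlsRecord unkeyed = new SlsRecord(item);
    }
}
```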
--------------------------------------------------------------------------------
/pom.xml:
--------------------------------------------------------------------------------
<?xml version="1.0" encoding="UTF-8"?>
<project xmlns="http://maven.apache.org/POM/4.0.0"
         xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">

    <parent>
        <groupId>org.apache.flink</groupId>
        <artifactId>flink-parent</artifactId>
        <version>1.10.0</version>
    </parent>

    <properties>
        <flink.version>1.10.0</flink.version>
    </properties>

    <modelVersion>4.0.0</modelVersion>

    <groupId>com.alibaba.flink</groupId>
    <artifactId>aliyun-flink-connectors-parent</artifactId>
    <version>0.1-SNAPSHOT</version>
    <packaging>pom</packaging>

    <licenses>
        <license>
            <name>The Apache Software License, Version 2.0</name>
            <url>http://www.apache.org/licenses/LICENSE-2.0.txt</url>
            <distribution>repo</distribution>
        </license>
    </licenses>

    <modules>
        <module>aliyun-connectors-common</module>
        <module>cloudhbase-connector</module>
        <module>datahub-connector</module>
        <module>sls-connector</module>
        <module>sls-shaded-sdk</module>
    </modules>

</project>

--------------------------------------------------------------------------------
/aliyun-connectors-common/src/main/java/com/alibaba/flink/connectors/common/sts/DecodeUtil.java:
--------------------------------------------------------------------------------
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package com.alibaba.flink.connectors.common.sts;

import org.apache.commons.codec.binary.Base64;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import javax.crypto.Cipher;
import javax.crypto.spec.SecretKeySpec;

/**
 * DecodeUtil.
 */
public class DecodeUtil {
    private static Logger logger = LoggerFactory.getLogger(DecodeUtil.class);

    private static final String ENCODING = "UTF-8";

    private static final String CIPHER_ALGORITHM_ECB = "AES/ECB/PKCS5Padding";

    public static String decrypt(String strToDecrypt, String secret) throws Exception {
        if (strToDecrypt == null) {
            return null;
        }
        try {
            Cipher cipher = Cipher.getInstance(CIPHER_ALGORITHM_ECB);
            final SecretKeySpec secretKey = new SecretKeySpec(secret.getBytes(), "AES");
            cipher.init(Cipher.DECRYPT_MODE, secretKey);
            String decryptedString = new String(cipher.doFinal(Base64.decodeBase64(strToDecrypt)));
            return decryptedString;
        } catch (Exception e) {
            // Pass the exception itself so the stack trace is logged, instead of
            // the meaningless StackTraceElement[] array reference.
            logger.error("decode " + strToDecrypt + " failed", e);
            throw e;
        }
    }
}
--------------------------------------------------------------------------------
/aliyun-connectors-common/src/main/java/com/alibaba/flink/connectors/common/sts/StsParamValidateUtil.java:
--------------------------------------------------------------------------------
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package com.alibaba.flink.connectors.common.sts;

import org.apache.flink.configuration.Configuration;

import com.alibaba.flink.connectors.common.conf.BlinkOptions;
import com.alibaba.flink.connectors.common.exception.NotEnoughParamsException;
import com.alibaba.flink.connectors.common.util.BlinkStringUtil;

/**
 * StsParamValidateUtil.
 */
public class StsParamValidateUtil {

    public static String stsValidate(String accessId, String accessKey, String localErrorMsg, Configuration properties) {
        String stsRoleArn = properties.getString(BlinkOptions.STS.STS_ROLE_ARN);
        String stsAccessId = properties.getString(BlinkOptions.STS.STS_ACCESS_ID);
        String stsAccessKey = properties.getString(BlinkOptions.STS.STS_ACCESS_KEY);
        String stsUid = properties.getString(BlinkOptions.STS.STS_UID);
        if (BlinkStringUtil.isNotEmpty(accessId, accessKey) || BlinkStringUtil.isNotEmpty(stsRoleArn, stsAccessId, stsAccessKey, stsUid)) {
            return null;
        } else if (properties.containsKey(BlinkOptions.STS.STS_ROLE_ARN.key())) {
            // Use the %s placeholder: String.format does not understand the
            // MessageFormat-style {0} that was used here before.
            throw new NotEnoughParamsException(String.format("Lack necessary arguments: %s", BlinkOptions.STS.STS_PARAMS_HELP_MSG));
        } else {
            throw new NotEnoughParamsException(localErrorMsg);
        }
    }
}
--------------------------------------------------------------------------------
/sls-connector/pom.xml:
--------------------------------------------------------------------------------
<?xml version="1.0" encoding="UTF-8"?>
<project xmlns="http://maven.apache.org/POM/4.0.0"
         xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
    <modelVersion>4.0.0</modelVersion>

    <parent>
        <groupId>com.alibaba.flink</groupId>
        <artifactId>aliyun-flink-connectors-parent</artifactId>
        <version>0.1-SNAPSHOT</version>
        <relativePath>..</relativePath>
    </parent>

    <artifactId>sls-connector</artifactId>

    <packaging>jar</packaging>

    <dependencies>
        <dependency>
            <groupId>com.alibaba.flink</groupId>
            <artifactId>sls-shaded-sdk</artifactId>
            <version>${project.version}</version>
        </dependency>
        <dependency>
            <groupId>org.apache.flink</groupId>
            <artifactId>flink-core</artifactId>
            <version>${flink.version}</version>
            <scope>provided</scope>
        </dependency>
        <dependency>
            <groupId>org.apache.flink</groupId>
            <artifactId>flink-streaming-java_2.11</artifactId>
            <version>${flink.version}</version>
            <scope>provided</scope>
        </dependency>
        <dependency>
            <groupId>com.alibaba.flink</groupId>
            <artifactId>aliyun-connectors-common</artifactId>
            <version>${project.version}</version>
            <scope>compile</scope>
        </dependency>
    </dependencies>
</project>

--------------------------------------------------------------------------------
/aliyun-connectors-common/src/main/java/com/alibaba/flink/connectors/common/exception/ConnectorException.java:
--------------------------------------------------------------------------------
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package com.alibaba.flink.connectors.common.exception;

/**
 * ConnectorException.
 */
public class ConnectorException extends RuntimeException {
    private static final long serialVersionUID = 6632569372505798553L;
    private int errorCode;
    private String errorMessage;

    public ConnectorException(String errorMessage) {
        super(errorMessage);
        this.errorMessage = errorMessage;
    }

    public ConnectorException(String errorMessage, Throwable cause) {
        super(errorMessage, cause);
        this.errorMessage = errorMessage;
    }

    public ConnectorException(int errorCode, String errorMessage) {
        super(errorCode + "\n\n" + errorMessage);
        this.errorCode = errorCode;
        this.errorMessage = errorMessage;
    }

    public ConnectorException(int errorCode, String errorMessage, Throwable cause) {
        super(errorCode + "\n\n" + errorMessage, cause);
        this.errorCode = errorCode;
        this.errorMessage = errorMessage;
    }

    public int getErrorCode() {
        return this.errorCode;
    }

    public String getErrorMessage() {
        return this.errorMessage;
    }

    public void setErrorCode(int errorCode) {
        this.errorCode = errorCode;
    }

    public void setErrorMessage(String errorMessage) {
        this.errorMessage = errorMessage;
    }
}
--------------------------------------------------------------------------------
/tools/maven/suppressions.xml:
--------------------------------------------------------------------------------
--------------------------------------------------------------------------------
/aliyun-connectors-common/src/main/java/com/alibaba/flink/connectors/common/util/ConnectionPool.java:
--------------------------------------------------------------------------------
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package com.alibaba.flink.connectors.common.util;

import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;

/**
 * A managed connection pool. The {@code ConnectionPool} can be used as a static field
 * to be shared across all threads.
 *
 * @param <T> the pooled connection, such as {@code HTablePool} or {@code DruidDataSource}
 */
public class ConnectionPool<T> {

    private final Map<String, T> pools = new ConcurrentHashMap<>();
    private final Map<String, Integer> referenceCounts = new ConcurrentHashMap<>();

    public synchronized boolean contains(String dataSourceName) {
        return pools.containsKey(dataSourceName);
    }

    public synchronized T get(String dataSourceName) {
        // Callers must check contains() first: getting an absent data source
        // would throw a NullPointerException on the reference count below.
        referenceCounts.put(dataSourceName, referenceCounts.get(dataSourceName) + 1);
        return pools.get(dataSourceName);
    }

    public synchronized T put(String dataSourceName, T dataSource) {
        referenceCounts.put(dataSourceName, 1);
        return pools.put(dataSourceName, dataSource);
    }

    public synchronized boolean remove(String dataSourceName) {
        Integer count = referenceCounts.get(dataSourceName);
        if (count == null) {
            // no connection existed, shouldn't close the pooled connection
            return false;
        } else if (count == 1) {
            referenceCounts.remove(dataSourceName);
            pools.remove(dataSourceName);
            // should close the pooled connection
            return true;
        } else {
            referenceCounts.put(dataSourceName, count - 1);
            // other thread is still using the connection,
            // shouldn't close the pooled connection
            return false;
        }
    }

    public synchronized int size() {
        return pools.size();
    }
}
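A usage sketch of the reference-counted pattern the pool expects; the `MyClient` type is a placeholder for whatever heavyweight client is being shared:

```java
import com.alibaba.flink.connectors.common.util.ConnectionPool;

public class PooledClientHolder {

    /** Placeholder for a heavyweight shared client. */
    static class MyClient {
        MyClient(String name) {}
        void close() {}
    }

    // One pool shared by every task in this JVM.
    private static final ConnectionPool<MyClient> POOL = new ConnectionPool<>();

    // contains()/get()/put() are individually synchronized, so this
    // check-then-act sequence needs its own lock to stay atomic.
    public static synchronized MyClient acquire(String name) {
        if (POOL.contains(name)) {
            return POOL.get(name);       // bumps the reference count
        }
        MyClient client = new MyClient(name);
        POOL.put(name, client);          // stored with a reference count of 1
        return client;
    }

    public static synchronized void release(String name, MyClient client) {
        // remove() returns true only when the last reference is gone,
        // which is the signal that closing the shared client is safe.
        if (POOL.remove(name)) {
            client.close();
        }
    }
}
```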
18 |  */
19 | public class DatahubRowRecordResolver implements DatahubRecordResolver<Row> {
20 |     private final RowTypeInfo flinkRowTypeInfo;
21 |     private final String project;
22 |     private final String topic;
23 |     private final String accessId;
24 |     private final String accessKey;
25 |     private final String endpoint;
26 | 
27 |     private transient RecordSchema recordSchema;
28 | 
29 |     public DatahubRowRecordResolver(
30 |             RowTypeInfo flinkRowTypeInfo,
31 |             String project,
32 |             String topic,
33 |             String accessId,
34 |             String accessKey,
35 |             String endpoint) {
36 |         this.flinkRowTypeInfo = flinkRowTypeInfo;
37 |         this.project = project;
38 |         this.topic = topic;
39 |         this.accessId = accessId;
40 |         this.accessKey = accessKey;
41 |         this.endpoint = endpoint;
42 |     }
43 | 
44 |     @Override
45 |     public void open() {
46 |         DatahubClient client = new DatahubClientProvider(endpoint, accessId, accessKey).getClient();
47 |         recordSchema = client.getTopic(project, topic).getRecordSchema();
48 | 
49 |         checkArgument(recordSchema.getFields().size() == flinkRowTypeInfo.getArity());
50 |     }
51 | 
52 |     @Override
53 |     public RecordEntry getRecordEntry(Row row) {
54 |         RecordEntry record = new RecordEntry();
55 |         TupleRecordData recordData = new TupleRecordData(recordSchema);
56 | 
57 |         for (int i = 0; i < recordSchema.getFields().size(); i++) {
58 |             Field column = recordSchema.getField(i);
59 |             Object columnData = row.getField(i);
60 |             switch (column.getType()) {
61 |                 case BIGINT:
62 |                 case DECIMAL:
63 |                 case BOOLEAN:
64 |                 case DOUBLE:
65 |                 case TIMESTAMP:
66 |                 case STRING:
67 |                     recordData.setField(i, columnData);
68 |                     break;
69 |                 default:
70 |                     throw new RuntimeException(
71 |                             String.format("DatahubRowRecordResolver doesn't support type '%s' yet", columnData.getClass().getName()));
72 |             }
73 |         }
74 |         record.setRecordData(recordData);
75 |         return record;
76 |     }
77 | }
78 | 
--------------------------------------------------------------------------------
/aliyun-connectors-common/src/main/java/com/alibaba/flink/connectors/common/sink/TupleRichOutputFormat.java:
--------------------------------------------------------------------------------
1 | /*
2 |  * Licensed to the Apache Software Foundation (ASF) under one
3 |  * or more contributor license agreements. See the NOTICE file
4 |  * distributed with this work for additional information
5 |  * regarding copyright ownership. The ASF licenses this file
6 |  * to you under the Apache License, Version 2.0 (the
7 |  * "License"); you may not use this file except in compliance
8 |  * with the License. You may obtain a copy of the License at
9 |  *
10 |  *     http://www.apache.org/licenses/LICENSE-2.0
11 |  *
12 |  * Unless required by applicable law or agreed to in writing, software
13 |  * distributed under the License is distributed on an "AS IS" BASIS,
14 |  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
15 |  * See the License for the specific language governing permissions and
16 |  * limitations under the License.
17 |  */
18 | 
19 | package com.alibaba.flink.connectors.common.sink;
20 | 
21 | import org.apache.flink.api.common.io.RichOutputFormat;
22 | import org.apache.flink.api.java.tuple.Tuple2;
23 | import org.apache.flink.metrics.Counter;
24 | import org.apache.flink.metrics.Meter;
25 | import org.apache.flink.types.Row;
26 | 
27 | import com.alibaba.flink.connectors.common.MetricUtils;
28 | 
29 | import java.io.IOException;
30 | 
31 | /**
32 |  * All OutputFormats should extend TupleRichOutputFormat to handle add and delete messages. For
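A hedged usage sketch for the `DatahubRowRecordResolver` just listed. The endpoint and credentials are placeholders; `open()` fetches the topic's `RecordSchema` and asserts that its field count matches the row arity, after which matching rows can be converted:

```java
import org.apache.flink.api.common.typeinfo.Types;
import org.apache.flink.api.java.typeutils.RowTypeInfo;
import org.apache.flink.types.Row;

import com.alibaba.flink.connectors.datahub.table.DatahubRowRecordResolver;
import com.aliyun.datahub.client.model.RecordEntry;

public class ResolverUsage {
    public static void main(String[] args) {
        // Two fields, matching a hypothetical topic schema of (STRING, BIGINT).
        RowTypeInfo rowType = new RowTypeInfo(Types.STRING, Types.LONG);
        DatahubRowRecordResolver resolver = new DatahubRowRecordResolver(
                rowType, "myProject", "myTopic", "<accessId>", "<accessKey>",
                "https://dh-cn-hangzhou.aliyuncs.com"); // placeholder endpoint
        resolver.open(); // loads the RecordSchema and checks arity == 2
        RecordEntry entry = resolver.getRecordEntry(Row.of("hello", 42L));
    }
}
```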
For 33 | * example, like the RdsOutputFormat. 34 | */ 35 | public abstract class TupleRichOutputFormat extends RichOutputFormat> { 36 | private Meter sinkInTps; 37 | private Counter delCounter; 38 | 39 | @Override 40 | public void open(int taskNumber, int numTasks) throws IOException { 41 | sinkInTps = MetricUtils.registerSinkInTps(getRuntimeContext()); 42 | delCounter = getRuntimeContext().getMetricGroup().addGroup(MetricUtils.METRIC_GROUP_SINK) 43 | .counter("del"); 44 | } 45 | 46 | @Override 47 | public void writeRecord(Tuple2 cRow) throws IOException { 48 | sinkInTps.markEvent(); 49 | if (cRow.f0) { 50 | // Use Row.copy() method to avoid bug when setting object reuse mode. 51 | writeAddRecord(Row.copy(cRow.f1)); 52 | } else { 53 | writeDeleteRecord(Row.copy(cRow.f1)); 54 | delCounter.inc(); 55 | } 56 | } 57 | 58 | /** 59 | * Write Add record. 60 | * 61 | * @param row 62 | * @throws IOException 63 | */ 64 | public abstract void writeAddRecord(Row row) throws IOException; 65 | 66 | /** 67 | * Write Delete record. 68 | * 69 | * @param row 70 | * @throws IOException 71 | */ 72 | public abstract void writeDeleteRecord(Row row) throws IOException; 73 | 74 | /** 75 | * Get name of output format. 76 | * @return name 77 | */ 78 | public abstract String getName(); 79 | } 80 | -------------------------------------------------------------------------------- /aliyun-connectors-common/src/main/java/com/alibaba/flink/connectors/common/source/AbstractLocatableParallelSource.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Licensed to the Apache Software Foundation (ASF) under one 3 | * or more contributor license agreements. See the NOTICE file 4 | * distributed with this work for additional information 5 | * regarding copyright ownership. The ASF licenses this file 6 | * to you under the Apache License, Version 2.0 (the 7 | * "License"); you may not use this file except in compliance 8 | * with the License. You may obtain a copy of the License at 9 | * 10 | * http://www.apache.org/licenses/LICENSE-2.0 11 | * 12 | * Unless required by applicable law or agreed to in writing, software 13 | * distributed under the License is distributed on an "AS IS" BASIS, 14 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 15 | * See the License for the specific language governing permissions and 16 | * limitations under the License. 17 | */ 18 | 19 | package com.alibaba.flink.connectors.common.source; 20 | 21 | import org.apache.flink.api.common.io.InputFormat; 22 | import org.apache.flink.api.common.io.LocatableInputSplitAssigner; 23 | import org.apache.flink.core.io.InputSplit; 24 | import org.apache.flink.core.io.InputSplitAssigner; 25 | import org.apache.flink.core.io.LocatableInputSplit; 26 | 27 | import java.io.IOException; 28 | import java.io.Serializable; 29 | 30 | /** 31 | * Base class for locatable source, which will assign input split according to location of input splits. 32 | * @param 33 | * @param 34 | */ 35 | public abstract class AbstractLocatableParallelSource extends AbstractParallelSource { 36 | @Override 37 | public abstract LocatableInputSplit[] createInputSplitsForCurrentSubTask(int numberOfParallelSubTasks, int indexOfThisSubTask) 38 | throws IOException; 39 | 40 | @Override 41 | public InputFormat getFormat() { 42 | return new LocatableParallelSourceInputFormatWrapper<>(this); 43 | } 44 | 45 | /** 46 | * LocatableParallelSourceInputFormatWrapper. 
47 |      * @param <T>
48 |      */
49 |     protected static class LocatableParallelSourceInputFormatWrapper<T> extends ParallelSourceInputFormatWrapper<T> {
50 | 
51 |         public LocatableParallelSourceInputFormatWrapper(AbstractParallelSource<T, ?> source) {
52 |             super(source);
53 |         }
54 | 
55 |         @Override
56 |         public InputSplitAssigner getInputSplitAssigner(InputSplit[] inputSplits) {
57 |             LocatableInputSplit[] locatableInputSplits = new LocatableInputSplit[inputSplits.length];
58 |             for (int i = 0; i < inputSplits.length; i++) {
59 |                 locatableInputSplits[i] = (LocatableInputSplit) inputSplits[i];
60 |             }
61 |             if (source.isParallelReadDisabled()) {
62 |                 return new LocatableInputSplitAssigner(locatableInputSplits);
63 |             }
64 |             return super.getInputSplitAssigner(inputSplits);
65 |         }
66 |     }
67 | }
68 | 
--------------------------------------------------------------------------------
/datahub-connector/src/main/java/com/alibaba/flink/connectors/datahub/datastream/util/DatahubClientProvider.java:
--------------------------------------------------------------------------------
1 | /*
2 |  * Licensed to the Apache Software Foundation (ASF) under one or more
3 |  * contributor license agreements. See the NOTICE file distributed with
4 |  * this work for additional information regarding copyright ownership.
5 |  * The ASF licenses this file to You under the Apache License, Version 2.0
6 |  * (the "License"); you may not use this file except in compliance with
7 |  * the License. You may obtain a copy of the License at
8 |  *
9 |  *     http://www.apache.org/licenses/LICENSE-2.0
10 |  *
11 |  * Unless required by applicable law or agreed to in writing, software
12 |  * distributed under the License is distributed on an "AS IS" BASIS,
13 |  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 |  * See the License for the specific language governing permissions and
15 |  * limitations under the License.
16 |  */
17 | 
18 | package com.alibaba.flink.connectors.datahub.datastream.util;
19 | 
20 | import org.apache.flink.configuration.Configuration;
21 | 
22 | import com.alibaba.flink.connectors.common.sts.AbstractClientProvider;
23 | import com.aliyun.datahub.client.DatahubClient;
24 | import com.aliyun.datahub.client.DatahubClientBuilder;
25 | import com.aliyun.datahub.client.auth.AliyunAccount;
26 | import com.aliyun.datahub.client.common.DatahubConfig;
27 | import org.slf4j.Logger;
28 | import org.slf4j.LoggerFactory;
29 | 
30 | /**
31 |  * DatahubClientProvider.
32 |  */
33 | public class DatahubClientProvider extends AbstractClientProvider<DatahubClient> {
34 |     private static Logger logger = LoggerFactory.getLogger(DatahubClientProvider.class);
35 |     private String endpoint;
36 |     private boolean enablePb = false;
37 | 
38 |     public DatahubClientProvider(String endpoint, String accessId, String accessKey) {
39 |         super(accessId, accessKey);
40 |         this.endpoint = endpoint;
41 |     }
42 | 
43 |     public DatahubClientProvider(String endpoint,
44 |                                  Configuration properties) {
45 |         super(properties);
46 |         this.endpoint = endpoint;
47 |     }
48 | 
49 |     @Override
50 |     protected DatahubClient produceNormalClient(String accessId, String accessKey) {
51 |         AliyunAccount account = new AliyunAccount(accessId, accessKey);
52 |         DatahubConfig conf = new DatahubConfig(endpoint, account, enablePb);
53 |         DatahubClient datahubClient = DatahubClientBuilder.newBuilder().setDatahubConfig(conf).build();
54 |         datahubClient.setUserAgent("blink-normal");
55 |         return datahubClient;
56 |     }
57 | 
58 |     @Override
59 |     protected DatahubClient produceStsClient(String accessId, String accessKey, String securityToken) {
60 |         AliyunAccount account = new AliyunAccount(accessId, accessKey, securityToken);
61 |         DatahubConfig conf = new DatahubConfig(endpoint, account, enablePb);
62 |         DatahubClient datahubClient = DatahubClientBuilder.newBuilder().setDatahubConfig(conf).build();
63 |         datahubClient.setUserAgent("blink-sts");
64 |         return datahubClient;
65 |     }
66 | 
67 |     @Override
68 |     protected void closeClient() { }
69 | }
70 | 
--------------------------------------------------------------------------------
/aliyun-connectors-common/src/main/java/com/alibaba/flink/connectors/common/reader/RecordReader.java:
--------------------------------------------------------------------------------
1 | /*
2 |  * Licensed to the Apache Software Foundation (ASF) under one
3 |  * or more contributor license agreements. See the NOTICE file
4 |  * distributed with this work for additional information
5 |  * regarding copyright ownership. The ASF licenses this file
6 |  * to you under the Apache License, Version 2.0 (the
7 |  * "License"); you may not use this file except in compliance
8 |  * with the License. You may obtain a copy of the License at
9 |  *
10 |  *     http://www.apache.org/licenses/LICENSE-2.0
11 |  *
12 |  * Unless required by applicable law or agreed to in writing, software
13 |  * distributed under the License is distributed on an "AS IS" BASIS,
14 |  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
15 |  * See the License for the specific language governing permissions and
16 |  * limitations under the License.
17 |  */
18 | 
19 | package com.alibaba.flink.connectors.common.reader;
20 | 
21 | import org.apache.flink.api.common.functions.RuntimeContext;
22 | import org.apache.flink.core.io.InputSplit;
23 | 
24 | import com.alibaba.flink.connectors.common.source.WatermarkProvider;
25 | 
26 | import java.io.IOException;
27 | import java.io.Serializable;
28 | 
29 | /**
30 |  * Created by sleepy on 16/1/11.
31 |  *
32 |  * @param <OUT> the type parameter
33 |  * @param <CURSOR> the type parameter
34 |  */
35 | public interface RecordReader<OUT, CURSOR> extends WatermarkProvider {
36 |     /**
37 |      * Open.
38 |      *
39 |      * @param split the split
40 |      * @throws IOException the io exception
41 |      */
42 |     void open(InputSplit split, RuntimeContext context) throws IOException;
43 | 
44 |     /**
45 |      * Advance to the next record.
46 |      *
47 |      * @return true if another record is available
48 |      * @throws IOException the io exception
49 |      * @throws InterruptedException the interrupted exception
50 |      */
51 |     boolean next() throws IOException, InterruptedException;
52 | 
53 |     /**
54 |      * Gets message.
55 |      *
56 |      * @return the message
57 |      */
58 |     OUT getMessage();
59 | 
60 |     /**
61 |      * Close.
62 |      *
63 |      * @throws IOException the io exception
64 |      */
65 |     void close() throws IOException;
66 | 
67 |     /**
68 |      * Seek.
69 |      *
70 |      * @param cursor the cursor
71 |      * @throws IOException the io exception
72 |      */
73 |     void seek(CURSOR cursor) throws IOException;
74 | 
75 |     /**
76 |      * Gets progress.
77 |      *
78 |      * @return the progress
79 |      * @throws IOException the io exception
80 |      */
81 |     CURSOR getProgress() throws IOException;
82 | 
83 |     /**
84 |      * Get message delay (milliseconds).
85 |      *
86 |      * @return delay
87 |      */
88 |     long getDelay();
89 | 
90 |     /**
91 |      * Get message delay (milliseconds) measured from when it was fetched.
92 |      *
93 |      * @return delay
94 |      */
95 |     long getFetchedDelay();
96 | 
97 |     /**
98 |      * Check whether the current record is a heartbeat; a heartbeat only updates the watermark.
99 |      *
100 |      * @return true if the current record is a heartbeat
101 |      */
102 |     boolean isHeartBeat();
103 | }
104 | 
--------------------------------------------------------------------------------
/aliyun-connectors-common/src/main/java/com/alibaba/flink/connectors/common/metrics/SumAndCount.java:
--------------------------------------------------------------------------------
1 | /*
2 |  * Licensed to the Apache Software Foundation (ASF) under one
3 |  * or more contributor license agreements. See the NOTICE file
4 |  * distributed with this work for additional information
5 |  * regarding copyright ownership. The ASF licenses this file
6 |  * to you under the Apache License, Version 2.0 (the
7 |  * "License"); you may not use this file except in compliance
8 |  * with the License. You may obtain a copy of the License at
9 |  *
10 |  *     http://www.apache.org/licenses/LICENSE-2.0
11 |  *
12 |  * Unless required by applicable law or agreed to in writing, software
13 |  * distributed under the License is distributed on an "AS IS" BASIS,
14 |  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
15 |  * See the License for the specific language governing permissions and
16 |  * limitations under the License.
17 |  */
18 | 
19 | package com.alibaba.flink.connectors.common.metrics;
20 | 
21 | import org.apache.flink.annotation.VisibleForTesting;
22 | import org.apache.flink.metrics.Counter;
23 | import org.apache.flink.metrics.Gauge;
24 | import org.apache.flink.metrics.MetricGroup;
25 | import org.apache.flink.metrics.SimpleCounter;
26 | 
27 | /**
28 |  * SumAndCount Metric accumulates sum/count and recent avg of reported values.
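To make the `RecordReader` contract above concrete, here is a minimal, hedged in-memory implementation; it is not a reader from this repository, and the `getWatermark()` override assumes that `WatermarkProvider` (not shown in this section) declares exactly that method:

```java
import org.apache.flink.api.common.functions.RuntimeContext;
import org.apache.flink.core.io.InputSplit;

// Sketch only: emits the strings of a fixed array, using an Integer position as the cursor.
public class InMemoryRecordReader implements RecordReader<String, Integer> {
    private final String[] data = {"a", "b", "c"};
    private int position = -1;

    @Override public void open(InputSplit split, RuntimeContext context) { }
    @Override public boolean next() { return ++position < data.length; }
    @Override public String getMessage() { return data[position]; }
    @Override public void close() { }
    @Override public void seek(Integer cursor) { position = cursor - 1; } // resume before the cursor
    @Override public Integer getProgress() { return position + 1; }      // next record to emit
    @Override public long getDelay() { return 0; }
    @Override public long getFetchedDelay() { return 0; }
    @Override public boolean isHeartBeat() { return false; }

    // Assumption: WatermarkProvider exposes a single getWatermark() accessor.
    public long getWatermark() { return Long.MIN_VALUE; }
}
```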
29 |  */
30 | public class SumAndCount {
31 |     private double sum;
32 |     private Counter count;
33 | 
34 |     private static final long AVG_INTERVAL = 10_000L;
35 |     private long currentAvgTime;
36 |     private double currentAvg;
37 |     private long nextIntervalKey;
38 |     private double avgSum;
39 |     private int avgCount;
40 | 
41 |     public SumAndCount(String name, MetricGroup metricGroup) {
42 |         MetricGroup group = metricGroup.addGroup(name);
43 |         count = group.counter("count");
44 |         group.gauge("sum", new Gauge<Double>() {
45 |             @Override
46 |             public Double getValue() {
47 |                 return sum;
48 |             }
49 |         });
50 |         group.gauge("avg", new Gauge<Double>() {
51 |             @Override
52 |             public Double getValue() {
53 |                 if (System.currentTimeMillis() - currentAvgTime > AVG_INTERVAL) {
54 |                     return 0.0;
55 |                 }
56 |                 return currentAvg;
57 |             }
58 |         });
59 |     }
60 | 
61 |     /**
62 |      * Used only for testing purposes. Don't use in production!
63 |      */
64 |     @VisibleForTesting
65 |     public SumAndCount(String name) {
66 |         sum = 0;
67 |         count = new SimpleCounter();
68 |     }
69 | 
70 |     public void update(long value) {
71 |         update(1, value);
72 |     }
73 | 
74 |     public void update(long countUpdated, long value) {
75 |         count.inc(countUpdated);
76 |         sum += value;
77 | 
78 |         long now = System.currentTimeMillis();
79 |         if (now / AVG_INTERVAL > nextIntervalKey) {
80 |             nextIntervalKey = now / AVG_INTERVAL;
81 |             currentAvgTime = nextIntervalKey * AVG_INTERVAL;
82 |             currentAvg = avgCount == 0 ? 0 : avgSum / avgCount;
83 |             avgCount = 0;
84 |             avgSum = 0;
85 |         }
86 |         avgCount++;
87 |         avgSum += value;
88 |     }
89 | 
90 |     public double getSum() {
91 |         return sum;
92 |     }
93 | 
94 |     public Counter getCounter() {
95 |         return count;
96 |     }
97 | }
98 | 
--------------------------------------------------------------------------------
/datahub-connector/src/main/java/com/alibaba/flink/connectors/datahub/table/DatahubDescriptorValidator.java:
--------------------------------------------------------------------------------
1 | /*
2 |  * Licensed to the Apache Software Foundation (ASF) under one
3 |  * or more contributor license agreements. See the NOTICE file
4 |  * distributed with this work for additional information
5 |  * regarding copyright ownership. The ASF licenses this file
6 |  * to you under the Apache License, Version 2.0 (the
7 |  * "License"); you may not use this file except in compliance
8 |  * with the License. You may obtain a copy of the License at
9 |  *
10 |  *     http://www.apache.org/licenses/LICENSE-2.0
11 |  *
12 |  * Unless required by applicable law or agreed to in writing, software
13 |  * distributed under the License is distributed on an "AS IS" BASIS,
14 |  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
15 |  * See the License for the specific language governing permissions and
16 |  * limitations under the License.
17 |  */
18 | 
19 | package com.alibaba.flink.connectors.datahub.table;
20 | 
21 | import org.apache.flink.table.descriptors.ConnectorDescriptorValidator;
22 | import org.apache.flink.table.descriptors.DescriptorProperties;
23 | 
24 | /**
25 |  * Descriptor validator for Datahub.
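A short usage sketch for the `SumAndCount` metric listed above. The group name `writeLatency` is a placeholder; the comment on the `avg` gauge restates the windowing behavior visible in the constructor:

```java
import org.apache.flink.metrics.MetricGroup;

// Sketch: registers "writeLatency.count", "writeLatency.sum" and "writeLatency.avg".
public class SumAndCountUsage {
    public static void trackOneWrite(MetricGroup metricGroup) {
        SumAndCount latency = new SumAndCount("writeLatency", metricGroup);
        long start = System.currentTimeMillis();
        // ... perform one write against the external system ...
        latency.update(System.currentTimeMillis() - start);
        // "avg" is recomputed per 10-second window (AVG_INTERVAL) and
        // reads 0.0 once the most recent window has gone stale.
    }
}
```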
26 |  */
27 | public class DatahubDescriptorValidator extends ConnectorDescriptorValidator {
28 |     public static final String CONNECTOR_TYPE_VALUE_DATAHUB = "datahub";
29 | 
30 |     public static final String CONNECTOR_PROJECT = "connector.project";
31 |     public static final String CONNECTOR_TOPIC = "connector.topic";
32 |     public static final String CONNECTOR_ACCESS_ID = "connector.access_id";
33 |     public static final String CONNECTOR_ACCESS_KEY = "connector.access_key";
34 |     public static final String CONNECTOR_ENDPOINT = "connector.endpoint";
35 | 
36 |     public static final String CONNECTOR_BUFFER_SIZE = "connector.buffer_size";
37 |     public static final String CONNECTOR_BATCH_SIZE = "connector.batch_size";
38 |     public static final String CONNECTOR_BATCH_WRITE_TIMEOUT_IN_MILLS = "connector.batch_write_timeout_in_mills";
39 |     public static final String CONNECTOR_RETRY_TIMEOUT_IN_MILLS = "connector.retry_timeout_in_mills";
40 |     public static final String CONNECTOR_MAX_RETRY_TIMES = "connector.max_retry_times";
41 | 
42 |     @Override
43 |     public void validate(DescriptorProperties properties) {
44 |         super.validate(properties);
45 |         properties.validateValue(CONNECTOR_TYPE, getConnectorTypeValue(), false);
46 |         properties.validateString(CONNECTOR_PROJECT, false, 1);
47 |         properties.validateString(CONNECTOR_TOPIC, false, 1);
48 |         properties.validateString(CONNECTOR_ACCESS_ID, false, 1);
49 |         properties.validateString(CONNECTOR_ACCESS_KEY, false, 1);
50 |         properties.validateString(CONNECTOR_ENDPOINT, false, 1);
51 | 
52 |         properties.validateInt(CONNECTOR_BUFFER_SIZE, true, 1);
53 |         properties.validateInt(CONNECTOR_BATCH_SIZE, true, 1);
54 |         properties.validateLong(CONNECTOR_BATCH_WRITE_TIMEOUT_IN_MILLS, true, 1);
55 |         properties.validateInt(CONNECTOR_RETRY_TIMEOUT_IN_MILLS, true, 1);
56 |         properties.validateInt(CONNECTOR_MAX_RETRY_TIMES, true, 1);
57 |     }
58 | 
59 |     public String getConnectorTypeValue() {
60 |         return CONNECTOR_TYPE_VALUE_DATAHUB;
61 |     }
62 | }
63 | 
--------------------------------------------------------------------------------
/datahub-connector/src/main/java/com/alibaba/flink/connectors/datahub/datastream/example/DatahubSourceFunctionExample.java:
--------------------------------------------------------------------------------
1 | /*
2 |  * Licensed to the Apache Software Foundation (ASF) under one or more
3 |  * contributor license agreements. See the NOTICE file distributed with
4 |  * this work for additional information regarding copyright ownership.
5 |  * The ASF licenses this file to You under the Apache License, Version 2.0
6 |  * (the "License"); you may not use this file except in compliance with
7 |  * the License. You may obtain a copy of the License at
8 |  *
9 |  *     http://www.apache.org/licenses/LICENSE-2.0
10 |  *
11 |  * Unless required by applicable law or agreed to in writing, software
12 |  * distributed under the License is distributed on an "AS IS" BASIS,
13 |  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 |  * See the License for the specific language governing permissions and
15 |  * limitations under the License.
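A hedged sketch of the minimal property set that passes the `DatahubDescriptorValidator` just listed; all values are placeholders:

```java
import org.apache.flink.table.descriptors.DescriptorProperties;

import java.util.HashMap;
import java.util.Map;

// Sketch: the five required string keys plus connector.type, as validated above.
public class ValidatorUsage {
    public static void main(String[] args) {
        Map<String, String> props = new HashMap<>();
        props.put("connector.type", "datahub");
        props.put("connector.project", "myProject");
        props.put("connector.topic", "myTopic");
        props.put("connector.access_id", "<accessId>");
        props.put("connector.access_key", "<accessKey>");
        props.put("connector.endpoint", "https://dh-cn-hangzhou.aliyuncs.com");

        DescriptorProperties descriptorProperties = new DescriptorProperties(true);
        descriptorProperties.putProperties(props);
        new DatahubDescriptorValidator().validate(descriptorProperties); // throws on a missing key
    }
}
```

The buffer/batch/retry keys are optional; when present they must be positive, per the `validateInt`/`validateLong` calls above.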
16 |  */
17 | 
18 | package com.alibaba.flink.connectors.datahub.datastream.example;
19 | 
20 | import org.apache.flink.api.common.functions.FlatMapFunction;
21 | import org.apache.flink.api.common.typeinfo.TypeHint;
22 | import org.apache.flink.api.java.tuple.Tuple2;
23 | import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
24 | 
25 | import com.alibaba.flink.connectors.datahub.datastream.source.DatahubSourceFunction;
26 | import com.aliyun.datahub.client.model.RecordEntry;
27 | import com.aliyun.datahub.client.model.TupleRecordData;
28 | 
29 | import java.io.Serializable;
30 | import java.util.List;
31 | 
32 | /**
33 |  * Example to show how to use DatahubSourceFunction and RecordConverter.
34 |  */
35 | public class DatahubSourceFunctionExample implements Serializable {
36 |     private String endPoint = "";
37 |     private String projectName = "";
38 |     private String topicName = "";
39 |     private String accessId = "";
40 |     private String accessKey = "";
41 | 
42 |     public void runExample() throws Exception {
43 |         StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
44 |         env.setParallelism(1);
45 |         DatahubSourceFunction datahubSource =
46 |                 new DatahubSourceFunction(endPoint, projectName, topicName, accessId, accessKey, 0,
47 |                         Long.MAX_VALUE, 1, 1, 1);
48 |         env.addSource(datahubSource).flatMap(
49 |                 (FlatMapFunction<List<RecordEntry>, Tuple2<String, Long>>) (recordEntries, collector) -> {
50 |                     for (RecordEntry recordEntry : recordEntries) {
51 |                         collector.collect(getStringLongTuple2(recordEntry));
52 |                     }
53 |                 }).returns(new TypeHint<Tuple2<String, Long>>() {}).print();
54 |         env.execute();
55 |     }
56 | 
57 |     private Tuple2<String, Long> getStringLongTuple2(RecordEntry recordEntry) {
58 |         Tuple2<String, Long> tuple2 = new Tuple2<>();
59 |         TupleRecordData recordData = (TupleRecordData) (recordEntry.getRecordData());
60 |         tuple2.f0 = (String) recordData.getField(0);
61 |         tuple2.f1 = (Long) recordData.getField(1);
62 |         return tuple2;
63 |     }
64 | 
65 |     public static void main(String[] args) throws Exception {
66 |         DatahubSourceFunctionExample sourceFunctionExample = new DatahubSourceFunctionExample();
67 |         sourceFunctionExample.runExample();
68 |     }
69 | }
70 | 
--------------------------------------------------------------------------------
/aliyun-connectors-common/src/main/java/com/alibaba/flink/connectors/common/conf/BlinkOptions.java:
--------------------------------------------------------------------------------
1 | /*
2 |  * Licensed to the Apache Software Foundation (ASF) under one
3 |  * or more contributor license agreements. See the NOTICE file
4 |  * distributed with this work for additional information
5 |  * regarding copyright ownership. The ASF licenses this file
6 |  * to you under the Apache License, Version 2.0 (the
7 |  * "License"); you may not use this file except in compliance
8 |  * with the License. You may obtain a copy of the License at
9 |  *
10 |  *     http://www.apache.org/licenses/LICENSE-2.0
11 |  *
12 |  * Unless required by applicable law or agreed to in writing, software
13 |  * distributed under the License is distributed on an "AS IS" BASIS,
14 |  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
15 |  * See the License for the specific language governing permissions and
16 |  * limitations under the License.
17 |  */
18 | 
19 | package com.alibaba.flink.connectors.common.conf;
20 | 
21 | import org.apache.flink.configuration.ConfigOption;
22 | 
23 | import java.util.Arrays;
24 | import java.util.List;
25 | 
26 | import static org.apache.flink.configuration.ConfigOptions.key;
27 | 
28 | /**
29 |  * BlinkOptions.
30 |  */
31 | public class BlinkOptions {
32 |     /**
33 |      * Aliyun STS options.
34 |      */
35 |     public static class STS {
36 |         // STS endpoint key, used for validation by all the connectors.
37 |         public static final String INNER_STS_ENDPOINT = "__inner__blink_sts_endpoints__";
38 | 
39 |         public static final ConfigOption<String> STS_REGION_ID = key("stsRegionId".toLowerCase())
40 |                 .defaultValue("cn-shanghai");
41 | 
42 |         /** Current STS API version. */
43 |         public static final ConfigOption<String> STS_API_VERSION = key("stsApiVersion".toLowerCase())
44 |                 .defaultValue("2015-04-01");
45 | 
46 |         public static final ConfigOption<String> STS_ACCESS_ID = key("stsAccessId".toLowerCase())
47 |                 .noDefaultValue();
48 | 
49 |         public static final ConfigOption<String> STS_ACCESS_KEY = key("stsAccesskey".toLowerCase())
50 |                 .noDefaultValue();
51 | 
52 |         public static final ConfigOption<String> STS_ROLE_ARN = key("roleArn".toLowerCase())
53 |                 .noDefaultValue();
54 | 
55 |         public static final ConfigOption<String> STS_UID = key("stsUid".toLowerCase())
56 |                 .noDefaultValue();
57 |         public static final ConfigOption<String> BRS_ENDPOINT = key("brsEndPoint".toLowerCase())
58 |                 .noDefaultValue();
59 | 
60 |         /** Defaults to 86400 seconds. */
61 |         public static final ConfigOption<Integer> STS_ROLEARN_UPDATE_SECONDS = key("stsUpdateSeconds".toLowerCase())
62 |                 .defaultValue(86400);
63 | 
64 |         public static final String STS_PARAMS_HELP_MSG = String.format(
65 |                 "required params:%s,%s,%s,%s\n\toptional params:%s,%s,%s",
66 |                 STS_ACCESS_ID,
67 |                 STS_ACCESS_KEY,
68 |                 STS_ROLE_ARN,
69 |                 STS_UID,
70 |                 STS_REGION_ID,
71 |                 STS_API_VERSION,
72 |                 STS_ROLEARN_UPDATE_SECONDS);
73 | 
74 |         public static final List<String> SUPPORTED_KEYS = Arrays.asList(
75 |                 STS_REGION_ID.key(),
76 |                 STS_API_VERSION.key(),
77 |                 STS_ACCESS_ID.key(),
78 |                 STS_ACCESS_KEY.key(),
79 |                 STS_ROLE_ARN.key(),
80 |                 STS_UID.key(),
81 |                 BRS_ENDPOINT.key(),
82 |                 STS_ROLEARN_UPDATE_SECONDS.key(),
83 |                 INNER_STS_ENDPOINT);
84 |     }
85 | }
86 | 
--------------------------------------------------------------------------------
/sls-connector/src/main/java/com/alibaba/flink/connectors/sls/datastream/source/SlsClientProvider.java:
--------------------------------------------------------------------------------
1 | /*
2 |  * Licensed to the Apache Software Foundation (ASF) under one
3 |  * or more contributor license agreements. See the NOTICE file
4 |  * distributed with this work for additional information
5 |  * regarding copyright ownership. The ASF licenses this file
6 |  * to you under the Apache License, Version 2.0 (the
7 |  * "License"); you may not use this file except in compliance
8 |  * with the License. You may obtain a copy of the License at
9 |  *
10 |  *     http://www.apache.org/licenses/LICENSE-2.0
11 |  *
12 |  * Unless required by applicable law or agreed to in writing, software
13 |  * distributed under the License is distributed on an "AS IS" BASIS,
14 |  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
15 |  * See the License for the specific language governing permissions and
16 |  * limitations under the License.
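A hedged sketch of how the STS options above would be populated on a Flink `Configuration` before handing it to a provider such as `SlsClientProvider` or `DatahubClientProvider`; all values are placeholders:

```java
import org.apache.flink.configuration.Configuration;

import com.alibaba.flink.connectors.common.conf.BlinkOptions;

// Sketch: fills the required STS keys named in STS_PARAMS_HELP_MSG.
public class StsConfigUsage {
    public static Configuration stsConfig() {
        Configuration conf = new Configuration();
        conf.setString(BlinkOptions.STS.STS_ACCESS_ID, "<encryptedAccessId>");
        conf.setString(BlinkOptions.STS.STS_ACCESS_KEY, "<encryptedAccessKey>");
        conf.setString(BlinkOptions.STS.STS_ROLE_ARN, "acs:ram::123456789:role/example");
        conf.setString(BlinkOptions.STS.STS_UID, "123456789");
        // Optional; defaults are cn-shanghai, 2015-04-01 and 86400 seconds.
        conf.setString(BlinkOptions.STS.STS_REGION_ID, "cn-hangzhou");
        return conf;
    }
}
```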
17 |  */
18 | 
19 | package com.alibaba.flink.connectors.sls.datastream.source;
20 | 
21 | import org.apache.flink.configuration.Configuration;
22 | 
23 | import com.alibaba.flink.connectors.common.sts.AbstractClientProvider;
24 | import com.aliyun.openservices.log.Client;
25 | import com.aliyun.openservices.log.common.Consts;
26 | 
27 | import java.net.InetAddress;
28 | 
29 | /**
30 |  * Sls client factory.
31 |  */
32 | public class SlsClientProvider extends AbstractClientProvider<Client> {
33 | 
34 |     private String endPoint = null;
35 |     private String consumerGroup = null;
36 |     private boolean directMode = false;
37 | 
38 |     public SlsClientProvider(
39 |             String endPoint,
40 |             String accessKeyId,
41 |             String accessKeySecret,
42 |             String consumerGroup,
43 |             boolean directMode) {
44 |         super(accessKeyId, accessKeySecret);
45 |         this.endPoint = endPoint;
46 |         this.consumerGroup = consumerGroup;
47 |         this.directMode = directMode;
48 |         Consts.HTTP_SEND_TIME_OUT = 10 * 1000;
49 |     }
50 | 
51 |     public SlsClientProvider(
52 |             String endPoint,
53 |             Configuration properties,
54 |             String consumerGroup,
55 |             boolean directMode) {
56 |         super(properties);
57 |         this.endPoint = endPoint;
58 |         this.consumerGroup = consumerGroup;
59 |         this.directMode = directMode;
60 |         Consts.HTTP_SEND_TIME_OUT = 10 * 1000;
61 |     }
62 | 
63 |     @Override
64 |     protected void closeClient() {
65 |     }
66 | 
67 |     @Override
68 |     protected Client produceNormalClient(String accessId, String accessKey) {
69 |         Client client = new Client(endPoint, accessId, accessKey);
70 |         if (directMode) {
71 |             client.EnableDirectMode();
72 |         }
73 |         client.setUserAgent("Blink-ak" + "-" + consumerGroup + "-" +
74 |                 getHostName());
75 |         return client;
76 |     }
77 | 
78 |     @Override
79 |     protected Client produceStsClient(String accessId, String accessKey, String securityToken) {
80 |         Client client = new Client(endPoint, accessId, accessKey);
81 |         client.setUserAgent("Blink-sts" + "-" + consumerGroup + "-" +
82 |                 getHostName());
83 |         client.setSecurityToken(securityToken);
84 |         return client;
85 |     }
86 | 
87 |     private String getHostName() {
88 |         String ip = "";
89 |         try {
90 |             InetAddress addr = InetAddress.getLocalHost();
91 |             ip = addr.getHostAddress();
92 |         } catch (Exception e) {
93 |             //ignore
94 |         }
95 |         return String.format("%s", ip);
96 |     }
97 | }
98 | 
--------------------------------------------------------------------------------
/cloudhbase-connector/src/test/java/com/alibaba/flink/connectors/cloudhbase/sink/CloudHBaseSinkFunctionTest.java:
--------------------------------------------------------------------------------
1 | /*
2 |  * Licensed to the Apache Software Foundation (ASF) under one or more
3 |  * contributor license agreements. See the NOTICE file distributed with
4 |  * this work for additional information regarding copyright ownership.
5 |  * The ASF licenses this file to You under the Apache License, Version 2.0
6 |  * (the "License"); you may not use this file except in compliance with
7 |  * the License. You may obtain a copy of the License at
8 |  *
9 |  *     http://www.apache.org/licenses/LICENSE-2.0
10 |  *
11 |  * Unless required by applicable law or agreed to in writing, software
12 |  * distributed under the License is distributed on an "AS IS" BASIS,
13 |  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 |  * See the License for the specific language governing permissions and
15 |  * limitations under the License.
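A short usage sketch for the `SlsClientProvider` just listed. The endpoint and credentials are placeholders, and `getClient()` is assumed to be the accessor inherited from `AbstractClientProvider`, mirroring how `DatahubClientProvider` is used earlier in this repository:

```java
import com.aliyun.openservices.log.Client;

// Sketch only: builds an SLS client without direct mode.
public class SlsClientUsage {
    public static Client create() {
        SlsClientProvider provider = new SlsClientProvider(
                "cn-hangzhou.log.aliyuncs.com",   // placeholder endpoint
                "<accessKeyId>", "<accessKeySecret>",
                "my-consumer-group",              // embedded into the user agent
                false);                           // directMode off
        return provider.getClient();              // assumed inherited accessor
    }
}
```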
16 |  */
17 | 
18 | package com.alibaba.flink.connectors.cloudhbase.sink;
19 | 
20 | import org.apache.hadoop.hbase.client.Mutation;
21 | import org.junit.Test;
22 | 
23 | import static org.junit.Assert.assertEquals;
24 | import static org.junit.Assert.assertNull;
25 | import static org.junit.Assert.assertSame;
26 | import static org.mockito.Mockito.mock;
27 | 
28 | /**
29 |  * Test for {@link CloudHBaseSinkFunction}.
30 |  */
31 | public class CloudHBaseSinkFunctionTest {
32 | 
33 |     @Test
34 |     public void testConstruct() {
35 |         final String zkQuorum = "a,b,c";
36 |         final String zkZNodeParent = "/hbase";
37 |         final String tableName = "testTableName";
38 |         final int maxRetryNumber = 1;
39 |         final int bufferSize = 2;
40 |         final int batchSize = 3;
41 |         final long flushInterval = 4;
42 |         final CloudHBaseRecordResolver<Mutation> recordConverter = mock(CloudHBaseRecordResolver.class);
43 | 
44 |         CloudHBaseSinkFunction<Mutation> sinkFunction1 = new CloudHBaseSinkFunction<>(zkQuorum, tableName, recordConverter);
45 | 
46 |         assertEquals(zkQuorum, sinkFunction1.getZkQuorum());
47 |         assertNull(sinkFunction1.getZkZNodeParent());
48 |         assertEquals(tableName, sinkFunction1.getTableName());
49 |         assertEquals(CloudHBaseSinkFunction.DEFAULT_MAX_RETRY_NUMBER, sinkFunction1.getMaxRetryNumber());
50 |         assertEquals(CloudHBaseSinkFunction.DEFAULT_BATCH_SIZE, sinkFunction1.getBatchSize());
51 |         assertEquals(CloudHBaseSinkFunction.DEFAULT_BUFFER_SIZE, sinkFunction1.getBufferSize());
52 |         assertEquals(CloudHBaseSinkFunction.DEFAULT_FLUSH_INTERVAL, sinkFunction1.getFlushInterval());
53 |         assertSame(recordConverter, sinkFunction1.getCloudHBaserecordResolver());
54 | 
55 |         CloudHBaseSinkFunction<Mutation> sinkFunction2 = new CloudHBaseSinkFunction<>(
56 |                 zkQuorum, zkZNodeParent, tableName, bufferSize, batchSize, flushInterval, maxRetryNumber, recordConverter);
57 | 
58 |         assertEquals(zkQuorum, sinkFunction2.getZkQuorum());
59 |         assertEquals(zkZNodeParent, sinkFunction2.getZkZNodeParent());
60 |         assertEquals(tableName, sinkFunction2.getTableName());
61 |         assertEquals(maxRetryNumber, sinkFunction2.getMaxRetryNumber());
62 |         assertEquals(batchSize, sinkFunction2.getBatchSize());
63 |         assertEquals(bufferSize, sinkFunction2.getBufferSize());
64 |         assertEquals(flushInterval, sinkFunction2.getFlushInterval());
65 |         assertSame(recordConverter, sinkFunction2.getCloudHBaserecordResolver());
66 |     }
67 | }
68 | 
--------------------------------------------------------------------------------
/aliyun-connectors-common/src/main/java/com/alibaba/flink/connectors/common/util/DateUtil.java:
--------------------------------------------------------------------------------
1 | /*
2 |  * Licensed to the Apache Software Foundation (ASF) under one
3 |  * or more contributor license agreements. See the NOTICE file
4 |  * distributed with this work for additional information
5 |  * regarding copyright ownership. The ASF licenses this file
6 |  * to you under the Apache License, Version 2.0 (the
7 |  * "License"); you may not use this file except in compliance
8 |  * with the License. You may obtain a copy of the License at
9 |  *
10 |  *     http://www.apache.org/licenses/LICENSE-2.0
11 |  *
12 |  * Unless required by applicable law or agreed to in writing, software
13 |  * distributed under the License is distributed on an "AS IS" BASIS,
14 |  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
15 |  * See the License for the specific language governing permissions and
16 |  * limitations under the License.
17 |  */
18 | 
19 | package com.alibaba.flink.connectors.common.util;
20 | 
21 | import org.apache.flink.api.java.tuple.Tuple2;
22 | 
23 | import org.apache.commons.lang3.time.FastDateFormat;
24 | 
25 | import javax.annotation.Nonnull;
26 | 
27 | import java.sql.Date;
28 | import java.sql.Timestamp;
29 | import java.text.ParseException;
30 | import java.util.List;
31 | import java.util.TimeZone;
32 | import java.util.concurrent.ConcurrentHashMap;
33 | 
34 | /**
35 |  * DateUtil.
36 |  */
37 | public class DateUtil {
38 |     private static FastDateFormat dfTimeStamp = FastDateFormat.getInstance("yyyy-MM-dd HH:mm:ss");
39 |     private static ConcurrentHashMap<String, FastDateFormat> sdfCache = new ConcurrentHashMap<>();
40 | 
41 |     private static FastDateFormat getDateFormat(String timeZone, String format) {
42 |         String key = String.valueOf(timeZone) + String.valueOf(format);
43 |         if (null == timeZone || timeZone.isEmpty()) {
44 |             return dfTimeStamp;
45 |         }
46 |         if (sdfCache.containsKey(key)) {
47 |             return sdfCache.get(key);
48 |         } else {
49 |             FastDateFormat sdf = FastDateFormat.getInstance(format, TimeZone.getTimeZone(timeZone));
50 |             sdfCache.put(key, sdf);
51 |             return sdf;
52 |         }
53 |     }
54 | 
55 |     public static String timeStamp2String(Timestamp value, String timeZone) {
56 |         String res;
57 |         if (null == timeZone || timeZone.isEmpty()) {
58 |             res = dfTimeStamp.format(value);
59 |             return res;
60 |         } else {
61 |             return timeStamp2String(value, timeZone, "yyyy-MM-dd HH:mm:ss");
62 |         }
63 |     }
64 | 
65 |     public static String timeStamp2String(Timestamp value, String timeZone, @Nonnull String format) {
66 |         FastDateFormat sdf = getDateFormat(timeZone, format);
67 |         return sdf.format(value);
68 |     }
69 | 
70 |     public static String date2String(Date value, String timeZone) {
71 |         return date2String(value, timeZone, "yyyy-MM-dd");
72 |     }
73 | 
74 |     public static String date2String(Date value, String timeZone, @Nonnull String format) {
75 |         FastDateFormat sdf = getDateFormat(timeZone, format);
76 |         return sdf.format(value);
77 |     }
78 | 
79 |     public static Long parseDateString(
80 |             String formatString,
81 |             String dateString,
82 |             String timeZone) throws ParseException {
83 |         FastDateFormat simpleDateFormat = getDateFormat(timeZone, formatString);
84 |         return simpleDateFormat.parse(dateString).getTime();
85 |     }
86 | 
87 |     public static boolean isTimeInRange(List<Tuple2<Long, Long>> rangeList, long time) {
88 |         for (Tuple2<Long, Long> range : rangeList) {
89 |             if (range.f0 <= time && time <= range.f1) {
90 |                 return true;
91 |             }
92 |         }
93 |         return false;
94 |     }
95 | }
96 | 
--------------------------------------------------------------------------------
/aliyun-connectors-common/src/main/java/com/alibaba/flink/connectors/common/sts/StsServiceRequest.java:
--------------------------------------------------------------------------------
1 | /*
2 |  * Licensed to the Apache Software Foundation (ASF) under one or more
3 |  * contributor license agreements. See the NOTICE file distributed with
4 |  * this work for additional information regarding copyright ownership.
5 |  * The ASF licenses this file to You under the Apache License, Version 2.0
6 |  * (the "License"); you may not use this file except in compliance with
7 |  * the License. You may obtain a copy of the License at
8 |  *
9 |  *     http://www.apache.org/licenses/LICENSE-2.0
10 |  *
11 |  * Unless required by applicable law or agreed to in writing, software
12 |  * distributed under the License is distributed on an "AS IS" BASIS,
13 |  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
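A quick usage sketch for the `DateUtil` helpers just listed, using only methods from the listing:

```java
import org.apache.flink.api.java.tuple.Tuple2;

import java.sql.Timestamp;
import java.util.Arrays;

// Sketch: time-zone-aware formatting, parsing and range checks.
public class DateUtilUsage {
    public static void main(String[] args) throws Exception {
        Timestamp ts = new Timestamp(0L); // 1970-01-01T00:00:00Z
        // Prints "1970-01-01 08:00:00" (UTC+8)
        System.out.println(DateUtil.timeStamp2String(ts, "Asia/Shanghai"));

        long parsed = DateUtil.parseDateString("yyyy-MM-dd", "1970-01-02", "UTC"); // 86_400_000
        boolean inRange = DateUtil.isTimeInRange(
                Arrays.asList(Tuple2.of(0L, 172_800_000L)), parsed);               // true
    }
}
```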
14 |  * See the License for the specific language governing permissions and
15 |  * limitations under the License.
16 |  */
17 | 
18 | package com.alibaba.flink.connectors.common.sts;
19 | 
20 | import org.apache.flink.configuration.Configuration;
21 | 
22 | import com.alibaba.flink.connectors.common.conf.BlinkOptions;
23 | import com.aliyuncs.DefaultAcsClient;
24 | import com.aliyuncs.http.MethodType;
25 | import com.aliyuncs.http.X509TrustAll;
26 | import com.aliyuncs.profile.DefaultProfile;
27 | import com.aliyuncs.profile.IClientProfile;
28 | import com.aliyuncs.sts.model.v20150401.AssumeRoleRequest;
29 | import com.aliyuncs.sts.model.v20150401.AssumeRoleResponse;
30 | 
31 | import static com.alibaba.flink.connectors.common.sts.StsConstants.DURATION;
32 | import static com.alibaba.flink.connectors.common.sts.StsConstants.PROTOCOL_TYPE;
33 | 
34 | /**
35 |  * StsServiceRequest.
36 |  */
37 | public class StsServiceRequest {
38 |     private static final String INNER_STS_ENDPOINT = "__inner__blink_sts_endpoints__";
39 | 
40 |     public static AssumeRoleResponse assumeRoleWithServiceIdentity(
41 |             final String streamAccessId, final String streamAccessKey,
42 |             final String roleArn, final String roleSessionName,
43 |             final String assumeRoleFor,
44 |             Configuration properties) throws Exception {
45 |         //decode
46 |         String decodeKey = DecodeUtil.decrypt(streamAccessKey, StsConstants.STS_SECRET_KEY);
47 | 
48 |         String regionId = properties.getString(BlinkOptions.STS.STS_REGION_ID);
49 | 
50 |         // Create an Aliyun ACS client used to issue OpenAPI requests
51 |         IClientProfile profile = DefaultProfile.getProfile(
52 |                 regionId, streamAccessId, decodeKey);
53 |         DefaultAcsClient client = new DefaultAcsClient(profile);
54 | 
55 |         // endPoints format: endPointName#regionId#product#domain,endPointName1#regionId1#product1#domain1
56 |         if (properties.containsKey(INNER_STS_ENDPOINT) && properties.getString(INNER_STS_ENDPOINT, null) != null) {
57 |             String endPoints = properties.toMap().get(INNER_STS_ENDPOINT);
58 |             if (!endPoints.isEmpty()) {
59 |                 String[] endPointItem = endPoints.split(",");
60 |                 for (String item : endPointItem) {
61 |                     String[] partItems = item.split("#");
62 |                     if (null != partItems && partItems.length == 4) {
63 |                         DefaultProfile.addEndpoint(partItems[0], partItems[1], partItems[2], partItems[3]);
64 |                     }
65 |                 }
66 |             }
67 |         }
68 | 
69 |         // Create an AssumeRoleRequest and set the request parameters
70 |         final AssumeRoleRequest request = new AssumeRoleRequest();
71 |         request.setMethod(MethodType.POST);
72 | 
73 |         request.setProtocol(PROTOCOL_TYPE);
74 |         request.setDurationSeconds(DURATION);
75 |         request.setRoleArn(roleArn);
76 |         request.setRoleSessionName(roleSessionName);
77 |         // request.setAssumeRoleFor(assumeRoleFor);
78 |         X509TrustAll.ignoreSSLCertificate();
79 |         // Send the request and get the response
80 |         final AssumeRoleResponse response = client.getAcsResponse(request);
81 | 
82 |         return response;
83 |     }
84 | }
85 | 
--------------------------------------------------------------------------------
/cloudhbase-connector/pom.xml:
--------------------------------------------------------------------------------
1 | <?xml version="1.0" encoding="UTF-8"?>
2 | <!--
3 | Licensed to the Apache Software Foundation (ASF) under one
4 | or more contributor license agreements.  See the NOTICE file
5 | distributed with this work for additional information
6 | regarding copyright ownership.  The ASF licenses this file
7 | to you under the Apache License, Version 2.0 (the
8 | "License"); you may not use this file except in compliance
9 | with the License.  You may obtain a copy of the License at
10 | 
11 |   http://www.apache.org/licenses/LICENSE-2.0
12 | 
13 | Unless required by applicable law or agreed to in writing,
14 | software distributed under the License is distributed on an
15 | "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
16 | KIND, either express or implied.  See the License for the
17 | specific language governing permissions and limitations under the License.
18 | -->
19 | <project xmlns="http://maven.apache.org/POM/4.0.0"
20 |          xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
21 |          xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
22 |     <modelVersion>4.0.0</modelVersion>
23 | 
24 |     <parent>
25 |         <groupId>com.alibaba.flink</groupId>
26 |         <artifactId>aliyun-flink-connectors-parent</artifactId>
27 |         <version>0.1-SNAPSHOT</version>
28 |         <relativePath>..</relativePath>
29 |     </parent>
30 | 
31 |     <artifactId>cloudhbase-connector</artifactId>
32 | 
33 |     <dependencies>
34 |         <dependency>
35 |             <groupId>com.alibaba.flink</groupId>
36 |             <artifactId>aliyun-connectors-common</artifactId>
37 |             <version>${project.version}</version>
38 |         </dependency>
39 |         <dependency>
40 |             <groupId>org.apache.flink</groupId>
41 |             <artifactId>flink-core</artifactId>
42 |             <version>${flink.version}</version>
43 |             <scope>provided</scope>
44 |         </dependency>
45 |         <dependency>
46 |             <groupId>org.apache.flink</groupId>
47 |             <artifactId>flink-streaming-java_2.11</artifactId>
48 |             <version>${flink.version}</version>
49 |             <scope>provided</scope>
50 |         </dependency>
51 |         <dependency>
52 |             <groupId>org.apache.flink</groupId>
53 |             <artifactId>flink-shaded-hadoop2</artifactId>
54 |             <version>2.8.3-1.8.2</version>
55 |             <scope>provided</scope>
56 |         </dependency>
57 |         <dependency>
58 |             <groupId>com.aliyun.hbase</groupId>
59 |             <artifactId>alihbase-client</artifactId>
60 |             <version>1.1.3</version>
61 |             <exclusions>
62 |                 <exclusion>
63 |                     <groupId>jdk.tools</groupId>
64 |                     <artifactId>jdk.tools</artifactId>
65 |                 </exclusion>
66 | 
67 |                 <exclusion>
68 |                     <groupId>asm</groupId>
69 |                     <artifactId>asm</artifactId>
70 |                 </exclusion>
71 |                 <exclusion>
72 |                     <groupId>org.apache.hadoop</groupId>
73 |                     <artifactId>hadoop-auth</artifactId>
74 |                 </exclusion>
75 |                 <exclusion>
76 |                     <groupId>org.apache.hadoop</groupId>
77 |                     <artifactId>hadoop-common</artifactId>
78 |                 </exclusion>
79 |                 <exclusion>
80 |                     <groupId>org.apache.hadoop</groupId>
81 |                     <artifactId>hadoop-mapreduce-client-core</artifactId>
82 |                 </exclusion>
83 |             </exclusions>
84 |         </dependency>
85 |     </dependencies>
86 | </project>
--------------------------------------------------------------------------------
/datahub-connector/src/main/java/com/alibaba/flink/connectors/datahub/datastream/example/DatahubSinkFunctionExample.java:
--------------------------------------------------------------------------------
1 | /*
2 |  * Licensed to the Apache Software Foundation (ASF) under one or more
3 |  * contributor license agreements. See the NOTICE file distributed with
4 |  * this work for additional information regarding copyright ownership.
5 |  * The ASF licenses this file to You under the Apache License, Version 2.0
6 |  * (the "License"); you may not use this file except in compliance with
7 |  * the License. You may obtain a copy of the License at
8 |  *
9 |  *     http://www.apache.org/licenses/LICENSE-2.0
10 |  *
11 |  * Unless required by applicable law or agreed to in writing, software
12 |  * distributed under the License is distributed on an "AS IS" BASIS,
13 |  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 |  * See the License for the specific language governing permissions and
15 |  * limitations under the License.
16 |  */
17 | 
18 | package com.alibaba.flink.connectors.datahub.datastream.example;
19 | 
20 | import org.apache.flink.api.common.functions.MapFunction;
21 | import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
22 | 
23 | import com.alibaba.flink.connectors.datahub.datastream.sink.DatahubRecordResolver;
24 | import com.alibaba.flink.connectors.datahub.datastream.sink.DatahubSinkFunction;
25 | import com.aliyun.datahub.client.model.Field;
26 | import com.aliyun.datahub.client.model.FieldType;
27 | import com.aliyun.datahub.client.model.RecordData;
28 | import com.aliyun.datahub.client.model.RecordEntry;
29 | import com.aliyun.datahub.client.model.RecordSchema;
30 | import com.aliyun.datahub.client.model.TupleRecordData;
31 | 
32 | import java.io.Serializable;
33 | 
34 | /**
35 |  * Example to show how to use DatahubSinkFunction and RecordConverter.
36 |  */
37 | public class DatahubSinkFunctionExample implements Serializable {
38 |     private String endPoint = "";
39 |     private String projectName = "";
40 |     private String topicName = "";
41 |     private String accessId = "";
42 |     private String accessKey = "";
43 | 
44 |     public void useDefaultRecordConverter() throws Exception {
45 |         StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
46 |         env.generateSequence(0, 100)
47 |                 .map((MapFunction<Long, RecordEntry>) aLong -> getRecordEntry(aLong, "default:"))
48 |                 .addSink(new DatahubSinkFunction<>(endPoint, projectName, topicName, accessId, accessKey,
49 |                         DatahubRecordResolver.NOOP_DATAHUB_RECORD_RESOLVER));
50 |         env.execute();
51 |     }
52 | 
53 |     public void useCustomRecordConverter() throws Exception {
54 |         StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
55 |         DatahubRecordResolver<Long> recordResolver = message -> getRecordEntry(message, "custom:");
56 |         env.generateSequence(0, 100).addSink(new DatahubSinkFunction<>(endPoint, projectName, topicName, accessId,
57 |                 accessKey, recordResolver));
58 |         env.execute();
59 |     }
60 | 
61 |     private RecordEntry getRecordEntry(Long message, String s) {
62 |         RecordSchema recordSchema = new RecordSchema();
63 |         recordSchema.addField(new Field("type", FieldType.STRING));
64 |         recordSchema.addField(new Field("value", FieldType.BIGINT));
65 |         RecordEntry recordEntry = new RecordEntry();
66 |         RecordData recordData = new TupleRecordData(recordSchema);
67 |         ((TupleRecordData) recordData).setField(0, s + message);
68 |         ((TupleRecordData) recordData).setField(1, message);
69 |         recordEntry.setRecordData(recordData);
70 |         return recordEntry;
71 |     }
72 | 
73 |     public static void main(String[] args) throws Exception {
74 |         DatahubSinkFunctionExample sinkFunctionExample = new DatahubSinkFunctionExample();
75 |         sinkFunctionExample.useDefaultRecordConverter();
76 |         sinkFunctionExample.useCustomRecordConverter();
77 |     }
78 | }
79 | 
--------------------------------------------------------------------------------
/aliyun-connectors-common/src/main/java/com/alibaba/flink/connectors/common/util/BlinkStringUtil.java:
--------------------------------------------------------------------------------
1 | /*
2 |  * Licensed to the Apache Software Foundation (ASF) under one
3 |  * or more contributor license agreements. See the NOTICE file
4 |  * distributed with this work for additional information
5 |  * regarding copyright ownership. The ASF licenses this file
6 |  * to you under the Apache License, Version 2.0 (the
7 |  * "License"); you may not use this file except in compliance
8 |  * with the License. You may obtain a copy of the License at
9 |  *
10 |  *     http://www.apache.org/licenses/LICENSE-2.0
11 |  *
12 |  * Unless required by applicable law or agreed to in writing, software
13 |  * distributed under the License is distributed on an "AS IS" BASIS,
14 |  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
15 |  * See the License for the specific language governing permissions and
16 |  * limitations under the License.
17 |  */
18 | 
19 | package com.alibaba.flink.connectors.common.util;
20 | 
21 | import org.apache.commons.lang3.StringUtils;
22 | 
23 | /**
24 |  * A simple StringUtil for blink-connectors based on org.apache.commons.lang3.StringUtils.
25 |  */
26 | public class BlinkStringUtil {
27 | 
28 |     public static boolean isEmpty(String... strs) {
29 |         for (String str : strs) {
30 |             if (StringUtils.isEmpty(str)) {
31 |                 return true;
32 |             }
33 |         }
34 |         return false;
35 |     }
36 | 
37 |     public static boolean isNotEmpty(String... strs) {
38 |         return !isEmpty(strs);
39 |     }
40 | 
41 |     public static String[] splitPreserveAllTokens(String src, String delimiter) {
42 |         if (src == null) {
43 |             return null;
44 |         }
45 |         if (delimiter == null) {
46 |             return new String[]{src};
47 |         }
48 |         if (delimiter.length() == 1) {
49 |             return StringUtils.splitPreserveAllTokens(src, delimiter.charAt(0));
50 |         } else {
51 |             return StringUtils.splitPreserveAllTokens(src, delimiter);
52 |         }
53 |     }
54 | 
55 |     public static String[] split(String src, String delimiter) {
56 |         return StringUtils.split(src, delimiter);
57 |     }
58 | 
59 |     public static String join(String[] src) {
60 |         return join(src, ",");
61 |     }
62 | 
63 |     public static String join(String[] src, String delimiter) {
64 |         return StringUtils.join(src, delimiter);
65 |     }
66 | 
67 |     /**
68 |      * Checks whether the given string is null, empty, or contains only whitespace/delimiter characters.
69 |      */
70 |     public static boolean isBlank(String str, String delimiter) {
71 |         if (str == null || str.length() == 0) {
72 |             return true;
73 |         }
74 |         if (null == delimiter) {
75 |             return StringUtils.isBlank(str);
76 |         }
77 |         if (delimiter.length() == 1) {
78 |             char dChar = delimiter.charAt(0);
79 |             final int len = str.length();
80 |             for (int i = 0; i < len; i++) {
81 |                 if (!Character.isWhitespace(str.charAt(i)) && dChar != str.charAt(i)) {
82 |                     return false;
83 |                 }
84 |             }
85 |         } else {
86 |             String[] array = StringUtils.split(str, delimiter);
87 |             for (String s : array) {
88 |                 if (!StringUtils.isBlank(s)) {
89 |                     return false;
90 |                 }
91 |             }
92 |         }
93 |         return true;
94 |     }
95 | 
96 |     public static boolean isEmptyKey(Object key) {
97 |         if (key == null) {
98 |             return true;
99 |         }
100 |         String val = String.valueOf(key);
101 |         if (StringUtils.isBlank(val)) {
102 |             return true;
103 |         }
104 |         return false;
105 |     }
106 | 
107 |     /**
108 |      * Return the first candidate which is not a blank string, otherwise return null.
109 |      * @param candidates the candidate strings, in priority order
110 |      */
111 |     public static String coalesce(String... candidates) {
112 |         if (null != candidates && candidates.length > 0) {
113 |             for (String c : candidates) {
114 |                 if (!StringUtils.isBlank(c)) {
115 |                     return c;
116 |                 }
117 |             }
118 |         }
119 |         return null;
120 |     }
121 | }
122 | 
--------------------------------------------------------------------------------
/aliyun-connectors-common/src/main/java/com/alibaba/flink/connectors/common/reader/SequenceReader.java:
--------------------------------------------------------------------------------
1 | /*
2 |  * Licensed to the Apache Software Foundation (ASF) under one
3 |  * or more contributor license agreements. See the NOTICE file
4 |  * distributed with this work for additional information
5 |  * regarding copyright ownership. The ASF licenses this file
6 |  * to you under the Apache License, Version 2.0 (the
7 |  * "License"); you may not use this file except in compliance
8 |  * with the License. You may obtain a copy of the License at
9 |  *
10 |  *     http://www.apache.org/licenses/LICENSE-2.0
11 |  *
12 |  * Unless required by applicable law or agreed to in writing, software
13 |  * distributed under the License is distributed on an "AS IS" BASIS,
14 |  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
15 |  * See the License for the specific language governing permissions and
16 |  * limitations under the License.
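A quick demonstration of the `BlinkStringUtil` helpers just listed, using only methods from the listing; the printed values follow directly from the code above:

```java
// Sketch: the delimiter-aware helpers in action.
public class BlinkStringUtilUsage {
    public static void main(String[] args) {
        System.out.println(BlinkStringUtil.isBlank(" , ,, ", ","));       // true: only spaces and delimiters
        System.out.println(BlinkStringUtil.coalesce(null, " ", "first")); // "first": first non-blank candidate

        // splitPreserveAllTokens keeps empty fields: ["a", "", "b"]
        String[] parts = BlinkStringUtil.splitPreserveAllTokens("a||b", "|");
        System.out.println(BlinkStringUtil.join(parts, ","));             // "a,,b"
    }
}
```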
17 |  */
18 | 
19 | package com.alibaba.flink.connectors.common.reader;
20 | 
21 | import org.apache.flink.api.common.functions.RuntimeContext;
22 | import org.apache.flink.configuration.Configuration;
23 | import org.apache.flink.core.io.InputSplit;
24 | import org.apache.flink.metrics.Counter;
25 | import org.apache.flink.metrics.Meter;
26 | import org.apache.flink.metrics.MeterView;
27 | import org.apache.flink.metrics.SimpleCounter;
28 | import org.apache.flink.runtime.jobgraph.tasks.InputSplitProvider;
29 | import org.apache.flink.runtime.jobgraph.tasks.InputSplitProviderException;
30 | import org.apache.flink.streaming.api.functions.source.SourceFunction;
31 | 
32 | import com.alibaba.flink.connectors.common.MetricUtils;
33 | import com.alibaba.flink.connectors.common.source.AbstractParallelSourceBase;
34 | 
35 | import java.io.IOException;
36 | 
37 | /**
38 |  * SequenceReader is used by {@link AbstractParallelSourceBase}, which requests a new input split dynamically after
39 |  * finishing the current split.
40 |  * @param <T> Output type of the {@link AbstractParallelSourceBase}
41 |  */
42 | public class SequenceReader<T> {
43 | 
44 |     private InputSplitProvider inputSplitProvider;
45 |     private AbstractParallelSourceBase<T, ?> sourceFunction;
46 |     private Configuration config;
47 |     private volatile boolean isStop = false;
48 |     private Counter outputCounter;
49 |     private Meter tpsMetric;
50 | 
51 |     public SequenceReader(AbstractParallelSourceBase<T, ?> source, InputSplitProvider provider, Configuration config) {
52 |         this.sourceFunction = source;
53 |         this.inputSplitProvider = provider;
54 |         this.config = config;
55 |         RuntimeContext context = source.getRuntimeContext();
56 |         outputCounter = context.getMetricGroup().counter(MetricUtils.METRICS_TPS + "_counter", new SimpleCounter());
57 |         tpsMetric = context.getMetricGroup().meter(MetricUtils.METRICS_TPS, new MeterView(outputCounter, 60));
58 |     }
59 | 
60 |     public void run(SourceFunction.SourceContext<T> ctx) throws InputSplitProviderException, IOException, InterruptedException {
61 | 
62 |         InputSplit inputSplit;
63 |         inputSplit = inputSplitProvider.getNextInputSplit(sourceFunction.getRuntimeContext().getUserCodeClassLoader());
64 |         while (!isStop && inputSplit != null) {
65 |             RecordReader<T, ?> recordReader = sourceFunction.createReader(config);
66 |             try {
67 |                 recordReader.open(inputSplit, sourceFunction.getRuntimeContext());
68 |                 while (!isStop && recordReader.next()) {
69 |                     if (recordReader.isHeartBeat()) {
70 |                         continue;
71 |                     }
72 |                     synchronized (ctx.getCheckpointLock()) {
73 |                         tpsMetric.markEvent();
74 |                         ctx.collect(recordReader.getMessage());
75 |                     }
76 |                 }
77 |             } finally {
78 |                 recordReader.close();
79 |             }
80 |             inputSplit = inputSplitProvider.getNextInputSplit(sourceFunction.getRuntimeContext().getUserCodeClassLoader());
81 |         }
82 |     }
83 | 
84 |     /**
85 |      * Stop externally.
86 |      */
87 |     public void stop() {
88 |         isStop = true;
89 |     }
90 | }
91 | 
--------------------------------------------------------------------------------
/aliyun-connectors-common/src/main/java/com/alibaba/flink/connectors/common/reader/AbstractPartitionNumsListener.java:
--------------------------------------------------------------------------------
1 | /*
2 |  * Licensed to the Apache Software Foundation (ASF) under one
3 |  * or more contributor license agreements. See the NOTICE file
4 |  * distributed with this work for additional information
5 |  * regarding copyright ownership. The ASF licenses this file
6 |  * to you under the Apache License, Version 2.0 (the
7 |  * "License"); you may not use this file except in compliance
8 |  * with the License. You may obtain a copy of the License at
9 |  *
10 |  *     http://www.apache.org/licenses/LICENSE-2.0
11 |  *
12 |  * Unless required by applicable law or agreed to in writing, software
13 |  * distributed under the License is distributed on an "AS IS" BASIS,
14 |  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
15 |  * See the License for the specific language governing permissions and
16 |  * limitations under the License.
17 |  */
18 | 
19 | package com.alibaba.flink.connectors.common.reader;
20 | 
21 | import com.alibaba.flink.connectors.common.util.ConnectionPool;
22 | import org.slf4j.Logger;
23 | import org.slf4j.LoggerFactory;
24 | 
25 | import java.io.Serializable;
26 | import java.util.Random;
27 | import java.util.Timer;
28 | import java.util.TimerTask;
29 | 
30 | /**
31 |  * Base class to monitor source partition change.
32 |  */
33 | public abstract class AbstractPartitionNumsListener implements Serializable {
34 |     private static Logger logger = LoggerFactory.getLogger(AbstractPartitionNumsListener.class);
35 |     private boolean partitionChanged = false;
36 |     protected int initPartitionCount = -1;
37 |     private transient Timer listener;
38 |     private static ConnectionPool<Timer> timerPool = new ConnectionPool<>();
39 | 
40 |     public void initPartitionNumsListener() {
41 |         scheduleListener();
42 |     }
43 | 
44 |     public abstract int getPartitionsNums();
45 | 
46 |     public abstract String getReaderName();
47 | 
48 |     /**
49 |      * Start a flusher that listens for shard changes.
50 |      */
51 |     private void scheduleListener() {
52 |         synchronized (AbstractPartitionNumsListener.class) {
53 |             if (timerPool.contains(getReaderName())) {
54 |                 listener = timerPool.get(getReaderName());
55 |             } else {
56 |                 listener = new Timer("Partition Change " + getReaderName() + "-Listener");
57 |                 listener.schedule(new TimerTask() {
58 |                     @Override
59 |                     public void run() {
60 |                         try {
61 |                             // use a random delay to reduce pressure on the server
62 |                             Thread.sleep(new Random().nextInt(300) * 1000);
63 |                             int partitionsNums = getPartitionsNums();
64 |                             partitionNumsChangeListener(partitionsNums, initPartitionCount);
65 |                         } catch (Throwable e) {
66 |                             logger.error("Get partition of " + getReaderName() + " Error", e);
67 |                         }
68 |                     }
69 |                 }, 60000, 5 * 60000);
70 |                 timerPool.put(getReaderName(), listener);
71 |             }
72 |         }
73 |     }
74 | 
75 |     protected void triggerPartitionNumFailOver() {
76 |         partitionChanged = true;
77 |         logger.error(String.format("shard number of the logStore[%s] has changed, please adjust" +
78 |                 " the source parallelism configuration and then restart.", getReaderName()
79 |         ));
80 |     }
81 | 
82 |     protected void partitionNumsChangeListener(int newPartitionsCount, int initPartitionCount) {
83 |         if (newPartitionsCount != initPartitionCount) {
84 |             triggerPartitionNumFailOver();
85 |         }
86 |     }
87 | 
88 |     public AbstractPartitionNumsListener setInitPartitionCount(int initPartitionCount) {
89 |         this.initPartitionCount = initPartitionCount;
90 |         return this;
91 |     }
92 | 
93 |     public int getInitPartitionCount() {
94 |         return initPartitionCount;
95 |     }
96 | 
97 |     public boolean isPartitionChanged() {
98 |         return partitionChanged;
99 |     }
100 | 
101 |     public void destroyPartitionNumsListener() {
102 |         synchronized (AbstractPartitionNumsListener.class) {
103 |             if (timerPool.remove(getReaderName()) && null != listener) {
104 |                 listener.cancel();
105 |                 listener = null;
106 |             }
107 |         }
108 |     }
109 | 
110 | }
111 | 
--------------------------------------------------------------------------------
/aliyun-connectors-common/src/main/java/com/alibaba/flink/connectors/common/source/AbstractParallelSource.java:
--------------------------------------------------------------------------------
1 | /*
2 |  * Licensed to the Apache Software Foundation (ASF) under one
3 |  * or more contributor license agreements. See the NOTICE file
4 |  * distributed with this work for additional information
5 |  * regarding copyright ownership. The ASF licenses this file
6 |  * to you under the Apache License, Version 2.0 (the
7 |  * "License"); you may not use this file except in compliance
8 |  * with the License. You may obtain a copy of the License at
9 |  *
10 |  *     http://www.apache.org/licenses/LICENSE-2.0
11 |  *
12 |  * Unless required by applicable law or agreed to in writing, software
13 |  * distributed under the License is distributed on an "AS IS" BASIS,
14 |  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
15 |  * See the License for the specific language governing permissions and
16 |  * limitations under the License.
17 |  */
18 | 
19 | package com.alibaba.flink.connectors.common.source;
20 | 
21 | import org.apache.flink.api.java.tuple.Tuple2;
22 | import org.apache.flink.core.io.InputSplit;
23 | import org.apache.flink.streaming.api.checkpoint.ListCheckpointed;
24 | 
25 | import org.slf4j.Logger;
26 | import org.slf4j.LoggerFactory;
27 | 
28 | import java.io.Serializable;
29 | import java.util.HashSet;
30 | import java.util.LinkedList;
31 | import java.util.List;
32 | import java.util.Map;
33 | import java.util.Set;
34 | 
35 | /**
36 |  * A parallel source that snapshots its per-split read progress through {@link ListCheckpointed}.
37 |  * @param <T> type of the produced records (type parameters restored; they were lost in extraction)
38 |  * @param <CURSOR> type of the per-split progress cursor
39 |  */
40 | public abstract class AbstractParallelSource<T, CURSOR extends Serializable>
41 |         extends AbstractParallelSourceBase<T, CURSOR>
42 |         implements ListCheckpointed<Tuple2<InputSplit, CURSOR>> {
43 |     private static final Logger LOG = LoggerFactory.getLogger(AbstractParallelSource.class);
44 |     private static final long serialVersionUID = -7848357196819780804L;
45 | 
46 |     /**
47 |      * Gets the current state of the function or operator. The state must reflect the result of all
48 |      * prior invocations to this function.
49 |      *
50 |      * @param checkpointId The ID of the checkpoint.
51 |      * @param timestamp Timestamp of the checkpoint.
52 |      * @return The operator state in a list of redistributable, atomic sub-states.
53 |      *         Should not return null, but an empty list instead.
54 |      * @throws Exception Thrown if the creation of the state object failed. This causes the
55 |      *                   checkpoint to fail. The system may decide to fail the operation (and trigger
56 |      *                   recovery), or to discard this checkpoint attempt and to continue running
57 |      *                   and to try again with the next checkpoint attempt.
58 |      */
59 |     public List<Tuple2<InputSplit, CURSOR>> snapshotState(long checkpointId, long timestamp) throws Exception {
60 | 
61 |         List<Tuple2<InputSplit, CURSOR>> state = new LinkedList<>();
62 |         if (disableParallelRead) {
63 |             return state;
64 |         }
65 | 
66 |         // partitions with progress
67 |         Set<InputSplit> partitionWithState = new HashSet<>();
68 |         for (Map.Entry<InputSplit, CURSOR> entry : parallelReader.getProgress().getProgress().entrySet()) {
69 |             state.add(Tuple2.of(entry.getKey(), entry.getValue()));
70 |             partitionWithState.add(entry.getKey());
71 |         }
72 | 
73 |         // partitions without progress yet: carry their initial cursors over
74 |         for (Tuple2<InputSplit, CURSOR> entry : initialProgress) {
75 |             if (!partitionWithState.contains(entry.f0)) {
76 |                 state.add(entry);
77 |             }
78 |         }
79 | 
80 |         return state;
81 |     }
82 | 
83 |     /**
84 |      * Restores the state of the function or operator to that of a previous checkpoint.
85 |      * This method is invoked when a function is executed as part of a recovery run.
86 |      * <p>Note that restoreState() is called before open().
87 |      *
88 |      * @param state The state to be restored as a list of atomic sub-states.
89 |      */
90 |     public void restoreState(List<Tuple2<InputSplit, CURSOR>> state) throws Exception {
91 |         LOG.info("Restoring state: {}", state);
92 |         // mark that this run is recovering from checkpointed state
93 |         recoryFromState = true;
94 |         if (state != null && !state.isEmpty()) {
95 |             this.initialProgress = state;
96 |         }
97 |     }
98 | }
99 | 
--------------------------------------------------------------------------------
/sls-connector/src/main/java/com/alibaba/flink/connectors/sls/datastream/sink/LogProducerProvider.java:
--------------------------------------------------------------------------------
1 | /*
2 |  * Licensed to the Apache Software Foundation (ASF) under one or more
3 |  * contributor license agreements. See the NOTICE file distributed with
4 |  * this work for additional information regarding copyright ownership.
5 |  * The ASF licenses this file to You under the Apache License, Version 2.0
6 |  * (the "License"); you may not use this file except in compliance with
7 |  * the License. You may obtain a copy of the License at
8 |  *
9 |  *     http://www.apache.org/licenses/LICENSE-2.0
10 |  *
11 |  * Unless required by applicable law or agreed to in writing, software
12 |  * distributed under the License is distributed on an "AS IS" BASIS,
13 |  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 |  * See the License for the specific language governing permissions and
15 |  * limitations under the License.
16 |  */
17 | 
18 | package com.alibaba.flink.connectors.sls.datastream.sink;
19 | 
20 | import org.apache.flink.configuration.Configuration;
21 | 
22 | import com.alibaba.flink.connectors.common.sts.AbstractClientProvider;
23 | import com.aliyun.openservices.aliyun.log.producer.LogProducer;
24 | import com.aliyun.openservices.aliyun.log.producer.ProducerConfig;
25 | import com.aliyun.openservices.aliyun.log.producer.ProjectConfig;
26 | import com.aliyun.openservices.aliyun.log.producer.ProjectConfigs;
27 | import com.aliyun.openservices.aliyun.log.producer.errors.ProducerException;
28 | import org.slf4j.Logger;
29 | import org.slf4j.LoggerFactory;
30 | 
31 | /**
32 |  * LogProducer factory.
33 |  */
34 | public class LogProducerProvider extends AbstractClientProvider<LogProducer> {
35 |     private static final Logger LOGGER = LoggerFactory.getLogger(LogProducerProvider.class);
36 |     private transient ProducerConfig producerConfig;
37 |     private String endPoint;
38 |     private String projectName;
39 |     private int maxRetryTimes;
40 |     private int flushInterval;
41 | 
42 |     public LogProducerProvider(
43 |             String projectName, String endPoint, String accessId, String accessKey, int maxRetryTimes, int flushInterval) {
44 |         super(accessId, accessKey);
45 |         this.projectName = projectName;
46 |         this.endPoint = endPoint;
47 |         this.maxRetryTimes = maxRetryTimes;
48 |         this.flushInterval = flushInterval;
49 |     }
50 | 
51 |     public LogProducerProvider(String projectName, String endPoint, Configuration properties, int maxRetryTimes, int flushInterval) {
52 |         super(properties);
53 |         this.projectName = projectName;
54 |         this.endPoint = endPoint;
55 |         this.maxRetryTimes = maxRetryTimes;
56 |         this.flushInterval = flushInterval;
57 |     }
58 | 
59 |     @Override
60 |     protected void closeClient() {
61 |         if (client != null) {
62 |             // close the producer, retrying until it succeeds.
63 |             while (true) {
64 |                 try {
65 |                     client.close();
66 |                     break;
67 |                 } catch (InterruptedException e) {
68 |                     // ignore interrupt signal to avoid io thread leaking.
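                    // (added note) close() may be interrupted while batches are still
                    // draining; retrying here avoids leaking the producer's IO threads.
                    // Restoring the interrupt flag with Thread.currentThread().interrupt()
                    // once close() succeeds would be a reasonable refinement.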
69 |                 } catch (ProducerException e) {
70 |                     LOGGER.warn("Exception caught when closing client", e);
71 |                     break;
72 |                 }
73 |             }
74 |             client = null;
75 |         }
76 |     }
77 | 
78 |     @Override
79 |     protected LogProducer produceNormalClient(String accessId, String accessKey) {
80 |         ProjectConfigs projectConfigs = new ProjectConfigs();
81 |         ProjectConfig projectConfig = new ProjectConfig(this.projectName, this.endPoint, accessId, accessKey);
82 |         projectConfigs.put(projectConfig);
83 |         producerConfig = new ProducerConfig(projectConfigs);
84 |         producerConfig.setLingerMs(flushInterval);
85 |         producerConfig.setRetries(maxRetryTimes);
86 |         LogProducer producer = new LogProducer(producerConfig);
87 |         return producer;
88 |     }
89 | 
90 |     @Override
91 |     protected LogProducer produceStsClient(String accessId, String accessKey, String securityToken) {
92 |         ProjectConfigs projectConfigs = new ProjectConfigs();
93 |         ProjectConfig projectConfig = new ProjectConfig(this.projectName, this.endPoint, accessId, accessKey, securityToken);
94 |         projectConfigs.put(projectConfig);
95 |         producerConfig = new ProducerConfig(projectConfigs);
96 |         producerConfig.setLingerMs(flushInterval);
97 |         producerConfig.setRetries(maxRetryTimes);
98 |         LogProducer producer = new LogProducer(producerConfig);
99 |         return producer;
100 |     }
101 | }
102 | 
--------------------------------------------------------------------------------
/sls-shaded-sdk/pom.xml:
--------------------------------------------------------------------------------
1 | <?xml version="1.0" encoding="UTF-8"?>
2 | <!--
3 |   Licensed to the Apache Software Foundation (ASF) under one or more
4 |   contributor license agreements. See the NOTICE file distributed with
5 |   this work for additional information regarding copyright ownership.
6 |   The ASF licenses this file to You under the Apache License, Version 2.0
7 |   (the "License"); you may not use this file except in compliance with
8 |   the License. You may obtain a copy of the License at
9 | 
10 |       http://www.apache.org/licenses/LICENSE-2.0
11 | 
12 |   Unless required by applicable law or agreed to in writing, software
13 |   distributed under the License is distributed on an "AS IS" BASIS,
14 |   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
15 |   See the License for the specific language governing permissions and
16 |   limitations under the License.
17 | -->
18 | <!-- NOTE: the XML tags of this file were lost in extraction; the structure below is
19 |      reconstructed from the surviving values and standard Maven conventions. -->
20 | <project xmlns="http://maven.apache.org/POM/4.0.0"
21 |          xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
22 |          xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
23 |     <parent>
24 |         <groupId>com.alibaba.flink</groupId>
25 |         <artifactId>aliyun-flink-connectors-parent</artifactId>
26 |         <version>0.1-SNAPSHOT</version>
27 |         <relativePath>..</relativePath>
28 |     </parent>
29 |     <modelVersion>4.0.0</modelVersion>
30 | 
31 |     <groupId>com.alibaba.flink</groupId>
32 |     <artifactId>sls-shaded-sdk</artifactId>
33 | 
34 |     <dependencies>
35 |         <dependency>
36 |             <groupId>com.aliyun.openservices</groupId>
37 |             <artifactId>aliyun-log-producer</artifactId>
38 |             <version>0.1.0</version>
39 |         </dependency>
40 |         <dependency>
41 |             <groupId>com.aliyun.openservices</groupId>
42 |             <artifactId>aliyun-log</artifactId>
43 |             <version>0.6.57</version>
44 |         </dependency>
45 |     </dependencies>
46 | 
47 |     <build>
48 |         <plugins>
49 |             <plugin>
50 |                 <groupId>org.apache.maven.plugins</groupId>
51 |                 <artifactId>maven-shade-plugin</artifactId>
52 |                 <executions>
53 |                     <execution>
54 |                         <id>shade-flink</id>
55 |                         <configuration>
56 |                             <artifactSet>
57 |                                 <excludes>
58 |                                     <exclude>org.slf4j:slf4j-log4j12</exclude>
59 |                                     <exclude>log4j:log4j</exclude>
60 |                                     <exclude>org.slf4j:slf4j-api</exclude>
61 |                                     <exclude>com.google.code.findbugs:jsr305</exclude>
62 |                                     <exclude>org.apache.flink:flink-shaded-*</exclude>
63 |                                     <exclude>com.fasterxml.jackson.*:*</exclude>
64 |                                     <exclude>tv.cntt:*</exclude>
65 |                                     <exclude>io.netty:*</exclude>
66 |                                     <exclude>org.ow2.asm:*</exclude>
67 |                                     <exclude>commons-logging:commons-logging</exclude>
68 |                                     <exclude>commons-codec:commons-codec</exclude>
69 |                                     <exclude>commons-collections:commons-collections</exclude>
70 |                                     <exclude>org.apache.commons:commons-lang3</exclude>
71 |                                     <exclude>org.apache.flink:*</exclude>
72 |                                 </excludes>
73 |                             </artifactSet>
74 |                             <relocations>
75 |                                 <relocation>
76 |                                     <pattern>com.google.common</pattern>
77 |                                     <shadedPattern>com.alibaba.flink.sls.shaded.com.google.common</shadedPattern>
78 |                                 </relocation>
79 |                             </relocations>
80 |                         </configuration>
81 |                     </execution>
82 |                 </executions>
83 |             </plugin>
84 |             <plugin>
85 |                 <groupId>org.apache.maven.plugins</groupId>
86 |                 <artifactId>maven-jar-plugin</artifactId>
87 |                 <!-- the surviving "true" flags near this point could not be mapped back to
88 |                      their exact tags; only the clearly recoverable test-jar goal is kept -->
89 |                 <executions>
90 |                     <execution>
91 |                         <goals>
92 |                             <goal>test-jar</goal>
93 |                         </goals>
94 |                     </execution>
95 |                 </executions>
96 |             </plugin>
97 |         </plugins>
98 |     </build>
99 | </project>
--------------------------------------------------------------------------------
/aliyun-connectors-common/src/main/java/com/alibaba/flink/connectors/common/sink/OutputFormatSinkFunction.java:
--------------------------------------------------------------------------------
1 | /*
2 |  * Licensed to the Apache Software Foundation (ASF) under one
3 |  * or more contributor license agreements. See the NOTICE file
4 |  * distributed with this work for additional information
5 |  * regarding copyright ownership. The ASF licenses this file
6 |  * to you under the Apache License, Version 2.0 (the
7 |  * "License"); you may not use this file except in compliance
8 |  * with the License. You may obtain a copy of the License at
9 |  *
10 |  *     http://www.apache.org/licenses/LICENSE-2.0
11 |  *
12 |  * Unless required by applicable law or agreed to in writing, software
13 |  * distributed under the License is distributed on an "AS IS" BASIS,
14 |  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
15 |  * See the License for the specific language governing permissions and
16 |  * limitations under the License.
17 |  */
18 | 
19 | package com.alibaba.flink.connectors.common.sink;
20 | 
21 | import org.apache.flink.api.common.io.OutputFormat;
22 | import org.apache.flink.api.common.io.RichOutputFormat;
23 | import org.apache.flink.configuration.Configuration;
24 | import org.apache.flink.streaming.api.checkpoint.ListCheckpointed;
25 | import org.apache.flink.streaming.api.functions.sink.RichSinkFunction;
26 | 
27 | import org.slf4j.Logger;
28 | import org.slf4j.LoggerFactory;
29 | 
30 | import java.io.IOException;
31 | import java.io.Serializable;
32 | import java.util.Collections;
33 | import java.util.List;
34 | 
35 | /**
36 |  * Created by sleepy on 15/12/14.
37 |  * Lets an {@link OutputFormat} be used as a sink in DataStream programs.
38 |  */
39 | public class OutputFormatSinkFunction<RECORD> extends RichSinkFunction<RECORD>
40 |         implements ListCheckpointed<Serializable> {
41 |     private static final Logger LOG = LoggerFactory.getLogger(OutputFormatSinkFunction.class);
42 |     private static final long RETRY_INTERVAL = 100;
43 |     private OutputFormat<RECORD> outputFormat;
44 |     private long retryTimeout = 30 * 60 * 1000; // half an hour
45 | 
46 |     public OutputFormatSinkFunction(OutputFormat<RECORD> outputFormat) {
47 |         this.outputFormat = outputFormat;
48 |     }
49 | 
50 |     @Override
51 |     public void open(Configuration config) throws IOException {
52 |         if (RichOutputFormat.class.isAssignableFrom(outputFormat.getClass())) {
53 |             ((RichOutputFormat) outputFormat).setRuntimeContext(getRuntimeContext());
54 |         }
55 |         outputFormat.configure(config);
56 |         outputFormat.open(
57 |                 getRuntimeContext().getIndexOfThisSubtask(),
58 |                 getRuntimeContext().getNumberOfParallelSubtasks());
59 |         if (outputFormat instanceof HasRetryTimeout) {
60 |             retryTimeout = ((HasRetryTimeout) outputFormat).getRetryTimeout();
61 |         }
62 |         LOG.info(
63 |                 "Initialized OutputFormatSinkFunction of {}/{} task.",
64 |                 getRuntimeContext().getIndexOfThisSubtask(),
65 |                 getRuntimeContext().getNumberOfParallelSubtasks());
66 |     }
67 | 
68 |     @Override
69 |     public void close() throws IOException {
70 |         LOG.info("Closing OutputFormatSinkFunction.");
71 |         outputFormat.close();
72 |     }
73 | 
74 |     @Override
75 |     public void invoke(RECORD record) throws Exception {
76 |         outputFormat.writeRecord(record);
77 |     }
78 | 
79 |     public OutputFormat<RECORD> getOutputFormat() {
80 |         return outputFormat;
81 |     }
82 | 
83 |     /*
84 |      * If the OutputFormat implements Syncable, sync() is invoked on every checkpoint.
85 |      */
86 |     @Override
87 |     public List<Serializable> snapshotState(long checkpointId, long checkpointTimestamp) throws Exception {
88 |         if (outputFormat instanceof Syncable) {
89 |             long startSyncing = System.currentTimeMillis();
90 |             // Retry until successful or the retry timeout is exceeded
91 |             while (true) {
92 |                 try {
93 |                     ((Syncable) outputFormat).sync();
94 |                     break;
95 |                 } catch (IOException e) {
96 |                     LOG.error("Sync output format failed", e);
97 |                     try {
98 |                         Thread.sleep(RETRY_INTERVAL);
99 |                     } catch (InterruptedException e1) {
100 |                         // keep retrying; the timeout check below bounds the total wait
101 |                     }
102 |                 }
103 | 
104 |                 long retryingTimeCost = System.currentTimeMillis() - startSyncing;
105 |                 if (retryingTimeCost > retryTimeout) {
106 |                     throw new IOException(String.format(
107 |                             "Sync retry exceeded timeout: retried %s ms, timeout %s ms", retryingTimeCost, retryTimeout));
108 |                 }
109 |             }
110 |         }
111 |         return Collections.emptyList(); // ListCheckpointed should not return null
112 |     }
113 | 
114 |     @Override
115 |     public void restoreState(List<Serializable> state) throws Exception {
116 |     }
117 | 
118 |     @Override
119 |     public String toString() {
120 |         return getClass().getSimpleName() + ":" + outputFormat.toString();
121 |     }
122 | }
123 | 
--------------------------------------------------------------------------------
/datahub-connector/src/test/java/com/alibaba/flink/connectors/datahub/table/DatahubTableFactoryTest.java:
--------------------------------------------------------------------------------
1 | /*
2 |  * Licensed to the Apache Software Foundation (ASF) under one
3 |  * or more contributor license agreements. See the NOTICE file
4 |  * distributed with this work for additional information
5 |  * regarding copyright ownership. The ASF licenses this file
6 |  * to you under the Apache License, Version 2.0 (the
7 |  * "License"); you may not use this file except in compliance
8 |  * with the License. You may obtain a copy of the License at
9 |  *
10 |  *     http://www.apache.org/licenses/LICENSE-2.0
11 |  *
12 |  * Unless required by applicable law or agreed to in writing, software
13 |  * distributed under the License is distributed on an "AS IS" BASIS,
14 |  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
15 |  * See the License for the specific language governing permissions and
16 |  * limitations under the License.
17 |  */
18 | 
19 | package com.alibaba.flink.connectors.datahub.table;
20 | 
21 | import org.apache.flink.table.factories.TableFactoryService;
22 | import org.apache.flink.table.factories.TableSinkFactory;
23 | import org.apache.flink.table.sinks.TableSink;
24 | 
25 | import org.junit.Test;
26 | 
27 | import java.util.HashMap;
28 | import java.util.Map;
29 | 
30 | import static com.alibaba.flink.connectors.datahub.table.DatahubDescriptorValidator.CONNECTOR_ACCESS_ID;
31 | import static com.alibaba.flink.connectors.datahub.table.DatahubDescriptorValidator.CONNECTOR_ACCESS_KEY;
32 | import static com.alibaba.flink.connectors.datahub.table.DatahubDescriptorValidator.CONNECTOR_BATCH_SIZE;
33 | import static com.alibaba.flink.connectors.datahub.table.DatahubDescriptorValidator.CONNECTOR_BATCH_WRITE_TIMEOUT_IN_MILLS;
34 | import static com.alibaba.flink.connectors.datahub.table.DatahubDescriptorValidator.CONNECTOR_BUFFER_SIZE;
35 | import static com.alibaba.flink.connectors.datahub.table.DatahubDescriptorValidator.CONNECTOR_ENDPOINT;
36 | import static com.alibaba.flink.connectors.datahub.table.DatahubDescriptorValidator.CONNECTOR_MAX_RETRY_TIMES;
37 | import static com.alibaba.flink.connectors.datahub.table.DatahubDescriptorValidator.CONNECTOR_PROJECT;
38 | import static com.alibaba.flink.connectors.datahub.table.DatahubDescriptorValidator.CONNECTOR_RETRY_TIMEOUT_IN_MILLS;
39 | import static com.alibaba.flink.connectors.datahub.table.DatahubDescriptorValidator.CONNECTOR_TOPIC;
40 | import static com.alibaba.flink.connectors.datahub.table.DatahubDescriptorValidator.CONNECTOR_TYPE_VALUE_DATAHUB;
41 | import static org.apache.flink.table.descriptors.ConnectorDescriptorValidator.CONNECTOR_PROPERTY_VERSION;
42 | import static org.apache.flink.table.descriptors.ConnectorDescriptorValidator.CONNECTOR_TYPE;
43 | import static org.junit.Assert.assertTrue;
44 | 
45 | /**
46 |  * Test for {@link DatahubTableFactory}.
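 * <p>The factory is discovered through {@link TableFactoryService}, which reads the
 * META-INF/services entry shipped with the datahub-connector module, so these tests
 * also exercise that service registration.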
47 |  */
48 | public class DatahubTableFactoryTest {
49 |     @Test
50 |     public void testRequiredProperties() {
51 |         Map<String, String> properties = getBasicProperties();
52 | 
53 |         final TableSink<?> actual = TableFactoryService.find(TableSinkFactory.class, properties)
54 |                 .createTableSink(properties);
55 | 
56 |         assertTrue(actual instanceof DatahubTableSink);
57 |     }
58 | 
59 |     @Test
60 |     public void testSupportedProperties() {
61 |         Map<String, String> properties = getBasicProperties();
62 | 
63 |         properties.put(CONNECTOR_BATCH_SIZE, "1");
64 |         properties.put(CONNECTOR_BUFFER_SIZE, "1");
65 |         properties.put(CONNECTOR_RETRY_TIMEOUT_IN_MILLS, "3");
66 |         properties.put(CONNECTOR_MAX_RETRY_TIMES, "10");
67 |         properties.put(CONNECTOR_BATCH_WRITE_TIMEOUT_IN_MILLS, "5");
68 | 
69 |         final TableSink<?> actual = TableFactoryService.find(TableSinkFactory.class, properties)
70 |                 .createTableSink(properties);
71 | 
72 |         assertTrue(actual instanceof DatahubTableSink);
73 |     }
74 | 
75 |     private Map<String, String> getBasicProperties() {
76 |         Map<String, String> properties = new HashMap<>();
77 | 
78 |         properties.put(CONNECTOR_TYPE, CONNECTOR_TYPE_VALUE_DATAHUB);
79 |         properties.put(CONNECTOR_PROPERTY_VERSION, "1");
80 |         properties.put(CONNECTOR_PROJECT, "myproject");
81 |         properties.put(CONNECTOR_TOPIC, "mytopic");
82 |         properties.put(CONNECTOR_ACCESS_ID, "myid");
83 |         properties.put(CONNECTOR_ACCESS_KEY, "mykey");
84 |         properties.put(CONNECTOR_ENDPOINT, "myendpoint");
85 | 
86 |         properties.put("schema.0.name", "aaa");
87 |         properties.put("schema.0.type", "INT");
88 |         properties.put("schema.1.name", "bbb");
89 |         properties.put("schema.1.type", "VARCHAR");
90 |         properties.put("schema.2.name", "ccc");
91 |         properties.put("schema.2.type", "DOUBLE");
92 | 
93 |         return properties;
94 |     }
95 | }
96 | 
--------------------------------------------------------------------------------
/aliyun-connectors-common/src/main/java/com/alibaba/flink/connectors/common/sink/TupleOutputFormatSinkFunction.java:
--------------------------------------------------------------------------------
1 | /*
2 |  * Licensed to the Apache Software Foundation (ASF) under one
3 |  * or more contributor license agreements. See the NOTICE file
4 |  * distributed with this work for additional information
5 |  * regarding copyright ownership. The ASF licenses this file
6 |  * to you under the Apache License, Version 2.0 (the
7 |  * "License"); you may not use this file except in compliance
8 |  * with the License. You may obtain a copy of the License at
9 |  *
10 |  *     http://www.apache.org/licenses/LICENSE-2.0
11 |  *
12 |  * Unless required by applicable law or agreed to in writing, software
13 |  * distributed under the License is distributed on an "AS IS" BASIS,
14 |  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
15 |  * See the License for the specific language governing permissions and
16 |  * limitations under the License.
17 |  */
18 | 
19 | package com.alibaba.flink.connectors.common.sink;
20 | 
21 | import org.apache.flink.api.common.io.OutputFormat;
22 | import org.apache.flink.api.common.io.RichOutputFormat;
23 | import org.apache.flink.api.java.tuple.Tuple2;
24 | import org.apache.flink.configuration.Configuration;
25 | import org.apache.flink.streaming.api.checkpoint.ListCheckpointed;
26 | import org.apache.flink.streaming.api.functions.sink.RichSinkFunction;
27 | 
28 | import org.slf4j.Logger;
29 | import org.slf4j.LoggerFactory;
30 | 
31 | import java.io.IOException;
32 | import java.io.Serializable;
33 | import java.util.Collections;
34 | import java.util.List;
35 | 
36 | /**
37 |  * A sink that writes {@code Tuple2<Boolean, IN>} records through an {@link OutputFormat}.
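 * <p>The boolean field of each {@link Tuple2} is a change flag set by the upstream
 * operator (assumption: true marks an insert/update, false a delete/retraction); the
 * wrapped {@link OutputFormat} decides how to apply it.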
38 |  */
39 | public class TupleOutputFormatSinkFunction<IN> extends RichSinkFunction<Tuple2<Boolean, IN>>
40 |         implements ListCheckpointed<Serializable> {
41 |     private static final Logger LOG = LoggerFactory.getLogger(TupleOutputFormatSinkFunction.class);
42 |     private static final long RETRY_INTERVAL = 100;
43 |     private OutputFormat<Tuple2<Boolean, IN>> outputFormat;
44 |     private long retryTimeout = 30 * 60 * 1000; // half an hour
45 | 
46 |     public TupleOutputFormatSinkFunction(OutputFormat<Tuple2<Boolean, IN>> outputFormat) {
47 |         this.outputFormat = outputFormat;
48 |     }
49 | 
50 |     @Override
51 |     public void open(Configuration config) throws IOException {
52 |         if (RichOutputFormat.class.isAssignableFrom(outputFormat.getClass())) {
53 |             ((RichOutputFormat) outputFormat).setRuntimeContext(getRuntimeContext());
54 |         }
55 |         outputFormat.configure(config);
56 |         outputFormat.open(
57 |                 getRuntimeContext().getIndexOfThisSubtask(),
58 |                 getRuntimeContext().getNumberOfParallelSubtasks());
59 |         if (outputFormat instanceof HasRetryTimeout) {
60 |             retryTimeout = ((HasRetryTimeout) outputFormat).getRetryTimeout();
61 |         }
62 |         LOG.info(
63 |                 "Initialized TupleOutputFormatSinkFunction of {}/{} task.",
64 |                 getRuntimeContext().getIndexOfThisSubtask(),
65 |                 getRuntimeContext().getNumberOfParallelSubtasks());
66 |     }
67 | 
68 |     @Override
69 |     public void close() throws IOException {
70 |         LOG.info("Closing TupleOutputFormatSinkFunction.");
71 |         outputFormat.close();
72 |     }
73 | 
74 |     @Override
75 |     public void invoke(Tuple2<Boolean, IN> record) throws Exception {
76 |         outputFormat.writeRecord(record);
77 |     }
78 | 
79 |     public OutputFormat<Tuple2<Boolean, IN>> getOutputFormat() {
80 |         return outputFormat;
81 |     }
82 | 
83 |     /*
84 |      * If the OutputFormat implements Syncable, sync() is invoked on every checkpoint.
85 |      */
86 |     @Override
87 |     public List<Serializable> snapshotState(long checkpointId, long checkpointTimestamp) throws Exception {
88 |         if (outputFormat instanceof Syncable) {
89 |             long startSyncing = System.currentTimeMillis();
90 |             // Retry until successful or the retry timeout is exceeded
91 |             while (true) {
92 |                 try {
93 |                     ((Syncable) outputFormat).sync();
94 |                     break;
95 |                 } catch (IOException e) {
96 |                     LOG.error("Sync output format failed", e);
97 |                     try {
98 |                         Thread.sleep(RETRY_INTERVAL);
99 |                     } catch (InterruptedException e1) {
100 |                         // keep retrying; the timeout check below bounds the total wait
101 |                     }
102 |                 }
103 | 
104 |                 long retryingTimeCost = System.currentTimeMillis() - startSyncing;
105 |                 if (retryingTimeCost > retryTimeout) {
106 |                     throw new IOException("Output retry timeout.");
107 |                 }
108 |             }
109 |         }
110 |         return Collections.emptyList(); // ListCheckpointed should not return null
111 |     }
112 | 
113 |     /*
114 |      * No state to restore for this sink.
115 |      */
116 |     @Override
117 |     public void restoreState(List<Serializable> list) throws Exception {
118 |     }
119 | 
120 |     @Override
121 |     public String toString() {
122 |         return getClass().getSimpleName() + ":" + outputFormat.toString();
123 |     }
124 | }
125 | 
--------------------------------------------------------------------------------
/datahub-connector/src/main/java/com/alibaba/flink/connectors/datahub/table/DatahubTableSink.java:
--------------------------------------------------------------------------------
1 | /*
2 |  * Licensed to the Apache Software Foundation (ASF) under one
3 |  * or more contributor license agreements. See the NOTICE file
4 |  * distributed with this work for additional information
5 |  * regarding copyright ownership. The ASF licenses this file
6 |  * to you under the Apache License, Version 2.0 (the
7 |  * "License"); you may not use this file except in compliance
8 |  * with the License. You may obtain a copy of the License at
9 |  *
10 |  *     http://www.apache.org/licenses/LICENSE-2.0
11 |  *
12 |  * Unless required by applicable law or agreed to in writing, software
13 |  * distributed under the License is distributed on an "AS IS" BASIS,
14 |  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
15 |  * See the License for the specific language governing permissions and
16 |  * limitations under the License.
17 |  */
18 | 
19 | package com.alibaba.flink.connectors.datahub.table;
20 | 
21 | import org.apache.flink.api.common.io.OutputFormat;
22 | import org.apache.flink.api.common.typeinfo.TypeInformation;
23 | import org.apache.flink.api.java.typeutils.RowTypeInfo;
24 | import org.apache.flink.table.api.TableSchema;
25 | import org.apache.flink.table.descriptors.DescriptorProperties;
26 | import org.apache.flink.table.sinks.OutputFormatTableSink;
27 | import org.apache.flink.table.sinks.TableSink;
28 | import org.apache.flink.table.types.DataType;
29 | import org.apache.flink.types.Row;
30 | 
31 | import com.alibaba.flink.connectors.datahub.datastream.sink.DatahubOutputFormat;
32 | 
33 | import static com.alibaba.flink.connectors.datahub.table.DatahubDescriptorValidator.CONNECTOR_BATCH_SIZE;
34 | import static com.alibaba.flink.connectors.datahub.table.DatahubDescriptorValidator.CONNECTOR_BATCH_WRITE_TIMEOUT_IN_MILLS;
35 | import static com.alibaba.flink.connectors.datahub.table.DatahubDescriptorValidator.CONNECTOR_BUFFER_SIZE;
36 | import static com.alibaba.flink.connectors.datahub.table.DatahubDescriptorValidator.CONNECTOR_MAX_RETRY_TIMES;
37 | import static com.alibaba.flink.connectors.datahub.table.DatahubDescriptorValidator.CONNECTOR_RETRY_TIMEOUT_IN_MILLS;
38 | 
39 | /**
40 |  * Table Sink for Datahub.
41 |  */
42 | public class DatahubTableSink extends OutputFormatTableSink<Row> {
43 | 
44 |     private final String project;
45 |     private final String topic;
46 |     private final String accessId;
47 |     private final String accessKey;
48 |     private final String endpoint;
49 |     private final TableSchema schema;
50 |     private final DescriptorProperties prop;
51 | 
52 |     public DatahubTableSink(
53 |             String project,
54 |             String topic,
55 |             String accessId,
56 |             String accessKey,
57 |             String endpoint,
58 |             TableSchema schema,
59 |             DescriptorProperties prop) {
60 |         this.project = project;
61 |         this.topic = topic;
62 |         this.accessId = accessId;
63 |         this.accessKey = accessKey;
64 |         this.endpoint = endpoint;
65 |         this.schema = schema;
66 |         this.prop = prop;
67 |     }
68 | 
69 |     @Override
70 |     public DataType getConsumedDataType() {
71 |         return schema.toRowDataType();
72 |     }
73 | 
74 |     @Override
75 |     public TableSchema getTableSchema() {
76 |         return schema;
77 |     }
78 | 
79 |     @Override
80 |     public TableSink<Row> configure(String[] fieldNames, TypeInformation<?>[] fieldTypes) {
81 |         return new DatahubTableSink(project, topic, accessId, accessKey, endpoint, schema, prop);
82 |     }
83 | 
84 |     @Override
85 |     public OutputFormat<Row> getOutputFormat() {
86 |         RowTypeInfo flinkRowTypeInfo = new RowTypeInfo(schema.getFieldTypes(), schema.getFieldNames());
87 |         DatahubOutputFormat outputFormat = new DatahubOutputFormat(
88 |                 endpoint,
89 |                 project,
90 |                 topic,
91 |                 accessId,
92 |                 accessKey,
93 |                 flinkRowTypeInfo);
94 | 
95 |         if (prop.containsKey(CONNECTOR_BUFFER_SIZE)) {
96 |             outputFormat.setBufferSize(prop.getInt(CONNECTOR_BUFFER_SIZE));
97 |         }
98 | 
99 |         if (prop.containsKey(CONNECTOR_BATCH_SIZE)) {
100 |             outputFormat.setBatchSize(prop.getInt(CONNECTOR_BATCH_SIZE));
101 |         }
102 | 
103 |         if
(prop.containsKey(CONNECTOR_BATCH_WRITE_TIMEOUT_IN_MILLS)) { 104 | outputFormat.setBatchWriteTimeout(prop.getLong(CONNECTOR_BATCH_WRITE_TIMEOUT_IN_MILLS)); 105 | } 106 | if (prop.containsKey(CONNECTOR_RETRY_TIMEOUT_IN_MILLS)) { 107 | outputFormat.setRetryTimeoutInMills(prop.getInt(CONNECTOR_RETRY_TIMEOUT_IN_MILLS)); 108 | } 109 | 110 | if (prop.containsKey(CONNECTOR_MAX_RETRY_TIMES)) { 111 | outputFormat.setMaxRetryTimes(prop.getInt(CONNECTOR_MAX_RETRY_TIMES)); 112 | } 113 | 114 | outputFormat.setRecordResolver( 115 | new DatahubRowRecordResolver(flinkRowTypeInfo, project, topic, accessId, accessKey, endpoint)); 116 | 117 | return outputFormat; 118 | } 119 | } 120 | -------------------------------------------------------------------------------- /aliyun-connectors-common/src/test/java/com/alibaba/flink/connectors/common/PreAssignedInputSplitAssignerTest.java: -------------------------------------------------------------------------------- 1 | /* 2 | Licensed to the Apache Software Foundation (ASF) under one 3 | or more contributor license agreements. See the NOTICE file 4 | distributed with this work for additional information 5 | regarding copyright ownership. The ASF licenses this file 6 | to you under the Apache License, Version 2.0 (the 7 | "License"); you may not use this file except in compliance 8 | with the License. You may obtain a copy of the License at 9 | 10 | http://www.apache.org/licenses/LICENSE-2.0 11 | 12 | Unless required by applicable law or agreed to in writing, software 13 | distributed under the License is distributed on an "AS IS" BASIS, 14 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 15 | See the License for the specific language governing permissions and 16 | limitations under the License. 17 | */ 18 | 19 | package com.alibaba.flink.connectors.common; 20 | 21 | import org.apache.flink.core.io.InputSplit; 22 | import org.apache.flink.core.io.InputSplitAssigner; 23 | 24 | import com.alibaba.flink.connectors.common.source.AbstractParallelSourceBase; 25 | import org.junit.Test; 26 | 27 | import java.util.Collections; 28 | 29 | import static org.junit.Assert.assertEquals; 30 | import static org.junit.Assert.assertNull; 31 | 32 | /** 33 | * Unit test for {@link AbstractParallelSourceBase.PreAssignedInputSplitAssigner}. 
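 * <p>The fixture pre-assigns splits 0..5 so that task i owns i + 1 consecutive splits
 * (task 0 -> {0}, task 1 -> {1, 2}, task 2 -> {3, 4, 5}), and a split may only be
 * returned to the task that originally owned it.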
34 | */ 35 | public class PreAssignedInputSplitAssignerTest { 36 | private static final int NUM_TASKS = 3; 37 | private static final int NUM_SPLITS = (1 + NUM_TASKS) * NUM_TASKS / 2; 38 | 39 | @Test 40 | public void testInitializeAndAssign() { 41 | InputSplitAssigner assigner = getAssigner(); 42 | assertEquals(0, assigner.getNextInputSplit("", 0).getSplitNumber()); 43 | assertNull(assigner.getNextInputSplit("", 0)); 44 | assertEquals(1, assigner.getNextInputSplit("", 1).getSplitNumber()); 45 | assertEquals(2, assigner.getNextInputSplit("", 1).getSplitNumber()); 46 | assertNull(assigner.getNextInputSplit("", 1)); 47 | assertEquals(3, assigner.getNextInputSplit("", 2).getSplitNumber()); 48 | assertEquals(4, assigner.getNextInputSplit("", 2).getSplitNumber()); 49 | assertEquals(5, assigner.getNextInputSplit("", 2).getSplitNumber()); 50 | assertNull(assigner.getNextInputSplit("", 2)); 51 | } 52 | 53 | @Test (expected = IllegalArgumentException.class) 54 | public void testInvalidTaskIndexForGetNextInputSplit() { 55 | getAssigner().getNextInputSplit("", 3); 56 | } 57 | 58 | @Test (expected = IllegalArgumentException.class) 59 | public void testInvalidTaskIndexForReturnInputSplits() { 60 | getAssigner().returnInputSplit(Collections.emptyList(), 3); 61 | } 62 | 63 | @Test 64 | public void testReturnSplits() { 65 | InputSplitAssigner assigner = getAssigner(); 66 | assertEquals(3, assigner.getNextInputSplit("", 2).getSplitNumber()); 67 | assertEquals(4, assigner.getNextInputSplit("", 2).getSplitNumber()); 68 | 69 | // Return split 3 back. 70 | assigner.returnInputSplit(Collections.singletonList(new MockInputSplit(3)), 2); 71 | 72 | assertEquals(3, assigner.getNextInputSplit("", 2).getSplitNumber()); 73 | assertEquals(5, assigner.getNextInputSplit("", 2).getSplitNumber()); 74 | assertNull(assigner.getNextInputSplit("", 2)); 75 | } 76 | 77 | @Test (expected = IllegalStateException.class) 78 | public void testReturnSplitsToWrongTask() { 79 | InputSplitAssigner assigner = getAssigner(); 80 | assertEquals(3, assigner.getNextInputSplit("", 2).getSplitNumber()); 81 | assigner.returnInputSplit(Collections.singletonList(new MockInputSplit(3)), 1); 82 | } 83 | 84 | @Test (expected = IllegalStateException.class) 85 | public void testReturnUnassignedSplits() { 86 | InputSplitAssigner assigner = getAssigner(); 87 | assigner.returnInputSplit(Collections.singletonList(new MockInputSplit(3)), 1); 88 | } 89 | 90 | private InputSplitAssigner getAssigner() { 91 | InputSplit[] inputSplits = new InputSplit[NUM_SPLITS]; 92 | int[] taskInputSplitSize = new int[NUM_TASKS]; 93 | int[] taskInputSplitStartIndex = new int[NUM_TASKS]; 94 | for (int i = 0; i < NUM_SPLITS; i++) { 95 | inputSplits[i] = new MockInputSplit(i); 96 | } 97 | int currentSplitIndex = 0; 98 | for (int i = 0; i < NUM_TASKS; i++) { 99 | taskInputSplitSize[i] = i + 1; 100 | taskInputSplitStartIndex[i] = currentSplitIndex; 101 | currentSplitIndex += taskInputSplitSize[i]; 102 | } 103 | return new AbstractParallelSourceBase.PreAssignedInputSplitAssigner( 104 | inputSplits, taskInputSplitSize, taskInputSplitStartIndex); 105 | } 106 | 107 | private static class MockInputSplit implements InputSplit { 108 | private final int splitNumber; 109 | 110 | MockInputSplit(int splitNumber) { 111 | this.splitNumber = splitNumber; 112 | } 113 | 114 | @Override 115 | public int getSplitNumber() { 116 | return splitNumber; 117 | } 118 | } 119 | } 120 | -------------------------------------------------------------------------------- 
/sls-connector/src/test/java/com/alibaba/flink/connectors/sls/datastream/sink/SlsOutputFormatTest.java:
--------------------------------------------------------------------------------
1 | /*
2 |  * Licensed to the Apache Software Foundation (ASF) under one
3 |  * or more contributor license agreements. See the NOTICE file
4 |  * distributed with this work for additional information
5 |  * regarding copyright ownership. The ASF licenses this file
6 |  * to you under the Apache License, Version 2.0 (the
7 |  * "License"); you may not use this file except in compliance
8 |  * with the License. You may obtain a copy of the License at
9 |  *
10 |  *     http://www.apache.org/licenses/LICENSE-2.0
11 |  *
12 |  * Unless required by applicable law or agreed to in writing, software
13 |  * distributed under the License is distributed on an "AS IS" BASIS,
14 |  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
15 |  * See the License for the specific language governing permissions and
16 |  * limitations under the License.
17 |  */
18 | 
19 | package com.alibaba.flink.connectors.sls.datastream.sink;
20 | 
21 | import org.apache.flink.metrics.Meter;
22 | import org.apache.flink.types.Row;
23 | 
24 | import com.alibaba.flink.connectors.common.MetricUtils;
25 | import com.alibaba.flink.sls.shaded.com.google.common.util.concurrent.SettableFuture;
26 | import com.aliyun.openservices.aliyun.log.producer.LogProducer;
27 | import com.aliyun.openservices.aliyun.log.producer.Result;
28 | import com.aliyun.openservices.aliyun.log.producer.errors.ProducerException;
29 | import org.junit.Test;
30 | import org.mockito.Mockito;
31 | import org.powermock.reflect.Whitebox;
32 | 
33 | import java.io.IOException;
34 | import java.util.concurrent.ExecutorService;
35 | import java.util.concurrent.Executors;
36 | import java.util.concurrent.TimeUnit;
37 | import java.util.concurrent.atomic.AtomicLong;
38 | 
39 | import static org.junit.Assert.assertEquals;
40 | 
41 | /**
42 |  * Test for sls output format.
43 |  */
44 | public class SlsOutputFormatTest {
45 | 
46 |     @Test
47 |     public void testCommitFailed() {
48 |         SlsRecordResolver serializationSchema = Mockito.mock(SlsRecordResolver.class);
49 |         SlsOutputFormat outputFormat = new SlsOutputFormat(
50 |                 "", "", "", "", "", serializationSchema);
51 | 
52 |         SlsOutputFormat.SendFutureCallback callback = outputFormat.new SendFutureCallback();
53 |         callback.onFailure(new Exception("1"));
54 |         AtomicLong numCommitted = (AtomicLong) Whitebox.getInternalState(
55 |                 outputFormat, "numCommitted");
56 |         assertEquals(1, numCommitted.get());
57 |         RuntimeException callbackException = (RuntimeException) Whitebox.getInternalState(
58 |                 outputFormat, "callBackException");
59 |         assertEquals("1", callbackException.getCause().getMessage());
60 |     }
61 | 
62 |     @Test
63 |     public void testCommit() throws ProducerException, InterruptedException, IOException {
64 | 
65 |         SlsRecordResolver serializationSchema = Mockito.mock(SlsRecordResolver.class);
66 |         SlsOutputFormat outputFormat = new SlsOutputFormat(
67 |                 "", "", "", "test_project", "test_store", serializationSchema);
68 |         LogProducer producer = Mockito.mock(LogProducer.class);
69 |         LogProducerProvider producerProvider = Mockito.mock(LogProducerProvider.class);
70 |         Mockito.when(producerProvider.getClient()).thenReturn(producer);
71 | 
72 |         SettableFuture<Result> future = SettableFuture.create();
73 |         // Use any() instead of anyString() because in Mockito 2.x, anyString() does not match null any more,
74 |         // which may cause the test to fail.
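        // The SettableFuture stands in for the producer's asynchronous send() result,
        // letting the test control exactly when the callback fires (see future.set(...) below).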
75 | Mockito.when( 76 | producer.send( 77 | Mockito.eq("test_project"), 78 | Mockito.eq("test_store"), 79 | Mockito.any(), 80 | Mockito.any(), 81 | Mockito.any(), 82 | Mockito.anyList())).thenReturn(future); 83 | 84 | ExecutorService executor = Executors.newSingleThreadExecutor(); 85 | Whitebox.setInternalState(outputFormat, "logProducerProvider", producerProvider); 86 | Whitebox.setInternalState( 87 | outputFormat, "sendFutureCallback", outputFormat.new SendFutureCallback()); 88 | Whitebox.setInternalState(outputFormat, "executor", executor); 89 | Whitebox.setInternalState(outputFormat, "latencyGauge", Mockito.mock(MetricUtils.LatencyGauge.class)); 90 | Whitebox.setInternalState(outputFormat, "outTps", Mockito.mock(Meter.class)); 91 | 92 | Row record = new Row(3); 93 | record.setField(0, 100); 94 | record.setField(1, 1000); 95 | record.setField(2, "test"); 96 | outputFormat.writeRecord(record); 97 | AtomicLong numSent = (AtomicLong) Whitebox.getInternalState(outputFormat, "numSent"); 98 | AtomicLong numCommitted = (AtomicLong) Whitebox.getInternalState(outputFormat, "numCommitted"); 99 | assertEquals(1, numSent.get()); 100 | assertEquals(0, numCommitted.get()); 101 | 102 | // trigger call back. 103 | future.set(new Result(true, null, 0)); 104 | // wait call back finished. 105 | executor.awaitTermination(1, TimeUnit.SECONDS); 106 | assertEquals(1, numSent.get()); 107 | assertEquals(1, numCommitted.get()); 108 | } 109 | } 110 | -------------------------------------------------------------------------------- /aliyun-connectors-common/src/main/java/com/alibaba/flink/connectors/common/MetricUtils.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Licensed to the Apache Software Foundation (ASF) under one or more 3 | * contributor license agreements. See the NOTICE file distributed with 4 | * this work for additional information regarding copyright ownership. 5 | * The ASF licenses this file to You under the Apache License, Version 2.0 6 | * (the "License"); you may not use this file except in compliance with 7 | * the License. You may obtain a copy of the License at 8 | * 9 | * http://www.apache.org/licenses/LICENSE-2.0 10 | * 11 | * Unless required by applicable law or agreed to in writing, software 12 | * distributed under the License is distributed on an "AS IS" BASIS, 13 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 14 | * See the License for the specific language governing permissions and 15 | * limitations under the License. 16 | */ 17 | 18 | package com.alibaba.flink.connectors.common; 19 | 20 | import org.apache.flink.api.common.functions.RuntimeContext; 21 | import org.apache.flink.metrics.Counter; 22 | import org.apache.flink.metrics.Gauge; 23 | import org.apache.flink.metrics.Meter; 24 | import org.apache.flink.metrics.MeterView; 25 | import org.apache.flink.metrics.SimpleCounter; 26 | import org.apache.flink.util.StringUtils; 27 | 28 | /** 29 | * MetricUtils. 
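 * Helpers for registering connector metrics: the sink TPS/BPS meters are built as
 * {@link MeterView}s over simple counters with a 60 second rate window, plus a
 * latency gauge for sinks.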
30 |  */
31 | public class MetricUtils {
32 |     public static final String METRICS_TPS = "tps";
33 |     public static final String METRICS_DELAY = "delay";
34 |     public static final String METRICS_FETCHED_DELAY = "fetched_delay";
35 |     public static final String METRICS_NO_DATA_DELAY = "no_data_delay";
36 | 
37 |     public static final String METRICS_SOURCE_PROCESS_LATENCY = "sourceProcessLatency";
38 |     public static final String METRICS_SOURCE_PARTITION_LATENCY = "partitionLatency";
39 |     public static final String METRICS_SOURCE_PARTITION_COUNT = "partitionCount";
40 |     public static final String METRICS_PARSER_TPS = "parserTps";
41 |     public static final String METRICS_PARSER_SKIP_COUNTER = "parserSkipCount";
42 |     public static final String METRICS_IN_BPS = "inBps";
43 |     public static final String METRICS_BATCH_READ_COUNT = "batchReadCount";
44 |     public static final String METRICS_TAG_CONNECTOR_TYPE = "connector_type";
45 | 
46 |     public static final String METRIC_GROUP_SINK = "sink";
47 |     public static final String METRICS_SINK_IN_SKIP_COUNTER = "sinkSkipCount";
48 |     private static final String METRICS_SINK_IN_TPS = "inTps";
49 |     private static final String METRICS_SINK_OUT_TPS = "outTps";
50 |     private static final String METRICS_SINK_OUT_BPS = "outBps";
51 |     private static final String METRICS_SINK_OUT_LATENCY = "outLatency";
52 | 
53 |     public static Meter registerSinkInTps(RuntimeContext context) {
54 |         Counter tpsCounter = context.getMetricGroup().addGroup(METRIC_GROUP_SINK)
55 |                 .counter(METRICS_SINK_IN_TPS + "_counter", new SimpleCounter());
56 |         return context.getMetricGroup().addGroup(METRIC_GROUP_SINK)
57 |                 .meter(METRICS_SINK_IN_TPS, new MeterView(tpsCounter, 60));
58 |     }
59 | 
60 |     public static Meter registerOutTps(RuntimeContext context) {
61 |         Counter tpsCounter = context.getMetricGroup().addGroup(METRIC_GROUP_SINK)
62 |                 .counter(METRICS_SINK_OUT_TPS + "_counter", new SimpleCounter());
63 |         return context.getMetricGroup().addGroup(METRIC_GROUP_SINK).meter(METRICS_SINK_OUT_TPS, new MeterView(tpsCounter, 60));
64 |     }
65 | 
66 |     public static Meter registerOutBps(RuntimeContext context) {
67 |         return registerOutBps(context, null);
68 |     }
69 | 
70 |     public static Meter registerOutBps(RuntimeContext context, String connectorType) {
71 |         Counter bpsCounter = context.getMetricGroup().addGroup(METRIC_GROUP_SINK)
72 |                 .counter(METRICS_SINK_OUT_BPS + "_counter", new SimpleCounter());
73 |         String tag = "";
74 |         if (!StringUtils.isNullOrWhitespaceOnly(connectorType)) {
75 |             tag = ":" + METRICS_TAG_CONNECTOR_TYPE + "=" + connectorType;
76 |         }
77 |         return context.getMetricGroup().addGroup(METRIC_GROUP_SINK)
78 |                 .meter(METRICS_SINK_OUT_BPS + tag, new MeterView(bpsCounter, 60));
79 |     }
80 | 
81 |     public static LatencyGauge registerOutLatency(RuntimeContext context) {
82 |         return context.getMetricGroup().addGroup(METRIC_GROUP_SINK)
83 |                 .gauge(METRICS_SINK_OUT_LATENCY, new LatencyGauge());
84 |     }
85 | 
86 |     public static Counter registerSinkSkipCounter(RuntimeContext context, String groupName) {
87 |         return context.getMetricGroup().addGroup(METRIC_GROUP_SINK).addGroup(groupName)
88 |                 .counter(METRICS_SINK_IN_SKIP_COUNTER);
89 |     }
90 | 
91 |     /**
92 |      * LatencyGauge.
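     * Reports the average per-record latency of the last reported batch,
     * i.e. timeDelta / batchSize (milliseconds per record).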
93 |      */
94 |     public static class LatencyGauge implements Gauge<Double> {
95 |         private double value;
96 | 
97 |         public void report(long timeDelta, long batchSize) {
98 |             if (batchSize != 0) {
99 |                 this.value = (1.0 * timeDelta) / batchSize;
100 |             }
101 |         }
102 | 
103 |         public void report(long value) {
104 |             this.value = 1.0 * value;
105 |         }
106 | 
107 |         public void report(double value) {
108 |             this.value = value;
109 |         }
110 | 
111 |         @Override
112 |         public Double getValue() {
113 |             return value;
114 |         }
115 |     }
116 | }
117 | 
--------------------------------------------------------------------------------
/datahub-connector/src/main/java/com/alibaba/flink/connectors/datahub/table/DatahubTableFactory.java:
--------------------------------------------------------------------------------
1 | /*
2 |  * Licensed to the Apache Software Foundation (ASF) under one
3 |  * or more contributor license agreements. See the NOTICE file
4 |  * distributed with this work for additional information
5 |  * regarding copyright ownership. The ASF licenses this file
6 |  * to you under the Apache License, Version 2.0 (the
7 |  * "License"); you may not use this file except in compliance
8 |  * with the License. You may obtain a copy of the License at
9 |  *
10 |  *     http://www.apache.org/licenses/LICENSE-2.0
11 |  *
12 |  * Unless required by applicable law or agreed to in writing, software
13 |  * distributed under the License is distributed on an "AS IS" BASIS,
14 |  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
15 |  * See the License for the specific language governing permissions and
16 |  * limitations under the License.
17 |  */
18 | 
19 | package com.alibaba.flink.connectors.datahub.table;
20 | 
21 | import org.apache.flink.annotation.Internal;
22 | import org.apache.flink.table.api.TableSchema;
23 | import org.apache.flink.table.descriptors.DescriptorProperties;
24 | import org.apache.flink.table.factories.TableSinkFactory;
25 | import org.apache.flink.table.sinks.TableSink;
26 | import org.apache.flink.types.Row;
27 | 
28 | import java.util.ArrayList;
29 | import java.util.HashMap;
30 | import java.util.List;
31 | import java.util.Map;
32 | 
33 | import static com.alibaba.flink.connectors.datahub.table.DatahubDescriptorValidator.CONNECTOR_ACCESS_ID;
34 | import static com.alibaba.flink.connectors.datahub.table.DatahubDescriptorValidator.CONNECTOR_ACCESS_KEY;
35 | import static com.alibaba.flink.connectors.datahub.table.DatahubDescriptorValidator.CONNECTOR_BATCH_SIZE;
36 | import static com.alibaba.flink.connectors.datahub.table.DatahubDescriptorValidator.CONNECTOR_BATCH_WRITE_TIMEOUT_IN_MILLS;
37 | import static com.alibaba.flink.connectors.datahub.table.DatahubDescriptorValidator.CONNECTOR_BUFFER_SIZE;
38 | import static com.alibaba.flink.connectors.datahub.table.DatahubDescriptorValidator.CONNECTOR_ENDPOINT;
39 | import static com.alibaba.flink.connectors.datahub.table.DatahubDescriptorValidator.CONNECTOR_MAX_RETRY_TIMES;
40 | import static com.alibaba.flink.connectors.datahub.table.DatahubDescriptorValidator.CONNECTOR_PROJECT;
41 | import static com.alibaba.flink.connectors.datahub.table.DatahubDescriptorValidator.CONNECTOR_RETRY_TIMEOUT_IN_MILLS;
42 | import static com.alibaba.flink.connectors.datahub.table.DatahubDescriptorValidator.CONNECTOR_TOPIC;
43 | import static com.alibaba.flink.connectors.datahub.table.DatahubDescriptorValidator.CONNECTOR_TYPE_VALUE_DATAHUB;
44 | import static org.apache.flink.table.descriptors.ConnectorDescriptorValidator.CONNECTOR_PROPERTY_VERSION;
45 | import static org.apache.flink.table.descriptors.ConnectorDescriptorValidator.CONNECTOR_TYPE;
46 | import static org.apache.flink.table.descriptors.Schema.SCHEMA;
47 | import static org.apache.flink.table.descriptors.Schema.SCHEMA_NAME;
48 | import static org.apache.flink.table.descriptors.Schema.SCHEMA_TYPE;
49 | 
50 | /**
51 |  * Factory for creating a Datahub table sink.
52 |  */
53 | @Internal
54 | public class DatahubTableFactory implements TableSinkFactory<Row> {
55 | 
56 |     public String getConnectorTypeValue() {
57 |         return CONNECTOR_TYPE_VALUE_DATAHUB;
58 |     }
59 | 
60 |     @Override
61 |     public Map<String, String> requiredContext() {
62 |         Map<String, String> context = new HashMap<>();
63 |         context.put(CONNECTOR_TYPE, getConnectorTypeValue()); // datahub
64 |         context.put(CONNECTOR_PROPERTY_VERSION, "1"); // backwards compatibility
65 |         return context;
66 |     }
67 | 
68 |     @Override
69 |     public List<String> supportedProperties() {
70 |         List<String> properties = new ArrayList<>();
71 | 
72 |         properties.add(CONNECTOR_PROJECT);
73 |         properties.add(CONNECTOR_TOPIC);
74 |         properties.add(CONNECTOR_ACCESS_ID);
75 |         properties.add(CONNECTOR_ACCESS_KEY);
76 |         properties.add(CONNECTOR_ENDPOINT);
77 | 
78 |         properties.add(CONNECTOR_BATCH_SIZE);
79 |         properties.add(CONNECTOR_BUFFER_SIZE);
80 |         properties.add(CONNECTOR_BATCH_WRITE_TIMEOUT_IN_MILLS);
81 |         properties.add(CONNECTOR_RETRY_TIMEOUT_IN_MILLS);
82 |         properties.add(CONNECTOR_MAX_RETRY_TIMES);
83 | 
84 |         // schema
85 |         properties.add(SCHEMA + ".#." + SCHEMA_TYPE);
86 |         properties.add(SCHEMA + ".#." + SCHEMA_NAME);
87 | 
88 |         return properties;
89 |     }
90 | 
91 |     @Override
92 |     public TableSink<Row> createTableSink(Map<String, String> prop) {
93 |         DescriptorProperties params = new DescriptorProperties();
94 |         params.putProperties(prop);
95 | 
96 |         new DatahubDescriptorValidator().validate(params);
97 | 
98 |         TableSchema schema = params.getTableSchema(SCHEMA);
99 | 
100 |         String project = params.getString(CONNECTOR_PROJECT);
101 |         String topic = params.getString(CONNECTOR_TOPIC);
102 |         String accessId = params.getString(CONNECTOR_ACCESS_ID);
103 |         String accessKey = params.getString(CONNECTOR_ACCESS_KEY);
104 |         String endpoint = params.getString(CONNECTOR_ENDPOINT);
105 | 
106 |         return new DatahubTableSink(
107 |                 project,
108 |                 topic,
109 |                 accessId,
110 |                 accessKey,
111 |                 endpoint,
112 |                 schema,
113 |                 params
114 |         );
115 |     }
116 | }
117 | 
--------------------------------------------------------------------------------
/cloudhbase-connector/src/main/java/com/alibaba/flink/connectors/cloudhbase/example/CloudHBaseSinkFunctionExample.java:
--------------------------------------------------------------------------------
1 | /*
2 |  * Licensed to the Apache Software Foundation (ASF) under one or more
3 |  * contributor license agreements. See the NOTICE file distributed with
4 |  * this work for additional information regarding copyright ownership.
5 |  * The ASF licenses this file to You under the Apache License, Version 2.0
6 |  * (the "License"); you may not use this file except in compliance with
7 |  * the License. You may obtain a copy of the License at
8 |  *
9 |  *     http://www.apache.org/licenses/LICENSE-2.0
10 |  *
11 |  * Unless required by applicable law or agreed to in writing, software
12 |  * distributed under the License is distributed on an "AS IS" BASIS,
13 |  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 |  * See the License for the specific language governing permissions and
15 |  * limitations under the License.
16 |  */
17 | 
18 | package com.alibaba.flink.connectors.cloudhbase.example;
19 | 
20 | import org.apache.flink.api.java.tuple.Tuple3;
21 | import org.apache.flink.configuration.Configuration;
22 | import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
23 | import org.apache.flink.streaming.api.functions.source.RichSourceFunction;
24 | import org.apache.flink.types.Row;
25 | 
26 | import com.alibaba.flink.connectors.cloudhbase.sink.CloudHBaseRecordResolver;
27 | import com.alibaba.flink.connectors.cloudhbase.sink.CloudHBaseSinkFunction;
28 | import org.apache.hadoop.hbase.client.Delete;
29 | import org.apache.hadoop.hbase.client.Mutation;
30 | import org.apache.hadoop.hbase.client.Put;
31 | 
32 | import java.util.concurrent.ThreadLocalRandom;
33 | 
34 | /**
35 |  * Example for the usage of {@link CloudHBaseSinkFunction}.
36 |  */
37 | public class CloudHBaseSinkFunctionExample {
38 | 
39 |     public static void main(String[] args) throws Exception {
40 |         String zkQuorum = "sink.cloudhbase.connectors.flink.alibaba.com:2181";
41 |         String tableName = "tableName";
42 |         String columnFamily = "columnFamily";
43 |         int numColumns = 3;
44 |         Row columnNames = new Row(numColumns);
45 |         columnNames.setField(0, "a");
46 |         columnNames.setField(1, "b");
47 |         columnNames.setField(2, "c");
48 | 
49 |         StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
50 |         env.addSource(new RandomTupleSourceFunction(columnNames))
51 |                 .addSink(new CloudHBaseSinkFunction<>(
52 |                         zkQuorum, tableName, new TupleRecordResolver(columnFamily, columnNames)));
53 |         env.execute("CloudHBaseSinkFunctionExample"); // without execute() the pipeline never runs
54 |     }
55 | 
56 |     static class RandomTupleSourceFunction extends RichSourceFunction<Tuple3<Boolean, Long, Row>> {
57 | 
58 |         private final Row columnNames;
59 | 
60 |         private transient volatile boolean isCancelled;
61 | 
62 |         RandomTupleSourceFunction(Row columnNames) {
63 |             this.columnNames = columnNames;
64 |         }
65 | 
66 |         @Override
67 |         public void open(Configuration parameters) throws Exception {
68 |             super.open(parameters);
69 |             this.isCancelled = false;
70 |         }
71 | 
72 |         @Override
73 |         public void run(SourceContext<Tuple3<Boolean, Long, Row>> sourceContext) throws Exception {
74 |             while (!isCancelled) {
75 |                 sourceContext.collect(ThreadLocalRandom.current().nextBoolean() ? getPutTuple() : getDeleteTuple());
76 |             }
77 |         }
78 | 
79 |         private Tuple3<Boolean, Long, Row> getPutTuple() {
80 |             ThreadLocalRandom random = ThreadLocalRandom.current();
81 |             long rowKey = random.nextLong();
82 |             Row row = new Row(columnNames.getArity());
83 |             for (int i = 0; i < columnNames.getArity(); i++) {
84 |                 row.setField(i, random.nextDouble());
85 |             }
86 |             return Tuple3.of(true, rowKey, row);
87 |         }
88 | 
89 |         private Tuple3<Boolean, Long, Row> getDeleteTuple() {
90 |             ThreadLocalRandom random = ThreadLocalRandom.current();
91 |             long rowKey = random.nextLong();
92 |             Row row = new Row(columnNames.getArity());
93 |             for (int i = 0; i < columnNames.getArity(); i++) {
94 |                 if (random.nextBoolean()) {
95 |                     row.setField(i, true); // non-null marks this column for deletion
96 |                 }
97 |             }
98 |             return Tuple3.of(false, rowKey, row);
99 |         }
100 | 
101 |         @Override
102 |         public void close() throws Exception {
103 |             super.close();
104 |         }
105 | 
106 |         @Override
107 |         public void cancel() {
108 |             this.isCancelled = true;
109 |         }
110 |     }
111 | 
112 |     /**
113 |      * A resolver for a single column family.
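     * <p>Records are Tuple3s: f0 selects the mutation type (true = Put, false = Delete),
     * f1 is the row key, and f2 carries the column values for a Put, or non-null markers
     * for the columns to delete.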
114 |      */
115 |     static class TupleRecordResolver implements CloudHBaseRecordResolver<Tuple3<Boolean, Long, Row>> {
116 | 
117 |         private final String columnFamily;
118 | 
119 |         private final Row columnNames;
120 | 
121 |         public TupleRecordResolver(String columnFamily, Row columnNames) {
122 |             this.columnFamily = columnFamily;
123 |             this.columnNames = columnNames;
124 |         }
125 | 
126 |         @Override
127 |         public String getRowKey(Tuple3<Boolean, Long, Row> record) {
128 |             return record.f1.toString();
129 |         }
130 | 
131 |         @Override
132 |         public Mutation getMutation(Tuple3<Boolean, Long, Row> record) {
133 |             if (record.f0) {
134 |                 // Put mutation
135 |                 Put put = new Put(record.f1.toString().getBytes());
136 |                 Row row = record.f2;
137 |                 for (int i = 0; i < columnNames.getArity(); i++) {
138 |                     Object object = row.getField(i);
139 |                     if (object != null) {
140 |                         put.addColumn(columnFamily.getBytes(),
141 |                                 columnNames.getField(i).toString().getBytes(),
142 |                                 object.toString().getBytes());
143 |                     }
144 |                 }
145 |                 return put;
146 |             } else {
147 |                 // Delete mutation
148 |                 Delete delete = new Delete(record.f1.toString().getBytes());
149 |                 Row row = record.f2;
150 |                 for (int i = 0; i < columnNames.getArity(); i++) {
151 |                     Object object = row.getField(i);
152 |                     if (object != null) {
153 |                         delete.addColumn(columnFamily.getBytes(),
154 |                                 columnNames.getField(i).toString().getBytes());
155 |                     }
156 |                 }
157 |                 return delete;
158 |             }
159 |         }
160 |     }
161 | }
162 | 
--------------------------------------------------------------------------------
/aliyun-connectors-common/src/main/java/com/alibaba/flink/connectors/common/util/RetryUtils.java:
--------------------------------------------------------------------------------
1 | /*
2 |  *
3 |  * Licensed to the Apache Software Foundation (ASF) under one or more
4 |  * contributor license agreements. See the NOTICE file distributed with
5 |  * this work for additional information regarding copyright ownership.
6 |  * The ASF licenses this file to You under the Apache License, Version 2.0
7 |  * (the "License"); you may not use this file except in compliance with
8 |  * the License. You may obtain a copy of the License at
9 |  *
10 |  *     http://www.apache.org/licenses/LICENSE-2.0
11 |  *
12 |  * Unless required by applicable law or agreed to in writing, software
13 |  * distributed under the License is distributed on an "AS IS" BASIS,
14 |  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
15 |  * See the License for the specific language governing permissions and
16 |  * limitations under the License.
17 |  *
18 |  */
19 | 
20 | package com.alibaba.flink.connectors.common.util;
21 | 
22 | import org.slf4j.Logger;
23 | import org.slf4j.LoggerFactory;
24 | 
25 | import javax.annotation.Nonnull;
26 | 
27 | import java.util.List;
28 | import java.util.concurrent.Callable;
29 | import java.util.concurrent.Future;
30 | import java.util.concurrent.SynchronousQueue;
31 | import java.util.concurrent.ThreadPoolExecutor;
32 | import java.util.concurrent.TimeUnit;
33 | 
34 | /**
35 |  * This class is heavily inspired by DataX's RetryUtil.
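 *
 * <p>A minimal usage sketch (fetchSomething() is a hypothetical Callable):
 * <pre>{@code
 * String result = RetryUtils.executeWithRetry(
 *         () -> fetchSomething(), // the operation to retry
 *         3,                      // at most 3 attempts
 *         1000L,                  // base sleep between attempts, in ms
 *         true);                  // exponential backoff, sleep capped at 256 s
 * }</pre>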
36 |  */
37 | public final class RetryUtils {
38 | 
39 |     private static final Logger LOG = LoggerFactory.getLogger(RetryUtils.class);
40 | 
41 |     private static final long MAX_SLEEP_MILLISECOND = 256 * 1000;
42 | 
43 |     public static <T> T executeWithRetry(
44 |             Callable<T> callable,
45 |             int retryTimes,
46 |             long sleepTimeInMilliSecond,
47 |             boolean exponential) throws Exception {
48 |         Retry retry = new Retry();
49 |         return retry.doRetry(callable, retryTimes, sleepTimeInMilliSecond, exponential, null);
50 |     }
51 | 
52 |     public static <T> T executeWithRetry(
53 |             Callable<T> callable,
54 |             int retryTimes,
55 |             long sleepTimeInMilliSecond,
56 |             boolean exponential,
57 |             List<Class<?>> retryExceptionClasses) throws Exception {
58 |         Retry retry = new Retry();
59 |         return retry.doRetry(callable, retryTimes, sleepTimeInMilliSecond, exponential, retryExceptionClasses);
60 |     }
61 | 
62 |     public static <T> T asyncExecuteWithRetry(
63 |             Callable<T> callable,
64 |             int retryTimes,
65 |             long sleepTimeInMilliSecond,
66 |             boolean exponential,
67 |             long timeoutMs,
68 |             ThreadPoolExecutor executor) throws Exception {
69 |         Retry retry = new AsyncRetry(timeoutMs, executor);
70 |         return retry.doRetry(callable, retryTimes, sleepTimeInMilliSecond, exponential, null);
71 |     }
72 | 
73 |     public static ThreadPoolExecutor createThreadPoolExecutor() {
74 |         return new ThreadPoolExecutor(0, 5, 60L, TimeUnit.SECONDS, new SynchronousQueue<Runnable>());
75 |     }
76 | 
77 |     private static class Retry {
78 | 
79 |         public <T> T doRetry(
80 |                 @Nonnull Callable<T> callable, int retryTimes, long sleepTimeInMilliSecond, boolean exponential,
81 |                 List<Class<?>> retryExceptionClasses)
82 |                 throws Exception {
83 | 
84 |             if (retryTimes < 1) {
85 |                 throw new IllegalArgumentException(String.format(
86 |                         "retryTimes[%d] cannot be less than 1!", retryTimes));
87 |             }
88 | 
89 |             Exception saveException = null;
90 |             for (int i = 0; i < retryTimes; i++) {
91 |                 try {
92 |                     return call(callable);
93 |                 } catch (Exception e) {
94 |                     saveException = e;
95 |                     if (i == 0) {
96 |                         LOG.error(String.format("Exception when calling callable, exception message: %s", saveException
97 |                                 .getMessage()), saveException);
98 |                     }
99 | 
100 |                     if (null != retryExceptionClasses && !retryExceptionClasses.isEmpty()) {
101 |                         boolean needRetry = false;
102 |                         for (Class<?> eachExceptionClass : retryExceptionClasses) {
103 |                             if (eachExceptionClass == e.getClass()) {
104 |                                 needRetry = true;
105 |                                 break;
106 |                             }
107 |                         }
108 |                         if (!needRetry) {
109 |                             throw saveException;
110 |                         }
111 |                     }
112 | 
113 |                     if (i + 1 < retryTimes && sleepTimeInMilliSecond > 0) {
114 |                         long startTime = System.currentTimeMillis();
115 | 
116 |                         long timeToSleep;
117 |                         if (exponential) {
118 |                             timeToSleep = sleepTimeInMilliSecond * (long) Math.pow(2, i);
119 |                             if (timeToSleep >= MAX_SLEEP_MILLISECOND) {
120 |                                 timeToSleep = MAX_SLEEP_MILLISECOND;
121 |                             }
122 |                         } else {
123 |                             timeToSleep = sleepTimeInMilliSecond;
124 |                             if (timeToSleep >= MAX_SLEEP_MILLISECOND) {
125 |                                 timeToSleep = MAX_SLEEP_MILLISECOND;
126 |                             }
127 |                         }
128 | 
129 |                         try {
130 |                             Thread.sleep(timeToSleep);
131 |                         } catch (InterruptedException ignored) {
132 |                         }
133 | 
134 |                         long realTimeSleep = System.currentTimeMillis() - startTime;
135 | 
136 |                         LOG.error(String.format(
137 |                                 "Exception when calling callable, retry %s. Planned sleep [%s] ms,"
138 |                                         + " actual sleep [%s] ms, exception message: [%s]",
139 |                                 i + 1,
140 |                                 timeToSleep,
141 |                                 realTimeSleep,
142 |                                 e.getMessage()));
143 | 
144 |                     }
145 |                 }
146 |             }
147 |             throw saveException;
148 |         }
149 | 
150 |         protected <T> T call(Callable<T> callable) throws Exception {
151 |             return callable.call();
152 |         }
153 |     }
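    // AsyncRetry changes only how a single attempt runs: each call is submitted to a
    // thread pool and bounded by timeoutMs, so a hung attempt is cancelled instead of
    // blocking the retry loop forever.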

    private static class AsyncRetry extends Retry {

        private long timeoutMs;
        private ThreadPoolExecutor executor;

        public AsyncRetry(long timeoutMs, ThreadPoolExecutor executor) {
            this.timeoutMs = timeoutMs;
            this.executor = executor;
        }

        @Override
        protected <T> T call(Callable<T> callable) throws Exception {
            // Run the callable on the executor so a single attempt can be bounded by timeoutMs.
            Future<T> future = executor.submit(callable);
            try {
                return future.get(timeoutMs, TimeUnit.MILLISECONDS);
            } catch (Exception e) {
                LOG.warn("Try once failed", e);
                throw e;
            } finally {
                if (!future.isDone()) {
                    future.cancel(true);
                    LOG.warn("Try once task not done, cancel it, active count: " + executor.getActiveCount());
                }
            }
        }
    }

}
--------------------------------------------------------------------------------
/tools/maven/scalastyle-config.xml:
--------------------------------------------------------------------------------
(Scalastyle standard configuration; the XML markup of this file did not survive extraction and is omitted.)
--------------------------------------------------------------------------------
/aliyun-connectors-common/src/main/java/com/alibaba/flink/connectors/common/source/AbstractDynamicParallelSource.java:
--------------------------------------------------------------------------------
/*
 *
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 */

package com.alibaba.flink.connectors.common.source;

import org.apache.flink.api.common.state.ListState;
import org.apache.flink.api.common.state.ListStateDescriptor;
import org.apache.flink.api.common.typeinfo.TypeInformation;
import org.apache.flink.api.java.tuple.Tuple2;
import org.apache.flink.api.java.typeutils.PojoField;
import org.apache.flink.api.java.typeutils.PojoTypeInfo;
import org.apache.flink.api.java.typeutils.TypeExtractor;
import org.apache.flink.configuration.Configuration;
import org.apache.flink.core.io.InputSplit;
import org.apache.flink.runtime.state.FunctionInitializationContext;
import org.apache.flink.runtime.state.FunctionSnapshotContext;
import org.apache.flink.streaming.api.checkpoint.CheckpointedFunction;

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.io.IOException;
import java.io.Serializable;
import java.lang.reflect.ParameterizedType;
import java.util.ArrayList;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;

/**
 * AbstractDynamicParallelSource.
 * @param <T> the record type produced by this source
 * @param <CURSOR> the progress cursor type
 */
public abstract class AbstractDynamicParallelSource<T, CURSOR>
        extends AbstractParallelSourceBase<T, CURSOR>
        implements CheckpointedFunction {
    private static final Logger LOG = LoggerFactory.getLogger(AbstractDynamicParallelSource.class);
    private static final long serialVersionUID = -7848357196819780804L;
    private static final String SOURCE_STATE_NAME = "source_offsets_state_name";
    private transient ListState<InnerProgress<CURSOR>> unionInitialProgress;
    private transient List<InnerProgress<CURSOR>> allSplitsInCP;

    protected transient List<Tuple2<InputSplit, CURSOR>> reservedProgress;

    public AbstractDynamicParallelSource() {
        super();
    }

    public abstract List<Tuple2<InputSplit, CURSOR>> reAssignInputSplitsForCurrentSubTask(
            int numberOfParallelSubTasks,
            int indexOfThisSubTask,
            List<InnerProgress<CURSOR>> allSplitsInState) throws IOException;

    /**
     * Used to deal with situations where some state needs to be reserved.
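     * The default implementation reserves nothing and returns an empty list; override it when
     * splits that are no longer assigned to this subtask must keep their checkpointed progress.
     *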
     * @param numberOfParallelSubTasks
     * @param indexOfThisSubTask
     * @param allSplitsInState
     * @return the split list
     * @throws IOException
     */
    public List<Tuple2<InputSplit, CURSOR>> reserveInputSplitsForCurrentSubTask(
            int numberOfParallelSubTasks,
            int indexOfThisSubTask,
            List<InnerProgress<CURSOR>> allSplitsInState) throws IOException {
        List<Tuple2<InputSplit, CURSOR>> result = new ArrayList<>();
        return result;
    }

    protected void createParallelReader(Configuration config) throws IOException {
        if (isRecoryFromState()) {
            LOG.info("Recovering from state.");
            initialProgress = reAssignInputSplitsForCurrentSubTask(getRuntimeContext().getNumberOfParallelSubtasks(),
                    getRuntimeContext().getIndexOfThisSubtask(),
                    allSplitsInCP);
            reservedProgress = reserveInputSplitsForCurrentSubTask(getRuntimeContext().getNumberOfParallelSubtasks(),
                    getRuntimeContext().getIndexOfThisSubtask(),
                    allSplitsInCP);
        }
        super.createParallelReader(config);
    }

    @Override
    public void initializeState(FunctionInitializationContext context) throws Exception {
        LOG.info("initializeState");
        ParameterizedType p = (ParameterizedType) this.getClass().getGenericSuperclass();
        TypeInformation type0 = TypeExtractor.createTypeInfo(InputSplit.class);
        TypeInformation type1 = TypeExtractor.createTypeInfo(p.getActualTypeArguments()[1]);
        // TypeInformation<Tuple2<InputSplit, CURSOR>> stateTypeInfo = new TupleTypeInfo<>(type0, type1);
        List<PojoField> pojoFields = new ArrayList<>();
        pojoFields.add(new PojoField(InnerProgress.class.getField("inputSplit"), type0));
        pojoFields.add(new PojoField(InnerProgress.class.getField("cursor"), type1));
        TypeInformation stateTypeInfo = new PojoTypeInfo<>(InnerProgress.class, pojoFields);

        // ListStateDescriptor<Tuple2<InputSplit, CURSOR>> descriptor = new ListStateDescriptor<>(SOURCE_STATE_NAME, stateTypeInfo);
        ListStateDescriptor<InnerProgress<CURSOR>> descriptor = new ListStateDescriptor(SOURCE_STATE_NAME, stateTypeInfo);
        unionInitialProgress = context.getOperatorStateStore().getUnionListState(descriptor);
        LOG.info("Restoring state: {}", unionInitialProgress);
        allSplitsInCP = new ArrayList<>();
        if (context.isRestored()) {
            recoryFromState = true;
            for (InnerProgress<CURSOR> progress : unionInitialProgress.get()) {
                allSplitsInCP.add(new InnerProgress<>(progress.inputSplit, progress.cursor));
            }
        }
    }

    @Override
    public void snapshotState(FunctionSnapshotContext context) throws Exception {
        if (disableParallelRead) {
            return;
        }
        unionInitialProgress.clear();

        // partitions with progress
        Set<InputSplit> partitionWithState = new HashSet<>();
        for (Map.Entry<InputSplit, CURSOR> entry : parallelReader.getProgress().getProgress().entrySet()) {
            unionInitialProgress.add(new InnerProgress<>(entry.getKey(), entry.getValue()));
            partitionWithState.add(entry.getKey());
        }

        // partitions without progress
        for (Tuple2<InputSplit, CURSOR> entry : initialProgress) {
            if (!partitionWithState.contains(entry.f0)) {
                unionInitialProgress.add(new InnerProgress<>(entry.f0, entry.f1));
            }
        }

        if (null != reservedProgress) {
            // reserved partition progress
            for (Tuple2<InputSplit, CURSOR> entry : reservedProgress) {
                if (!partitionWithState.contains(entry.f0)) {
                    unionInitialProgress.add(new InnerProgress<>(entry.f0, entry.f1));
                }
            }
        }
    }

    /**
     * InnerProgress.
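     * A serializable pair of an {@link InputSplit} and its current cursor, kept in union list
     * state so that progress can be redistributed across subtasks on restore.
     *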
     * @param <CURSOR> the progress cursor type
     */
    public static class InnerProgress<CURSOR> implements Serializable {

        private static final long serialVersionUID = -7756210303146639268L;
        public InputSplit inputSplit;
        public CURSOR cursor;

        public InnerProgress() {
        }

        public InnerProgress(InputSplit inputSplit, CURSOR cursor) {
            this.inputSplit = inputSplit;
            this.cursor = cursor;
        }

        public InputSplit getInputSplit() {
            return inputSplit;
        }

        public InnerProgress<CURSOR> setInputSplit(InputSplit inputSplit) {
            this.inputSplit = inputSplit;
            return this;
        }

        public CURSOR getCursor() {
            return cursor;
        }

        public InnerProgress<CURSOR> setCursor(CURSOR cursor) {
            this.cursor = cursor;
            return this;
        }

        @Override
        public String toString() {
            return "InnerProgress{" + "inputSplit=" + inputSplit + ", cursor=" + cursor + '}';
        }
    }
}
--------------------------------------------------------------------------------
/aliyun-connectors-common/src/main/java/com/alibaba/flink/connectors/common/source/SourceUtils.java:
--------------------------------------------------------------------------------
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package com.alibaba.flink.connectors.common.source;

import org.apache.flink.api.java.tuple.Tuple2;
import org.apache.flink.util.Preconditions;

import java.io.ByteArrayInputStream;
import java.io.IOException;
import java.io.ObjectInputStream;
import java.sql.Timestamp;
import java.text.ParseException;
import java.text.SimpleDateFormat;
import java.util.LinkedList;
import java.util.List;

import static org.apache.flink.util.InstantiationUtil.serializeObject;

/**
 * SourceUtils.
 */
public class SourceUtils {

    /**
     * Mod assign: partition i is assigned to the consumer with index i % consumerCount.
     *
     * @param sourceName the source name
     * @param consumerCount the consumer count
     * @param consumerIndex the consumer index
     * @param partitions the partitions
     * @return the list
     */
    public static List<Integer> modAssign(
            String sourceName,
            int consumerCount,
            int consumerIndex,
            int partitions) {
        argumentsCheck(sourceName, consumerCount, consumerIndex, partitions);

        List<Integer> assignedPartitions = new LinkedList<>();
        for (int i = 0; i < partitions; i++) {
            if (i % consumerCount == consumerIndex) {
                assignedPartitions.add(i);
            }
        }
        return assignedPartitions;
    }

    /**
     * Division assign list.
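     * Each consumer receives one contiguous block of partitions; the partition count must be
     * an exact multiple of the consumer count.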
     *
     * @param sourceName the source name
     * @param consumerCount the consumer count
     * @param consumerIndex the consumer index
     * @param partitions the partitions
     * @return the list
     */
    public static List<Integer> divisionAssign(
            String sourceName,
            int consumerCount,
            int consumerIndex,
            int partitions) {
        argumentsCheck(sourceName, consumerCount, consumerIndex, partitions);
        Preconditions.checkState(partitions % consumerCount == 0);

        List<Integer> assignedPartitions = new LinkedList<>();

        int assignedPartitionCount = partitions / consumerCount;
        int startIndex = assignedPartitionCount * consumerIndex;
        for (int i = 0; i < assignedPartitionCount; i++) {
            assignedPartitions.add(startIndex + i);
        }
        return assignedPartitions;
    }

    /**
     * Division mod assign list: like division assign, but the remainder partitions that do not
     * divide evenly are handed out one per consumer, lowest consumer indices first.
     *
     * @param sourceName the source name
     * @param consumerCount the consumer count
     * @param consumerIndex the consumer index
     * @param partitions the partitions
     * @return the list
     */
    public static List<Integer> divisionModAssign(
            String sourceName,
            int consumerCount,
            int consumerIndex,
            int partitions) {
        argumentsCheck(sourceName, consumerCount, consumerIndex, partitions);

        List<Integer> assignedPartitions = new LinkedList<>();

        int assignedPartitionCount = partitions / consumerCount;
        int startIndex = assignedPartitionCount * consumerIndex;
        for (int i = 0; i < assignedPartitionCount; i++) {
            assignedPartitions.add(startIndex + i);
        }
        int modAssigned = assignedPartitionCount * consumerCount + consumerIndex;
        if (partitions > modAssigned) {
            assignedPartitions.add(modAssigned);
        }
        return assignedPartitions;
    }

    /**
     * Range [startIndex, count] assigned.
     * The last range may be larger than the others.
     *
     * @param sourceName the source name
     * @param consumerCount the consumer count
     * @param consumerIndex the consumer index
     * @param totalRecordCount the total record count
     * @return the start index and count
     */
    public static Tuple2<Long, Long> rangeAssign(
            String sourceName,
            int consumerCount,
            int consumerIndex,
            long totalRecordCount) {
        argumentsCheck(sourceName, consumerCount, consumerIndex);

        if (totalRecordCount <= consumerIndex) {
            // Do not assign for this consumer.
            return null;
        }

        long assignedPartitionCount = totalRecordCount / consumerCount;
        if (assignedPartitionCount == 0) {
            return new Tuple2<>((long) consumerIndex, 1L);
        }
        long startIndex = assignedPartitionCount * consumerIndex;
        long count = consumerIndex + 1 == consumerCount ? totalRecordCount - startIndex : assignedPartitionCount;
        return new Tuple2<>(startIndex, count);
    }

    /**
     * Parse a date string into milliseconds since the epoch.
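     * The format string must be a valid {@link SimpleDateFormat} pattern, e.g. "yyyy-MM-dd HH:mm:ss".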
     *
     * @param formatString the format string
     * @param dateString the date string
     * @return the number of milliseconds since January 1, 1970, 00:00:00 GMT
     * @throws ParseException the parse exception
     */
    public static Long parseDateString(
            String formatString,
            String dateString) throws ParseException {
        SimpleDateFormat simpleDateFormat = new SimpleDateFormat(formatString);
        return simpleDateFormat.parse(dateString).getTime();
    }

    public static String toDateString(
            String formatString,
            Long ts) {
        Timestamp timestamp = new Timestamp(ts);
        SimpleDateFormat simpleDateFormat = new SimpleDateFormat(formatString);
        return simpleDateFormat.format(timestamp);
    }

    /**
     * Arguments check.
     *
     * @param sourceName the source name
     * @param consumerCount the consumer count
     * @param consumerIndex the consumer index
     * @param partitions the partitions
     */
    protected static void argumentsCheck(
            String sourceName,
            int consumerCount,
            int consumerIndex,
            int partitions) {
        argumentsCheck(sourceName, consumerCount, consumerIndex);

        Preconditions.checkArgument(partitions > 0,
                "Source: [" + sourceName + "], partition count: " + partitions + " must be more than 0.");
        Preconditions.checkState(partitions >= consumerCount,
                "Source: [" + sourceName + "], partition count: " + partitions + " is less than consumer count: " +
                        consumerCount + ". Please adjust the Data Source parallelism in the resource " +
                        "configuration so that it is no greater than " + partitions + ".");
    }

    /**
     * Arguments check.
     *
     * @param sourceName the source name
     * @param consumerCount the consumer count
     * @param consumerIndex the consumer index
     */
    protected static void argumentsCheck(
            String sourceName,
            int consumerCount,
            int consumerIndex) {
        Preconditions.checkArgument(consumerCount > 0,
                "Source: [" + sourceName + "], Consumer count: " + consumerCount + " must be more than 0.");
        Preconditions.checkElementIndex(consumerIndex, consumerCount,
                "Source: [" + sourceName + "], Consumer index: " + consumerIndex + " is out of range of consumer count: " + consumerCount);
    }

    public static <T> T deserializeObject(byte[] bytes) throws IOException, ClassNotFoundException {
        ObjectInputStream ois = new ObjectInputStream(new ByteArrayInputStream(bytes));
        T ret = (T) ois.readObject();
        ois.close();
        return ret;
    }

    public static <T> T cloneObject(T src) throws IOException, ClassNotFoundException {
        return deserializeObject(serializeObject(src));
    }
}
--------------------------------------------------------------------------------
/sls-connector/src/main/java/com/alibaba/flink/connectors/sls/datastream/source/SlsSourceFunction.java:
--------------------------------------------------------------------------------
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package com.alibaba.flink.connectors.sls.datastream.source;

import org.apache.flink.api.java.tuple.Tuple2;
import org.apache.flink.configuration.Configuration;
import org.apache.flink.core.io.InputSplit;

import com.alibaba.flink.connectors.common.reader.RecordReader;
import com.alibaba.flink.connectors.common.source.AbstractDynamicParallelSource;
import com.aliyun.openservices.log.common.LogGroupData;
import com.aliyun.openservices.log.common.Shard;

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.io.IOException;
import java.util.ArrayList;
import java.util.Collections;
import java.util.Comparator;
import java.util.LinkedList;
import java.util.List;

/**
 * SlsSourceFunction.
 */
public class SlsSourceFunction extends AbstractDynamicParallelSource<List<LogGroupData>, String> {
    private static final long serialVersionUID = 6289824294498842746L;
    private static final Logger LOG = LoggerFactory.getLogger(SlsSourceFunction.class);

    public static final String NEW_SLS_START_FLAG = "new_sls_start_flag";

    protected String endPoint = null;
    protected String accessKeyId = null;
    protected String accessKeySecret = null;
    protected String project = null;
    protected String logStore = null;
    private String consumerGroup;
    private Configuration properties;
    protected int maxRetryTime = 3;
    private int batchGetSize = 10;
    private int startInSec = 0;
    private int stopInSec = Integer.MAX_VALUE;
    private boolean directMode = false;
    private List<Shard> initShardList = new ArrayList<>();

    private transient SlsClientProxy clientProxy = null;

    public SlsSourceFunction(
            String endPoint,
            String accessKeyId,
            String accessKeySecret,
            String project,
            String logStore,
            long startInMs,
            long stopInMs,
            int maxRetryTime,
            int batchGetSize,
            Configuration properties,
            String consumerGroup) throws Exception {
        this.endPoint = endPoint;
        this.accessKeyId = accessKeyId;
        this.accessKeySecret = accessKeySecret;
        this.project = project;
        this.logStore = logStore;
        this.properties = properties;
        this.consumerGroup = consumerGroup;
        this.maxRetryTime = maxRetryTime;
        this.batchGetSize = batchGetSize;
        this.startInSec = (int) (startInMs / 1000);
        this.stopInSec =
                stopInMs / 1000 > Integer.MAX_VALUE ? Integer.MAX_VALUE : (int) (stopInMs / 1000);
        initShardList();
        init();
    }

    public SlsSourceFunction(
            String endPoint,
            Configuration properties,
            String project,
            String logStore,
            long startInMs,
            long stopInMs,
            int maxRetryTime,
            int batchGetSize,
            String consumerGroup
    ) throws Exception {
        this.endPoint = endPoint;
        this.project = project;
        this.logStore = logStore;
        this.properties = properties;
        this.consumerGroup = consumerGroup;
        this.maxRetryTime = maxRetryTime;
        this.batchGetSize = batchGetSize;
        this.startInSec = (int) (startInMs / 1000);
        this.stopInSec =
                stopInMs / 1000 > Integer.MAX_VALUE ? Integer.MAX_VALUE : (int) (stopInMs / 1000);
        initShardList();
        init();
    }

    private SlsClientProxy getClientProxy() {
        if (clientProxy == null) {
            this.clientProxy = new SlsClientProxy(endPoint, accessKeyId, accessKeySecret, project, logStore, consumerGroup, properties);
            this.clientProxy.setDirectMode(directMode);
        }
        return clientProxy;
    }

    private void init() {
        /*
         * Once created, the consumer group is persisted on the server side.
         * Create the consumer group first; if it already exists, check whether its
         * configuration is consistent with the existing one.
         */
        getClientProxy().ensureConsumerGroupCreated();
    }

    @Override
    public RecordReader<List<LogGroupData>, String> createReader(Configuration config) throws IOException {
        return new SlsRecordReader(
                endPoint,
                accessKeyId,
                project,
                logStore,
                startInSec,
                stopInSec,
                maxRetryTime,
                batchGetSize,
                initShardList,
                consumerGroup,
                getClientProxy());
    }

    @Override
    public InputSplit[] createInputSplitsForCurrentSubTask(
            int numberOfParallelSubTasks, int indexOfThisSubTask) throws IOException {
        List<Shard> subscribedPartitions =
                modAssign(numberOfParallelSubTasks, indexOfThisSubTask);

        SlsInputSplit[] inputSplits = new SlsInputSplit[subscribedPartitions.size()];
        int i = 0;
        for (Shard shard : subscribedPartitions) {
            inputSplits[i++] = new SlsInputSplit(shard.GetShardId());
        }
        return inputSplits;
    }

    @Override
    public List<Tuple2<InputSplit, String>> reAssignInputSplitsForCurrentSubTask(
            int numberOfParallelSubTasks, int indexOfThisSubTask, List<InnerProgress<String>> allSplitsInState)
            throws IOException {
        List<Tuple2<InputSplit, String>> initialProgress = new ArrayList<>();
        List<Shard> subscribedPartitions = modAssign(numberOfParallelSubTasks, indexOfThisSubTask);
        for (Shard shard : subscribedPartitions) {
            boolean existedBefore = false;
            for (InnerProgress<String> progress : allSplitsInState) {
                if (shard.GetShardId() == progress.getInputSplit().getSplitNumber()) {
                    initialProgress.add(new Tuple2<>(progress.getInputSplit(), progress.getCursor()));
                    existedBefore = true;
                    break;
                }
            }
            if (!existedBefore) {
                // A newly added shard: mark it so consumption starts from the beginning of the shard.
                initialProgress.add(Tuple2.of(new SlsInputSplit(shard.GetShardId()), NEW_SLS_START_FLAG));
            }
        }

        return initialProgress;
    }

    @Override
    public List<String> getPartitionList() throws Exception {
        List<String> partitions = new ArrayList<>();
        List<Shard> shards = getClientProxy().listShards();
        for (Shard shard : shards) {
            partitions.add("" + shard.GetShardId());
        }
        return partitions;
    }

    @Override
    public void open(Configuration config) throws IOException {
        initShardList();
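        // The shard list was refreshed above; super.open() then sets up the parallel reader over it.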
        super.open(config);
        LOG.info("Source initialized successfully.");
    }

    @Override
    public void close() throws IOException {
        super.close();
    }

    private void initShardList() {
        if (null != initShardList) {
            try {
                initShardList = getClientProxy().listShards();
                Collections.sort(initShardList, new Comparator<Shard>() {
                    @Override
                    public int compare(Shard o1, Shard o2) {
                        return o1.GetShardId() - o2.GetShardId();
                    }
                });
            } catch (Exception e) {
                throw new RuntimeException("Failed to list shards of " + project + "." + logStore, e);
            }
        }
    }

    public SlsSourceFunction setDirectMode(boolean directMode) {
        if (clientProxy != null) {
            clientProxy.setDirectMode(directMode);
        }
        this.directMode = directMode;
        return this;
    }

    private List<Shard> modAssign(
            int consumerCount,
            int consumerIndex) {
        List<Shard> assignedShards = new LinkedList<>();

        for (Shard shard : initShardList) {
            if (shard.GetShardId() % consumerCount == consumerIndex) {
                assignedShards.add(shard);
            }
        }
        return assignedShards;
    }

    @Override
    public String toString() {
        return String.format("Sls Source from %s.%s", project, logStore);
    }
}
--------------------------------------------------------------------------------
/aliyun-connectors-common/src/main/java/com/alibaba/flink/connectors/common/sts/AbstractClientProvider.java:
--------------------------------------------------------------------------------
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package com.alibaba.flink.connectors.common.sts;

import org.apache.flink.configuration.Configuration;

import org.apache.flink.shaded.guava18.com.google.common.cache.CacheBuilder;
import org.apache.flink.shaded.guava18.com.google.common.cache.CacheLoader;
import org.apache.flink.shaded.guava18.com.google.common.cache.LoadingCache;

import com.alibaba.flink.connectors.common.conf.BlinkOptions;
import com.aliyuncs.sts.model.v20150401.AssumeRoleResponse;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.util.concurrent.ExecutionException;
import java.util.concurrent.TimeUnit;

/**
 * A provider that automatically refreshes an STS temporary account.
 * Implement produceNormalClient to connect to the remote server with a normal Aliyun account,
 * and produceStsClient to connect with an STS temporary account.
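 * Credentials are cached and re-requested once they are older than the configured expiry
 * (stsExpireSeconds); clients are rebuilt whenever the cached credentials change.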
 */
public abstract class AbstractClientProvider<T> {
    private static Logger logger = LoggerFactory.getLogger(AbstractClientProvider.class);
    private String accessId = null;
    private String accessKey = null;
    private String stsRoleArn = null;
    private String stsAccessId = null;
    private String stsAccessKey = null;
    private String stsSessionName = null;
    private String stsAssumeRoleFor = null;
    private int stsExpireSeconds = 86400;
    private boolean useSts = false;
    private static LoadingCache<String, InnerStsIdentity> cacheArnResponse = null;
    private static long lastCertificationUpdateTime = 0;
    private static Object mutex = new Object();
    protected T client;
    private long lastUpdateTime = 0;

    public AbstractClientProvider(String accessId, String accessKey) {
        this.accessId = accessId;
        this.accessKey = accessKey;
        useSts = false;
    }

    /**
     * Internal class for the STS identity loader.
     */
    public static class StsIdentityLoader extends CacheLoader<String, InnerStsIdentity> {

        private String stsAccessId;
        private String stsAccessKey;
        private String stsRoleArn;
        private String stsSessionName;
        private String stsAssumeRoleFor;
        private Configuration properties;

        public StsIdentityLoader(AbstractClientProvider provider, Configuration properties) {
            this.stsAccessId = provider.stsAccessId;
            this.stsAccessKey = provider.stsAccessKey;
            this.stsRoleArn = provider.stsRoleArn;
            this.stsSessionName = provider.stsSessionName;
            this.stsAssumeRoleFor = provider.stsAssumeRoleFor;
            this.properties = new Configuration();
            this.properties.addAll(properties);
        }

        @Override
        public InnerStsIdentity load(String key) throws Exception {
            // Deliberately not logging the secret key: it is sensitive.
            logger.info("getAssumeRole with accessId " + stsAccessId + ", roleArn " + stsRoleArn +
                    ", stsSessionName " + stsSessionName);
            AssumeRoleResponse role = StsServiceRequest.assumeRoleWithServiceIdentity(
                    stsAccessId, stsAccessKey, stsRoleArn, stsSessionName, stsAssumeRoleFor, properties);
            return new InnerStsIdentity(role.getCredentials().getAccessKeyId(),
                    role.getCredentials().getAccessKeySecret(),
                    role.getCredentials().getSecurityToken());
        }

    }

    public AbstractClientProvider(Configuration properties) {
        this.stsRoleArn = properties.getString(BlinkOptions.STS.STS_ROLE_ARN);
        this.stsAccessId = properties.getString(BlinkOptions.STS.STS_ACCESS_ID);
        this.stsAccessKey = properties.getString(BlinkOptions.STS.STS_ACCESS_KEY);
        this.stsAssumeRoleFor = properties.getString(BlinkOptions.STS.STS_UID);
        this.stsSessionName = String.valueOf(System.currentTimeMillis());
        this.stsExpireSeconds = properties.getInteger(BlinkOptions.STS.STS_ROLEARN_UPDATE_SECONDS);
        useSts = true;
        synchronized (mutex) {
            if (cacheArnResponse == null) {
                cacheArnResponse = CacheBuilder.newBuilder().concurrencyLevel(5).initialCapacity(1).maximumSize(3)
                        .expireAfterWrite(stsExpireSeconds, TimeUnit.SECONDS).build(
                                new StsIdentityLoader(this, properties));
            }
        }
    }

    protected T produceClient() {
        T client;
        if (useSts) {
            InnerStsIdentity role = null;
            try {
                lastUpdateTime = System.currentTimeMillis();
                role = cacheArnResponse.get(StsConstants.STS_ROLE_RESPONSE_KEY);
            } catch (ExecutionException e) {
                logger.info("Caught ExecutionException, possibly too many concurrent calls.", e);
                throw new RuntimeException(e);
            }
            if (null == role) {
                throw new RuntimeException("Failed to get STS identity!");
            }
            client = produceStsClient(
                    role.getAccessKeyId(),
                    role.getAccessKeySecret(),
                    role.getSecurityToken());
        } else {
            client = produceNormalClient(accessId, accessKey);
        }

        return client;
    }

    /**
     * getClient with some options.
     *
     * @param forceReconnect If true, the provider produces a new client; the Aliyun account does not change.
     * @param forceRefresh If true and the current account is an STS temporary account, the provider requests
     *                     new STS credentials and produces a new client. If true and the current account is a
     *                     normal account, the provider just produces a new client.
     * @return the client.
     */
    public T getClient(boolean forceReconnect, boolean forceRefresh) {
        long nowTime = System.currentTimeMillis();
        if (useSts) {
            if (forceRefresh || lastCertificationUpdateTime == 0
                    || ((nowTime - lastCertificationUpdateTime) > (stsExpireSeconds * 1000))) {
                synchronized (mutex) {
                    if (forceRefresh || lastCertificationUpdateTime == 0
                            || ((nowTime - lastCertificationUpdateTime) > (stsExpireSeconds * 1000))) {
                        lastCertificationUpdateTime = nowTime;
                        cacheArnResponse.invalidate(StsConstants.STS_ROLE_RESPONSE_KEY);
                    }
                }
                closeClient();
                client = produceClient();
                return client;
            } else if (forceReconnect || lastUpdateTime < lastCertificationUpdateTime ||
                    client == null) {
                closeClient();
                client = produceClient();
                return client;
            } else {
                return client;
            }
        } else {
            if (forceRefresh || forceReconnect || client == null) {
                closeClient();
                client = produceClient();
                return client;
            } else {
                return client;
            }
        }
    }

    public T getClient(boolean forceReconnect) {
        return getClient(forceReconnect, false);
    }

    public T getClient() {
        return getClient(false, false);
    }

    protected void setClientNull() {
        client = null;
    }

    protected abstract void closeClient();

    protected abstract T produceNormalClient(String accessId, String accessKey);

    protected abstract T produceStsClient(String accessId, String accessKey, String securityToken);

    /**
     * InnerStsIdentity.
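     * Value object holding one set of temporary STS credentials: the access key id and secret,
     * the security token, and optionally the expiration and role ARN.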
     */
    public static class InnerStsIdentity {
        public String accessKeyId;
        public String accessKeySecret;
        public String securityToken;
        public String expiration;
        public String roleArn;

        public InnerStsIdentity() {
        }

        public InnerStsIdentity(String accessKeyId, String accessKeySecret, String securityToken) {
            this.accessKeyId = accessKeyId;
            this.accessKeySecret = accessKeySecret;
            this.securityToken = securityToken;
        }

        public InnerStsIdentity setAccessKeyId(String accessKeyId) {
            this.accessKeyId = accessKeyId;
            return this;
        }

        public InnerStsIdentity setAccessKeySecret(String accessKeySecret) {
            this.accessKeySecret = accessKeySecret;
            return this;
        }

        public InnerStsIdentity setSecurityToken(String securityToken) {
            this.securityToken = securityToken;
            return this;
        }

        public InnerStsIdentity setExpiration(String expiration) {
            this.expiration = expiration;
            return this;
        }

        public InnerStsIdentity setRoleArn(String roleArn) {
            this.roleArn = roleArn;
            return this;
        }

        public String getAccessKeyId() {
            return accessKeyId;
        }

        public String getAccessKeySecret() {
            return accessKeySecret;
        }

        public String getSecurityToken() {
            return securityToken;
        }

        public String getExpiration() {
            return expiration;
        }

        public String getRoleArn() {
            return roleArn;
        }
    }
}
--------------------------------------------------------------------------------