├── .github
├── CODEOWNERS
├── ISSUE_TEMPLATE.md
└── PULL_REQUEST_TEMPLATE.md
├── .gitignore
├── CHANGELOG.md
├── CODE_OF_CONDUCT.md
├── CONTRIBUTING.md
├── LICENSE
├── NOTICE
├── README.md
├── THIRD_PARTY_LICENSES
├── build.gradle
├── build.sh
├── build_using_gradle.sh
├── checkstyle.xml
├── lib
├── aws-java-sdk-redshift-arcadia-internal-1.0.jar
└── aws-java-sdk-redshift-internal-1.12.x.jar
├── pom.xml
├── settings.gradle
└── src
├── assembly
├── assembly.xml
└── dist.xml
└── main
├── checkstyle
├── checks.xml
├── pgjdbc-eclipse-java-google-style.xml
├── pgjdbc-intellij-java-google-style.xml
├── pgjdbc.importorder
└── suppressions.xml
├── feature
└── feature.xml
├── java
└── com
│ └── amazon
│ └── redshift
│ ├── AuthMech.java
│ ├── CredentialsHolder.java
│ ├── Driver.java
│ ├── INativePlugin.java
│ ├── IPlugin.java
│ ├── NativeTokenHolder.java
│ ├── RedshiftConnection.java
│ ├── RedshiftNotification.java
│ ├── RedshiftProperty.java
│ ├── RedshiftRefCursorResultSet.java
│ ├── RedshiftResultSetMetaData.java
│ ├── RedshiftStatement.java
│ ├── copy
│ ├── CopyDual.java
│ ├── CopyIn.java
│ ├── CopyManager.java
│ ├── CopyOperation.java
│ ├── CopyOut.java
│ ├── RedshiftCopyInputStream.java
│ └── RedshiftCopyOutputStream.java
│ ├── core
│ ├── BaseConnection.java
│ ├── BaseQueryKey.java
│ ├── BaseStatement.java
│ ├── ByteOptimizedUTF8Encoder.java
│ ├── CachedQuery.java
│ ├── CachedQueryCreateAction.java
│ ├── CallableQueryKey.java
│ ├── CharOptimizedUTF8Encoder.java
│ ├── CommandCompleteParser.java
│ ├── CompressedInputStream.java
│ ├── ConnectionFactory.java
│ ├── Encoding.java
│ ├── EncodingPredictor.java
│ ├── Field.java
│ ├── FixedLengthOutputStream.java
│ ├── IamHelper.java
│ ├── IdpAuthHelper.java
│ ├── JavaVersion.java
│ ├── JdbcCallParseInfo.java
│ ├── NativeAuthPluginHelper.java
│ ├── NativeQuery.java
│ ├── Notification.java
│ ├── Oid.java
│ ├── OptimizedUTF8Encoder.java
│ ├── ParameterList.java
│ ├── Parser.java
│ ├── PluginProfilesConfigFile.java
│ ├── Provider.java
│ ├── Query.java
│ ├── QueryExecutor.java
│ ├── QueryExecutorBase.java
│ ├── QueryWithReturningColumnsKey.java
│ ├── RedshiftBindException.java
│ ├── RedshiftJDBCSettings.java
│ ├── RedshiftStream.java
│ ├── ReplicationProtocol.java
│ ├── ResultCursor.java
│ ├── ResultHandler.java
│ ├── ResultHandlerBase.java
│ ├── ResultHandlerDelegate.java
│ ├── ServerVersion.java
│ ├── ServerlessIamHelper.java
│ ├── SetupQueryRunner.java
│ ├── SocketFactoryFactory.java
│ ├── SqlCommand.java
│ ├── SqlCommandType.java
│ ├── TransactionState.java
│ ├── Tuple.java
│ ├── TypeInfo.java
│ ├── Utils.java
│ ├── Version.java
│ ├── VisibleBufferedInputStream.java
│ └── v3
│ │ ├── BatchedQuery.java
│ │ ├── CompositeParameterList.java
│ │ ├── CompositeQuery.java
│ │ ├── ConnectionFactoryImpl.java
│ │ ├── CopyDualImpl.java
│ │ ├── CopyInImpl.java
│ │ ├── CopyOperationImpl.java
│ │ ├── CopyOutImpl.java
│ │ ├── CopyQueryExecutor.java
│ │ ├── DescribeRequest.java
│ │ ├── ExecuteRequest.java
│ │ ├── MessageLoopState.java
│ │ ├── Portal.java
│ │ ├── QueryExecutorImpl.java
│ │ ├── RedshiftMemoryUtils.java
│ │ ├── RedshiftRowsBlockingQueue.java
│ │ ├── SimpleParameterList.java
│ │ ├── SimpleQuery.java
│ │ ├── TypeTransferModeRegistry.java
│ │ ├── V3ParameterList.java
│ │ └── replication
│ │ ├── V3RedshiftReplicationStream.java
│ │ └── V3ReplicationProtocol.java
│ ├── ds
│ ├── RedshiftConnectionPoolDataSource.java
│ ├── RedshiftPooledConnection.java
│ ├── RedshiftPoolingDataSource.java
│ ├── RedshiftSimpleDataSource.java
│ └── common
│ │ ├── BaseDataSource.java
│ │ └── RedshiftObjectFactory.java
│ ├── fastpath
│ ├── Fastpath.java
│ └── FastpathArg.java
│ ├── geometric
│ ├── RedshiftBox.java
│ ├── RedshiftCircle.java
│ ├── RedshiftLine.java
│ ├── RedshiftLseg.java
│ ├── RedshiftPath.java
│ ├── RedshiftPoint.java
│ └── RedshiftPolygon.java
│ ├── gss
│ ├── GSSCallbackHandler.java
│ ├── GssAction.java
│ └── MakeGSS.java
│ ├── hostchooser
│ ├── CandidateHost.java
│ ├── GlobalHostStatusTracker.java
│ ├── HostChooser.java
│ ├── HostChooserFactory.java
│ ├── HostRequirement.java
│ ├── HostStatus.java
│ ├── MultiHostChooser.java
│ └── SingleHostChooser.java
│ ├── httpclient
│ └── log
│ │ └── IamCustomLogFactory.java
│ ├── jdbc
│ ├── AbstractBlobClob.java
│ ├── AutoSave.java
│ ├── BatchResultHandler.java
│ ├── BooleanTypeUtil.java
│ ├── CallableBatchResultHandler.java
│ ├── DataSource.java
│ ├── Driver.java
│ ├── EscapeSyntaxCallMode.java
│ ├── EscapedFunctions.java
│ ├── EscapedFunctions2.java
│ ├── FieldMetadata.java
│ ├── MetadataAPIHelper.java
│ ├── MetadataAPIPostProcessing.java
│ ├── MetadataServerAPIHelper.java
│ ├── PreferQueryMode.java
│ ├── PrimitiveArraySupport.java
│ ├── RedshiftArray.java
│ ├── RedshiftBlob.java
│ ├── RedshiftCallableStatement.java
│ ├── RedshiftClob.java
│ ├── RedshiftConnectionImpl.java
│ ├── RedshiftDatabaseMetaData.java
│ ├── RedshiftParameterMetaData.java
│ ├── RedshiftPreparedStatement.java
│ ├── RedshiftResultSet.java
│ ├── RedshiftResultSetMetaDataImpl.java
│ ├── RedshiftSQLXML.java
│ ├── RedshiftSavepoint.java
│ ├── RedshiftStatementImpl.java
│ ├── RedshiftWarningWrapper.java
│ ├── ResultWrapper.java
│ ├── SslMode.java
│ ├── StatementCancelState.java
│ ├── TimestampUtils.java
│ ├── TypeInfoCache.java
│ └── UUIDArrayAssistant.java
│ ├── jdbc2
│ ├── ArrayAssistant.java
│ ├── ArrayAssistantRegistry.java
│ └── optional
│ │ ├── ConnectionPool.java
│ │ ├── PoolingDataSource.java
│ │ └── SimpleDataSource.java
│ ├── jdbc3
│ ├── Jdbc3ConnectionPool.java
│ ├── Jdbc3PoolingDataSource.java
│ └── Jdbc3SimpleDataSource.java
│ ├── jdbc42
│ ├── DataSource.java
│ └── Driver.java
│ ├── jre7
│ └── sasl
│ │ └── ScramAuthenticator.java
│ ├── largeobject
│ ├── BlobInputStream.java
│ ├── BlobOutputStream.java
│ ├── LargeObject.java
│ └── LargeObjectManager.java
│ ├── logger
│ ├── LogConsoleHandler.java
│ ├── LogFileHandler.java
│ ├── LogHandler.java
│ ├── LogLevel.java
│ ├── LogWriterHandler.java
│ └── RedshiftLogger.java
│ ├── osgi
│ ├── RedshiftBundleActivator.java
│ └── RedshiftDataSourceFactory.java
│ ├── plugin
│ ├── AdfsCredentialsProvider.java
│ ├── AzureCredentialsProvider.java
│ ├── BasicJwtCredentialsProvider.java
│ ├── BasicNativeSamlCredentialsProvider.java
│ ├── BasicSamlCredentialsProvider.java
│ ├── BrowserAzureCredentialsProvider.java
│ ├── BrowserAzureOAuth2CredentialsProvider.java
│ ├── BrowserIdcAuthPlugin.java
│ ├── BrowserOktaSAMLCredentialsProvider.java
│ ├── BrowserSamlCredentialsProvider.java
│ ├── CommonCredentialsProvider.java
│ ├── IdpCredentialsProvider.java
│ ├── IdpTokenAuthPlugin.java
│ ├── InternalPluginException.java
│ ├── JwtCredentialsProvider.java
│ ├── OktaCredentialsProvider.java
│ ├── PingCredentialsProvider.java
│ ├── SamlCredentialsProvider.java
│ ├── httpserver
│ │ ├── InternalServerException.java
│ │ ├── InvalidHttpRequestHandler.java
│ │ ├── RequestHandler.java
│ │ ├── Server.java
│ │ └── ValidHttpRequestHandler.java
│ ├── log-factory.properties
│ └── utils
│ │ ├── CheckUtils.java
│ │ ├── RandomStateUtil.java
│ │ ├── RequestUtils.java
│ │ └── ResponseUtils.java
│ ├── replication
│ ├── LogSequenceNumber.java
│ ├── RedshiftReplicationConnection.java
│ ├── RedshiftReplicationConnectionImpl.java
│ ├── RedshiftReplicationStream.java
│ ├── ReplicationSlotInfo.java
│ ├── ReplicationType.java
│ └── fluent
│ │ ├── AbstractCreateSlotBuilder.java
│ │ ├── AbstractStreamBuilder.java
│ │ ├── ChainedCommonCreateSlotBuilder.java
│ │ ├── ChainedCommonStreamBuilder.java
│ │ ├── ChainedCreateReplicationSlotBuilder.java
│ │ ├── ChainedStreamBuilder.java
│ │ ├── CommonOptions.java
│ │ ├── ReplicationCreateSlotBuilder.java
│ │ ├── ReplicationStreamBuilder.java
│ │ ├── logical
│ │ ├── ChainedLogicalCreateSlotBuilder.java
│ │ ├── ChainedLogicalStreamBuilder.java
│ │ ├── LogicalCreateSlotBuilder.java
│ │ ├── LogicalReplicationOptions.java
│ │ ├── LogicalStreamBuilder.java
│ │ └── StartLogicalReplicationCallback.java
│ │ └── physical
│ │ ├── ChainedPhysicalCreateSlotBuilder.java
│ │ ├── ChainedPhysicalStreamBuilder.java
│ │ ├── PhysicalCreateSlotBuilder.java
│ │ ├── PhysicalReplicationOptions.java
│ │ ├── PhysicalStreamBuilder.java
│ │ └── StartPhysicalReplicationCallback.java
│ ├── ssl
│ ├── DbKeyStoreSocketFactory.java
│ ├── DefaultJavaSSLFactory.java
│ ├── LazyKeyManager.java
│ ├── LibPQFactory.java
│ ├── MakeSSL.java
│ ├── NonValidatingFactory.java
│ ├── PKCS12KeyManager.java
│ ├── RedshiftjdbcHostnameVerifier.java
│ ├── SingleCertValidatingFactory.java
│ ├── WrappedFactory.java
│ └── jdbc4
│ │ └── LibPQFactory.java
│ ├── sspi
│ ├── ISSPIClient.java
│ ├── NTDSAPI.java
│ ├── NTDSAPIWrapper.java
│ └── SSPIClient.java
│ ├── translation
│ ├── bg.po
│ ├── cs.po
│ ├── de.po
│ ├── es.po
│ ├── fr.po
│ ├── it.po
│ ├── ja.po
│ ├── messages.pot
│ ├── messages_bg.java
│ ├── messages_cs.java
│ ├── messages_de.java
│ ├── messages_es.java
│ ├── messages_fr.java
│ ├── messages_it.java
│ ├── messages_ja.java
│ ├── messages_nl.java
│ ├── messages_pl.java
│ ├── messages_pt_BR.java
│ ├── messages_ru.java
│ ├── messages_sr.java
│ ├── messages_tr.java
│ ├── messages_zh_CN.java
│ ├── messages_zh_TW.java
│ ├── nl.po
│ ├── pl.po
│ ├── pt_BR.po
│ ├── ru.po
│ ├── sr.po
│ ├── tr.po
│ ├── zh_CN.po
│ └── zh_TW.po
│ ├── util
│ ├── Base64.java
│ ├── ByteBufferByteStreamWriter.java
│ ├── ByteConverter.java
│ ├── ByteStreamWriter.java
│ ├── CanEstimateSize.java
│ ├── DriverInfo.java
│ ├── ExpressionProperties.java
│ ├── ExtensibleDigest.java
│ ├── GT.java
│ ├── Gettable.java
│ ├── GettableHashMap.java
│ ├── HStoreConverter.java
│ ├── HostSpec.java
│ ├── IniFile.java
│ ├── JdbcBlackHole.java
│ ├── LruCache.java
│ ├── MD5Digest.java
│ ├── ObjectFactory.java
│ ├── QuerySanitizer.java
│ ├── ReaderInputStream.java
│ ├── RedshiftBinaryObject.java
│ ├── RedshiftByteTypes.java
│ ├── RedshiftBytea.java
│ ├── RedshiftConstants.java
│ ├── RedshiftException.java
│ ├── RedshiftGeography.java
│ ├── RedshiftGeometry.java
│ ├── RedshiftInterval.java
│ ├── RedshiftIntervalDayToSecond.java
│ ├── RedshiftIntervalYearToMonth.java
│ ├── RedshiftJDBCMain.java
│ ├── RedshiftMoney.java
│ ├── RedshiftObject.java
│ ├── RedshiftProperties.java
│ ├── RedshiftPropertyMaxResultBufferParser.java
│ ├── RedshiftState.java
│ ├── RedshiftTime.java
│ ├── RedshiftTimestamp.java
│ ├── RedshiftTokenizer.java
│ ├── RedshiftVarbyte.java
│ ├── RedshiftWarning.java
│ ├── ServerErrorMessage.java
│ ├── SharedTimer.java
│ ├── StreamWrapper.java
│ └── URLCoder.java
│ └── xa
│ ├── RecoveredXid.java
│ ├── RedshiftXAConnection.java
│ ├── RedshiftXADataSource.java
│ ├── RedshiftXADataSourceFactory.java
│ └── RedshiftXAException.java
├── resources
├── META-INF
│ ├── LICENSE
│ └── services
│ │ └── java.sql.Driver
├── com
│ └── amazon
│ │ └── redshift
│ │ └── plugin
│ │ └── adfs.exe
└── redshift_jdbc_driver.properties
└── test
└── java
└── .gitkeep
/.github/CODEOWNERS:
--------------------------------------------------------------------------------
1 | @aws/amazon-redshift-devs
2 |
--------------------------------------------------------------------------------
/.github/ISSUE_TEMPLATE.md:
--------------------------------------------------------------------------------
1 | ---
2 | name: Issue report
3 | about: Report an issue
4 | title: ''
5 | assignees: ''
6 |
7 | ---
8 |
9 | ## Driver version
10 |
11 |
12 | ## Redshift version
13 |
14 |
15 | ## Client Operating System
16 |
17 |
18 | ## JAVA/JVM version
19 |
20 |
21 | ## Table schema
22 |
23 |
24 | ## Problem description
25 |
26 | 1. Expected behaviour:
27 | 2. Actual behaviour:
28 | 3. Error message/stack trace:
29 | 4. Any other details that can be helpful:
30 |
31 | ## JDBC trace logs
32 |
33 |
34 | ## Reproduction code
35 |
36 |
--------------------------------------------------------------------------------
/.github/PULL_REQUEST_TEMPLATE.md:
--------------------------------------------------------------------------------
1 |
2 |
3 | ## Description
4 |
5 |
6 | ## Motivation and Context
7 |
8 |
9 |
10 | ## Testing
11 |
12 |
13 |
14 |
15 | ## Screenshots (if appropriate)
16 |
17 | ## Types of changes
18 |
19 | - [ ] Bug fix (non-breaking change which fixes an issue)
20 | - [ ] New feature (non-breaking change which adds functionality)
21 |
22 | ## Checklist
23 |
24 |
25 |
26 | - [ ] Local run of `mvn install` succeeds
27 | - [ ] My code follows the code style of this project
28 | - [ ] My change requires a change to the Javadoc documentation
29 | - [ ] I have updated the Javadoc documentation accordingly
30 | - [ ] I have read the **README** document
31 | - [ ] I have added tests to cover my changes
32 | - [ ] All new and existing tests passed
33 | - [ ] A short description of the change has been added to the **CHANGELOG**
34 |
35 |
36 | ## License
37 |
38 |
39 |
40 | - By submitting this pull request, I confirm that you can use, modify, copy, and redistribute this contribution, under the terms of your choice.
41 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 |
2 | .gitchangelog.rc
3 |
--------------------------------------------------------------------------------
/CODE_OF_CONDUCT.md:
--------------------------------------------------------------------------------
1 | ## Code of Conduct
2 | This project has adopted the [Amazon Open Source Code of Conduct](https://aws.github.io/code-of-conduct).
3 | For more information see the [Code of Conduct FAQ](https://aws.github.io/code-of-conduct-faq) or contact
4 | opensource-codeofconduct@amazon.com with any additional questions or comments.
5 |
--------------------------------------------------------------------------------
/NOTICE:
--------------------------------------------------------------------------------
1 | Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
2 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | ## Redshift JDBC Driver
2 |
3 | The Amazon JDBC Driver for Redshift is a Type 4 JDBC driver that provides database connectivity through the standard JDBC application program interfaces (APIs) available in the Java Platform, Enterprise Edition. The Driver provides access to Redshift from any Java application, application server, or Java-enabled applet.
4 |
5 | The driver has many Redshift specific features such as,
6 |
7 | * IAM authentication
8 | * IDP authentication
9 | * Redshift specific datatypes support
10 | * External schema support as part of getTables() and getColumns() JDBC API
11 |
12 | The driver supports JDBC 4.2 specification.
13 |
14 | [](https://maven-badges.herokuapp.com/maven-central/com.amazon.redshift/redshift-jdbc42)
15 | [](https://javadoc.io/doc/com.amazon.redshift/redshift-jdbc42)
16 |
17 | ## Build Driver
18 | ### Prerequisites
19 | * JDK8
20 | * Maven 3.x
21 | * A Redshift instance to connect to.
22 |
23 | ### Build Artifacts
24 | On Unix system run:
25 | ```
26 | build.sh
27 | ```
28 | It builds **redshift-jdbc42-{version}.jar** and **redshift-jdbc42-{version}.zip** files under **target** directory.
29 | The jar file is the Redshift JDBC driver. The zip file contains the driver jar file and all required dependency files to use AWS SDK for the IDP/IAM features.
30 |
31 | ### Installation and Configuration of Driver
32 |
33 | See [Amazon Redshift JDBC Driver Installation and Configuration Guide](https://docs.aws.amazon.com/redshift/latest/mgmt/jdbc20-install.html) for more information.
34 |
35 | Here are download links for the latest release:
36 |
37 | * https://s3.amazonaws.com/redshift-downloads/drivers/jdbc/2.1.0.33/redshift-jdbc42-2.1.0.33.jar
38 | * https://s3.amazonaws.com/redshift-downloads/drivers/jdbc/2.1.0.33/redshift-jdbc42-2.1.0.33.zip
39 |
40 | It is also available on Maven Central, with groupId: com.amazon.redshift and artifactId: redshift-jdbc42.
41 |
42 | ## Report Bugs
43 |
44 | See [CONTRIBUTING](CONTRIBUTING.md#Reporting-Bugs/Feature-Requests) for more information.
45 |
46 | ## Contributing Code Development
47 |
48 | See [CONTRIBUTING](CONTRIBUTING.md#Contributing-via-Pull-Requests) for more information.
49 |
50 | ## Changelog Generation
51 | An entry in the changelog is generated upon release using `gitchangelog`.
52 | Please use the configuration file, ``.gitchangelog.rc`` when generating the changelog.
53 |
54 | ## Security
55 |
56 | See [CONTRIBUTING](CONTRIBUTING.md#security-issue-notifications) for more information.
57 |
58 | ## License
59 |
60 | This project is licensed under the Apache-2.0 License.
61 |
62 |
--------------------------------------------------------------------------------
/build.sh:
--------------------------------------------------------------------------------
 1 | #!/bin/bash
 2 | 
 3 | # One option argument is version number.
 4 | # When version needs to change, please provide version in n.n.n.n format. e.g.
 5 | # ./build.sh 2.3.4.5
 6 | 
 7 | # Validates that FULL_VERSION has exactly four dot-separated fields (n.n.n.n);
 8 | # prints an error and exits the script otherwise.
 9 | check_version()
10 | {
11 |     MAJOR=$(echo $FULL_VERSION | cut -d'.' -f1)
12 |     MINOR=$(echo $FULL_VERSION | cut -d'.' -f2)
13 |     PATCH=$(echo $FULL_VERSION | cut -d'.' -f3)
14 |     RELEASE=$(echo $FULL_VERSION | cut -d'.' -f4)
15 |     EXTRA=$(echo $FULL_VERSION | cut -d'.' -f5)
16 | 
17 |     if [ "$MAJOR" == "" ] || [ "$MINOR" == "" ] || [ "$PATCH" == "" ] || [ "$RELEASE" == "" ] || [ "$EXTRA" != "" ];
18 |     then
19 |         echo Invalid version format: $FULL_VERSION. please give it in n.n.n.n format.
20 |         exit 1
21 |     fi
22 | }
23 | 
24 | if [ "$1" != "" ];
25 | then
26 |     # check version
27 |     FULL_VERSION=$1
28 |     check_version
29 |     # Record the version in the driver properties file (">|" forces the
30 |     # overwrite even when the shell's noclobber option is set), then update
31 |     # the Maven POM version to match.
32 |     echo "version=${FULL_VERSION}" >|./src/main/resources/redshift_jdbc_driver.properties
33 |     mvn versions:set -DartifactId=redshift-jdbc42 -DnewVersion=$1
34 |     mvn versions:commit
35 | fi
36 | 
37 | # Optional one-time installs of the bundled internal AWS SDK jars into the
38 | # local Maven repository; uncomment if the artifacts are not already installed.
39 | #mvn install:install-file -Dfile=lib/aws-java-sdk-redshift-internal-1.12.x.jar -DgroupId=com.amazonaws -DartifactId=aws-java-sdk-redshift-internal -Dversion=1.12.x -Dpackaging=jar -DgeneratePom=true
40 | 
41 | #mvn install:install-file -Dfile=lib/aws-java-sdk-redshift-arcadia-internal-1.0.jar -DgroupId=com.amazonaws -DartifactId=aws-java-sdk-redshift-arcadia-internal -Dversion=1.0 -Dpackaging=jar -DgeneratePom=true
42 | 
43 | #mvn install:install-file -Dfile=lib/AWSSSOOIDCJavaClient-1.12.x.jar -DgroupId=com.amazonaws -DartifactId=aws-java-sdk-ssooidc -Dversion=1.12.x -Dpackaging=jar -DgeneratePom=true
44 | 
45 | # Build the driver jar and release zip via the release-artifacts profile.
46 | mvn -P release-artifacts clean install
47 | 
--------------------------------------------------------------------------------
/build_using_gradle.sh:
--------------------------------------------------------------------------------
 1 | #!/bin/bash
 2 | 
 3 | # One option argument is version number.
 4 | # When version needs to change, please provide version in n.n.n.n format. e.g.
 5 | # ./build_using_gradle.sh 2.3.4.5
 6 | 
 7 | # Validates that FULL_VERSION has exactly four dot-separated fields (n.n.n.n);
 8 | # prints an error and exits the script otherwise.
 9 | check_version()
10 | {
11 |     MAJOR=$(echo $FULL_VERSION | cut -d'.' -f1)
12 |     MINOR=$(echo $FULL_VERSION | cut -d'.' -f2)
13 |     PATCH=$(echo $FULL_VERSION | cut -d'.' -f3)
14 |     RELEASE=$(echo $FULL_VERSION | cut -d'.' -f4)
15 |     EXTRA=$(echo $FULL_VERSION | cut -d'.' -f5)
16 | 
17 |     if [ "$MAJOR" == "" ] || [ "$MINOR" == "" ] || [ "$PATCH" == "" ] || [ "$RELEASE" == "" ] || [ "$EXTRA" != "" ];
18 |     then
19 |         echo Invalid version format: $FULL_VERSION. please give it in n.n.n.n format.
20 |         exit 1
21 |     fi
22 | }
23 | 
24 | # Default build version used when no argument is supplied.
25 | FULL_VERSION="2.0.0.0"
26 | 
27 | if [ "$1" != "" ];
28 | then
29 |     # check version
30 |     FULL_VERSION=$1
31 |     check_version
32 | fi
33 | 
34 | # Build the driver and generate Javadoc with Gradle, passing the version in.
35 | ./gradlew -PbuildVersion=$FULL_VERSION clean build javadoc
36 | 
37 | 
38 | 
--------------------------------------------------------------------------------
/checkstyle.xml:
--------------------------------------------------------------------------------
1 |
2 |
5 |
6 |
7 |
8 |
9 |
10 |
11 |
12 |
13 |
14 |
15 |
16 |
17 |
18 |
19 |
20 |
21 |
22 |
23 |
24 |
25 |
26 |
27 |
28 |
--------------------------------------------------------------------------------
/lib/aws-java-sdk-redshift-arcadia-internal-1.0.jar:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/aws/amazon-redshift-jdbc-driver/635b66e99d4ac70415c99d020ed2da330dcc0c6d/lib/aws-java-sdk-redshift-arcadia-internal-1.0.jar
--------------------------------------------------------------------------------
/lib/aws-java-sdk-redshift-internal-1.12.x.jar:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/aws/amazon-redshift-jdbc-driver/635b66e99d4ac70415c99d020ed2da330dcc0c6d/lib/aws-java-sdk-redshift-internal-1.12.x.jar
--------------------------------------------------------------------------------
/settings.gradle:
--------------------------------------------------------------------------------
1 | /*
2 | * This file was generated by the Gradle 'init' task.
3 | */
4 |
5 | rootProject.name = 'redshift-jdbc42'
6 |
--------------------------------------------------------------------------------
/src/assembly/assembly.xml:
--------------------------------------------------------------------------------
1 |
2 |
5 | release
6 |
7 | zip
8 |
9 |
10 |
11 |
12 | com.github.waffle:*
13 | org.osgi:*
14 | com.ongres.scram:*
15 | com.ongres.stringprep:*
16 | net.java.dev.jna:*
17 | org.slf4j:*
18 | com.github.ben-manes.caffeine:*
19 | org.junit.jupiter:*
20 | org.junit.platform:*
21 | org.opentest4j:*
22 | org.apiguardian:*
23 |
24 | false
25 | compile
26 | true
27 | true
28 | /
29 |
30 |
31 |
32 |
33 |
--------------------------------------------------------------------------------
/src/assembly/dist.xml:
--------------------------------------------------------------------------------
1 |
4 | dist
5 |
6 | tar.gz
7 |
8 |
9 |
10 | ${project.basedir}
11 |
12 | README*
13 | LICENSE*
14 | NOTICE*
15 | pom.xml
16 |
17 | true
18 |
19 |
20 | ${project.basedir}/src
21 | true
22 |
23 |
24 | ${project.build.directory}/apidocs
25 | doc
26 |
27 |
28 | ${project.build.directory}
29 | lib
30 |
31 | *.jar
32 |
33 |
34 |
35 |
36 |
37 | lib
38 | false
39 | false
40 | runtime
41 |
42 |
43 |
44 |
--------------------------------------------------------------------------------
/src/main/checkstyle/pgjdbc.importorder:
--------------------------------------------------------------------------------
1 | #Organize Import Order
2 | #Mon Dec 28 23:58:04 MSK 2015
3 | 4=java
4 | 3=javax
5 | 2=
6 | 1=org.postgresql
7 | 0=\#
8 |
--------------------------------------------------------------------------------
/src/main/checkstyle/suppressions.xml:
--------------------------------------------------------------------------------
1 |
2 |
3 |
6 |
7 |
8 |
9 |
10 |
11 |
--------------------------------------------------------------------------------
/src/main/feature/feature.xml:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 | transaction-api
5 |
6 |
7 |
--------------------------------------------------------------------------------
/src/main/java/com/amazon/redshift/AuthMech.java:
--------------------------------------------------------------------------------
 1 | package com.amazon.redshift;
 2 | 
 3 | /**
 4 |  * Provided authentication mechanism type enum.
 5 |  */
 6 | public enum AuthMech
 7 | {
 8 |   /**
 9 |    * Indicates the mechanism type is non-SSL.
10 |    */
11 |   DISABLE,
12 | 
13 |   /**
14 |    * Indicates that the mechanism type is using non-SSL first and then SSL if non-SSL fails.
15 |    */
16 |   ALLOW,
17 | 
18 |   /**
19 |    * Indicates that the mechanism type is using SSL first and then non-SSL if SSL fails.
20 |    */
21 |   PREFER,
22 | 
23 |   /**
24 |    * Indicates the mechanism type is using SSL.
25 |    */
26 |   REQUIRE,
27 | 
28 |   /**
29 |    * Indicates the mechanism type is using SSL and verify the trusted certificate authority.
30 |    */
31 |   VERIFY_CA,
32 | 
33 |   /**
34 |    * Indicates the mechanism type is using SSL and verify the trusted certificate authority and
35 |    * the server hostname.
36 |    */
37 |   VERIFY_FULL;
38 | }
39 | 
--------------------------------------------------------------------------------
/src/main/java/com/amazon/redshift/INativePlugin.java:
--------------------------------------------------------------------------------
 1 | package com.amazon.redshift;
 2 | 
 3 | import com.amazon.redshift.logger.RedshiftLogger;
 4 | import com.amazon.redshift.util.RedshiftException;
 5 | 
 6 | /**
 7 |  * Credential-provider plugin interface for native authentication.
 8 |  * Implementations expose an identity-provider (IdP) token and the
 9 |  * credentials derived from it, wrapped in a {@link NativeTokenHolder}.
10 |  */
11 | public interface INativePlugin
12 | {
13 |   /** Supplies a named configuration parameter to the plugin. */
14 |   void addParameter(String key, String value);
15 |   /** Sets the logger the plugin should write diagnostics to. */
16 |   void setLogger(RedshiftLogger log);
17 |   /** Returns the plugin-specific portion of the credential cache key. */
18 |   String getPluginSpecificCacheKey();
19 |   /** Returns the token obtained from the identity provider. */
20 |   String getIdpToken() throws RedshiftException;
21 |   /** Returns the key under which this plugin's credentials are cached. */
22 |   String getCacheKey();
23 | 
24 |   /** Returns the current credentials held by the plugin. */
25 |   NativeTokenHolder getCredentials() throws RedshiftException;
26 |   /** Refreshes the plugin's credentials. */
27 |   void refresh() throws RedshiftException;
28 | }
29 | 
30 | 
--------------------------------------------------------------------------------
/src/main/java/com/amazon/redshift/IPlugin.java:
--------------------------------------------------------------------------------
 1 | package com.amazon.redshift;
 2 | 
 3 | import com.amazon.redshift.logger.RedshiftLogger;
 4 | import com.amazonaws.auth.AWSCredentialsProvider;
 5 | 
 6 | /**
 7 |  * Credential-provider plugin interface built on the AWS SDK
 8 |  * {@link AWSCredentialsProvider} contract.
 9 |  */
10 | public interface IPlugin extends AWSCredentialsProvider
11 | {
12 |   /** Supplies a named configuration parameter to the plugin. */
13 |   void addParameter(String key, String value);
14 |   /** Sets the logger the plugin should write diagnostics to. */
15 |   void setLogger(RedshiftLogger log);
16 |   /** Returns the plugin-specific portion of the credential cache key. */
17 |   String getPluginSpecificCacheKey();
18 |   /** Enables or disables group federation for this plugin. */
19 |   void setGroupFederation(boolean groupFederation);
20 |   /** Returns the token obtained from the identity provider. */
21 |   String getIdpToken();
22 |   /** Returns the key under which this plugin's credentials are cached. */
23 |   String getCacheKey();
24 |   /** Returns the plugin's sub-type identifier. */
25 |   int getSubType();
26 | }
27 | 
28 | 
--------------------------------------------------------------------------------
/src/main/java/com/amazon/redshift/NativeTokenHolder.java:
--------------------------------------------------------------------------------
 1 | package com.amazon.redshift;
 2 | 
 3 | import com.amazon.redshift.plugin.utils.RequestUtils;
 4 | 
 5 | import java.util.Date;
 6 | 
 7 | /**
 8 |  * Holds a native-authentication access token together with its expiration
 9 |  * time and a flag recording whether it was newly acquired or read from cache.
10 |  */
11 | public class NativeTokenHolder {
12 |   protected String m_accessToken; // the access token string
13 |   private Date m_expiration; // expiration time of the token
14 |   private boolean refresh; // true means newly added, false means from cache.
15 | 
16 |   // Defaults the expiration to 15 minutes from the time of construction.
17 |   protected NativeTokenHolder(String accessToken)
18 |   {
19 |     this(accessToken, new Date(System.currentTimeMillis() + 15 * 60 * 1000));
20 |   }
21 | 
22 |   protected NativeTokenHolder(String accessToken, Date expiration)
23 |   {
24 |     this.m_accessToken = accessToken;
25 |     this.m_expiration = expiration;
26 |   }
27 | 
28 |   /** Creates a holder whose expiration defaults to 15 minutes from now. */
29 |   public static NativeTokenHolder newInstance(String accessToken)
30 |   {
31 |     return new NativeTokenHolder(accessToken);
32 |   }
33 | 
34 |   /** Creates a holder with an explicit expiration time. */
35 |   public static NativeTokenHolder newInstance(String accessToken, Date expiration)
36 |   {
37 |     return new NativeTokenHolder(accessToken, expiration);
38 |   }
39 | 
40 |   /** Returns whether the credential is expired, per RequestUtils.isCredentialExpired. */
41 |   public boolean isExpired()
42 |   {
43 |     return RequestUtils.isCredentialExpired(m_expiration);
44 |   }
45 | 
46 |   public String getAccessToken()
47 |   {
48 |     return m_accessToken;
49 |   }
50 | 
51 |   public Date getExpiration()
52 |   {
53 |     return m_expiration;
54 |   }
55 | 
56 |   /** Marks whether the token was newly acquired (true) or served from cache (false). */
57 |   public void setRefresh(boolean flag)
58 |   {
59 |     refresh = flag;
60 |   }
61 | 
62 |   public boolean isRefresh()
63 |   {
64 |     return refresh;
65 |   }
66 | }
67 | 
--------------------------------------------------------------------------------
/src/main/java/com/amazon/redshift/RedshiftNotification.java:
--------------------------------------------------------------------------------
 1 | /*
 2 |  * Copyright (c) 2003, PostgreSQL Global Development Group
 3 |  * See the LICENSE file in the project root for more information.
 4 |  */
 5 | 
 6 | package com.amazon.redshift;
 7 | 
 8 | /**
 9 |  * This interface defines the public Redshift extension for Notifications.
10 |  *
11 |  * <p>NOTE(review): the API and the {@code @since} tags below are inherited from
12 |  * the PostgreSQL JDBC driver this file derives from; confirm they apply to
13 |  * Redshift.</p>
14 |  */
15 | public interface RedshiftNotification {
16 |   /**
17 |    * Returns name of this notification.
18 |    *
19 |    * @return name of this notification
20 |    * @since 7.3
21 |    */
22 |   String getName();
23 | 
24 |   /**
25 |    * Returns the process id of the backend process making this notification.
26 |    *
27 |    * @return process id of the backend process making this notification
28 |    * @since 7.3
29 |    */
30 |   int getPID();
31 | 
32 |   /**
33 |    * Returns additional information from the notifying process. This feature has only been
34 |    * implemented in server versions 9.0 and later, so previous versions will always return an empty
35 |    * String.
36 |    *
37 |    * @return additional information from the notifying process
38 |    * @since 8.0
39 |    */
40 |   String getParameter();
41 | }
42 | 
--------------------------------------------------------------------------------
/src/main/java/com/amazon/redshift/RedshiftRefCursorResultSet.java:
--------------------------------------------------------------------------------
 1 | /*
 2 |  * Copyright (c) 2003, PostgreSQL Global Development Group
 3 |  * See the LICENSE file in the project root for more information.
 4 |  */
 5 | 
 6 | package com.amazon.redshift;
 7 | 
 8 | /**
 9 |  * A ref cursor based result set.
10 |  *
11 |  * @deprecated As of 8.0, this interface is only present for backwards-compatibility purposes.
12 |  *             New code should call getString() on the ResultSet that contains the refcursor to
13 |  *             obtain the underlying cursor name.
14 |  */
15 | @Deprecated
16 | public interface RedshiftRefCursorResultSet {
17 | 
18 |   /**
19 |    * @return the name of the cursor.
20 |    * @deprecated As of 8.0, replaced with calling getString() on the ResultSet that this ResultSet
21 |    *             was obtained from.
22 |    */
23 |   @Deprecated
24 |   String getRefCursor();
25 | }
26 | 
--------------------------------------------------------------------------------
/src/main/java/com/amazon/redshift/RedshiftResultSetMetaData.java:
--------------------------------------------------------------------------------
 1 | /*
 2 |  * Copyright (c) 2003, PostgreSQL Global Development Group
 3 |  * See the LICENSE file in the project root for more information.
 4 |  */
 5 | 
 6 | package com.amazon.redshift;
 7 | 
 8 | import com.amazon.redshift.core.Field;
 9 | 
10 | import java.sql.SQLException;
11 | 
12 | /**
13 |  * Redshift-specific extension methods for result set metadata.
14 |  */
15 | public interface RedshiftResultSetMetaData {
16 | 
17 |   /**
18 |    * Returns the underlying column name of a query result, or "" if it is unable to be determined.
19 |    *
20 |    * @param column column position (1-based)
21 |    * @return underlying column name of a query result
22 |    * @throws SQLException if something wrong happens
23 |    * @since 8.0
24 |    */
25 |   String getBaseColumnName(int column) throws SQLException;
26 | 
27 |   /**
28 |    * Returns the underlying table name of query result, or "" if it is unable to be determined.
29 |    *
30 |    * @param column column position (1-based)
31 |    * @return underlying table name of query result
32 |    * @throws SQLException if something wrong happens
33 |    * @since 8.0
34 |    */
35 |   String getBaseTableName(int column) throws SQLException;
36 | 
37 |   /**
38 |    * Returns the underlying schema name of query result, or "" if it is unable to be determined.
39 |    *
40 |    * @param column column position (1-based)
41 |    * @return underlying schema name of query result
42 |    * @throws SQLException if something wrong happens
43 |    * @since 8.0
44 |    */
45 |   String getBaseSchemaName(int column) throws SQLException;
46 | 
47 |   /**
48 |    * Is a column Text or Binary?
49 |    *
50 |    * @param column column position (1-based)
51 |    * @return 0 if column data format is TEXT, or 1 if BINARY
52 |    * @throws SQLException if something wrong happens
53 |    * @see Field#BINARY_FORMAT
54 |    * @see Field#TEXT_FORMAT
55 |    * @since 9.4
56 |    */
57 |   int getFormat(int column) throws SQLException;
58 | }
59 | 
--------------------------------------------------------------------------------
/src/main/java/com/amazon/redshift/copy/CopyDual.java:
--------------------------------------------------------------------------------
 1 | /*
 2 |  * Copyright (c) 2016, PostgreSQL Global Development Group
 3 |  * See the LICENSE file in the project root for more information.
 4 |  */
 5 | 
 6 | package com.amazon.redshift.copy;
 7 | 
 8 | /**
 9 |  * Bidirectional copy stream protocol: a single copy operation that can both
10 |  * send and receive data. Redshift replication works over this bidirectional
11 |  * copy protocol.
12 |  *
13 |  * @see CopyIn
14 |  * @see CopyOut
15 |  */
16 | public interface CopyDual extends CopyIn, CopyOut {
17 | }
18 | 
--------------------------------------------------------------------------------
/src/main/java/com/amazon/redshift/copy/CopyIn.java:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright (c) 2009, PostgreSQL Global Development Group
3 | * See the LICENSE file in the project root for more information.
4 | */
5 |
6 | package com.amazon.redshift.copy;
7 |
8 | import com.amazon.redshift.util.ByteStreamWriter;
9 |
10 | import java.sql.SQLException;
11 |
/**
 * Copy bulk data from client into a Redshift table very fast.
 */
public interface CopyIn extends CopyOperation {

  /**
   * Writes the specified part of the given byte array to an open and writable copy operation.
   *
   * @param buf array of bytes to write
   * @param off offset of first byte to write (normally zero)
   * @param siz number of bytes to write (normally buf.length)
   * @throws SQLException if the operation fails
   */
  void writeToCopy(byte[] buf, int off, int siz) throws SQLException;

  /**
   * Writes a ByteStreamWriter to an open and writable copy operation.
   *
   * @param from the source of bytes, e.g. a ByteBufferByteStreamWriter
   * @throws SQLException if the operation fails
   */
  void writeToCopy(ByteStreamWriter from) throws SQLException;

  /**
   * Forces any buffered output to be sent over the network to the backend. In general this is a
   * useless operation as the data will get pushed over in due time or when endCopy is called.
   * Some specific modified server versions (Truviso) want this data sooner. If you are unsure
   * whether you need to use this method, don't.
   *
   * @throws SQLException if the operation fails.
   */
  void flushCopy() throws SQLException;

  /**
   * Finishes the copy operation successfully.
   *
   * @return number of updated rows for server 8.2 or newer (see getHandledRowCount())
   * @throws SQLException if the operation fails.
   */
  long endCopy() throws SQLException;
}
53 |
--------------------------------------------------------------------------------
/src/main/java/com/amazon/redshift/copy/CopyOperation.java:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright (c) 2009, PostgreSQL Global Development Group
3 | * See the LICENSE file in the project root for more information.
4 | */
5 |
6 | package com.amazon.redshift.copy;
7 |
8 | import java.sql.SQLException;
9 |
/**
 * Exchange bulk data between client and Redshift database tables. See CopyIn and CopyOut for full
 * interfaces for the corresponding copy directions.
 */
public interface CopyOperation {

  /**
   * @return number of fields in each row for this operation
   */
  int getFieldCount();

  /**
   * @return overall format of each row: 0 = textual, 1 = binary
   */
  int getFormat();

  /**
   * @param field number of field (0..fieldCount()-1)
   * @return format of requested field: 0 = textual, 1 = binary
   */
  int getFieldFormat(int field);

  /**
   * @return true if the connection is currently reserved for this copy operation
   */
  boolean isActive();

  /**
   * Cancels this copy operation, discarding any exchanged data.
   *
   * @throws SQLException if cancelling fails
   */
  void cancelCopy() throws SQLException;

  /**
   * After a successful end of copy, returns the number of database records handled in that
   * operation. Unimplemented in Redshift server; returns -1.
   *
   * @return number of handled rows or -1
   */
  long getHandledRowCount();
}
52 |
--------------------------------------------------------------------------------
/src/main/java/com/amazon/redshift/copy/CopyOut.java:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright (c) 2009, PostgreSQL Global Development Group
3 | * See the LICENSE file in the project root for more information.
4 | */
5 |
6 | package com.amazon.redshift.copy;
7 |
8 | import java.sql.SQLException;
9 |
/**
 * Copy bulk data from a Redshift table to the client. See {@link CopyIn} for the
 * opposite direction.
 */
public interface CopyOut extends CopyOperation {
  /**
   * Blocks waiting for a row of data to be received from the server on an active copy operation.
   *
   * @return byte array received from server, or null when the server completes the copy operation
   * @throws SQLException if something goes wrong, for example a socket timeout
   */
  byte[] readFromCopy() throws SQLException;

  /**
   * Waits for a row of data to be received from the server on an active copy operation.
   *
   * @param block {@code true} to wait for data from the server, {@code false} to only read an
   *        already-pending message from the server without blocking
   * @return byte array received from server; in non-blocking mode null is returned when no
   *         message from the server is pending
   * @throws SQLException if something goes wrong, for example a socket timeout
   */
  byte[] readFromCopy(boolean block) throws SQLException;
}
30 |
--------------------------------------------------------------------------------
/src/main/java/com/amazon/redshift/core/BaseQueryKey.java:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright (c) 2003, PostgreSQL Global Development Group
3 | * See the LICENSE file in the project root for more information.
4 | */
5 |
6 | package com.amazon.redshift.core;
7 |
8 | import com.amazon.redshift.util.CanEstimateSize;
9 |
10 | /**
11 | * This class is used as a cache key for simple statements that have no "returning columns".
12 | * Prepared statements that have no returning columns use just {@code String sql} as a key.
13 | * Simple and Prepared statements that have returning columns use {@link QueryWithReturningColumnsKey}
14 | * as a cache key.
15 | */
16 | class BaseQueryKey implements CanEstimateSize {
17 | public final String sql;
18 | public final boolean isParameterized;
19 | public final boolean escapeProcessing;
20 |
21 | BaseQueryKey(String sql, boolean isParameterized, boolean escapeProcessing) {
22 | this.sql = sql;
23 | this.isParameterized = isParameterized;
24 | this.escapeProcessing = escapeProcessing;
25 | }
26 |
27 | @Override
28 | public String toString() {
29 | return "BaseQueryKey{"
30 | + "sql='" + sql + '\''
31 | + ", isParameterized=" + isParameterized
32 | + ", escapeProcessing=" + escapeProcessing
33 | + '}';
34 | }
35 |
36 | @Override
37 | public long getSize() {
38 | if (sql == null) { // just in case
39 | return 16;
40 | }
41 | return 16 + sql.length() * 2L; // 2 bytes per char, revise with Java 9's compact strings
42 | }
43 |
44 | @Override
45 | public boolean equals(Object o) {
46 | if (this == o) {
47 | return true;
48 | }
49 | if (o == null || getClass() != o.getClass()) {
50 | return false;
51 | }
52 |
53 | BaseQueryKey that = (BaseQueryKey) o;
54 |
55 | if (isParameterized != that.isParameterized) {
56 | return false;
57 | }
58 | if (escapeProcessing != that.escapeProcessing) {
59 | return false;
60 | }
61 | return sql != null ? sql.equals(that.sql) : that.sql == null;
62 |
63 | }
64 |
65 | @Override
66 | public int hashCode() {
67 | int result = sql != null ? sql.hashCode() : 0;
68 | result = 31 * result + (isParameterized ? 1 : 0);
69 | result = 31 * result + (escapeProcessing ? 1 : 0);
70 | return result;
71 | }
72 | }
73 |
--------------------------------------------------------------------------------
/src/main/java/com/amazon/redshift/core/ByteOptimizedUTF8Encoder.java:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright (c) 2019, PostgreSQL Global Development Group
3 | * See the LICENSE file in the project root for more information.
4 | */
5 |
6 | package com.amazon.redshift.core;
7 |
8 | import java.io.IOException;
9 | import java.nio.charset.Charset;
10 |
/**
 * UTF-8 encoder which validates input and is optimized for jdk 9+ where {@code String} objects are backed by
 * {@code byte[]}.
 * @author Brett Okken
 */
final class ByteOptimizedUTF8Encoder extends OptimizedUTF8Encoder {

  private static final Charset ASCII_CHARSET = Charset.forName("ascii");

  /**
   * {@inheritDoc}
   */
  @Override
  public String decode(byte[] encodedString, int offset, int length) throws IOException {
    //for very short strings going straight to chars is up to 30% faster
    if (length <= 32) {
      return charDecode(encodedString, offset, length);
    }
    for (int i = offset, j = offset + length; i < j; ++i) {
      // bytes are signed values. all ascii values are positive
      if (encodedString[i] < 0) {
        // first non-ascii byte found: fall back to full UTF-8 decoding from here
        return slowDecode(encodedString, offset, length, i);
      }
    }
    // we have confirmed all chars are ascii, give java that hint
    return new String(encodedString, offset, length, ASCII_CHARSET);
  }

  /**
   * Decodes to {@code char[]} in presence of non-ascii values after first copying all known ascii chars directly
   * from {@code byte[]} to {@code char[]}.
   *
   * <p>NOTE(review): synchronized — presumably because {@code getCharArray} hands out a buffer
   * shared across calls on this encoder instance; confirm against {@code OptimizedUTF8Encoder}.</p>
   */
  private synchronized String slowDecode(byte[] encodedString, int offset, int length, int curIdx) throws IOException {
    final char[] chars = getCharArray(length);
    int out = 0;
    // Copy the known-ascii prefix directly; bytes before curIdx were verified positive.
    for (int i = offset; i < curIdx; ++i) {
      chars[out++] = (char) encodedString[i];
    }
    return decodeToChars(encodedString, curIdx, length - (curIdx - offset), chars, out);
  }
}
52 |
--------------------------------------------------------------------------------
/src/main/java/com/amazon/redshift/core/CachedQuery.java:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright (c) 2015, PostgreSQL Global Development Group
3 | * See the LICENSE file in the project root for more information.
4 | */
5 |
6 | package com.amazon.redshift.core;
7 |
8 | import com.amazon.redshift.util.CanEstimateSize;
9 |
10 | /**
11 | * Stores information on the parsed JDBC query. It is used to cut parsing overhead when executing
12 | * the same query through {@link java.sql.Connection#prepareStatement(String)}.
13 | */
14 | public class CachedQuery implements CanEstimateSize {
15 | /**
16 | * Cache key. {@link String} or {@code com.amazon.redshift.util.CanEstimateSize}.
17 | */
18 | public final Object key;
19 | public final Query query;
20 | public final boolean isFunction;
21 |
22 | private int executeCount;
23 |
24 | public CachedQuery(Object key, Query query, boolean isFunction) {
25 | assert key instanceof String || key instanceof CanEstimateSize
26 | : "CachedQuery.key should either be String or implement CanEstimateSize."
27 | + " Actual class is " + key.getClass();
28 | this.key = key;
29 | this.query = query;
30 | this.isFunction = isFunction;
31 | }
32 |
33 | public void increaseExecuteCount() {
34 | if (executeCount < Integer.MAX_VALUE) {
35 | executeCount++;
36 | }
37 | }
38 |
39 | public void increaseExecuteCount(int inc) {
40 | int newValue = executeCount + inc;
41 | if (newValue > 0) { // if overflows, just ignore the update
42 | executeCount = newValue;
43 | }
44 | }
45 |
46 | /**
47 | * Number of times this statement has been used.
48 | *
49 | * @return number of times this statement has been used
50 | */
51 | public int getExecuteCount() {
52 | return executeCount;
53 | }
54 |
55 | @Override
56 | public long getSize() {
57 | long queryLength;
58 | if (key instanceof String) {
59 | queryLength = ((String) key).length() * 2L; // 2 bytes per char, revise with Java 9's compact strings
60 | } else {
61 | queryLength = ((CanEstimateSize) key).getSize();
62 | }
63 | return queryLength * 2 /* original query and native sql */
64 | + 100L /* entry in hash map, CachedQuery wrapper, etc */;
65 | }
66 |
67 | @Override
68 | public String toString() {
69 | return "CachedQuery{"
70 | + "executeCount=" + executeCount
71 | + ", query=" + query
72 | + ", isFunction=" + isFunction
73 | + '}';
74 | }
75 | }
76 |
--------------------------------------------------------------------------------
/src/main/java/com/amazon/redshift/core/CallableQueryKey.java:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright (c) 2015, PostgreSQL Global Development Group
3 | * See the LICENSE file in the project root for more information.
4 | */
5 |
6 | package com.amazon.redshift.core;
7 |
8 | /**
9 | * Serves as a cache key for {@link java.sql.CallableStatement}.
10 | * Callable statements require some special parsing before use (due to JDBC {@code {?= call...}}
11 | * syntax, thus a special cache key class is used to trigger proper parsing for callable statements.
12 | */
13 | class CallableQueryKey extends BaseQueryKey {
14 |
15 | CallableQueryKey(String sql) {
16 | super(sql, true, true);
17 | }
18 |
19 | @Override
20 | public String toString() {
21 | return "CallableQueryKey{"
22 | + "sql='" + sql + '\''
23 | + ", isParameterized=" + isParameterized
24 | + ", escapeProcessing=" + escapeProcessing
25 | + '}';
26 | }
27 |
28 | @Override
29 | public int hashCode() {
30 | return super.hashCode() * 31;
31 | }
32 |
33 | @Override
34 | public boolean equals(Object o) {
35 | // Nothing interesting here, overriding equals to make hashCode and equals paired
36 | return super.equals(o);
37 | }
38 | }
39 |
--------------------------------------------------------------------------------
/src/main/java/com/amazon/redshift/core/CharOptimizedUTF8Encoder.java:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright (c) 2019, PostgreSQL Global Development Group
3 | * See the LICENSE file in the project root for more information.
4 | */
5 |
6 | package com.amazon.redshift.core;
7 |
8 | import java.io.IOException;
9 |
/**
 * UTF-8 encoder which validates input and is optimized for jdk 8 and lower where {@code String} objects are backed by
 * {@code char[]}.
 * @author Brett Okken
 */
final class CharOptimizedUTF8Encoder extends OptimizedUTF8Encoder {

  /**
   * {@inheritDoc}
   */
  @Override
  public String decode(byte[] encodedString, int offset, int length) throws IOException {
    // Always decode via char[]; that is the cheapest path when String wraps char[].
    return charDecode(encodedString, offset, length);
  }
}
25 |
--------------------------------------------------------------------------------
/src/main/java/com/amazon/redshift/core/FixedLengthOutputStream.java:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright (c) 2020, PostgreSQL Global Development Group
3 | * See the LICENSE file in the project root for more information.
4 | */
5 |
6 | package com.amazon.redshift.core;
7 |
8 | import java.io.IOException;
9 | import java.io.OutputStream;
10 |
/**
 * A stream that refuses to write more than a maximum number of bytes.
 */
public class FixedLengthOutputStream extends OutputStream {

  /** Maximum number of bytes this stream will accept. */
  private final int size;
  /** Underlying stream that actually receives the bytes. */
  private final OutputStream target;
  /** Number of bytes successfully written so far. */
  private int written;

  /**
   * @param size maximum number of bytes this stream will accept
   * @param target stream that receives the data
   */
  public FixedLengthOutputStream(int size, OutputStream target) {
    this.size = size;
    this.target = target;
  }

  @Override
  public void write(int b) throws IOException {
    verifyAllowed(1);
    target.write(b);
    // Count only after the write succeeds, consistent with write(byte[], int, int).
    written++;
  }

  @Override
  public void write(byte[] buf, int offset, int len) throws IOException {
    // Overflow-safe bounds check: (offset + len) could wrap around for large len,
    // silently bypassing the check; comparing against (buf.length - offset) cannot.
    if ((offset < 0) || (len < 0) || (len > buf.length - offset)) {
      throw new IndexOutOfBoundsException();
    } else if (len == 0) {
      return;
    }
    verifyAllowed(len);
    target.write(buf, offset, len);
    written += len;
  }

  /**
   * @return number of bytes that may still be written before the limit is reached
   */
  public int remaining() {
    return size - written;
  }

  /**
   * @param wanted number of bytes about to be written
   * @throws IOException if writing {@code wanted} more bytes would exceed the limit
   */
  private void verifyAllowed(int wanted) throws IOException {
    if (remaining() < wanted) {
      throw new IOException("Attempt to write more than the specified " + size + " bytes");
    }
  }
}
53 |
--------------------------------------------------------------------------------
/src/main/java/com/amazon/redshift/core/JavaVersion.java:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright (c) 2017, PostgreSQL Global Development Group
3 | * See the LICENSE file in the project root for more information.
4 | */
5 |
6 | package com.amazon.redshift.core;
7 |
public enum JavaVersion {
  // Note: order is important,
  v1_6,
  v1_7,
  v1_8,
  other;

  private static final JavaVersion RUNTIME_VERSION = from(System.getProperty("java.version"));

  /**
   * Returns the enum value that represents the current runtime. For instance, when using
   * -jre7.jar via Java 8, this would return {@code v1_8}.
   *
   * @return enum value that represents current runtime.
   */
  public static JavaVersion getRuntimeVersion() {
    return RUNTIME_VERSION;
  }

  /**
   * Parses a Java version string as found in the {@code "java.version"} system property.
   *
   * @param version string like 1.6, 1.7, etc
   * @return JavaVersion enum
   */
  public static JavaVersion from(String version) {
    // Minimum supported is Java 1.6; anything unrecognized maps to "other".
    final String[] prefixes = {"1.6", "1.7", "1.8"};
    final JavaVersion[] known = {v1_6, v1_7, v1_8};
    for (int i = 0; i < prefixes.length; i++) {
      if (version.startsWith(prefixes[i])) {
        return known[i];
      }
    }
    return other;
  }
}
47 |
--------------------------------------------------------------------------------
/src/main/java/com/amazon/redshift/core/JdbcCallParseInfo.java:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright (c) 2015, PostgreSQL Global Development Group
3 | * See the LICENSE file in the project root for more information.
4 | */
5 |
6 | package com.amazon.redshift.core;
7 |
/**
 * Holds the outcome of rewriting a JDBC call escape (e.g. {@code {?= call ...}})
 * into backend-native SQL: the rewritten text plus whether it denotes a function call.
 */
public class JdbcCallParseInfo {
  private final String sql;
  private final boolean isFunction;

  public JdbcCallParseInfo(String sql, boolean isFunction) {
    this.sql = sql;
    this.isFunction = isFunction;
  }

  /**
   * SQL in a native form for a certain backend version.
   *
   * @return SQL in a native form for a certain backend version
   */
  public String getSql() {
    return sql;
  }

  /**
   * Returns whether the given SQL is a function call.
   *
   * @return {@code true} if the given SQL is a function
   */
  public boolean isFunction() {
    return isFunction;
  }

}
39 |
--------------------------------------------------------------------------------
/src/main/java/com/amazon/redshift/core/Notification.java:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright (c) 2003, PostgreSQL Global Development Group
3 | * See the LICENSE file in the project root for more information.
4 | */
5 |
6 | package com.amazon.redshift.core;
7 |
8 | import com.amazon.redshift.RedshiftNotification;
9 |
10 | public class Notification implements RedshiftNotification {
11 |
12 | private final String name;
13 | private final String parameter;
14 | private final int pid;
15 |
16 | public Notification(String name, int pid) {
17 | this(name, pid, "");
18 | }
19 |
20 | public Notification(String name, int pid, String parameter) {
21 | this.name = name;
22 | this.pid = pid;
23 | this.parameter = parameter;
24 | }
25 |
26 | /*
27 | * Returns name of this notification
28 | */
29 | public String getName() {
30 | return name;
31 | }
32 |
33 | /*
34 | * Returns the process id of the backend process making this notification
35 | */
36 | public int getPID() {
37 | return pid;
38 | }
39 |
40 | public String getParameter() {
41 | return parameter;
42 | }
43 |
44 | }
45 |
46 |
--------------------------------------------------------------------------------
/src/main/java/com/amazon/redshift/core/Provider.java:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright (c) 2003, PostgreSQL Global Development Group
3 | * See the LICENSE file in the project root for more information.
4 | */
5 |
6 | package com.amazon.redshift.core;
7 |
/**
 * Represents a provider of results.
 *
 * <p>NOTE(review): the type parameter declaration was missing ({@code T} was referenced
 * but never declared), so the interface did not compile; restored as {@code Provider&lt;T&gt;}.</p>
 *
 * @param <T> the type of results provided by this provider
 */
public interface Provider<T> {

  /**
   * Gets a result.
   *
   * @return a result
   */
  T get();
}
22 |
--------------------------------------------------------------------------------
/src/main/java/com/amazon/redshift/core/QueryWithReturningColumnsKey.java:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright (c) 2003, PostgreSQL Global Development Group
3 | * See the LICENSE file in the project root for more information.
4 | */
5 |
6 | package com.amazon.redshift.core;
7 |
8 | import java.util.Arrays;
9 |
10 | /**
11 | * Cache key for a query that have some returning columns.
12 | * {@code columnNames} should contain non-quoted column names.
13 | * The parser will quote them automatically.
14 | * There's a special case of {@code columnNames == new String[]{"*"}} that means all columns
15 | * should be returned. {@link Parser} is aware of that and does not quote {@code *}
16 | */
17 | class QueryWithReturningColumnsKey extends BaseQueryKey {
18 | public final String[] columnNames;
19 | private int size; // query length cannot exceed MAX_INT
20 |
21 | QueryWithReturningColumnsKey(String sql, boolean isParameterized, boolean escapeProcessing,
22 | String[] columnNames) {
23 | super(sql, isParameterized, escapeProcessing);
24 | if (columnNames == null) {
25 | // TODO: teach parser to fetch key columns somehow when no column names were given
26 | columnNames = new String[]{"*"};
27 | }
28 | this.columnNames = columnNames;
29 | }
30 |
31 | @Override
32 | public long getSize() {
33 | int size = this.size;
34 | if (size != 0) {
35 | return size;
36 | }
37 | size = (int) super.getSize();
38 | if (columnNames != null) {
39 | size += 16L; // array itself
40 | for (String columnName: columnNames) {
41 | size += columnName.length() * 2L; // 2 bytes per char, revise with Java 9's compact strings
42 | }
43 | }
44 | this.size = size;
45 | return size;
46 | }
47 |
48 | @Override
49 | public String toString() {
50 | return "QueryWithReturningColumnsKey{"
51 | + "sql='" + sql + '\''
52 | + ", isParameterized=" + isParameterized
53 | + ", escapeProcessing=" + escapeProcessing
54 | + ", columnNames=" + Arrays.toString(columnNames)
55 | + '}';
56 | }
57 |
58 | @Override
59 | public boolean equals(Object o) {
60 | if (this == o) {
61 | return true;
62 | }
63 | if (o == null || getClass() != o.getClass()) {
64 | return false;
65 | }
66 | if (!super.equals(o)) {
67 | return false;
68 | }
69 |
70 | QueryWithReturningColumnsKey that = (QueryWithReturningColumnsKey) o;
71 |
72 | // Probably incorrect - comparing Object[] arrays with Arrays.equals
73 | return Arrays.equals(columnNames, that.columnNames);
74 | }
75 |
76 | @Override
77 | public int hashCode() {
78 | int result = super.hashCode();
79 | result = 31 * result + Arrays.hashCode(columnNames);
80 | return result;
81 | }
82 | }
83 |
--------------------------------------------------------------------------------
/src/main/java/com/amazon/redshift/core/RedshiftBindException.java:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright (c) 2004, PostgreSQL Global Development Group
3 | * See the LICENSE file in the project root for more information.
4 | */
5 |
6 | package com.amazon.redshift.core;
7 |
8 | import java.io.IOException;
9 |
/**
 * Wraps an {@link IOException} raised while binding parameter values, so that callers can
 * distinguish bind failures from other I/O errors and retrieve the underlying exception.
 */
public class RedshiftBindException extends IOException {

  private final IOException ioe;

  /**
   * @param ioe the underlying I/O failure being wrapped
   */
  public RedshiftBindException(IOException ioe) {
    // Chain the original exception as the cause so its message and stack trace
    // are preserved in logs instead of being silently dropped.
    super(ioe);
    this.ioe = ioe;
  }

  /**
   * @return the underlying {@link IOException} passed at construction
   */
  public IOException getIOException() {
    return ioe;
  }
}
22 |
--------------------------------------------------------------------------------
/src/main/java/com/amazon/redshift/core/ReplicationProtocol.java:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright (c) 2016, PostgreSQL Global Development Group
3 | * See the LICENSE file in the project root for more information.
4 | */
5 |
6 | package com.amazon.redshift.core;
7 |
8 | import com.amazon.redshift.logger.RedshiftLogger;
9 | import com.amazon.redshift.replication.RedshiftReplicationStream;
10 | import com.amazon.redshift.replication.fluent.logical.LogicalReplicationOptions;
11 | import com.amazon.redshift.replication.fluent.physical.PhysicalReplicationOptions;
12 |
13 | import java.sql.SQLException;
14 |
/**
 * Abstracts the protocol-specific details of physical and logical replication.
 *
 * <p>Each connection opened with replication options is associated with its own
 * ReplicationProtocol instance.</p>
 */
public interface ReplicationProtocol {
  /**
   * @param options not null options for logical replication stream
   * @param logger the logger to log the entry for debugging.
   * @return not null stream instance from which WAL logs decoded by the output plugin can be
   *         fetched
   * @throws SQLException on error
   */
  RedshiftReplicationStream startLogical(LogicalReplicationOptions options, RedshiftLogger logger) throws SQLException;

  /**
   * @param options not null options for physical replication stream
   * @param logger the logger to log the entry for debugging.
   * @return not null stream instance from which WAL logs can be fetched
   * @throws SQLException on error
   */
  RedshiftReplicationStream startPhysical(PhysicalReplicationOptions options, RedshiftLogger logger) throws SQLException;
}
38 |
--------------------------------------------------------------------------------
/src/main/java/com/amazon/redshift/core/ResultCursor.java:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright (c) 2004, PostgreSQL Global Development Group
3 | * See the LICENSE file in the project root for more information.
4 | */
5 | // Copyright (c) 2004, Open Cloud Limited.
6 |
7 | package com.amazon.redshift.core;
8 |
/**
 * Abstraction of a cursor over a returned resultset. This is an opaque interface that only provides
 * a way to close the cursor; all other operations are done by passing a ResultCursor to
 * QueryExecutor methods.
 *
 * @author Oliver Jowett (oliver@opencloud.com)
 */
public interface ResultCursor {
  /**
   * Close this cursor. This may not immediately free underlying resources but may make it happen
   * more promptly. Closed cursors should not be passed to QueryExecutor methods.
   */
  void close();
}
23 |
--------------------------------------------------------------------------------
/src/main/java/com/amazon/redshift/core/SetupQueryRunner.java:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright (c) 2003, PostgreSQL Global Development Group
3 | * See the LICENSE file in the project root for more information.
4 | */
5 | // Copyright (c) 2004, Open Cloud Limited.
6 |
7 | package com.amazon.redshift.core;
8 |
9 | import com.amazon.redshift.core.v3.MessageLoopState;
10 | import com.amazon.redshift.core.v3.RedshiftRowsBlockingQueue;
11 | import com.amazon.redshift.util.GT;
12 | import com.amazon.redshift.util.RedshiftException;
13 | import com.amazon.redshift.util.RedshiftState;
14 |
15 | import java.sql.SQLException;
16 | import java.sql.SQLWarning;
17 | import java.util.List;
18 | import java.util.Properties;
19 |
/**
 * Poor man's Statement &amp; ResultSet, used for initial queries while we're still initializing the
 * system.
 */
public class SetupQueryRunner {

  /** Captures the rows of a setup query's single result and ignores all warnings. */
  private static class SimpleResultHandler extends ResultHandlerBase {
    // Rows of the last handled result; raw type — presumably a List of Tuple, TODO confirm.
    private List tuples;

    SimpleResultHandler() {
      // This class overrides the handleWarning method and ignores warnings.
      // No need to handle property value
      super(new Properties());
    }

    // @return rows captured by handleResultRows, or null if no result was handled
    List getResults() {
      return tuples;
    }

    public void handleResultRows(Query fromQuery, Field[] fields, List tuples,
        ResultCursor cursor, RedshiftRowsBlockingQueue queueTuples,
        int[] rowCount, Thread ringBufferThread) {
      // Only the rows are retained; all other arguments are ignored for setup queries.
      this.tuples = tuples;
    }

    public void handleWarning(SQLWarning warning) {
      // We ignore warnings. We assume we know what we're
      // doing in the setup queries.
    }
  }

  /**
   * Executes a single setup query on the given executor.
   *
   * @param executor executor to run the query on
   * @param queryString SQL text of the setup query
   * @param wantResults whether the caller expects exactly one result row back
   * @return the single result row, or null when {@code wantResults} is false
   * @throws SQLException if execution fails or the query does not return exactly one row
   */
  public static Tuple run(QueryExecutor executor, String queryString,
      boolean wantResults) throws SQLException {
    Query query = executor.createSimpleQuery(queryString);
    SimpleResultHandler handler = new SimpleResultHandler();

    int flags = QueryExecutor.QUERY_ONESHOT | QueryExecutor.QUERY_SUPPRESS_BEGIN
        | QueryExecutor.QUERY_EXECUTE_AS_SIMPLE;
    if (!wantResults) {
      flags |= QueryExecutor.QUERY_NO_RESULTS | QueryExecutor.QUERY_NO_METADATA;
    }

    try {
      executor.execute(query, null, handler, 0, 0, flags);
    } finally {
      // Always release the one-shot query, even when execution throws.
      query.close();
    }

    if (!wantResults) {
      return null;
    }

    List tuples = handler.getResults();
    if (tuples == null || tuples.size() != 1) {
      throw new RedshiftException(GT.tr("An unexpected result was returned by a query."),
          RedshiftState.CONNECTION_UNABLE_TO_CONNECT);
    }

    return tuples.get(0);
  }

}
82 |
--------------------------------------------------------------------------------
/src/main/java/com/amazon/redshift/core/SqlCommandType.java:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright (c) 2003, PostgreSQL Global Development Group
3 | * See the LICENSE file in the project root for more information.
4 | */
5 |
6 | package com.amazon.redshift.core;
7 |
/**
 * Type information inspection support.
 * @author Jeremy Whiting jwhiting@redhat.com
 *
 */

public enum SqlCommandType {

  /**
   * Use BLANK for empty sql queries or when parsing the sql string is not
   * necessary.
   */
  BLANK,
  INSERT,
  UPDATE,
  DELETE,
  MOVE,
  SELECT,
  WITH,
  PREPARE;
}
29 |
--------------------------------------------------------------------------------
/src/main/java/com/amazon/redshift/core/TransactionState.java:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright (c) 2003, PostgreSQL Global Development Group
3 | * See the LICENSE file in the project root for more information.
4 | */
5 |
6 | package com.amazon.redshift.core;
7 |
/**
 * State of the current transaction on a backend connection.
 */
public enum TransactionState {
  // No transaction in progress.
  IDLE,
  // A transaction is currently open.
  OPEN,
  // The current transaction has failed.
  FAILED
}
13 |
--------------------------------------------------------------------------------
/src/main/java/com/amazon/redshift/core/Version.java:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright (c) 2003, PostgreSQL Global Development Group
3 | * See the LICENSE file in the project root for more information.
4 | */
5 |
6 | package com.amazon.redshift.core;
7 |
/**
 * Something that exposes a numeric version, e.g. a server or driver version.
 */
public interface Version {

  /**
   * Get a machine-readable version number.
   *
   * @return the version in numeric XXYYZZ form, e.g. 90401 for 9.4.1
   */
  int getVersionNum();

}
18 |
--------------------------------------------------------------------------------
/src/main/java/com/amazon/redshift/core/v3/CopyDualImpl.java:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright (c) 2016, PostgreSQL Global Development Group
3 | * See the LICENSE file in the project root for more information.
4 | */
5 |
6 | package com.amazon.redshift.core.v3;
7 |
8 | import com.amazon.redshift.copy.CopyDual;
9 | import com.amazon.redshift.util.ByteStreamWriter;
10 | import com.amazon.redshift.util.RedshiftException;
11 |
12 | import java.sql.SQLException;
13 | import java.util.LinkedList;
14 | import java.util.Queue;
15 |
16 | public class CopyDualImpl extends CopyOperationImpl implements CopyDual {
17 | private Queue received = new LinkedList();
18 |
19 | public void writeToCopy(byte[] data, int off, int siz) throws SQLException {
20 | queryExecutor.writeToCopy(this, data, off, siz);
21 | }
22 |
23 | public void writeToCopy(ByteStreamWriter from) throws SQLException {
24 | queryExecutor.writeToCopy(this, from);
25 | }
26 |
27 | public void flushCopy() throws SQLException {
28 | queryExecutor.flushCopy(this);
29 | }
30 |
31 | public long endCopy() throws SQLException {
32 | return queryExecutor.endCopy(this);
33 | }
34 |
35 | public byte[] readFromCopy() throws SQLException {
36 | return readFromCopy(true);
37 | }
38 |
39 | @Override
40 | public byte[] readFromCopy(boolean block) throws SQLException {
41 | if (received.isEmpty()) {
42 | queryExecutor.readFromCopy(this, block);
43 | }
44 |
45 | return received.poll();
46 | }
47 |
48 | @Override
49 | public void handleCommandStatus(String status) throws RedshiftException {
50 | }
51 |
52 | protected void handleCopydata(byte[] data) {
53 | received.add(data);
54 | }
55 | }
56 |
--------------------------------------------------------------------------------
/src/main/java/com/amazon/redshift/core/v3/CopyInImpl.java:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright (c) 2009, PostgreSQL Global Development Group
3 | * See the LICENSE file in the project root for more information.
4 | */
5 |
6 | package com.amazon.redshift.core.v3;
7 |
8 | import com.amazon.redshift.copy.CopyIn;
9 | import com.amazon.redshift.util.ByteStreamWriter;
10 | import com.amazon.redshift.util.GT;
11 | import com.amazon.redshift.util.RedshiftException;
12 | import com.amazon.redshift.util.RedshiftState;
13 |
14 | import java.sql.SQLException;
15 |
16 | /**
17 | * COPY FROM STDIN operation.
18 | *
19 | * Anticipated flow:
20 | *
21 | * CopyManager.copyIn() ->QueryExecutor.startCopy() - sends given query to server
22 | * ->processCopyResults(): - receives CopyInResponse from Server - creates new CopyInImpl
23 | * ->initCopy(): - receives copy metadata from server ->CopyInImpl.init() ->lock()
24 | * connection for this operation - if query fails an exception is thrown - if query returns wrong
25 | * CopyOperation, copyIn() cancels it before throwing exception <-return: new CopyInImpl holding
26 | * lock on connection repeat CopyIn.writeToCopy() for all data ->CopyInImpl.writeToCopy()
27 | * ->QueryExecutorImpl.writeToCopy() - sends given data ->processCopyResults() - parameterized
28 | * not to block, just peek for new messages from server - on ErrorResponse, waits until protocol is
29 | * restored and unlocks connection CopyIn.endCopy() ->CopyInImpl.endCopy()
30 | * ->QueryExecutorImpl.endCopy() - sends CopyDone - processCopyResults() - on CommandComplete
31 | * ->CopyOperationImpl.handleCommandComplete() - sets updatedRowCount when applicable - on
32 | * ReadyForQuery unlock() connection for use by other operations <-return:
33 | * CopyInImpl.getUpdatedRowCount()
34 | */
35 | public class CopyInImpl extends CopyOperationImpl implements CopyIn {
36 |
37 | public void writeToCopy(byte[] data, int off, int siz) throws SQLException {
38 | queryExecutor.writeToCopy(this, data, off, siz);
39 | }
40 |
41 | public void writeToCopy(ByteStreamWriter from) throws SQLException {
42 | queryExecutor.writeToCopy(this, from);
43 | }
44 |
45 | public void flushCopy() throws SQLException {
46 | queryExecutor.flushCopy(this);
47 | }
48 |
49 | public long endCopy() throws SQLException {
50 | return queryExecutor.endCopy(this);
51 | }
52 |
53 | protected void handleCopydata(byte[] data) throws RedshiftException {
54 | throw new RedshiftException(GT.tr("CopyIn copy direction can't receive data"),
55 | RedshiftState.PROTOCOL_VIOLATION);
56 | }
57 | }
58 |
--------------------------------------------------------------------------------
/src/main/java/com/amazon/redshift/core/v3/CopyOperationImpl.java:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright (c) 2009, PostgreSQL Global Development Group
3 | * See the LICENSE file in the project root for more information.
4 | */
5 |
6 | package com.amazon.redshift.core.v3;
7 |
8 | import com.amazon.redshift.copy.CopyOperation;
9 | import com.amazon.redshift.util.GT;
10 | import com.amazon.redshift.util.RedshiftException;
11 | import com.amazon.redshift.util.RedshiftState;
12 |
13 | import java.sql.SQLException;
14 |
public abstract class CopyOperationImpl implements CopyOperation {
  // Executor that owns the connection; also used as the lock monitor in isActive().
  QueryExecutorImpl queryExecutor;
  // Overall row format code reported by the server for this COPY operation.
  int rowFormat;
  // Per-field format codes reported by the server.
  int[] fieldFormats;
  // Row count parsed from the final CommandComplete status; -1 until known.
  long handledRowCount = -1;

  // Late initialization: called once the server's copy metadata has arrived.
  void init(QueryExecutorImpl q, int fmt, int[] fmts) {
    queryExecutor = q;
    rowFormat = fmt;
    fieldFormats = fmts;
  }

  public void cancelCopy() throws SQLException {
    queryExecutor.cancelCopy(this);
  }

  public int getFieldCount() {
    return fieldFormats.length;
  }

  public int getFieldFormat(int field) {
    return fieldFormats[field];
  }

  public int getFormat() {
    return rowFormat;
  }

  // Active means this operation still holds the executor's copy lock.
  public boolean isActive() {
    synchronized (queryExecutor) {
      return queryExecutor.hasLock(this);
    }
  }

  // Parses the trailing row count out of a CommandComplete status of the form
  // "COPY <rows>". The i > 3 check ensures the last space lies beyond the word
  // "COPY" itself (indices 0-3), i.e. a count is actually present.
  public void handleCommandStatus(String status) throws RedshiftException {
    if (status.startsWith("COPY")) {
      int i = status.lastIndexOf(' ');
      handledRowCount = i > 3 ? Long.parseLong(status.substring(i + 1)) : -1;
    } else {
      throw new RedshiftException(GT.tr("CommandComplete expected COPY but got: " + status),
          RedshiftState.COMMUNICATION_ERROR);
    }
  }

  /**
   * Consume received copy data.
   *
   * @param data data that was receive by copy protocol
   * @throws RedshiftException if some internal problem occurs
   */
  protected abstract void handleCopydata(byte[] data) throws RedshiftException;

  public long getHandledRowCount() {
    return handledRowCount;
  }
}
71 |
--------------------------------------------------------------------------------
/src/main/java/com/amazon/redshift/core/v3/CopyOutImpl.java:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright (c) 2009, PostgreSQL Global Development Group
3 | * See the LICENSE file in the project root for more information.
4 | */
5 |
6 | package com.amazon.redshift.core.v3;
7 |
8 | import com.amazon.redshift.copy.CopyOut;
9 |
10 | import java.sql.SQLException;
11 |
12 | /**
13 | * Anticipated flow of a COPY TO STDOUT operation:
14 | *
15 | * CopyManager.copyOut() ->QueryExecutor.startCopy() - sends given query to server
16 | * ->processCopyResults(): - receives CopyOutResponse from Server - creates new CopyOutImpl
17 | * ->initCopy(): - receives copy metadata from server ->CopyOutImpl.init() ->lock()
18 | * connection for this operation - if query fails an exception is thrown - if query returns wrong
19 | * CopyOperation, copyOut() cancels it before throwing exception <-returned: new CopyOutImpl
20 | * holding lock on connection repeat CopyOut.readFromCopy() until null
21 | * ->CopyOutImpl.readFromCopy() ->QueryExecutorImpl.readFromCopy() ->processCopyResults() -
22 | * on copydata row from server ->CopyOutImpl.handleCopydata() stores reference to byte array - on
23 | * CopyDone, CommandComplete, ReadyForQuery ->unlock() connection for use by other operations
24 | * <-returned: byte array of data received from server or null at end.
25 | */
public class CopyOutImpl extends CopyOperationImpl implements CopyOut {
  // Most recent data row delivered via handleCopydata(); null when no row
  // was received (end of copy or non-blocking read with nothing pending).
  private byte[] currentDataRow;

  public byte[] readFromCopy() throws SQLException {
    // Blocking read by default.
    return readFromCopy(true);
  }

  @Override
  public byte[] readFromCopy(boolean block) throws SQLException {
    currentDataRow = null; // reset so a stale row is never returned
    queryExecutor.readFromCopy(this, block); // may call handleCopydata() below
    return currentDataRow;
  }

  protected void handleCopydata(byte[] data) {
    currentDataRow = data;
  }
}
44 |
--------------------------------------------------------------------------------
/src/main/java/com/amazon/redshift/core/v3/DescribeRequest.java:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright (c) 2015, PostgreSQL Global Development Group
3 | * See the LICENSE file in the project root for more information.
4 | */
5 |
6 | package com.amazon.redshift.core.v3;
7 |
8 | /**
9 | * Information for "pending describe queue".
10 | *
11 | * @see QueryExecutorImpl#pendingDescribeStatementQueue
12 | */
class DescribeRequest {
  // Query whose server-side statement is being described.
  public final SimpleQuery query;
  // Parameter list associated with the describe request.
  public final SimpleParameterList parameterList;
  // True when only a describe (no execute) was requested.
  public final boolean describeOnly;
  // Server-side statement name the describe refers to.
  public final String statementName;

  DescribeRequest(SimpleQuery query, SimpleParameterList parameterList,
      boolean describeOnly, String statementName) {
    this.query = query;
    this.parameterList = parameterList;
    this.describeOnly = describeOnly;
    this.statementName = statementName;
  }
}
27 |
--------------------------------------------------------------------------------
/src/main/java/com/amazon/redshift/core/v3/ExecuteRequest.java:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright (c) 2015, PostgreSQL Global Development Group
3 | * See the LICENSE file in the project root for more information.
4 | */
5 |
6 | package com.amazon.redshift.core.v3;
7 |
8 | /**
9 | * Information for "pending execute queue".
10 | *
11 | * @see QueryExecutorImpl#pendingExecuteQueue
12 | */
class ExecuteRequest {
  // Query the pending Execute message belongs to.
  public final SimpleQuery query;
  // Portal the query executes against.
  public final Portal portal;
  // True when the request was sent via the simple (rather than extended) protocol.
  public final boolean asSimple;

  ExecuteRequest(SimpleQuery query, Portal portal, boolean asSimple) {
    this.query = query;
    this.portal = portal;
    this.asSimple = asSimple;
  }
}
24 |
--------------------------------------------------------------------------------
/src/main/java/com/amazon/redshift/core/v3/MessageLoopState.java:
--------------------------------------------------------------------------------
1 | package com.amazon.redshift.core.v3;
2 |
3 | import com.amazon.redshift.core.Tuple;
4 |
5 | /**
6 | * Keep the state of the message loop for Ring Buffer to work on separate thread.
7 | * This is use in processResult(). It store all local vars of processResult() methods,
8 | * so it can process in multiple threads.
9 | *
10 | * @author igarish
11 | *
12 | */
13 | public class MessageLoopState
14 | {
15 | // All vars are package-private, so no need to expose accessor methods.
16 | RedshiftRowsBlockingQueue queueTuples;
17 |
18 | // At the end of a command execution we have the CommandComplete
19 | // message to tell us we're done, but with a describeOnly command
20 | // we have no real flag to let us know we're done. We've got to
21 | // look for the next RowDescription or NoData message and return
22 | // from there.
23 | boolean doneAfterRowDescNoData;
24 |
25 | // Constructor
26 | public MessageLoopState()
27 | {
28 | initMessageLoopState(null, false);
29 | }
30 |
31 | public MessageLoopState(RedshiftRowsBlockingQueue queueTuples,
32 | boolean doneAfterRowDescNoData)
33 | {
34 | initMessageLoopState(queueTuples,
35 | doneAfterRowDescNoData);
36 | }
37 |
38 | /**
39 | * Initialize the object before starting the run.
40 | *
41 | */
42 | void initMessageLoopState(RedshiftRowsBlockingQueue queueTuples,
43 | boolean doneAfterRowDescNoData)
44 | {
45 | this.queueTuples = queueTuples;
46 | this.doneAfterRowDescNoData = doneAfterRowDescNoData;
47 | }
48 | }
49 |
50 |
--------------------------------------------------------------------------------
/src/main/java/com/amazon/redshift/core/v3/Portal.java:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright (c) 2004, PostgreSQL Global Development Group
3 | * See the LICENSE file in the project root for more information.
4 | */
5 | // Copyright (c) 2004, Open Cloud Limited.
6 |
7 | package com.amazon.redshift.core.v3;
8 |
9 | import com.amazon.redshift.core.ResultCursor;
10 | import com.amazon.redshift.core.Utils;
11 |
12 | import java.lang.ref.PhantomReference;
13 |
14 | /**
15 | * V3 ResultCursor implementation in terms of backend Portals. This holds the state of a single
16 | * Portal. We use a PhantomReference managed by our caller to handle resource cleanup.
17 | *
18 | * @author Oliver Jowett (oliver@opencloud.com)
19 | */
20 | class Portal implements ResultCursor {
21 | Portal(SimpleQuery query, String portalName) {
22 | this.query = query;
23 | this.portalName = portalName;
24 | this.encodedName = Utils.encodeUTF8(portalName);
25 | }
26 |
27 | public void close() {
28 | if (cleanupRef != null) {
29 | cleanupRef.clear();
30 | cleanupRef.enqueue();
31 | cleanupRef = null;
32 | }
33 | }
34 |
35 | String getPortalName() {
36 | return portalName;
37 | }
38 |
39 | byte[] getEncodedPortalName() {
40 | return encodedName;
41 | }
42 |
43 | SimpleQuery getQuery() {
44 | return query;
45 | }
46 |
47 | void setCleanupRef(PhantomReference> cleanupRef) {
48 | this.cleanupRef = cleanupRef;
49 | }
50 |
51 | public String toString() {
52 | return portalName;
53 | }
54 |
55 | // Holding on to a reference to the generating query has
56 | // the nice side-effect that while this Portal is referenced,
57 | // so is the SimpleQuery, so the underlying statement won't
58 | // be closed while the portal is open (the backend closes
59 | // all open portals when the statement is closed)
60 |
61 | private final SimpleQuery query;
62 | private final String portalName;
63 | private final byte[] encodedName;
64 | private PhantomReference> cleanupRef;
65 | }
66 |
--------------------------------------------------------------------------------
/src/main/java/com/amazon/redshift/core/v3/TypeTransferModeRegistry.java:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright (c) 2003, PostgreSQL Global Development Group
3 | * See the LICENSE file in the project root for more information.
4 | */
5 |
6 | package com.amazon.redshift.core.v3;
7 |
/**
 * Per-oid decision source for whether values travel over the wire in
 * binary or text format.
 */
public interface TypeTransferModeRegistry {
  /**
   * Returns if given oid should be sent in binary format.
   * @param oid type oid
   * @return true if given oid should be sent in binary format
   */
  boolean useBinaryForSend(int oid);

  /**
   * Returns if given oid should be received in binary format.
   * @param oid type oid
   * @return true if given oid should be received in binary format
   */
  boolean useBinaryForReceive(int oid);
}
23 |
--------------------------------------------------------------------------------
/src/main/java/com/amazon/redshift/core/v3/V3ParameterList.java:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright (c) 2004, PostgreSQL Global Development Group
3 | * See the LICENSE file in the project root for more information.
4 | */
5 | // Copyright (c) 2004, Open Cloud Limited.
6 |
7 | package com.amazon.redshift.core.v3;
8 |
9 | import com.amazon.redshift.core.ParameterList;
10 |
11 | import java.sql.SQLException;
12 |
13 | /**
14 | * Common interface for all V3 parameter list implementations.
15 | *
16 | * @author Oliver Jowett (oliver@opencloud.com)
17 | */
interface V3ParameterList extends ParameterList {
  /**
   * Ensure that all parameters in this list have been assigned values. Return silently if all is
   * well, otherwise throw an appropriate exception.
   *
   * @throws SQLException if not all parameters are set.
   */
  void checkAllParametersSet() throws SQLException;

  /**
   * Convert any function output parameters to the correct type (void) and set an ignorable value
   * for it.
   */
  void convertFunctionOutParameters();

  /**
   * Return a list of the SimpleParameterList objects that make up this parameter list. If this
   * object is already a SimpleParameterList, returns null (avoids an extra array construction in
   * the common case).
   *
   * @return an array of single-statement parameter lists, or {@code null} if this object is
   *         already a single-statement parameter list.
   */
  SimpleParameterList[] getSubparams();

  /**
   * Return the parameter type information.
   * @return an array of {@link com.amazon.redshift.core.Oid} type information
   */
  int[] getParamTypes();

  /**
   * Return the flags for each parameter.
   * @return an array of bytes used to store flags.
   */
  byte[] getFlags();

  /**
   * Return the encoding for each parameter.
   * @return nested byte array of bytes with encoding information.
   */
  byte[][] getEncoding();
}
61 |
--------------------------------------------------------------------------------
/src/main/java/com/amazon/redshift/ds/RedshiftSimpleDataSource.java:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright (c) 2004, PostgreSQL Global Development Group
3 | * See the LICENSE file in the project root for more information.
4 | */
5 |
6 | package com.amazon.redshift.ds;
7 |
8 | import com.amazon.redshift.ds.common.BaseDataSource;
9 |
10 | import java.io.IOException;
11 | import java.io.ObjectInputStream;
12 | import java.io.ObjectOutputStream;
13 | import java.io.Serializable;
14 | import java.sql.SQLException;
15 |
16 | import javax.sql.DataSource;
17 |
18 | /**
19 | * Simple DataSource which does not perform connection pooling. In order to use the DataSource, you
20 | * must set the property databaseName. The settings for serverName, portNumber, user, and password
21 | * are optional. Note: these properties are declared in the superclass.
22 | *
23 | * @author Aaron Mulder (ammulder@chariotsolutions.com)
24 | */
25 | public class RedshiftSimpleDataSource extends BaseDataSource implements DataSource, Serializable {
26 | /**
27 | * Gets a description of this DataSource.
28 | */
29 | public String getDescription() {
30 | return "Non-Pooling DataSource from " + com.amazon.redshift.util.DriverInfo.DRIVER_FULL_NAME;
31 | }
32 |
33 | private void writeObject(ObjectOutputStream out) throws IOException {
34 | writeBaseObject(out);
35 | }
36 |
37 | private void readObject(ObjectInputStream in) throws IOException, ClassNotFoundException {
38 | readBaseObject(in);
39 | }
40 |
41 | public boolean isWrapperFor(Class> iface) throws SQLException {
42 | return iface.isAssignableFrom(getClass());
43 | }
44 |
45 | public T unwrap(Class iface) throws SQLException {
46 | if (iface.isAssignableFrom(getClass())) {
47 | return iface.cast(this);
48 | }
49 | throw new SQLException("Cannot unwrap to " + iface.getName());
50 | }
51 | }
52 |
--------------------------------------------------------------------------------
/src/main/java/com/amazon/redshift/gss/GSSCallbackHandler.java:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright (c) 2008, PostgreSQL Global Development Group
3 | * See the LICENSE file in the project root for more information.
4 | */
5 |
6 | package com.amazon.redshift.gss;
7 |
8 | import java.io.IOException;
9 |
10 | import javax.security.auth.callback.Callback;
11 | import javax.security.auth.callback.CallbackHandler;
12 | import javax.security.auth.callback.NameCallback;
13 | import javax.security.auth.callback.PasswordCallback;
14 | import javax.security.auth.callback.TextOutputCallback;
15 | import javax.security.auth.callback.UnsupportedCallbackException;
16 |
/**
 * JAAS callback handler that supplies the configured user name and password
 * during GSS authentication and echoes text messages from the login module.
 */
public class GSSCallbackHandler implements CallbackHandler {

  private final String user;
  private final String password;

  public GSSCallbackHandler(String user, String password) {
    this.user = user;
    this.password = password;
  }

  public void handle(Callback[] callbacks) throws IOException, UnsupportedCallbackException {
    for (Callback cb : callbacks) {
      if (cb instanceof TextOutputCallback) {
        printMessage((TextOutputCallback) cb);
      } else if (cb instanceof NameCallback) {
        ((NameCallback) cb).setName(user);
      } else if (cb instanceof PasswordCallback) {
        supplyPassword((PasswordCallback) cb);
      } else {
        throw new UnsupportedCallbackException(cb, "Unrecognized Callback");
      }
    }
  }

  /** Prints an informational/error/warning message from the login module. */
  private static void printMessage(TextOutputCallback toc) throws IOException {
    switch (toc.getMessageType()) {
      case TextOutputCallback.INFORMATION:
        System.out.println("INFO: " + toc.getMessage());
        break;
      case TextOutputCallback.ERROR:
        System.out.println("ERROR: " + toc.getMessage());
        break;
      case TextOutputCallback.WARNING:
        System.out.println("WARNING: " + toc.getMessage());
        break;
      default:
        throw new IOException("Unsupported message type: " + toc.getMessageType());
    }
  }

  /** Supplies the configured password, failing when none is available. */
  private void supplyPassword(PasswordCallback pc) throws IOException {
    if (password == null) {
      throw new IOException("No cached kerberos ticket found and no password supplied.");
    }
    pc.setPassword(password.toCharArray());
  }

}
60 |
--------------------------------------------------------------------------------
/src/main/java/com/amazon/redshift/hostchooser/CandidateHost.java:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright (c) 2017, PostgreSQL Global Development Group
3 | * See the LICENSE file in the project root for more information.
4 | */
5 |
6 | package com.amazon.redshift.hostchooser;
7 |
8 | import com.amazon.redshift.util.HostSpec;
9 |
10 | /**
11 | * Candidate host to be connected.
12 | */
public class CandidateHost {
  // Host/port to attempt a connection to.
  public final HostSpec hostSpec;
  // Server-type requirement this candidate must satisfy.
  public final HostRequirement targetServerType;

  public CandidateHost(HostSpec hostSpec, HostRequirement targetServerType) {
    this.hostSpec = hostSpec;
    this.targetServerType = targetServerType;
  }
}
22 |
--------------------------------------------------------------------------------
/src/main/java/com/amazon/redshift/hostchooser/HostChooser.java:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright (c) 2014, PostgreSQL Global Development Group
3 | * See the LICENSE file in the project root for more information.
4 | */
5 |
6 | package com.amazon.redshift.hostchooser;
7 |
8 | import java.util.Iterator;
9 |
10 | /**
11 | * Lists connections in preferred order.
12 | */
13 | public interface HostChooser extends Iterable {
14 | /**
15 | * Lists connection hosts in preferred order.
16 | *
17 | * @return connection hosts in preferred order.
18 | */
19 | @Override
20 | Iterator iterator();
21 | }
22 |
--------------------------------------------------------------------------------
/src/main/java/com/amazon/redshift/hostchooser/HostChooserFactory.java:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright (c) 2014, PostgreSQL Global Development Group
3 | * See the LICENSE file in the project root for more information.
4 | */
5 |
6 | package com.amazon.redshift.hostchooser;
7 |
8 | import com.amazon.redshift.util.HostSpec;
9 |
10 | import java.util.Properties;
11 |
12 | /**
13 | * Chooses a {@link HostChooser} instance based on the number of hosts and properties.
14 | */
15 | public class HostChooserFactory {
16 |
17 | public static HostChooser createHostChooser(HostSpec[] hostSpecs,
18 | HostRequirement targetServerType, Properties info) {
19 | if (hostSpecs.length == 1) {
20 | return new SingleHostChooser(hostSpecs[0], targetServerType);
21 | }
22 | return new MultiHostChooser(hostSpecs, targetServerType, info);
23 | }
24 | }
25 |
--------------------------------------------------------------------------------
/src/main/java/com/amazon/redshift/hostchooser/HostRequirement.java:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright (c) 2014, PostgreSQL Global Development Group
3 | * See the LICENSE file in the project root for more information.
4 | */
5 |
6 | package com.amazon.redshift.hostchooser;
7 |
8 | /**
9 | * Describes the required server type.
10 | */
11 | public enum HostRequirement {
12 | any {
13 | public boolean allowConnectingTo(HostStatus status) {
14 | return status != HostStatus.ConnectFail;
15 | }
16 | },
17 | /**
18 | * @deprecated we no longer use the terms master or slave in the driver, or the Redshift
19 | * project.
20 | */
21 | @Deprecated
22 | master {
23 | public boolean allowConnectingTo(HostStatus status) {
24 | return primary.allowConnectingTo(status);
25 | }
26 | },
27 | primary {
28 | public boolean allowConnectingTo(HostStatus status) {
29 | return status == HostStatus.Primary || status == HostStatus.ConnectOK;
30 | }
31 | },
32 | secondary {
33 | public boolean allowConnectingTo(HostStatus status) {
34 | return status == HostStatus.Secondary || status == HostStatus.ConnectOK;
35 | }
36 | },
37 | preferSecondary {
38 | public boolean allowConnectingTo(HostStatus status) {
39 | return status != HostStatus.ConnectFail;
40 | }
41 | };
42 |
43 | public abstract boolean allowConnectingTo(HostStatus status);
44 |
45 | /**
46 | * The Redshift project has decided not to use the term slave to refer to alternate servers.
47 | * secondary or standby is preferred. We have arbitrarily chosen secondary.
48 | * As of Jan 2018 in order not to break existing code we are going to accept both slave or
49 | * secondary for names of alternate servers.
50 | *
51 | * The current policy is to keep accepting this silently but not document slave, or slave preferSlave
52 | *
53 | * As of Jul 2018 silently deprecate the use of the word master as well
54 | *
55 | * @param targetServerType the value of {@code targetServerType} connection property
56 | * @return HostRequirement
57 | */
58 |
59 | public static HostRequirement getTargetServerType(String targetServerType) {
60 |
61 | String allowSlave = targetServerType.replace("lave", "econdary").replace("master", "primary");
62 | return valueOf(allowSlave);
63 | }
64 |
65 | }
66 |
--------------------------------------------------------------------------------
/src/main/java/com/amazon/redshift/hostchooser/HostStatus.java:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright (c) 2014, PostgreSQL Global Development Group
3 | * See the LICENSE file in the project root for more information.
4 | */
5 |
6 | package com.amazon.redshift.hostchooser;
7 |
8 | /**
9 | * Known state of a server.
10 | */
public enum HostStatus {
  ConnectFail,  // last connection attempt failed
  ConnectOK,    // reachable; role not determined (accepted by both primary and secondary requirements)
  Primary,
  Secondary
}
17 |
--------------------------------------------------------------------------------
/src/main/java/com/amazon/redshift/hostchooser/SingleHostChooser.java:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright (c) 2014, PostgreSQL Global Development Group
3 | * See the LICENSE file in the project root for more information.
4 | */
5 |
6 | package com.amazon.redshift.hostchooser;
7 |
8 | import com.amazon.redshift.util.HostSpec;
9 |
10 | import java.util.Collection;
11 | import java.util.Collections;
12 | import java.util.Iterator;
13 |
14 | /**
15 | * Host chooser that returns the single host.
16 | */
17 | class SingleHostChooser implements HostChooser {
18 | private final Collection candidateHost;
19 |
20 | SingleHostChooser(HostSpec hostSpec, HostRequirement targetServerType) {
21 | this.candidateHost = Collections.singletonList(new CandidateHost(hostSpec, targetServerType));
22 | }
23 |
24 | @Override
25 | public Iterator iterator() {
26 | return candidateHost.iterator();
27 | }
28 | }
29 |
--------------------------------------------------------------------------------
/src/main/java/com/amazon/redshift/httpclient/log/IamCustomLogFactory.java:
--------------------------------------------------------------------------------
1 | package com.amazon.redshift.httpclient.log;
2 |
3 | import org.apache.commons.logging.Log;
4 | import org.apache.commons.logging.LogConfigurationException;
5 | import org.apache.commons.logging.impl.LogFactoryImpl;
6 | import org.apache.commons.logging.impl.NoOpLog;
7 |
8 | /**
9 | * This class provides an implementation of LogFactoryImpl that will prevent any http wire logging.
10 | * This was requested as a security measure to prevent possible interception of user names and
11 | * passwords when connecting with IAM.
12 | */
13 | public class IamCustomLogFactory extends LogFactoryImpl
14 | {
15 | /**
16 | * The class to block logging for.
17 | */
18 | private static String BANNED = "org.apache.http.wire";
19 |
20 | /**
21 | * Get the Log indicated by the class name. If trying to get wire logs, block by returning
22 | * new NoOpLog instance.
23 | *
24 | * @param clazz The log class to return.
25 | */
26 | @Override
27 | public Log getInstance(Class clazz) throws LogConfigurationException
28 | {
29 | if (clazz.getName().equals(BANNED))
30 | {
31 | return new NoOpLog();
32 | }
33 | else
34 | {
35 | return super.getInstance(clazz);
36 | }
37 | }
38 |
39 | /**
40 | * Get the Log indicated by the name. If trying to get wire logs, block by returning
41 | * new NoOpLog instance.
42 | *
43 | * @param name The name of the log class to return.
44 | */
45 | @Override
46 | public Log getInstance(String name) throws LogConfigurationException
47 | {
48 |
49 | if (name.equals(BANNED))
50 | {
51 | return new NoOpLog();
52 | }
53 | else
54 | {
55 | return super.getInstance(name);
56 | }
57 | }
58 |
59 | }
60 |
--------------------------------------------------------------------------------
/src/main/java/com/amazon/redshift/jdbc/AutoSave.java:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright (c) 2005, PostgreSQL Global Development Group
3 | * See the LICENSE file in the project root for more information.
4 | */
5 |
6 | package com.amazon.redshift.jdbc;
7 |
public enum AutoSave {
  NEVER,
  ALWAYS,
  CONSERVATIVE;

  // Connection-property spelling: the enum name in lower case.
  private final String value;

  AutoSave() {
    // Locale.ROOT keeps the mapping stable regardless of the default locale:
    // under the Turkish locale, "CONSERVATIVE".toLowerCase() would produce a
    // dotless 'ı' and of("conservative") would fail symmetrically.
    value = this.name().toLowerCase(java.util.Locale.ROOT);
  }

  /**
   * Returns the lower-case property value for this mode, e.g. "conservative".
   *
   * @return the connection-property spelling of this constant
   */
  public String value() {
    return value;
  }

  /**
   * Parses a property value (case-insensitive) into an AutoSave mode.
   *
   * @param value property value such as "never", "always" or "conservative"
   * @return the matching constant
   * @throws IllegalArgumentException if the value matches no constant
   */
  public static AutoSave of(String value) {
    return valueOf(value.toUpperCase(java.util.Locale.ROOT));
  }
}
27 |
--------------------------------------------------------------------------------
/src/main/java/com/amazon/redshift/jdbc/CallableBatchResultHandler.java:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright (c) 2016, PostgreSQL Global Development Group
3 | * See the LICENSE file in the project root for more information.
4 | */
5 |
6 | package com.amazon.redshift.jdbc;
7 |
8 | import com.amazon.redshift.core.Field;
9 | import com.amazon.redshift.core.ParameterList;
10 | import com.amazon.redshift.core.Query;
11 | import com.amazon.redshift.core.ResultCursor;
12 | import com.amazon.redshift.core.Tuple;
13 | import com.amazon.redshift.core.v3.MessageLoopState;
14 | import com.amazon.redshift.core.v3.RedshiftRowsBlockingQueue;
15 |
16 | import java.util.List;
17 |
18 | class CallableBatchResultHandler extends BatchResultHandler {
19 | CallableBatchResultHandler(RedshiftStatementImpl statement, Query[] queries, ParameterList[] parameterLists) {
20 | super(statement, queries, parameterLists, false);
21 | }
22 |
23 | public void handleResultRows(Query fromQuery, Field[] fields, List tuples, ResultCursor cursor,
24 | RedshiftRowsBlockingQueue queueTuples, int[] rowCount, Thread ringBufferThread) {
25 | /* ignore */
26 | }
27 | }
28 |
--------------------------------------------------------------------------------
/src/main/java/com/amazon/redshift/jdbc/DataSource.java:
--------------------------------------------------------------------------------
1 | package com.amazon.redshift.jdbc;
2 |
3 | import com.amazon.redshift.ds.RedshiftConnectionPoolDataSource;
4 |
5 | /**
6 | * Backward compatible DataSource class.
7 | *
8 | * @author iggarish
9 | *
10 | */
11 | public class DataSource extends RedshiftConnectionPoolDataSource {
12 | }
13 |
--------------------------------------------------------------------------------
/src/main/java/com/amazon/redshift/jdbc/Driver.java:
--------------------------------------------------------------------------------
1 | package com.amazon.redshift.jdbc;
2 |
3 | /**
4 | * Backward compatible Driver class.
5 | *
6 | * @author iggarish
7 | *
8 | */
9 | public class Driver extends com.amazon.redshift.Driver {
10 | }
11 |
--------------------------------------------------------------------------------
/src/main/java/com/amazon/redshift/jdbc/EscapeSyntaxCallMode.java:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright (c) 2019, PostgreSQL Global Development Group
3 | * See the LICENSE file in the project root for more information.
4 | */
5 |
6 | package com.amazon.redshift.jdbc;
7 |
8 | /**
9 | * Specifies whether a SELECT/CALL statement is used for the underlying SQL for JDBC escape call syntax: 'select' means to
10 | * always use SELECT, 'callIfNoReturn' means to use CALL if there is no return parameter (otherwise use SELECT), and 'call' means
11 | * to always use CALL.
12 | *
13 | * @see com.amazon.redshift.RedshiftProperty#ESCAPE_SYNTAX_CALL_MODE
14 | */
15 | public enum EscapeSyntaxCallMode {
16 | SELECT("select"),
17 | CALL_IF_NO_RETURN("callIfNoReturn"),
18 | CALL("call");
19 |
20 | private final String value;
21 |
22 | EscapeSyntaxCallMode(String value) {
23 | this.value = value;
24 | }
25 |
26 | public static EscapeSyntaxCallMode of(String mode) {
27 | for (EscapeSyntaxCallMode escapeSyntaxCallMode : values()) {
28 | if (escapeSyntaxCallMode.value.equals(mode)) {
29 | return escapeSyntaxCallMode;
30 | }
31 | }
32 | return SELECT;
33 | }
34 |
35 | public String value() {
36 | return value;
37 | }
38 | }
39 |
--------------------------------------------------------------------------------
/src/main/java/com/amazon/redshift/jdbc/PreferQueryMode.java:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright (c) 2016, PostgreSQL Global Development Group
3 | * See the LICENSE file in the project root for more information.
4 | */
5 |
6 | package com.amazon.redshift.jdbc;
7 |
8 | /**
9 | * Specifies which mode is used to execute queries to database: simple means ('Q' execute, no parse, no bind, text mode only),
10 | * extended means always use bind/execute messages, extendedForPrepared means extended for prepared statements only.
11 | *
12 | * Note: this is for debugging purposes only.
13 | *
14 | * @see com.amazon.redshift.RedshiftProperty#PREFER_QUERY_MODE
15 | */
16 | public enum PreferQueryMode {
17 | SIMPLE("simple"),
18 | EXTENDED_FOR_PREPARED("extendedForPrepared"),
19 | EXTENDED("extended"),
20 | EXTENDED_CACHE_EVERYTHING("extendedCacheEverything");
21 |
22 | private final String value;
23 |
24 | PreferQueryMode(String value) {
25 | this.value = value;
26 | }
27 |
28 | public static PreferQueryMode of(String mode) {
29 | for (PreferQueryMode preferQueryMode : values()) {
30 | if (preferQueryMode.value.equals(mode)) {
31 | return preferQueryMode;
32 | }
33 | }
34 | return EXTENDED;
35 | }
36 |
37 | public String value() {
38 | return value;
39 | }
40 | }
41 |
--------------------------------------------------------------------------------
/src/main/java/com/amazon/redshift/jdbc/RedshiftBlob.java:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright (c) 2004, PostgreSQL Global Development Group
3 | * See the LICENSE file in the project root for more information.
4 | */
5 |
6 | package com.amazon.redshift.jdbc;
7 |
8 | import com.amazon.redshift.largeobject.LargeObject;
9 |
10 | import java.sql.SQLException;
11 |
12 | public class RedshiftBlob extends AbstractBlobClob implements java.sql.Blob {
13 |
14 | public RedshiftBlob(com.amazon.redshift.core.BaseConnection conn, long oid) throws SQLException {
15 | super(conn, oid);
16 | }
17 |
18 | public synchronized java.io.InputStream getBinaryStream(long pos, long length)
19 | throws SQLException {
20 | checkFreed();
21 | LargeObject subLO = getLo(false).copy();
22 | addSubLO(subLO);
23 | if (pos > Integer.MAX_VALUE) {
24 | subLO.seek64(pos - 1, LargeObject.SEEK_SET);
25 | } else {
26 | subLO.seek((int) pos - 1, LargeObject.SEEK_SET);
27 | }
28 | return subLO.getInputStream(length);
29 | }
30 |
31 | public synchronized int setBytes(long pos, byte[] bytes) throws SQLException {
32 | return setBytes(pos, bytes, 0, bytes.length);
33 | }
34 |
35 | public synchronized int setBytes(long pos, byte[] bytes, int offset, int len)
36 | throws SQLException {
37 | assertPosition(pos);
38 | getLo(true).seek((int) (pos - 1));
39 | getLo(true).write(bytes, offset, len);
40 | return len;
41 | }
42 | }
43 |
--------------------------------------------------------------------------------
/src/main/java/com/amazon/redshift/jdbc/RedshiftSavepoint.java:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright (c) 2004, PostgreSQL Global Development Group
3 | * See the LICENSE file in the project root for more information.
4 | */
5 |
6 | package com.amazon.redshift.jdbc;
7 |
8 | import com.amazon.redshift.core.Utils;
9 | import com.amazon.redshift.util.GT;
10 | import com.amazon.redshift.util.RedshiftException;
11 | import com.amazon.redshift.util.RedshiftState;
12 |
13 | import java.sql.SQLException;
14 | import java.sql.Savepoint;
15 |
16 | public class RedshiftSavepoint implements Savepoint {
17 |
18 | private boolean isValid;
19 | private final boolean isNamed;
20 | private int id;
21 | private String name;
22 |
23 | public RedshiftSavepoint(int id) {
24 | this.isValid = true;
25 | this.isNamed = false;
26 | this.id = id;
27 | }
28 |
29 | public RedshiftSavepoint(String name) {
30 | this.isValid = true;
31 | this.isNamed = true;
32 | this.name = name;
33 | }
34 |
35 | @Override
36 | public int getSavepointId() throws SQLException {
37 | if (!isValid) {
38 | throw new RedshiftException(GT.tr("Cannot reference a savepoint after it has been released."),
39 | RedshiftState.INVALID_SAVEPOINT_SPECIFICATION);
40 | }
41 |
42 | if (isNamed) {
43 | throw new RedshiftException(GT.tr("Cannot retrieve the id of a named savepoint."),
44 | RedshiftState.WRONG_OBJECT_TYPE);
45 | }
46 |
47 | return id;
48 | }
49 |
50 | @Override
51 | public String getSavepointName() throws SQLException {
52 | if (!isValid) {
53 | throw new RedshiftException(GT.tr("Cannot reference a savepoint after it has been released."),
54 | RedshiftState.INVALID_SAVEPOINT_SPECIFICATION);
55 | }
56 |
57 | if (!isNamed) {
58 | throw new RedshiftException(GT.tr("Cannot retrieve the name of an unnamed savepoint."),
59 | RedshiftState.WRONG_OBJECT_TYPE);
60 | }
61 |
62 | return name;
63 | }
64 |
65 | public void invalidate() {
66 | isValid = false;
67 | }
68 |
69 | public String getRSName() throws SQLException {
70 | if (!isValid) {
71 | throw new RedshiftException(GT.tr("Cannot reference a savepoint after it has been released."),
72 | RedshiftState.INVALID_SAVEPOINT_SPECIFICATION);
73 | }
74 |
75 | if (isNamed) {
76 | // We need to quote and escape the name in case it
77 | // contains spaces/quotes/etc.
78 | //
79 | return Utils.escapeIdentifier(null, name).toString();
80 | }
81 |
82 | return "JDBC_SAVEPOINT_" + id;
83 | }
84 | }
85 |
--------------------------------------------------------------------------------
/src/main/java/com/amazon/redshift/jdbc/ResultWrapper.java:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright (c) 2004, PostgreSQL Global Development Group
3 | * See the LICENSE file in the project root for more information.
4 | */
5 | // Copyright (c) 2004, Open Cloud Limited.
6 |
7 | package com.amazon.redshift.jdbc;
8 |
9 | import java.sql.ResultSet;
10 |
11 | /**
12 | * Helper class that storing result info. This handles both the ResultSet and no-ResultSet result
13 | * cases with a single interface for inspecting and stepping through them.
14 | *
15 | * @author Oliver Jowett (oliver@opencloud.com)
16 | */
17 | public class ResultWrapper {
18 | public ResultWrapper(ResultSet rs) {
19 | this.rs = rs;
20 | this.updateCount = -1;
21 | this.insertOID = -1;
22 | }
23 |
24 | public ResultWrapper(long updateCount, long insertOID) {
25 | this.rs = null;
26 | this.updateCount = updateCount;
27 | this.insertOID = insertOID;
28 | }
29 |
30 | public ResultSet getResultSet() {
31 | return rs;
32 | }
33 |
34 | public long getUpdateCount() {
35 | return updateCount;
36 | }
37 |
38 | public long getInsertOID() {
39 | return insertOID;
40 | }
41 |
42 | public ResultWrapper getNext() {
43 | return next;
44 | }
45 |
46 | public void append(ResultWrapper newResult) {
47 | ResultWrapper tail = this;
48 | while (tail.next != null) {
49 | tail = tail.next;
50 | }
51 |
52 | tail.next = newResult;
53 | }
54 |
55 | private final ResultSet rs;
56 | private final long updateCount;
57 | private final long insertOID;
58 | private ResultWrapper next;
59 | }
60 |
--------------------------------------------------------------------------------
/src/main/java/com/amazon/redshift/jdbc/StatementCancelState.java:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright (c) 2004, PostgreSQL Global Development Group
3 | * See the LICENSE file in the project root for more information.
4 | */
5 |
6 | package com.amazon.redshift.jdbc;
7 |
8 | /**
9 | * Represents {@link RedshiftStatementImpl#cancel()} state.
10 | */
11 | enum StatementCancelState {
12 | IDLE,
13 | IN_QUERY,
14 | CANCELING,
15 | CANCELLED
16 | }
17 |
--------------------------------------------------------------------------------
/src/main/java/com/amazon/redshift/jdbc/UUIDArrayAssistant.java:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright (c) 2004, PostgreSQL Global Development Group
3 | * See the LICENSE file in the project root for more information.
4 | */
5 |
6 | package com.amazon.redshift.jdbc;
7 |
8 | import com.amazon.redshift.jdbc2.ArrayAssistant;
9 | import com.amazon.redshift.util.ByteConverter;
10 |
11 | import java.util.UUID;
12 |
13 | public class UUIDArrayAssistant implements ArrayAssistant {
14 | @Override
15 | public Class> baseType() {
16 | return UUID.class;
17 | }
18 |
19 | @Override
20 | public Object buildElement(byte[] bytes, int pos, int len) {
21 | return new UUID(ByteConverter.int8(bytes, pos + 0), ByteConverter.int8(bytes, pos + 8));
22 | }
23 |
24 | @Override
25 | public Object buildElement(String literal) {
26 | return UUID.fromString(literal);
27 | }
28 | }
29 |
--------------------------------------------------------------------------------
/src/main/java/com/amazon/redshift/jdbc2/ArrayAssistant.java:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright (c) 2004, PostgreSQL Global Development Group
3 | * See the LICENSE file in the project root for more information.
4 | */
5 |
6 | package com.amazon.redshift.jdbc2;
7 |
8 | /**
9 | * Implement this interface and register the its instance to ArrayAssistantRegistry, to let Redshift
10 | * driver to support more array type.
11 | *
12 | * @author Minglei Tu
13 | */
14 | public interface ArrayAssistant {
15 | /**
16 | * get array base type.
17 | *
18 | * @return array base type
19 | */
20 | Class> baseType();
21 |
22 | /**
23 | * build a array element from its binary bytes.
24 | *
25 | * @param bytes input bytes
26 | * @param pos position in input array
27 | * @param len length of the element
28 | * @return array element from its binary bytes
29 | */
30 | Object buildElement(byte[] bytes, int pos, int len);
31 |
32 | /**
33 | * build an array element from its literal string.
34 | *
35 | * @param literal string representation of array element
36 | * @return array element
37 | */
38 | Object buildElement(String literal);
39 | }
40 |
--------------------------------------------------------------------------------
/src/main/java/com/amazon/redshift/jdbc2/ArrayAssistantRegistry.java:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright (c) 2004, PostgreSQL Global Development Group
3 | * See the LICENSE file in the project root for more information.
4 | */
5 |
6 | package com.amazon.redshift.jdbc2;
7 |
8 | import java.util.HashMap;
9 | import java.util.Map;
10 |
11 | /**
12 | * Array assistants register here.
13 | *
14 | * @author Minglei Tu
15 | */
16 | public class ArrayAssistantRegistry {
17 | private static Map arrayAssistantMap =
18 | new HashMap();
19 |
20 | public static ArrayAssistant getAssistant(int oid) {
21 | return arrayAssistantMap.get(oid);
22 | }
23 |
24 | ////
25 | public static void register(int oid, ArrayAssistant assistant) {
26 | arrayAssistantMap.put(oid, assistant);
27 | }
28 | }
29 |
--------------------------------------------------------------------------------
/src/main/java/com/amazon/redshift/jdbc2/optional/ConnectionPool.java:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright (c) 2004, PostgreSQL Global Development Group
3 | * See the LICENSE file in the project root for more information.
4 | */
5 |
6 | package com.amazon.redshift.jdbc2.optional;
7 |
8 | import com.amazon.redshift.ds.RedshiftConnectionPoolDataSource;
9 |
10 | /**
11 | * @deprecated Please use {@link RedshiftConnectionPoolDataSource}
12 | */
13 | @Deprecated
14 | public class ConnectionPool extends RedshiftConnectionPoolDataSource {
15 | }
16 |
--------------------------------------------------------------------------------
/src/main/java/com/amazon/redshift/jdbc2/optional/PoolingDataSource.java:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright (c) 2004, PostgreSQL Global Development Group
3 | * See the LICENSE file in the project root for more information.
4 | */
5 |
6 | package com.amazon.redshift.jdbc2.optional;
7 |
8 | import com.amazon.redshift.ds.RedshiftPoolingDataSource;
9 |
10 | /**
11 | * @deprecated Since 2.0.0, see {@link RedshiftPoolingDataSource}
12 | */
13 | @Deprecated
14 | public class PoolingDataSource extends RedshiftPoolingDataSource {
15 | }
16 |
--------------------------------------------------------------------------------
/src/main/java/com/amazon/redshift/jdbc2/optional/SimpleDataSource.java:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright (c) 2004, PostgreSQL Global Development Group
3 | * See the LICENSE file in the project root for more information.
4 | */
5 |
6 | package com.amazon.redshift.jdbc2.optional;
7 |
8 | import com.amazon.redshift.ds.RedshiftSimpleDataSource;
9 |
10 | /**
11 | * @deprecated Please use {@link RedshiftSimpleDataSource}
12 | */
13 | @Deprecated
14 | public class SimpleDataSource extends RedshiftSimpleDataSource {
15 | }
16 |
--------------------------------------------------------------------------------
/src/main/java/com/amazon/redshift/jdbc3/Jdbc3ConnectionPool.java:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright (c) 2004, PostgreSQL Global Development Group
3 | * See the LICENSE file in the project root for more information.
4 | */
5 |
6 | package com.amazon.redshift.jdbc3;
7 |
8 | import com.amazon.redshift.ds.RedshiftConnectionPoolDataSource;
9 |
10 | /**
11 | * @deprecated Please use {@link RedshiftConnectionPoolDataSource}
12 | */
13 | @Deprecated
14 | public class Jdbc3ConnectionPool extends RedshiftConnectionPoolDataSource {
15 | }
16 |
--------------------------------------------------------------------------------
/src/main/java/com/amazon/redshift/jdbc3/Jdbc3PoolingDataSource.java:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright (c) 2004, PostgreSQL Global Development Group
3 | * See the LICENSE file in the project root for more information.
4 | */
5 |
6 | package com.amazon.redshift.jdbc3;
7 |
8 | import com.amazon.redshift.ds.RedshiftPoolingDataSource;
9 |
10 | /**
11 | * @deprecated Since 2.0.0, see {@link RedshiftPoolingDataSource}
12 | */
13 | @Deprecated
14 | public class Jdbc3PoolingDataSource extends RedshiftPoolingDataSource {
15 | }
16 |
--------------------------------------------------------------------------------
/src/main/java/com/amazon/redshift/jdbc3/Jdbc3SimpleDataSource.java:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright (c) 2004, PostgreSQL Global Development Group
3 | * See the LICENSE file in the project root for more information.
4 | */
5 |
6 | package com.amazon.redshift.jdbc3;
7 |
8 | import com.amazon.redshift.ds.RedshiftSimpleDataSource;
9 |
10 | /**
11 | * @deprecated Please use {@link RedshiftSimpleDataSource}
12 | */
13 | @Deprecated
14 | public class Jdbc3SimpleDataSource extends RedshiftSimpleDataSource {
15 | }
16 |
--------------------------------------------------------------------------------
/src/main/java/com/amazon/redshift/jdbc42/DataSource.java:
--------------------------------------------------------------------------------
1 | package com.amazon.redshift.jdbc42;
2 |
3 | /*
4 | * Class retained for backwards compatibility
5 | */
6 | public class DataSource extends com.amazon.redshift.jdbc.DataSource
7 | {
8 | }
9 |
--------------------------------------------------------------------------------
/src/main/java/com/amazon/redshift/jdbc42/Driver.java:
--------------------------------------------------------------------------------
1 | package com.amazon.redshift.jdbc42;
2 |
3 | /*
4 | * Class retained for backwards compatibility
5 | */
6 | public class Driver extends com.amazon.redshift.jdbc.Driver
7 | {
8 | }
9 |
10 |
--------------------------------------------------------------------------------
/src/main/java/com/amazon/redshift/logger/LogConsoleHandler.java:
--------------------------------------------------------------------------------
1 | package com.amazon.redshift.logger;
2 |
3 | import java.io.PrintWriter;
4 |
5 | public class LogConsoleHandler implements LogHandler {
6 |
7 | private final PrintWriter writer = new PrintWriter(System.out);
8 |
9 | @Override
10 | public synchronized void write(String message) throws Exception
11 | {
12 | writer.println(message);
13 | writer.flush();
14 | }
15 |
16 | @Override
17 | public synchronized void close() throws Exception {
18 | // Do nothing as Writer is on the stdout.
19 | }
20 |
21 | @Override
22 | public synchronized void flush() {
23 | if (writer != null) {
24 | writer.flush();
25 | }
26 | }
27 | }
28 |
--------------------------------------------------------------------------------
/src/main/java/com/amazon/redshift/logger/LogHandler.java:
--------------------------------------------------------------------------------
1 | package com.amazon.redshift.logger;
2 |
/**
 * Destination for driver log output — for example a file or the console.
 */
public interface LogHandler {

  /**
   * Write the message using this handler.
   *
   * @param message Log entry
   * @throws Exception throws when any error happens during write operation.
   */
  void write(String message) throws Exception;

  /**
   * Release any resources held by this handler.
   *
   * @throws Exception when closing the underlying destination fails.
   */
  void close() throws Exception;

  /** Push any buffered output through to the destination. */
  void flush();
}
18 |
--------------------------------------------------------------------------------
/src/main/java/com/amazon/redshift/logger/LogLevel.java:
--------------------------------------------------------------------------------
1 | package com.amazon.redshift.logger;
2 |
3 | import java.util.ArrayList;
4 |
/**
 * Driver logging severity levels, ordered OFF &lt; ERROR &lt; INFO &lt;
 * FUNCTION &lt; DEBUG.
 */
public enum LogLevel {
  OFF,
  ERROR,
  INFO,
  FUNCTION,
  DEBUG;

  // The previous private "names" ArrayList was populated in a static block
  // but never read anywhere in the class (and it is private, so it could not
  // be read elsewhere) — removed as dead code.

  /**
   * Maps a numeric level to a {@link LogLevel}.
   *
   * <p>0–4 map to the constants in declaration order; 5 (legacy TRACE) and
   * 6 (legacy DEBUG) both map to {@link #DEBUG}; anything else maps to
   * {@link #OFF}.</p>
   *
   * @param level numeric log level
   * @return the corresponding level, or OFF for unknown values
   */
  public static LogLevel getLogLevel(int level) {
    switch (level) {
      case 0:
        return OFF;
      case 1:
        return ERROR;
      case 2:
        return INFO;
      case 3:
        return FUNCTION;
      case 4:
      case 5: // TRACE for backward compatibility
      case 6: // DEBUG for backward compatibility
        return DEBUG;
      default:
        return OFF;
    }
  }

  /**
   * Parses a level name (case-insensitive) or a numeric string into a
   * {@link LogLevel}. "TRACE" is accepted as an alias for DEBUG for backward
   * compatibility; null, empty, or unparseable input yields {@link #OFF}.
   *
   * @param level level name or numeric string; may be null
   * @return the corresponding level, or OFF when it cannot be determined
   */
  public static LogLevel getLogLevel(String level) {
    if (level == null || level.isEmpty()) {
      return OFF;
    }
    if (level.equalsIgnoreCase("OFF")) {
      return OFF;
    }
    if (level.equalsIgnoreCase("ERROR")) {
      return ERROR;
    }
    if (level.equalsIgnoreCase("INFO")) {
      return INFO;
    }
    if (level.equalsIgnoreCase("FUNCTION")) {
      return FUNCTION;
    }
    if (level.equalsIgnoreCase("DEBUG")
        || level.equalsIgnoreCase("TRACE")) { // TRACE is for backward compatibility
      return DEBUG;
    }
    try {
      return getLogLevel(Integer.parseInt(level));
    } catch (NumberFormatException ignored) {
      // Not a known name and not a number: stay at OFF.
      return OFF;
    }
  }
}
109 |
--------------------------------------------------------------------------------
/src/main/java/com/amazon/redshift/logger/LogWriterHandler.java:
--------------------------------------------------------------------------------
1 | package com.amazon.redshift.logger;
2 |
3 | import java.io.IOException;
4 | import java.io.Writer;
5 |
6 | public class LogWriterHandler implements LogHandler {
7 |
8 | private final Writer writer;
9 |
10 | public LogWriterHandler(Writer inWriter) throws Exception {
11 | writer = inWriter;
12 | }
13 |
14 | @Override
15 | public synchronized void write(String message) throws Exception
16 | {
17 | writer.write(message);
18 | writer.flush();
19 | }
20 |
21 | @Override
22 | public synchronized void close() throws Exception {
23 | // Do nothing as Writer is not created by the JDBC driver.
24 | }
25 |
26 | @Override
27 | public synchronized void flush() {
28 | if (writer != null) {
29 | try {
30 | writer.flush();
31 | } catch (IOException e) {
32 | // Ignore
33 | }
34 | }
35 | }
36 |
37 | }
38 |
--------------------------------------------------------------------------------
/src/main/java/com/amazon/redshift/osgi/RedshiftBundleActivator.java:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright (c) 2003, PostgreSQL Global Development Group
3 | * See the LICENSE file in the project root for more information.
4 | */
5 |
6 | package com.amazon.redshift.osgi;
7 |
8 | import com.amazon.redshift.Driver;
9 |
10 | import org.osgi.framework.BundleActivator;
11 | import org.osgi.framework.BundleContext;
12 | import org.osgi.framework.ServiceRegistration;
13 | import org.osgi.service.jdbc.DataSourceFactory;
14 |
15 | import java.util.Dictionary;
16 | import java.util.Hashtable;
17 |
18 | /**
19 | * This class is an OSGi Bundle Activator and should only be used internally by the OSGi Framework.
20 | */
21 | public class RedshiftBundleActivator implements BundleActivator {
22 | private ServiceRegistration> registration;
23 |
24 | public void start(BundleContext context) throws Exception {
25 | Dictionary properties = new Hashtable();
26 | properties.put(DataSourceFactory.OSGI_JDBC_DRIVER_CLASS, Driver.class.getName());
27 | properties.put(DataSourceFactory.OSGI_JDBC_DRIVER_NAME, com.amazon.redshift.util.DriverInfo.DRIVER_NAME);
28 | properties.put(DataSourceFactory.OSGI_JDBC_DRIVER_VERSION, com.amazon.redshift.util.DriverInfo.DRIVER_VERSION);
29 | try {
30 | registration = context.registerService(DataSourceFactory.class.getName(),
31 | new RedshiftDataSourceFactory(), properties);
32 | } catch (NoClassDefFoundError e) {
33 | String msg = e.getMessage();
34 | if (msg != null && msg.contains("org/osgi/service/jdbc/DataSourceFactory")) {
35 | if (!Boolean.getBoolean("rsjdbc.osgi.debug")) {
36 | return;
37 | }
38 |
39 | new IllegalArgumentException("Unable to load DataSourceFactory. "
40 | + "Will ignore DataSourceFactory registration. If you need one, "
41 | + "ensure org.osgi.enterprise is on the classpath", e).printStackTrace();
42 | // just ignore. Assume OSGi-enterprise is not loaded
43 | return;
44 | }
45 | throw e;
46 | }
47 | }
48 |
49 | public void stop(BundleContext context) throws Exception {
50 | if (registration != null) {
51 | registration.unregister();
52 | registration = null;
53 | }
54 |
55 | if (Driver.isRegistered()) {
56 | Driver.deregister();
57 | }
58 | }
59 | }
60 |
--------------------------------------------------------------------------------
/src/main/java/com/amazon/redshift/plugin/BasicJwtCredentialsProvider.java:
--------------------------------------------------------------------------------
1 | package com.amazon.redshift.plugin;
2 |
3 | import java.io.IOException;
4 |
5 | /**
6 | * A basic JWT credential provider class. This class can be changed and implemented to work with
7 | * any desired JWT service provider.
8 | */
9 | public class BasicJwtCredentialsProvider extends JwtCredentialsProvider
10 | {
11 | private static final String KEY_WEB_IDENTITY_TOKEN = "webIdentityToken";
12 |
13 | // Mandatory parameters
14 | private String m_jwt;
15 |
16 | /**
17 | * Optional default constructor.
18 | */
19 | public BasicJwtCredentialsProvider()
20 | {
21 | m_disableCache = true;
22 | }
23 |
24 | private void checkRequiredParameters() throws IOException
25 | {
26 | if (isNullOrEmpty(m_jwt))
27 | {
28 | throw new IOException("Missing required property: " + KEY_WEB_IDENTITY_TOKEN);
29 | }
30 | }
31 |
32 | @Override
33 | public String getPluginSpecificCacheKey() {
34 | return m_jwt;
35 | }
36 |
37 | @Override
38 | public void addParameter(String key, String value)
39 | {
40 | // The parent class will take care of setting up all other connection properties which are
41 | // mentioned in the Redshift JDBC driver documentation.
42 | super.addParameter(key, value);
43 |
44 | if (KEY_WEB_IDENTITY_TOKEN.equalsIgnoreCase(key))
45 | {
46 | m_jwt = value;
47 | }
48 |
49 | }
50 | /**
51 | * This method needs to return the JWT string returned by the specific JWT provider
52 | * being used for this implementation. How you get this string will depend on the specific JWT
53 | * provider you are using. This method can decode jwt and process any custom claim/tag in it.
54 | *
55 | * This will be used by the JwtCredentialsProvider parent class to get the temporary credentials.
56 | *
57 | * @return The JWT string.
58 | * @throws IOException throws exception when required parameters are missing.
59 | */
60 | @Override
61 | protected String getJwtAssertion() throws IOException
62 | {
63 | checkRequiredParameters();
64 |
65 | return m_jwt;
66 | }
67 | }
68 |
--------------------------------------------------------------------------------
/src/main/java/com/amazon/redshift/plugin/IdpTokenAuthPlugin.java:
--------------------------------------------------------------------------------
1 | package com.amazon.redshift.plugin;
2 |
3 | import com.amazon.redshift.NativeTokenHolder;
4 | import com.amazon.redshift.logger.RedshiftLogger;
5 | import com.amazonaws.util.StringUtils;
6 |
7 | import java.io.IOException;
8 | import java.util.Date;
9 |
10 | /**
11 | * A basic credential provider class.
12 | * This plugin class allows clients to directly provide any auth token that is handled by Redshift.
13 | */
14 | public class IdpTokenAuthPlugin extends CommonCredentialsProvider {
15 |
16 | private static final String KEY_TOKEN = "token";
17 | private static final String KEY_TOKEN_TYPE = "token_type";
18 | private static final int DEFAULT_IDP_TOKEN_EXPIRY_IN_SEC = 900;
19 |
20 | private String token;
21 | private String token_type;
22 |
23 | public IdpTokenAuthPlugin() {
24 | }
25 |
26 | /**
27 | * This overridden method needs to return the auth token provided by the client
28 | *
29 | * @return {@link NativeTokenHolder} A wrapper containing auth token and its expiration time information
30 | * @throws IOException indicating that some required parameter is missing.
31 | */
32 | @Override
33 | protected NativeTokenHolder getAuthToken() throws IOException {
34 | checkRequiredParameters();
35 |
36 | Date expiration = new Date(System.currentTimeMillis() + DEFAULT_IDP_TOKEN_EXPIRY_IN_SEC * 1000L);
37 | return NativeTokenHolder.newInstance(token, expiration);
38 | }
39 |
40 | private void checkRequiredParameters() throws IOException {
41 | if (StringUtils.isNullOrEmpty(token)) {
42 | throw new IOException("IdC authentication failed: The token must be included in the connection parameters.");
43 | } else if (StringUtils.isNullOrEmpty(token_type)) {
44 | throw new IOException("IdC authentication failed: The token type must be included in the connection parameters.");
45 | }
46 | }
47 |
48 | @Override
49 | public void addParameter(String key, String value) {
50 | super.addParameter(key, value);
51 |
52 | if (KEY_TOKEN.equalsIgnoreCase(key)) {
53 | token = value;
54 | if (RedshiftLogger.isEnable())
55 | m_log.logDebug("Setting token of length={0}", token.length());
56 | } else if (KEY_TOKEN_TYPE.equalsIgnoreCase(key)) {
57 | token_type = value;
58 | if (RedshiftLogger.isEnable())
59 | m_log.logDebug("Setting token_type: {0}", token_type);
60 | }
61 | }
62 | }
63 |
--------------------------------------------------------------------------------
/src/main/java/com/amazon/redshift/plugin/InternalPluginException.java:
--------------------------------------------------------------------------------
1 | package com.amazon.redshift.plugin;
2 |
3 | /**
4 | * All plugin exceptional state.
5 | *
6 | * At the end would be wrapped into {@link java.io.IOException} for API compatibility reason.
7 | */
8 | public class InternalPluginException extends RuntimeException
9 | {
10 | /**
11 | * Constructor.
12 | *
13 | * @param message Error message.
14 | */
15 | public InternalPluginException(String message)
16 | {
17 | super(message);
18 | }
19 |
20 | /**
21 | * Constructor.
22 | *
23 | * @param message Error message.
24 | * @param cause Throwable object.
25 | */
26 | public InternalPluginException(String message, Throwable cause)
27 | {
28 | super(message, cause);
29 | }
30 |
31 | /**
32 | * Constructor.
33 | *
34 | * @param cause Throwable object.
35 | */
36 | public InternalPluginException(Throwable cause)
37 | {
38 | super(cause);
39 | }
40 |
41 | /**
42 | * Wrap Exception in this class.
43 | *
44 | * @param ex Exception object.
45 | *
46 | * @return instance of this class.
47 | */
48 | public static InternalPluginException wrap(Exception ex)
49 | {
50 | return new InternalPluginException(ex);
51 | }
52 | }
53 |
--------------------------------------------------------------------------------
/src/main/java/com/amazon/redshift/plugin/httpserver/InternalServerException.java:
--------------------------------------------------------------------------------
1 | package com.amazon.redshift.plugin.httpserver;
2 |
3 | import com.amazon.redshift.plugin.InternalPluginException;
4 |
5 | /**
6 | * Wrapper exception for http server errors.
7 | *
8 | * Thread can`t throw any checked exceptions from run(), so it needs to be wrapped into RuntimeException.
9 | */
10 | public class InternalServerException extends InternalPluginException
11 | {
12 | /**
13 | * Constructor.
14 | *
15 | * @param cause Throwable object.
16 | */
17 | public InternalServerException(Throwable cause)
18 | {
19 | super(cause);
20 | }
21 |
22 | /**
23 | * Wrap Exception in this class.
24 | *
25 | * @param exceptionToWrap Exception object.
26 | *
27 | * @return instance of this class.
28 | */
29 | public static InternalServerException wrap(Exception exceptionToWrap)
30 | {
31 | return new InternalServerException(exceptionToWrap);
32 | }
33 | }
34 |
--------------------------------------------------------------------------------
/src/main/java/com/amazon/redshift/plugin/httpserver/InvalidHttpRequestHandler.java:
--------------------------------------------------------------------------------
1 | package com.amazon.redshift.plugin.httpserver;
2 |
3 | import org.apache.http.*;
4 | import org.apache.http.entity.ContentType;
5 | import org.apache.http.entity.StringEntity;
6 | import org.apache.http.protocol.HttpContext;
7 | import org.apache.http.protocol.HttpRequestHandler;
8 |
9 | import java.io.IOException;
10 | import java.nio.charset.StandardCharsets;
11 |
12 | /**
13 | * Return invalid HTML for all requests.
14 | */
15 | public class InvalidHttpRequestHandler implements HttpRequestHandler
16 | {
17 |
18 | private static final String INVALID_RESPONSE =
19 | "
The request could not be understood by the server!
";
20 |
21 | @Override
22 | public void handle(HttpRequest request, HttpResponse response, HttpContext context)
23 | throws HttpException, IOException
24 | {
25 | response.setEntity(new StringEntity(INVALID_RESPONSE,
26 | StandardCharsets.UTF_8));
27 | response.setHeader(
28 | HttpHeaders.CONTENT_TYPE,
29 | ContentType.TEXT_HTML.withCharset(StandardCharsets.UTF_8).toString());
30 | response.setStatusCode(HttpStatus.SC_NOT_FOUND);
31 | }
32 | }
33 |
--------------------------------------------------------------------------------
/src/main/java/com/amazon/redshift/plugin/httpserver/ValidHttpRequestHandler.java:
--------------------------------------------------------------------------------
1 | package com.amazon.redshift.plugin.httpserver;
2 |
3 | import org.apache.http.*;
4 | import org.apache.http.entity.ContentType;
5 | import org.apache.http.entity.StringEntity;
6 | import org.apache.http.protocol.HttpContext;
7 | import org.apache.http.protocol.HttpRequestHandler;
8 |
9 | import java.io.IOException;
10 | import java.nio.charset.StandardCharsets;
11 |
12 | /**
13 | * Return valid HTML for all requests.
14 | */
15 | public class ValidHttpRequestHandler implements HttpRequestHandler
16 | {
17 | private static final String VALID_RESPONSE = "" +
18 | "" +
20 | "Thank you for using Amazon Redshift! You can now close this window.
" + "";
21 |
22 | @Override
23 | public void handle(HttpRequest request, HttpResponse response, HttpContext context)
24 | throws HttpException, IOException
25 | {
26 | response.setEntity(new StringEntity(VALID_RESPONSE, StandardCharsets.UTF_8));
27 | response.setHeader(HttpHeaders.CONTENT_TYPE, ContentType.TEXT_HTML.withCharset(StandardCharsets.UTF_8).toString());
28 | response.setStatusCode(HttpStatus.SC_OK);
29 | }
30 | }
31 |
--------------------------------------------------------------------------------
/src/main/java/com/amazon/redshift/plugin/log-factory.properties:
--------------------------------------------------------------------------------
1 | org.apache.commons.logging.LogFactory
2 |
--------------------------------------------------------------------------------
/src/main/java/com/amazon/redshift/plugin/utils/CheckUtils.java:
--------------------------------------------------------------------------------
1 | package com.amazon.redshift.plugin.utils;
2 |
3 | import com.amazon.redshift.plugin.InternalPluginException;
4 |
5 | import java.io.IOException;
6 |
7 | import static com.amazonaws.util.StringUtils.isNullOrEmpty;
8 |
9 | /**
10 | * All for plugin parameters check.
11 | */
12 | public class CheckUtils
13 | {
14 | private CheckUtils()
15 | {
16 | }
17 |
18 | public static void checkMissingAndThrows(String parameter, String parameterName)
19 | throws InternalPluginException
20 | {
21 | if (isNullOrEmpty(parameter))
22 | {
23 | throw new InternalPluginException("Missing required property: " + parameterName);
24 | }
25 | }
26 |
27 | public static void checkInvalidAndThrows(boolean condition, String parameterName)
28 | throws InternalPluginException
29 | {
30 | if (condition)
31 | {
32 | throw new InternalPluginException("Invalid property value: " + parameterName);
33 | }
34 | }
35 |
36 | public static void checkAndThrowsWithMessage(boolean condition, String message)
37 | throws InternalPluginException
38 | {
39 | if (condition)
40 | {
41 | throw new InternalPluginException(message);
42 | }
43 | }
44 | }
45 |
--------------------------------------------------------------------------------
/src/main/java/com/amazon/redshift/plugin/utils/RandomStateUtil.java:
--------------------------------------------------------------------------------
1 | package com.amazon.redshift.plugin.utils;
2 |
import java.security.SecureRandom;
import java.util.Random;
4 |
/**
 * Random state string generating util.
 */
public class RandomStateUtil
{
    /**
     * Length of the random state string.
     */
    private static final int DEFAULT_STATE_STRING_LENGTH = 10;

    /**
     * Cryptographically strong random source, created once.
     *
     * The previous implementation re-seeded java.util.Random with
     * System.currentTimeMillis() on every call, which is predictable and —
     * worse — returns IDENTICAL "random" state strings for two calls that
     * land in the same millisecond. State strings are used as anti-forgery
     * tokens in the browser auth flow, so they must be unpredictable.
     */
    private static final SecureRandom RANDOM = new SecureRandom();

    private RandomStateUtil()
    {
        // static-only utility class
    }

    /**
     * Generates a random state string of 10 lowercase ASCII letters.
     *
     * @return generated randomly.
     */
    public static String generateRandomState()
    {
        StringBuilder buffer = new StringBuilder(DEFAULT_STATE_STRING_LENGTH);
        for (int i = 0; i < DEFAULT_STATE_STRING_LENGTH; i++)
        {
            buffer.append((char) (RANDOM.nextInt(26) + 'a'));
        }
        return buffer.toString();
    }
}
39 |
--------------------------------------------------------------------------------
/src/main/java/com/amazon/redshift/plugin/utils/ResponseUtils.java:
--------------------------------------------------------------------------------
1 | package com.amazon.redshift.plugin.utils;
2 |
3 | import org.apache.http.NameValuePair;
4 |
5 | import java.util.List;
6 |
7 | /**
8 | * Http Request/Response utils.
9 | */
10 | public class ResponseUtils
11 | {
12 |
13 | private ResponseUtils()
14 | {
15 | }
16 |
17 | /**
18 | * Find parameter by name in http request/response {@link NameValuePair} List.
19 | *
20 | * @param name name of the parameter
21 | * @param list list of parameters
22 | * @return returns value of the found parameter, otherwise null.
23 | */
24 | public static String findParameter(String name, List list)
25 | {
26 | for (NameValuePair pair : list)
27 | {
28 | if (name.equals(pair.getName()))
29 | {
30 | return pair.getValue();
31 | }
32 | }
33 | return null;
34 | }
35 | }
36 |
--------------------------------------------------------------------------------
/src/main/java/com/amazon/redshift/replication/RedshiftReplicationConnection.java:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright (c) 2016, PostgreSQL Global Development Group
3 | * See the LICENSE file in the project root for more information.
4 | */
5 |
6 | package com.amazon.redshift.replication;
7 |
8 | import com.amazon.redshift.RedshiftProperty;
9 | import com.amazon.redshift.replication.fluent.ChainedCreateReplicationSlotBuilder;
10 | import com.amazon.redshift.replication.fluent.ChainedStreamBuilder;
11 |
12 | import java.sql.SQLException;
13 |
/**
 * Replication API. Available only if the connection was created with the properties required
 * for replication: {@link RedshiftProperty#REPLICATION} and
 * {@link RedshiftProperty#ASSUME_MIN_SERVER_VERSION}. Without these properties, building a
 * replication stream fails with an exception.
 */
public interface RedshiftReplicationConnection {

  /**
   * Starts building a replication stream. After the stream is started, this connection is not
   * available for other queries until the replication stream is closed.
   *
   * @return not null fluent api for building the replication stream
   */
  ChainedStreamBuilder replicationStream();

  /**
   * Creates a replication slot that can then be used with
   * {@link RedshiftReplicationConnection#replicationStream()}.
   *
   * Replication slots provide an automated way to ensure that the master does not remove WAL
   * segments until they have been received by all standbys, and that the master does not remove
   * rows which could cause a recovery conflict even when the standby is disconnected.
   *
   * @return not null fluent api for building the create-replication-slot command
   */
  ChainedCreateReplicationSlotBuilder createReplicationSlot();

  /**
   * Drops an existing replication slot.
   *
   * @param slotName not null name of a replication slot existing in the database that should be dropped
   * @throws SQLException if the replication slot cannot be dropped.
   */
  void dropReplicationSlot(String slotName) throws SQLException;
}
46 |
--------------------------------------------------------------------------------
/src/main/java/com/amazon/redshift/replication/RedshiftReplicationConnectionImpl.java:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright (c) 2016, PostgreSQL Global Development Group
3 | * See the LICENSE file in the project root for more information.
4 | */
5 |
6 | package com.amazon.redshift.replication;
7 |
8 | import com.amazon.redshift.core.BaseConnection;
9 | import com.amazon.redshift.replication.fluent.ChainedCreateReplicationSlotBuilder;
10 | import com.amazon.redshift.replication.fluent.ChainedStreamBuilder;
11 | import com.amazon.redshift.replication.fluent.ReplicationCreateSlotBuilder;
12 | import com.amazon.redshift.replication.fluent.ReplicationStreamBuilder;
13 |
14 | import java.sql.SQLException;
15 | import java.sql.Statement;
16 |
17 | public class RedshiftReplicationConnectionImpl implements RedshiftReplicationConnection {
18 | private BaseConnection connection;
19 |
20 | public RedshiftReplicationConnectionImpl(BaseConnection connection) {
21 | this.connection = connection;
22 | }
23 |
24 | @Override
25 | public ChainedStreamBuilder replicationStream() {
26 | return new ReplicationStreamBuilder(connection);
27 | }
28 |
29 | @Override
30 | public ChainedCreateReplicationSlotBuilder createReplicationSlot() {
31 | return new ReplicationCreateSlotBuilder(connection);
32 | }
33 |
34 | @Override
35 | public void dropReplicationSlot(String slotName) throws SQLException {
36 | if (slotName == null || slotName.isEmpty()) {
37 | throw new IllegalArgumentException("Replication slot name can't be null or empty");
38 | }
39 |
40 | Statement statement = connection.createStatement();
41 | try {
42 | statement.execute("DROP_REPLICATION_SLOT " + slotName);
43 | } finally {
44 | statement.close();
45 | }
46 | }
47 | }
48 |
--------------------------------------------------------------------------------
/src/main/java/com/amazon/redshift/replication/ReplicationType.java:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright (c) 2017, PostgreSQL Global Development Group
3 | * See the LICENSE file in the project root for more information.
4 | */
5 |
6 | package com.amazon.redshift.replication;
7 |
/**
 * Kind of replication stream or slot.
 */
public enum ReplicationType {
  /** Logical replication: WAL decoded into a logical change form by an output plugin. */
  LOGICAL,
  /** Physical replication: raw binary WAL segments. */
  PHYSICAL
}
12 |
--------------------------------------------------------------------------------
/src/main/java/com/amazon/redshift/replication/fluent/AbstractCreateSlotBuilder.java:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright (c) 2016, PostgreSQL Global Development Group
3 | * See the LICENSE file in the project root for more information.
4 | */
5 |
6 | package com.amazon.redshift.replication.fluent;
7 |
8 | import com.amazon.redshift.core.BaseConnection;
9 | import com.amazon.redshift.core.ServerVersion;
10 | import com.amazon.redshift.util.GT;
11 |
12 | import java.sql.SQLFeatureNotSupportedException;
13 |
14 | public abstract class AbstractCreateSlotBuilder>
15 | implements ChainedCommonCreateSlotBuilder {
16 |
17 | protected String slotName;
18 | protected boolean temporaryOption = false;
19 | protected BaseConnection connection;
20 |
21 | protected AbstractCreateSlotBuilder(BaseConnection connection) {
22 | this.connection = connection;
23 | }
24 |
25 | protected abstract T self();
26 |
27 | @Override
28 | public T withSlotName(String slotName) {
29 | this.slotName = slotName;
30 | return self();
31 | }
32 |
33 | @Override
34 | public T withTemporaryOption() throws SQLFeatureNotSupportedException {
35 |
36 | if (!connection.haveMinimumServerVersion(ServerVersion.v10)) {
37 | throw new SQLFeatureNotSupportedException(
38 | GT.tr("Server does not support temporary replication slots")
39 | );
40 | }
41 |
42 | this.temporaryOption = true;
43 | return self();
44 | }
45 | }
46 |
--------------------------------------------------------------------------------
/src/main/java/com/amazon/redshift/replication/fluent/AbstractStreamBuilder.java:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright (c) 2016, PostgreSQL Global Development Group
3 | * See the LICENSE file in the project root for more information.
4 | */
5 |
6 | package com.amazon.redshift.replication.fluent;
7 |
8 | import com.amazon.redshift.replication.LogSequenceNumber;
9 |
10 | import java.util.concurrent.TimeUnit;
11 |
12 | public abstract class AbstractStreamBuilder>
13 | implements ChainedCommonStreamBuilder {
14 | private static final int DEFAULT_STATUS_INTERVAL = (int) TimeUnit.SECONDS.toMillis(10L);
15 | protected int statusIntervalMs = DEFAULT_STATUS_INTERVAL;
16 | protected LogSequenceNumber startPosition = LogSequenceNumber.INVALID_LSN;
17 | protected String slotName;
18 |
19 | protected abstract T self();
20 |
21 | @Override
22 | public T withStatusInterval(int time, TimeUnit format) {
23 | statusIntervalMs = (int) TimeUnit.MILLISECONDS.convert(time, format);
24 | return self();
25 | }
26 |
27 | @Override
28 | public T withStartPosition(LogSequenceNumber lsn) {
29 | this.startPosition = lsn;
30 | return self();
31 | }
32 |
33 | @Override
34 | public T withSlotName(String slotName) {
35 | this.slotName = slotName;
36 | return self();
37 | }
38 | }
39 |
--------------------------------------------------------------------------------
/src/main/java/com/amazon/redshift/replication/fluent/ChainedCommonCreateSlotBuilder.java:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright (c) 2016, PostgreSQL Global Development Group
3 | * See the LICENSE file in the project root for more information.
4 | */
5 |
6 | package com.amazon.redshift.replication.fluent;
7 |
8 | import com.amazon.redshift.replication.ReplicationSlotInfo;
9 |
10 | import java.sql.SQLException;
11 | import java.sql.SQLFeatureNotSupportedException;
12 |
13 | /**
14 | * Fluent interface for specify common parameters for create Logical and Physical replication slot.
15 | */
16 | public interface ChainedCommonCreateSlotBuilder> {
17 |
18 | /**
19 | * Replication slots provide an automated way to ensure that the master does not remove WAL
20 | * segments until they have been received by all standbys, and that the master does not remove
21 | * rows which could cause a recovery conflict even when the standby is disconnected.
22 | *
23 | * @param slotName not null unique replication slot name for create.
24 | * @return T a slot builder
25 | */
26 | T withSlotName(String slotName);
27 |
28 | /**
29 | * Temporary slots are not saved to disk and are automatically dropped on error or when
30 | * the session has finished.
31 | *
32 | * This feature is only supported by PostgreSQL versions >= 10.
33 | *
34 | * @return T a slot builder
35 | * @throws SQLFeatureNotSupportedException throws an exception, if not supported.
36 | */
37 | T withTemporaryOption() throws SQLFeatureNotSupportedException;
38 |
39 | /**
40 | * Create slot with specified parameters in database.
41 | *
42 | * @return ReplicationSlotInfo with the information of the created slot.
43 | * @throws SQLException on error
44 | */
45 | ReplicationSlotInfo make() throws SQLException;
46 | }
47 |
--------------------------------------------------------------------------------
/src/main/java/com/amazon/redshift/replication/fluent/ChainedCommonStreamBuilder.java:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright (c) 2016, PostgreSQL Global Development Group
3 | * See the LICENSE file in the project root for more information.
4 | */
5 |
6 | package com.amazon.redshift.replication.fluent;
7 |
8 | import com.amazon.redshift.replication.LogSequenceNumber;
9 |
10 | import java.util.concurrent.TimeUnit;
11 |
12 | /**
13 | * Fluent interface for specify common parameters for Logical and Physical replication.
14 | */
15 | public interface ChainedCommonStreamBuilder> {
16 |
17 | /**
18 | * Replication slots provide an automated way to ensure that the master does not remove WAL
19 | * segments until they have been received by all standbys, and that the master does not remove
20 | * rows which could cause a recovery conflict even when the standby is disconnected.
21 | *
22 | * @param slotName not null replication slot already exists on server.
23 | * @return this instance as a fluent interface
24 | */
25 | T withSlotName(String slotName);
26 |
27 | /**
28 | * Specifies the number of time between status packets sent back to the server. This allows for
29 | * easier monitoring of the progress from server. A value of zero disables the periodic status
30 | * updates completely, although an update will still be sent when requested by the server, to
31 | * avoid timeout disconnect. The default value is 10 seconds.
32 | *
33 | * @param time positive time
34 | * @param format format for specified time
35 | * @return not null fluent
36 | */
37 | T withStatusInterval(int time, TimeUnit format);
38 |
39 | /**
40 | * Specify start position from which backend will start stream changes. If parameter will not
41 | * specify, streaming starts from restart_lsn. For more details see pg_replication_slots
42 | * description.
43 | *
44 | * @param lsn not null position from which need start replicate changes
45 | * @return not null fluent
46 | */
47 | T withStartPosition(LogSequenceNumber lsn);
48 | }
49 |
--------------------------------------------------------------------------------
/src/main/java/com/amazon/redshift/replication/fluent/ChainedCreateReplicationSlotBuilder.java:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright (c) 2016, PostgreSQL Global Development Group
3 | * See the LICENSE file in the project root for more information.
4 | */
5 |
6 | package com.amazon.redshift.replication.fluent;
7 |
8 | import com.amazon.redshift.replication.fluent.logical.ChainedLogicalCreateSlotBuilder;
9 | import com.amazon.redshift.replication.fluent.physical.ChainedPhysicalCreateSlotBuilder;
10 |
/**
 * Fluent interface for creating logical and physical replication slots.
 */
public interface ChainedCreateReplicationSlotBuilder {
  /**
   * Get the logical slot builder.
   * Example usage:
   * <pre>
   *   {@code
   *
   *    pgConnection
   *        .getReplicationAPI()
   *        .createReplicationSlot()
   *        .logical()
   *        .withSlotName("mySlot")
   *        .withOutputPlugin("test_decoding")
   *        .make();
   *
   *    RedshiftReplicationStream stream =
   *        pgConnection
   *            .getReplicationAPI()
   *            .replicationStream()
   *            .logical()
   *            .withSlotName("mySlot")
   *            .withSlotOption("include-xids", false)
   *            .withSlotOption("skip-empty-xacts", true)
   *            .start();
   *
   *    while (true) {
   *      ByteBuffer buffer = stream.read();
   *      //process logical changes
   *    }
   *
   *   }
   * </pre>
   *
   * @return not null fluent api
   */
  ChainedLogicalCreateSlotBuilder logical();

  /**
   * Create physical replication stream for process wal logs in binary form.
   *
   * <p>Example usage:
   * <pre>
   *   {@code
   *
   *    pgConnection
   *        .getReplicationAPI()
   *        .createReplicationSlot()
   *        .physical()
   *        .withSlotName("mySlot")
   *        .make();
   *
   *    RedshiftReplicationStream stream =
   *        pgConnection
   *            .getReplicationAPI()
   *            .replicationStream()
   *            .physical()
   *            .withSlotName("mySlot")
   *            .start();
   *
   *    while (true) {
   *      ByteBuffer buffer = stream.read();
   *      //process binary WAL logs
   *    }
   *
   *   }
   * </pre>
   *
   * @return not null fluent api
   */
  ChainedPhysicalCreateSlotBuilder physical();
}
84 |
--------------------------------------------------------------------------------
/src/main/java/com/amazon/redshift/replication/fluent/ChainedStreamBuilder.java:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright (c) 2016, PostgreSQL Global Development Group
3 | * See the LICENSE file in the project root for more information.
4 | */
5 |
6 | package com.amazon.redshift.replication.fluent;
7 |
8 | import com.amazon.redshift.replication.fluent.logical.ChainedLogicalStreamBuilder;
9 | import com.amazon.redshift.replication.fluent.physical.ChainedPhysicalStreamBuilder;
10 |
/**
 * Start point for the fluent API that builds a replication stream (logical or physical).
 * The API is not thread safe and can be used only to create a single stream.
 */
public interface ChainedStreamBuilder {
  /**
   * Create a logical replication stream that decodes raw WAL logs into a logical form via an
   * output plugin. For background on logical decoding, see
   * <a href="https://www.postgresql.org/docs/current/static/logicaldecoding-explanation.html">
   * Logical Decoding Concepts</a>.
   *
   * <p>Example usage:
   * <pre>
   *   {@code
   *
   *    RedshiftReplicationStream stream =
   *        pgConnection
   *            .getReplicationAPI()
   *            .replicationStream()
   *            .logical()
   *            .withSlotName("test_decoding")
   *            .withSlotOption("include-xids", false)
   *            .withSlotOption("skip-empty-xacts", true)
   *            .start();
   *
   *    while (true) {
   *      ByteBuffer buffer = stream.read();
   *      //process logical changes
   *    }
   *
   *   }
   * </pre>
   *
   * @return not null fluent api
   */
  ChainedLogicalStreamBuilder logical();

  /**
   * Create physical replication stream for process wal logs in binary form.
   *
   * <p>Example usage:
   * <pre>
   *   {@code
   *
   *    LogSequenceNumber lsn = getCurrentLSN();
   *
   *    RedshiftReplicationStream stream =
   *        pgConnection
   *            .getReplicationAPI()
   *            .replicationStream()
   *            .physical()
   *            .withStartPosition(lsn)
   *            .start();
   *
   *    while (true) {
   *      ByteBuffer buffer = stream.read();
   *      //process binary WAL logs
   *    }
   *
   *   }
   * </pre>
   *
   * @return not null fluent api
   */
  ChainedPhysicalStreamBuilder physical();
}
79 |
--------------------------------------------------------------------------------
/src/main/java/com/amazon/redshift/replication/fluent/CommonOptions.java:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright (c) 2016, PostgreSQL Global Development Group
3 | * See the LICENSE file in the project root for more information.
4 | */
5 |
6 | package com.amazon.redshift.replication.fluent;
7 |
8 | import com.amazon.redshift.replication.LogSequenceNumber;
9 |
/**
 * Common parameters for logical and physical replication.
 */
public interface CommonOptions {
  /**
   * Replication slots provide an automated way to ensure that the master does not remove WAL
   * segments until they have been received by all standbys, and that the master does not remove
   * rows which could cause a recovery conflict even when the standby is disconnected.
   *
   * @return nullable replication slot name that already exists on the server and is free.
   */
  String getSlotName();

  /**
   * @return the position to start replication from. This cannot be null.
   */
  LogSequenceNumber getStartLSNPosition();

  /**
   * Specifies the number of milliseconds between status packets sent back to the server. This
   * allows for easier monitoring of the progress from server. A value of zero disables the
   * periodic status updates completely, although an update will still be sent when requested by
   * the server, to avoid timeout disconnect. The default value is 10 seconds.
   *
   * @return the current status interval
   */
  int getStatusInterval();
}
38 |
--------------------------------------------------------------------------------
/src/main/java/com/amazon/redshift/replication/fluent/ReplicationCreateSlotBuilder.java:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright (c) 2016, PostgreSQL Global Development Group
3 | * See the LICENSE file in the project root for more information.
4 | */
5 |
6 | package com.amazon.redshift.replication.fluent;
7 |
8 | import com.amazon.redshift.core.BaseConnection;
9 | import com.amazon.redshift.replication.fluent.logical.ChainedLogicalCreateSlotBuilder;
10 | import com.amazon.redshift.replication.fluent.logical.LogicalCreateSlotBuilder;
11 | import com.amazon.redshift.replication.fluent.physical.ChainedPhysicalCreateSlotBuilder;
12 | import com.amazon.redshift.replication.fluent.physical.PhysicalCreateSlotBuilder;
13 |
14 | public class ReplicationCreateSlotBuilder implements ChainedCreateReplicationSlotBuilder {
15 | private final BaseConnection baseConnection;
16 |
17 | public ReplicationCreateSlotBuilder(BaseConnection baseConnection) {
18 | this.baseConnection = baseConnection;
19 | }
20 |
21 | @Override
22 | public ChainedLogicalCreateSlotBuilder logical() {
23 | return new LogicalCreateSlotBuilder(baseConnection);
24 | }
25 |
26 | @Override
27 | public ChainedPhysicalCreateSlotBuilder physical() {
28 | return new PhysicalCreateSlotBuilder(baseConnection);
29 | }
30 | }
31 |
--------------------------------------------------------------------------------
/src/main/java/com/amazon/redshift/replication/fluent/ReplicationStreamBuilder.java:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright (c) 2016, PostgreSQL Global Development Group
3 | * See the LICENSE file in the project root for more information.
4 | */
5 |
6 | package com.amazon.redshift.replication.fluent;
7 |
8 | import com.amazon.redshift.core.BaseConnection;
9 | import com.amazon.redshift.core.ReplicationProtocol;
10 | import com.amazon.redshift.replication.RedshiftReplicationStream;
11 | import com.amazon.redshift.replication.fluent.logical.ChainedLogicalStreamBuilder;
12 | import com.amazon.redshift.replication.fluent.logical.LogicalReplicationOptions;
13 | import com.amazon.redshift.replication.fluent.logical.LogicalStreamBuilder;
14 | import com.amazon.redshift.replication.fluent.logical.StartLogicalReplicationCallback;
15 | import com.amazon.redshift.replication.fluent.physical.ChainedPhysicalStreamBuilder;
16 | import com.amazon.redshift.replication.fluent.physical.PhysicalReplicationOptions;
17 | import com.amazon.redshift.replication.fluent.physical.PhysicalStreamBuilder;
18 | import com.amazon.redshift.replication.fluent.physical.StartPhysicalReplicationCallback;
19 |
20 | import java.sql.SQLException;
21 |
22 | public class ReplicationStreamBuilder implements ChainedStreamBuilder {
23 | private final BaseConnection baseConnection;
24 |
25 | /**
26 | * @param connection not null connection with that will be associate replication
27 | */
28 | public ReplicationStreamBuilder(final BaseConnection connection) {
29 | this.baseConnection = connection;
30 | }
31 |
32 | @Override
33 | public ChainedLogicalStreamBuilder logical() {
34 | return new LogicalStreamBuilder(new StartLogicalReplicationCallback() {
35 | @Override
36 | public RedshiftReplicationStream start(LogicalReplicationOptions options) throws SQLException {
37 | ReplicationProtocol protocol = baseConnection.getReplicationProtocol();
38 | return protocol.startLogical(options, baseConnection.getLogger());
39 | }
40 | });
41 | }
42 |
43 | @Override
44 | public ChainedPhysicalStreamBuilder physical() {
45 | return new PhysicalStreamBuilder(new StartPhysicalReplicationCallback() {
46 | @Override
47 | public RedshiftReplicationStream start(PhysicalReplicationOptions options) throws SQLException {
48 | ReplicationProtocol protocol = baseConnection.getReplicationProtocol();
49 | return protocol.startPhysical(options, baseConnection.getLogger());
50 | }
51 | });
52 | }
53 | }
54 |
--------------------------------------------------------------------------------
/src/main/java/com/amazon/redshift/replication/fluent/logical/ChainedLogicalCreateSlotBuilder.java:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright (c) 2016, PostgreSQL Global Development Group
3 | * See the LICENSE file in the project root for more information.
4 | */
5 |
6 | package com.amazon.redshift.replication.fluent.logical;
7 |
8 | import com.amazon.redshift.replication.fluent.ChainedCommonCreateSlotBuilder;
9 |
10 | /**
11 | * Logical replication slot specific parameters.
12 | */
13 | public interface ChainedLogicalCreateSlotBuilder
14 | extends ChainedCommonCreateSlotBuilder {
15 |
16 | /**
17 | * Output plugin that should be use for decode physical represent WAL to some logical form.
18 | * Output plugin should be installed on server(exists in shared_preload_libraries).
19 | *
20 | * Package postgresql-contrib provides sample output plugin test_decoding that can be
21 | * use for test logical replication api
22 | *
23 | * @param outputPlugin not null name of the output plugin used for logical decoding
24 | * @return the logical slot builder
25 | */
26 | ChainedLogicalCreateSlotBuilder withOutputPlugin(String outputPlugin);
27 | }
28 |
--------------------------------------------------------------------------------
/src/main/java/com/amazon/redshift/replication/fluent/logical/ChainedLogicalStreamBuilder.java:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright (c) 2016, PostgreSQL Global Development Group
3 | * See the LICENSE file in the project root for more information.
4 | */
5 |
6 | package com.amazon.redshift.replication.fluent.logical;
7 |
8 | import com.amazon.redshift.replication.RedshiftReplicationStream;
9 | import com.amazon.redshift.replication.fluent.ChainedCommonStreamBuilder;
10 |
11 | import java.sql.SQLException;
12 | import java.util.Properties;
13 |
public interface ChainedLogicalStreamBuilder
    extends ChainedCommonStreamBuilder {
  /**
   * Open logical replication stream.
   *
   * @return not null RedshiftReplicationStream available for fetch data in logical form
   * @throws SQLException if there are errors
   */
  RedshiftReplicationStream start() throws SQLException;

  /**
   * Add a boolean option that will be passed to the slot's output plugin when the stream starts.
   *
   * @param optionName name of option
   * @param optionValue boolean value
   * @return this builder as ChainedLogicalStreamBuilder, for fluent chaining
   */

  ChainedLogicalStreamBuilder withSlotOption(String optionName, boolean optionValue);

  /**
   * Add an integer option that will be passed to the slot's output plugin when the stream starts.
   *
   * @param optionName name of option
   * @param optionValue integer value
   * @return this builder as ChainedLogicalStreamBuilder, for fluent chaining
   */
  ChainedLogicalStreamBuilder withSlotOption(String optionName, int optionValue);

  /**
   * Add a String option that will be passed to the slot's output plugin when the stream starts.
   *
   * @param optionName name of option
   * @param optionValue String value
   * @return this builder as ChainedLogicalStreamBuilder, for fluent chaining
   */
  ChainedLogicalStreamBuilder withSlotOption(String optionName, String optionValue);

  /**
   * Add every entry of the given properties as an option for the slot's output plugin.
   *
   * @param options properties holding option names and values
   * @return this builder as ChainedLogicalStreamBuilder, for fluent chaining
   */
  ChainedLogicalStreamBuilder withSlotOptions(Properties options);

}
57 |
--------------------------------------------------------------------------------
/src/main/java/com/amazon/redshift/replication/fluent/logical/LogicalCreateSlotBuilder.java:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright (c) 2016, PostgreSQL Global Development Group
3 | * See the LICENSE file in the project root for more information.
4 | */
5 |
6 | package com.amazon.redshift.replication.fluent.logical;
7 |
8 | import com.amazon.redshift.core.BaseConnection;
9 | import com.amazon.redshift.replication.LogSequenceNumber;
10 | import com.amazon.redshift.replication.ReplicationSlotInfo;
11 | import com.amazon.redshift.replication.ReplicationType;
12 | import com.amazon.redshift.replication.fluent.AbstractCreateSlotBuilder;
13 |
14 | import java.sql.ResultSet;
15 | import java.sql.SQLException;
16 | import java.sql.Statement;
17 |
18 | public class LogicalCreateSlotBuilder
19 | extends AbstractCreateSlotBuilder
20 | implements ChainedLogicalCreateSlotBuilder {
21 |
22 | private String outputPlugin;
23 |
24 | public LogicalCreateSlotBuilder(BaseConnection connection) {
25 | super(connection);
26 | }
27 |
28 | @Override
29 | protected ChainedLogicalCreateSlotBuilder self() {
30 | return this;
31 | }
32 |
33 | @Override
34 | public ChainedLogicalCreateSlotBuilder withOutputPlugin(String outputPlugin) {
35 | this.outputPlugin = outputPlugin;
36 | return self();
37 | }
38 |
39 | @Override
40 | public ReplicationSlotInfo make() throws SQLException {
41 | if (outputPlugin == null || outputPlugin.isEmpty()) {
42 | throw new IllegalArgumentException(
43 | "OutputPlugin required parameter for logical replication slot");
44 | }
45 |
46 | if (slotName == null || slotName.isEmpty()) {
47 | throw new IllegalArgumentException("Replication slotName can't be null");
48 | }
49 |
50 | Statement statement = connection.createStatement();
51 | ResultSet result = null;
52 | ReplicationSlotInfo slotInfo = null;
53 | try {
54 | statement.execute(String.format(
55 | "CREATE_REPLICATION_SLOT %s %s LOGICAL %s",
56 | slotName,
57 | temporaryOption ? "TEMPORARY" : "",
58 | outputPlugin
59 | ));
60 | result = statement.getResultSet();
61 | if (result != null && result.next()) {
62 | slotInfo = new ReplicationSlotInfo(
63 | result.getString("slot_name"),
64 | ReplicationType.LOGICAL,
65 | LogSequenceNumber.valueOf(result.getString("consistent_point")),
66 | result.getString("snapshot_name"),
67 | result.getString("output_plugin"));
68 | }
69 | } finally {
70 | if (result != null) {
71 | result.close();
72 | }
73 | statement.close();
74 | }
75 | return slotInfo;
76 | }
77 | }
78 |
--------------------------------------------------------------------------------
/src/main/java/com/amazon/redshift/replication/fluent/logical/LogicalReplicationOptions.java:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright (c) 2016, PostgreSQL Global Development Group
3 | * See the LICENSE file in the project root for more information.
4 | */
5 |
6 | package com.amazon.redshift.replication.fluent.logical;
7 |
8 | import com.amazon.redshift.replication.fluent.CommonOptions;
9 |
10 | import java.util.Properties;
11 |
public interface LogicalReplicationOptions extends CommonOptions {
  /**
   * Required parameter for logical replication.
   *
   * @return not null name of a logical replication slot that already exists on the server
   *         and is free
   */
  String getSlotName();

  /**
   * Parameters for the output plugin. These options will be passed to the output plugin
   * registered for the specified replication slot.
   *
   * @return options that will be passed to the output_plugin of the slot that the
   *         replication stream was created for
   */
  Properties getSlotOptions();
}
28 |
--------------------------------------------------------------------------------
/src/main/java/com/amazon/redshift/replication/fluent/logical/StartLogicalReplicationCallback.java:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright (c) 2016, PostgreSQL Global Development Group
3 | * See the LICENSE file in the project root for more information.
4 | */
5 |
6 | package com.amazon.redshift.replication.fluent.logical;
7 |
8 | import com.amazon.redshift.replication.RedshiftReplicationStream;
9 |
10 | import java.sql.SQLException;
11 |
/**
 * Callback that opens the actual logical replication stream once the fluent
 * builder has collected all stream parameters.
 */
public interface StartLogicalReplicationCallback {
  /**
   * Open a logical replication stream with the given options.
   *
   * @param options not null collected stream parameters
   * @return not null open replication stream
   * @throws SQLException if the stream cannot be started
   */
  RedshiftReplicationStream start(LogicalReplicationOptions options) throws SQLException;
}
15 |
--------------------------------------------------------------------------------
/src/main/java/com/amazon/redshift/replication/fluent/physical/ChainedPhysicalCreateSlotBuilder.java:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright (c) 2016, PostgreSQL Global Development Group
3 | * See the LICENSE file in the project root for more information.
4 | */
5 |
6 | package com.amazon.redshift.replication.fluent.physical;
7 |
8 | import com.amazon.redshift.replication.fluent.ChainedCommonCreateSlotBuilder;
9 |
/**
 * Physical replication slot specific parameters.
 *
 * Physical slots have no parameters beyond the common ones, so this interface
 * only marks the physical variant of the fluent create-slot builder.
 */
public interface ChainedPhysicalCreateSlotBuilder extends
    ChainedCommonCreateSlotBuilder {
}
16 |
--------------------------------------------------------------------------------
/src/main/java/com/amazon/redshift/replication/fluent/physical/ChainedPhysicalStreamBuilder.java:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright (c) 2016, PostgreSQL Global Development Group
3 | * See the LICENSE file in the project root for more information.
4 | */
5 |
6 | package com.amazon.redshift.replication.fluent.physical;
7 |
8 | import com.amazon.redshift.replication.RedshiftReplicationStream;
9 | import com.amazon.redshift.replication.fluent.ChainedCommonStreamBuilder;
10 |
11 | import java.sql.SQLException;
12 |
/**
 * Fluent builder step for opening a physical replication stream.
 */
public interface ChainedPhysicalStreamBuilder extends
    ChainedCommonStreamBuilder {

  /**
   * Open physical replication stream.
   *
   * @return not null RedshiftReplicationStream available for fetching WAL logs in binary form
   * @throws SQLException on error
   */
  RedshiftReplicationStream start() throws SQLException;
}
24 |
--------------------------------------------------------------------------------
/src/main/java/com/amazon/redshift/replication/fluent/physical/PhysicalCreateSlotBuilder.java:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright (c) 2016, PostgreSQL Global Development Group
3 | * See the LICENSE file in the project root for more information.
4 | */
5 |
6 | package com.amazon.redshift.replication.fluent.physical;
7 |
8 | import com.amazon.redshift.core.BaseConnection;
9 | import com.amazon.redshift.replication.LogSequenceNumber;
10 | import com.amazon.redshift.replication.ReplicationSlotInfo;
11 | import com.amazon.redshift.replication.ReplicationType;
12 | import com.amazon.redshift.replication.fluent.AbstractCreateSlotBuilder;
13 |
14 | import java.sql.ResultSet;
15 | import java.sql.SQLException;
16 | import java.sql.Statement;
17 |
18 | public class PhysicalCreateSlotBuilder
19 | extends AbstractCreateSlotBuilder
20 | implements ChainedPhysicalCreateSlotBuilder {
21 |
22 | public PhysicalCreateSlotBuilder(BaseConnection connection) {
23 | super(connection);
24 | }
25 |
26 | @Override
27 | protected ChainedPhysicalCreateSlotBuilder self() {
28 | return this;
29 | }
30 |
31 | @Override
32 | public ReplicationSlotInfo make() throws SQLException {
33 | if (slotName == null || slotName.isEmpty()) {
34 | throw new IllegalArgumentException("Replication slotName can't be null");
35 | }
36 |
37 | Statement statement = connection.createStatement();
38 | ResultSet result = null;
39 | ReplicationSlotInfo slotInfo = null;
40 | try {
41 | statement.execute(String.format(
42 | "CREATE_REPLICATION_SLOT %s %s PHYSICAL",
43 | slotName,
44 | temporaryOption ? "TEMPORARY" : ""
45 | ));
46 | result = statement.getResultSet();
47 | if (result != null && result.next()) {
48 | slotInfo = new ReplicationSlotInfo(
49 | result.getString("slot_name"),
50 | ReplicationType.PHYSICAL,
51 | LogSequenceNumber.valueOf(result.getString("consistent_point")),
52 | result.getString("snapshot_name"),
53 | result.getString("output_plugin"));
54 | }
55 | } finally {
56 | if (result != null) {
57 | result.close();
58 | }
59 | statement.close();
60 | }
61 | return slotInfo;
62 | }
63 | }
64 |
--------------------------------------------------------------------------------
/src/main/java/com/amazon/redshift/replication/fluent/physical/PhysicalReplicationOptions.java:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright (c) 2016, PostgreSQL Global Development Group
3 | * See the LICENSE file in the project root for more information.
4 | */
5 |
6 | package com.amazon.redshift.replication.fluent.physical;
7 |
8 | import com.amazon.redshift.replication.fluent.CommonOptions;
9 |
/**
 * Parameters required to start a physical replication stream. Physical replication
 * has no plugin-specific options, so this interface only inherits the common options.
 */
public interface PhysicalReplicationOptions extends CommonOptions {
}
12 |
--------------------------------------------------------------------------------
/src/main/java/com/amazon/redshift/replication/fluent/physical/PhysicalStreamBuilder.java:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright (c) 2016, PostgreSQL Global Development Group
3 | * See the LICENSE file in the project root for more information.
4 | */
5 |
6 | package com.amazon.redshift.replication.fluent.physical;
7 |
8 | import com.amazon.redshift.replication.LogSequenceNumber;
9 | import com.amazon.redshift.replication.RedshiftReplicationStream;
10 | import com.amazon.redshift.replication.fluent.AbstractStreamBuilder;
11 |
12 | import java.sql.SQLException;
13 |
14 | public class PhysicalStreamBuilder extends AbstractStreamBuilder
15 | implements ChainedPhysicalStreamBuilder, PhysicalReplicationOptions {
16 |
17 | private final StartPhysicalReplicationCallback startCallback;
18 |
19 | /**
20 | * @param startCallback not null callback that should be execute after build parameters for start
21 | * replication
22 | */
23 | public PhysicalStreamBuilder(StartPhysicalReplicationCallback startCallback) {
24 | this.startCallback = startCallback;
25 | }
26 |
27 | @Override
28 | protected ChainedPhysicalStreamBuilder self() {
29 | return this;
30 | }
31 |
32 | @Override
33 | public RedshiftReplicationStream start() throws SQLException {
34 | return this.startCallback.start(this);
35 | }
36 |
37 | @Override
38 | public String getSlotName() {
39 | return slotName;
40 | }
41 |
42 | @Override
43 | public LogSequenceNumber getStartLSNPosition() {
44 | return startPosition;
45 | }
46 |
47 | @Override
48 | public int getStatusInterval() {
49 | return statusIntervalMs;
50 | }
51 | }
52 |
--------------------------------------------------------------------------------
/src/main/java/com/amazon/redshift/replication/fluent/physical/StartPhysicalReplicationCallback.java:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright (c) 2016, PostgreSQL Global Development Group
3 | * See the LICENSE file in the project root for more information.
4 | */
5 |
6 | package com.amazon.redshift.replication.fluent.physical;
7 |
8 | import com.amazon.redshift.replication.RedshiftReplicationStream;
9 |
10 | import java.sql.SQLException;
11 |
/**
 * Callback that opens the actual physical replication stream once the fluent
 * builder has collected all stream parameters.
 */
public interface StartPhysicalReplicationCallback {
  /**
   * Open a physical replication stream with the given options.
   *
   * @param options not null collected stream parameters
   * @return not null open replication stream
   * @throws SQLException if the stream cannot be started
   */
  RedshiftReplicationStream start(PhysicalReplicationOptions options) throws SQLException;
}
15 |
--------------------------------------------------------------------------------
/src/main/java/com/amazon/redshift/ssl/DbKeyStoreSocketFactory.java:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright (c) 2004, PostgreSQL Global Development Group
3 | * See the LICENSE file in the project root for more information.
4 | */
5 |
6 | package com.amazon.redshift.ssl;
7 |
8 | import java.io.InputStream;
9 | import java.security.KeyStore;
10 |
11 | import javax.net.ssl.KeyManagerFactory;
12 | import javax.net.ssl.SSLContext;
13 | import javax.net.ssl.TrustManagerFactory;
14 |
15 | public abstract class DbKeyStoreSocketFactory extends com.amazon.redshift.ssl.WrappedFactory {
16 | /*
17 | * Populate the WrappedFactory member factory with an SSL Socket Factory that uses the JKS
18 | * keystore provided by getKeyStorePassword() and getKeyStoreStream(). A subclass only needs to
19 | * implement these two methods. The key store will be used both for selecting a private key
20 | * certificate to send to the server, as well as checking the server's certificate against a set
21 | * of trusted CAs.
22 | */
23 | public DbKeyStoreSocketFactory() throws DbKeyStoreSocketException {
24 | KeyStore keys;
25 | char[] password;
26 | try {
27 | keys = KeyStore.getInstance("JKS");
28 | password = getKeyStorePassword();
29 | keys.load(getKeyStoreStream(), password);
30 | } catch (java.security.GeneralSecurityException gse) {
31 | throw new DbKeyStoreSocketException("Failed to load keystore: " + gse.getMessage());
32 | } catch (java.io.FileNotFoundException fnfe) {
33 | throw new DbKeyStoreSocketException("Failed to find keystore file." + fnfe.getMessage());
34 | } catch (java.io.IOException ioe) {
35 | throw new DbKeyStoreSocketException("Failed to read keystore file: " + ioe.getMessage());
36 | }
37 | try {
38 | KeyManagerFactory keyfact =
39 | KeyManagerFactory.getInstance(KeyManagerFactory.getDefaultAlgorithm());
40 | keyfact.init(keys, password);
41 |
42 | TrustManagerFactory trustfact =
43 | TrustManagerFactory.getInstance(TrustManagerFactory.getDefaultAlgorithm());
44 | trustfact.init(keys);
45 |
46 | SSLContext ctx = SSLContext.getInstance("SSL");
47 | ctx.init(keyfact.getKeyManagers(), trustfact.getTrustManagers(), null);
48 | factory = ctx.getSocketFactory();
49 | } catch (java.security.GeneralSecurityException gse) {
50 | throw new DbKeyStoreSocketException(
51 | "Failed to set up database socket factory: " + gse.getMessage());
52 | }
53 | }
54 |
55 | public abstract char[] getKeyStorePassword();
56 |
57 | public abstract InputStream getKeyStoreStream();
58 |
59 | public static class DbKeyStoreSocketException extends Exception {
60 | public DbKeyStoreSocketException(String message) {
61 | super(message);
62 | }
63 | }
64 | }
65 |
--------------------------------------------------------------------------------
/src/main/java/com/amazon/redshift/ssl/DefaultJavaSSLFactory.java:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright (c) 2017, PostgreSQL Global Development Group
3 | * See the LICENSE file in the project root for more information.
4 | */
5 |
6 | package com.amazon.redshift.ssl;
7 |
8 | import java.util.Properties;
9 |
10 | import javax.net.ssl.SSLSocketFactory;
11 |
12 | /**
13 | * Socket factory that uses Java's default truststore to validate server certificate.
14 | * Note: it always validates server certificate, so it might result to downgrade to non-encrypted
15 | * connection when default truststore lacks certificates to validate server.
16 | */
17 | public class DefaultJavaSSLFactory extends WrappedFactory {
18 | public DefaultJavaSSLFactory(Properties info) {
19 | factory = (SSLSocketFactory) SSLSocketFactory.getDefault();
20 | }
21 | }
22 |
--------------------------------------------------------------------------------
/src/main/java/com/amazon/redshift/ssl/NonValidatingFactory.java:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright (c) 2004, PostgreSQL Global Development Group
3 | * See the LICENSE file in the project root for more information.
4 | */
5 |
6 | package com.amazon.redshift.ssl;
7 |
8 | import java.security.GeneralSecurityException;
9 | import java.security.cert.X509Certificate;
10 |
11 | import javax.net.ssl.SSLContext;
12 | import javax.net.ssl.TrustManager;
13 | import javax.net.ssl.X509TrustManager;
14 |
15 | /**
16 | * Provide a SSLSocketFactory that allows SSL connections to be made without validating the server's
17 | * certificate. This is more convenient for some applications, but is less secure as it allows "man
18 | * in the middle" attacks.
19 | */
20 | public class NonValidatingFactory extends WrappedFactory {
21 |
22 | /**
23 | * We provide a constructor that takes an unused argument solely because the ssl calling code will
24 | * look for this constructor first and then fall back to the no argument constructor, so we avoid
25 | * an exception and additional reflection lookups.
26 | *
27 | * @param arg input argument
28 | * @throws GeneralSecurityException if something goes wrong
29 | */
30 | public NonValidatingFactory(String arg) throws GeneralSecurityException {
31 | SSLContext ctx = SSLContext.getInstance("TLS"); // or "SSL" ?
32 |
33 | ctx.init(null, new TrustManager[]{new NonValidatingTM()}, null);
34 |
35 | factory = ctx.getSocketFactory();
36 | }
37 |
38 | public static class NonValidatingTM implements X509TrustManager {
39 |
40 | public X509Certificate[] getAcceptedIssuers() {
41 | return new X509Certificate[0];
42 | }
43 |
44 | public void checkClientTrusted(X509Certificate[] certs, String authType) {
45 | }
46 |
47 | public void checkServerTrusted(X509Certificate[] certs, String authType) {
48 | }
49 | }
50 | }
51 |
--------------------------------------------------------------------------------
/src/main/java/com/amazon/redshift/ssl/WrappedFactory.java:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright (c) 2004, PostgreSQL Global Development Group
3 | * See the LICENSE file in the project root for more information.
4 | */
5 |
6 | package com.amazon.redshift.ssl;
7 |
8 | import java.io.IOException;
9 | import java.net.InetAddress;
10 | import java.net.Socket;
11 |
12 | import javax.net.ssl.SSLSocketFactory;
13 |
/**
 * Provide a wrapper to a real SSLSocketFactory delegating all calls to the contained instance. A
 * subclass needs only provide a constructor for the wrapped SSLSocketFactory.
 */
public abstract class WrappedFactory extends SSLSocketFactory {

  // The delegate; subclasses must assign this in their constructor before use.
  protected SSLSocketFactory factory;

  @Override
  public Socket createSocket(InetAddress host, int port) throws IOException {
    return factory.createSocket(host, port);
  }

  @Override
  public Socket createSocket(String host, int port) throws IOException {
    return factory.createSocket(host, port);
  }

  @Override
  public Socket createSocket(String host, int port, InetAddress localHost, int localPort)
      throws IOException {
    return factory.createSocket(host, port, localHost, localPort);
  }

  @Override
  public Socket createSocket(InetAddress address, int port, InetAddress localAddress, int localPort)
      throws IOException {
    return factory.createSocket(address, port, localAddress, localPort);
  }

  @Override
  public Socket createSocket(Socket socket, String host, int port, boolean autoClose)
      throws IOException {
    return factory.createSocket(socket, host, port, autoClose);
  }

  @Override
  public String[] getDefaultCipherSuites() {
    return factory.getDefaultCipherSuites();
  }

  @Override
  public String[] getSupportedCipherSuites() {
    return factory.getSupportedCipherSuites();
  }
}
53 |
--------------------------------------------------------------------------------
/src/main/java/com/amazon/redshift/sspi/ISSPIClient.java:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright (c) 2003, PostgreSQL Global Development Group
3 | * See the LICENSE file in the project root for more information.
4 | */
5 | // Copyright (c) 2004, Open Cloud Limited.
6 |
7 | package com.amazon.redshift.sspi;
8 |
9 | import java.io.IOException;
10 | import java.sql.SQLException;
11 |
/**
 * Use Waffle-JNI to support SSPI authentication when RsJDBC is running on a Windows
 * client and talking to a Windows server.
 *
 * SSPI is not supported on a non-Windows client.
 */
public interface ISSPIClient {
  /** @return true if SSPI authentication can be used on this platform */
  boolean isSSPISupported();

  /**
   * Begin the SSPI authentication handshake with the server.
   *
   * @throws SQLException on authentication protocol errors
   * @throws IOException on network I/O errors
   */
  void startSSPI() throws SQLException, IOException;

  /**
   * Continue an in-progress SSPI handshake.
   *
   * @param msgLength length in bytes of the pending server message
   *     (presumably the server's latest SSPI token — confirm against caller)
   * @throws SQLException on authentication protocol errors
   * @throws IOException on network I/O errors
   */
  void continueSSPI(int msgLength) throws SQLException, IOException;

  /** Release any native resources held by this client. */
  void dispose();
}
27 |
--------------------------------------------------------------------------------
/src/main/java/com/amazon/redshift/sspi/NTDSAPI.java:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright (c) 2003, PostgreSQL Global Development Group
3 | * See the LICENSE file in the project root for more information.
4 | */
5 | // Copyright (c) 2004, Open Cloud Limited.
6 |
7 | package com.amazon.redshift.sspi;
8 |
9 | import com.sun.jna.LastErrorException;
10 | import com.sun.jna.Native;
11 | import com.sun.jna.WString;
12 | import com.sun.jna.ptr.IntByReference;
13 | import com.sun.jna.win32.StdCallLibrary;
14 |
// JNA binding to the Windows NTDSAPI library. Method names must match the native
// export names exactly (hence DsMakeSpnW), so do not rename them.
interface NTDSAPI extends StdCallLibrary {

  NTDSAPI instance = (NTDSAPI) Native.loadLibrary("NTDSAPI", NTDSAPI.class);

  /**
   * Wrap DsMakeSpn
   *
   * To get the String result, call
   *
   *
   * new String(buf, 0, spnLength)
   *
   *
   * on the byte[] buffer passed to 'spn' after testing to ensure ERROR_SUCCESS.
   *
   * @param serviceClass SPN service class (in)
   * @param serviceName SPN service name (in)
   * @param instanceName SPN instance name (in, null ok)
   * @param instancePort SPN port number (in, 0 to omit)
   * @param referrer SPN referer (in, null ok)
   * @param spnLength Size of 'spn' buffer (in), actual length of spn created including null
   *     terminator (out)
   * @param spn SPN buffer (in/out)
   * @return Error code ERROR_SUCCESS, ERROR_BUFFER_OVERFLOW or ERROR_INVALID_PARAMETER
   * @see
   * https://msdn.microsoft.com/en-us/library/ms676007(v=vs.85).aspx
   */
  int DsMakeSpnW(WString serviceClass, /* in */
      WString serviceName, /* in */
      WString instanceName, /* in, optional, may be null */
      short instancePort, /* in */
      WString referrer, /* in, optional, may be null */
      IntByReference spnLength, /* in: length of buffer spn; out: chars written */
      char[] spn /* out string */
  ) throws LastErrorException;

  // Win32 error codes returned by DsMakeSpnW.
  int ERROR_SUCCESS = 0;
  int ERROR_INVALID_PARAMETER = 87;
  int ERROR_BUFFER_OVERFLOW = 111;
}
55 |
--------------------------------------------------------------------------------
/src/main/java/com/amazon/redshift/sspi/NTDSAPIWrapper.java:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright (c) 2003, PostgreSQL Global Development Group
3 | * See the LICENSE file in the project root for more information.
4 | */
5 | // Copyright (c) 2004, Open Cloud Limited.
6 |
7 | package com.amazon.redshift.sspi;
8 |
9 | import com.sun.jna.LastErrorException;
10 | import com.sun.jna.WString;
11 | import com.sun.jna.ptr.IntByReference;
12 |
13 | public class NTDSAPIWrapper {
14 |
15 | static final NTDSAPIWrapper instance = new NTDSAPIWrapper();
16 |
17 | /**
18 | * Convenience wrapper for NTDSAPI DsMakeSpn with Java friendly string and exception handling.
19 | *
20 | * @param serviceClass See MSDN
21 | * @param serviceName See MSDN
22 | * @param instanceName See MSDN
23 | * @param instancePort See MSDN
24 | * @param referrer See MSDN
25 | * @return SPN generated
26 | * @throws LastErrorException If buffer too small or parameter incorrect
27 | * @see
28 | * https://msdn.microsoft.com/en-us/library/ms676007(v=vs.85).aspx
29 | */
30 | public String DsMakeSpn(String serviceClass, String serviceName, String instanceName,
31 | short instancePort, String referrer) throws LastErrorException {
32 | IntByReference spnLength = new IntByReference(2048);
33 | char[] spn = new char[spnLength.getValue()];
34 |
35 | final int ret =
36 | NTDSAPI.instance.DsMakeSpnW(
37 | new WString(serviceClass),
38 | new WString(serviceName),
39 | instanceName == null ? null : new WString(instanceName),
40 | instancePort,
41 | referrer == null ? null : new WString(referrer),
42 | spnLength,
43 | spn);
44 |
45 | if (ret != NTDSAPI.ERROR_SUCCESS) {
46 | /* Should've thrown LastErrorException, but just in case */
47 | throw new RuntimeException("NTDSAPI DsMakeSpn call failed with " + ret);
48 | }
49 |
50 | return new String(spn, 0, spnLength.getValue());
51 | }
52 | }
53 |
--------------------------------------------------------------------------------
/src/main/java/com/amazon/redshift/translation/cs.po:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/aws/amazon-redshift-jdbc-driver/635b66e99d4ac70415c99d020ed2da330dcc0c6d/src/main/java/com/amazon/redshift/translation/cs.po
--------------------------------------------------------------------------------
/src/main/java/com/amazon/redshift/translation/de.po:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/aws/amazon-redshift-jdbc-driver/635b66e99d4ac70415c99d020ed2da330dcc0c6d/src/main/java/com/amazon/redshift/translation/de.po
--------------------------------------------------------------------------------
/src/main/java/com/amazon/redshift/translation/fr.po:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/aws/amazon-redshift-jdbc-driver/635b66e99d4ac70415c99d020ed2da330dcc0c6d/src/main/java/com/amazon/redshift/translation/fr.po
--------------------------------------------------------------------------------
/src/main/java/com/amazon/redshift/translation/it.po:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/aws/amazon-redshift-jdbc-driver/635b66e99d4ac70415c99d020ed2da330dcc0c6d/src/main/java/com/amazon/redshift/translation/it.po
--------------------------------------------------------------------------------
/src/main/java/com/amazon/redshift/translation/messages_nl.java:
--------------------------------------------------------------------------------
1 | /* Automatically generated by GNU msgfmt. Do not modify! */
2 | package com.amazon.redshift.translation;
// Gettext-generated ResourceBundle: even indexes of `table` hold English msgids,
// odd indexes hold the Dutch translations; lookup is open hashing on msgid.hashCode().
// Generated by msgfmt — keep code byte-for-byte; regenerate rather than hand-edit.
public class messages_nl extends java.util.ResourceBundle {
private static final java.lang.String[] table;
static {
java.lang.String[] t = new java.lang.String[36];
t[0] = "";
t[1] = "Project-Id-Version: Redshift JDBC Driver 2.0\nReport-Msgid-Bugs-To: \nPO-Revision-Date: 2004-10-11 23:55-0700\nLast-Translator: Arnout Kuiper \nLanguage-Team: Dutch \nLanguage: nl\nMIME-Version: 1.0\nContent-Type: text/plain; charset=UTF-8\nContent-Transfer-Encoding: 8bit\n";
t[2] = "Something unusual has occurred to cause the driver to fail. Please report this exception.";
t[3] = "Iets ongewoons is opgetreden, wat deze driver doet falen. Rapporteer deze fout AUB: {0}";
t[8] = "Unknown Types value.";
t[9] = "Onbekende Types waarde.";
t[12] = "Fastpath call {0} - No result was returned and we expected an integer.";
t[13] = "Fastpath aanroep {0} - Geen resultaat werd teruggegeven, terwijl we een integer verwacht hadden.";
t[20] = "The fastpath function {0} is unknown.";
t[21] = "De fastpath functie {0} is onbekend.";
t[22] = "No results were returned by the query.";
t[23] = "Geen resultaten werden teruggegeven door de query.";
t[26] = "An unexpected result was returned by a query.";
t[27] = "Een onverwacht resultaat werd teruggegeven door een query";
table = t;
}
// Hash the msgid into an even slot index; return the adjacent translation on a hit.
public java.lang.Object handleGetObject (java.lang.String msgid) throws java.util.MissingResourceException {
int hash_val = msgid.hashCode() & 0x7fffffff;
int idx = (hash_val % 18) << 1;
java.lang.Object found = table[idx];
if (found != null && msgid.equals(found))
return table[idx + 1];
return null;
}
// Enumerates the non-null msgids (even slots) of the hash table.
public java.util.Enumeration getKeys () {
return
new java.util.Enumeration() {
private int idx = 0;
{ while (idx < 36 && table[idx] == null) idx += 2; }
public boolean hasMoreElements () {
return (idx < 36);
}
public java.lang.Object nextElement () {
java.lang.Object key = table[idx];
do idx += 2; while (idx < 36 && table[idx] == null);
return key;
}
};
}
public java.util.ResourceBundle getParent () {
return parent;
}
}
50 |
--------------------------------------------------------------------------------
/src/main/java/com/amazon/redshift/util/ByteBufferByteStreamWriter.java:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright (c) 2020, PostgreSQL Global Development Group
3 | * See the LICENSE file in the project root for more information.
4 | */
5 |
6 | package com.amazon.redshift.util;
7 |
8 | import java.io.IOException;
9 | import java.nio.ByteBuffer;
10 | import java.nio.channels.Channels;
11 | import java.nio.channels.WritableByteChannel;
12 |
13 | /**
14 | * A {@link ByteStreamWriter} that writes a {@link ByteBuffer java.nio.ByteBuffer} to a byte array
15 | * parameter.
16 | */
17 | public class ByteBufferByteStreamWriter implements ByteStreamWriter {
18 |
19 | private final ByteBuffer buf;
20 | private final int length;
21 |
22 | /**
23 | * Construct the writer with the given {@link ByteBuffer}
24 | *
25 | * @param buf the buffer to use.
26 | */
27 | public ByteBufferByteStreamWriter(ByteBuffer buf) {
28 | this.buf = buf;
29 | this.length = buf.remaining();
30 | }
31 |
32 | @Override
33 | public int getLength() {
34 | return length;
35 | }
36 |
37 | @Override
38 | public void writeTo(ByteStreamTarget target) throws IOException {
39 | // this _does_ involve some copying to a temporary buffer, but that's unavoidable
40 | // as OutputStream itself only accepts single bytes or heap allocated byte arrays
41 | WritableByteChannel c = Channels.newChannel(target.getOutputStream());
42 | try {
43 | c.write(buf);
44 | } finally {
45 | c.close();
46 | }
47 | }
48 | }
49 |
--------------------------------------------------------------------------------
/src/main/java/com/amazon/redshift/util/ByteStreamWriter.java:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright (c) 2020, PostgreSQL Global Development Group
3 | * See the LICENSE file in the project root for more information.
4 | */
5 |
6 | package com.amazon.redshift.util;
7 |
8 | import java.io.IOException;
9 | import java.io.InputStream;
10 | import java.io.OutputStream;
11 |
12 | /**
13 | * A class that can be used to set a byte array parameter by writing to an OutputStream.
14 | *
15 | * The intended use case is wanting to write data to a byte array parameter that is stored off
16 | * heap in a direct memory pool or in some other form that is inconvenient to assemble into a single
17 | * heap-allocated buffer.
18 | * Users should write their own implementation depending on the
19 | * original data source. The driver provides a built-in implementation supporting the {@link
20 | * java.nio.ByteBuffer} class, see {@link ByteBufferByteStreamWriter}.
21 | * Intended usage is to simply pass in an instance using
22 | * {@link java.sql.PreparedStatement#setObject(int, Object)}:
23 | *
24 | * int bufLength = someBufferObject.length();
25 | * preparedStatement.setObject(1, new MyByteStreamWriter(bufLength, someBufferObject));
26 | *
27 | * The length must be known ahead of the stream being written to.
28 | * This provides the application more control over memory management than calling
29 | * {@link java.sql.PreparedStatement#setBinaryStream(int, InputStream)} as with the latter the
30 | * caller has no control over the buffering strategy.
31 | */
public interface ByteStreamWriter {

/**
 * Returns the length of the stream.
 *
 * <p>This must be known ahead of calling {@link #writeTo(ByteStreamTarget)}.
 *
 * @return the number of bytes in the stream.
 */
int getLength();

/**
 * Write the data to the provided {@link OutputStream} obtained from the target.
 *
 * <p>Should not write more than {@link #getLength()} bytes. If attempted, the provided stream
 * will throw an {@link java.io.IOException}.
 *
 * @param target provides the stream to write the data to
 * @throws IOException if the underlying stream throws or there is some other error.
 */
void writeTo(ByteStreamTarget target) throws IOException;

/**
 * Provides a target to write bytes to, decoupling the writer from the
 * concrete stream so the driver can substitute a length-enforcing stream.
 */
interface ByteStreamTarget {

/**
 * Provides an output stream to write bytes to.
 *
 * @return an output stream
 */
OutputStream getOutputStream();
}
}
67 |
--------------------------------------------------------------------------------
/src/main/java/com/amazon/redshift/util/CanEstimateSize.java:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright (c) 2015, PostgreSQL Global Development Group
3 | * See the LICENSE file in the project root for more information.
4 | */
5 |
6 | package com.amazon.redshift.util;
7 |
/**
 * Implemented by objects that can report an approximate in-memory size.
 *
 * NOTE(review): presumably used to bound caches by total byte size — confirm
 * against callers (e.g. query caches).
 */
public interface CanEstimateSize {
/**
 * @return approximate size of this object in bytes
 */
long getSize();
}
11 |
--------------------------------------------------------------------------------
/src/main/java/com/amazon/redshift/util/DriverInfo.java:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright (c) 2017, PostgreSQL Global Development Group
3 | * See the LICENSE file in the project root for more information.
4 | */
5 |
6 | package com.amazon.redshift.util;
7 |
8 | import java.io.IOException;
9 | import java.io.InputStream;
10 | import java.util.Properties;
11 |
12 | /**
13 | * Utility class with constants of Driver information.
14 | */
/**
 * Utility class with constants of Driver information.
 *
 * Version values are loaded once (in the static initializer) from the
 * "redshift_jdbc_driver.properties" build resource; if the resource or its
 * "version" entry is missing or malformed, the fallback "2.0.0.0" is used.
 */
public final class DriverInfo {

  // Driver name
  public static final String DRIVER_NAME = "Redshift JDBC Driver";
  public static final String DRIVER_SHORT_NAME = "RsJDBC";
  public static final String DRIVER_VERSION;
  public static final String DRIVER_FULL_NAME;

  // Driver version
  public static final int MAJOR_VERSION;
  public static final int MINOR_VERSION;
  public static final int PATCH_VERSION;

  // JDBC specification
  public static final String JDBC_VERSION = "4.2";
  private static final int JDBC_INTVERSION = 42;
  public static final int JDBC_MAJOR_VERSION = JDBC_INTVERSION / 10;
  public static final int JDBC_MINOR_VERSION = JDBC_INTVERSION % 10;

  /** Fallback used when the build properties cannot be read or parsed. */
  private static final String DEFAULT_VERSION = "2.0.0.0";

  static {
    String version = DEFAULT_VERSION;
    try (InputStream resourceAsStream = DriverInfo.class.getClassLoader().getResourceAsStream("redshift_jdbc_driver.properties")) {
      // BUG FIX: getResourceAsStream() returns null when the resource is
      // absent; the previous code passed that null to Properties.load(),
      // throwing an uncaught NullPointerException out of this static
      // initializer (ExceptionInInitializerError for the whole class).
      if (resourceAsStream != null) {
        Properties versionFromBuild = new Properties();
        versionFromBuild.load(resourceAsStream);
        // BUG FIX: getProperty("version") could return null and NPE on
        // split() below; supply the default instead.
        version = versionFromBuild.getProperty("version", DEFAULT_VERSION);
      }
    } catch (IOException ex) {
      // best-effort: fall back to DEFAULT_VERSION
    }
    String[] versionComponents = version.split("\\.");
    int majorVersion = 2;
    int minorVersion = 0;
    int patchVersion = 0;
    try {
      if (versionComponents.length >= 3) {
        majorVersion = Integer.parseInt(versionComponents[0]);
        minorVersion = Integer.parseInt(versionComponents[1]);
        patchVersion = Integer.parseInt(versionComponents[2]);
      } else {
        version = DEFAULT_VERSION;
      }
    } catch (NumberFormatException ex) {
      majorVersion = 2;
      minorVersion = 0;
      patchVersion = 0;
      // CONSISTENCY FIX: previously the numeric parts were reset here but
      // DRIVER_VERSION kept the malformed string; reset both together.
      version = DEFAULT_VERSION;
    }
    MAJOR_VERSION = majorVersion;
    MINOR_VERSION = minorVersion;
    PATCH_VERSION = patchVersion;
    DRIVER_VERSION = version;
    DRIVER_FULL_NAME = DRIVER_NAME + " " + DRIVER_VERSION;
  }

  private DriverInfo() {
  }

}
72 |
--------------------------------------------------------------------------------
/src/main/java/com/amazon/redshift/util/ExtensibleDigest.java:
--------------------------------------------------------------------------------
1 | package com.amazon.redshift.util;
2 |
3 | import java.security.MessageDigest;
4 | import java.security.NoSuchAlgorithmException;
5 |
6 | /**
7 | * Extensible hashing utility function to obfuscate passwords before network transmission.
8 | *
9 | */
public class ExtensibleDigest {

  private ExtensibleDigest() {
    // static utility: no instances
  }

  /**
   * Computes the extensible password digest:
   * digest(digest(password + salt) + serverNonce + clientNonce),
   * where digest() is the algorithm named by {@code algoName}.
   *
   * @param clientNonce The client nonce.
   * @param password The connecting user's password.
   * @param salt salt sent by the server.
   * @param algoName Algorithm name such as "SHA-256" etc.
   * @param serverNonce random number generated by server
   * @return A byte array of the digest.
   * @throws IllegalStateException if the JVM does not provide {@code algoName}
   */
  public static byte[] encode(byte[] clientNonce,
                              byte[] password,
                              byte[] salt,
                              String algoName,
                              byte[] serverNonce) {
    try {
      // Round one: hash the password together with the server-supplied salt.
      MessageDigest round = MessageDigest.getInstance(algoName);
      round.update(password);
      round.update(salt);
      byte[] saltedPassword = round.digest();

      // Round two: hash the first result with both nonces, in server-then-client order.
      MessageDigest finalRound = MessageDigest.getInstance(algoName);
      finalRound.update(saltedPassword);
      finalRound.update(serverNonce);
      finalRound.update(clientNonce);
      return finalRound.digest();
    } catch (NoSuchAlgorithmException e) {
      throw new IllegalStateException("Unable to encode password with extensible hashing:" + algoName, e);
    }
  }
}
50 |
--------------------------------------------------------------------------------
/src/main/java/com/amazon/redshift/util/GT.java:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright (c) 2004, PostgreSQL Global Development Group
3 | * See the LICENSE file in the project root for more information.
4 | */
5 |
6 | package com.amazon.redshift.util;
7 |
8 | import java.text.MessageFormat;
9 | import java.util.Locale;
10 | import java.util.MissingResourceException;
11 | import java.util.ResourceBundle;
12 |
13 | /**
14 | * This class provides a wrapper around a gettext message catalog that can provide a localized
15 | * version of error messages. The caller provides a message String in the standard
16 | * java.text.MessageFormat syntax and any arguments it may need. The returned String is the
17 | * localized version if available or the original if not.
18 | */
public class GT {

// Shared singleton; translation state is immutable after construction.
private static final GT _gt = new GT();
// Reused empty argument array so no-arg calls avoid an allocation.
private static final Object[] noargs = new Object[0];

/**
 * Translates (when a message catalog is installed) and formats a message.
 *
 * @param message message text in java.text.MessageFormat syntax
 * @param args optional format arguments
 * @return the localized, formatted message, or the original text formatted
 *     as-is when no translation is available
 */
public static String tr(String message, Object... args) {
return _gt.translate(message, args);
}

// Loaded gettext catalog, or null when translations are not installed.
private ResourceBundle bundle;

private GT() {
try {
// The //JCP! lines below are build-time preprocessor directives that select
// the ResourceBundle call based on the targeted JDBC spec; do not edit them.
//JCP! if mvn.project.property.redshift.jdbc.spec < "JDBC4.1"
//JCP> bundle = ResourceBundle.getBundle("com.amazon.redshift.translation.messages");
//JCP! else
bundle = ResourceBundle.getBundle("com.amazon.redshift.translation.messages", Locale.getDefault(Locale.Category.DISPLAY));
//JCP! endif
} catch (MissingResourceException mre) {
// translation files have not been installed
bundle = null;
}
}

// Looks the message up in the catalog (falling back to the untranslated text)
// and then applies MessageFormat with the given arguments.
private String translate(String message, Object[] args) {
if (bundle != null && message != null) {
try {
message = bundle.getString(message);
} catch (MissingResourceException mre) {
// If we can't find a translation, just
// use the untranslated message.
}
}

// If we don't have any parameters we still need to run
// this through the MessageFormat(ter) to allow the same
// quoting and escaping rules to be used for all messages.
//
if (args == null) {
args = noargs;
}

// Replace placeholders with arguments
//
if (message != null) {
message = MessageFormat.format(message, args);
}

return message;
}
}
70 |
--------------------------------------------------------------------------------
/src/main/java/com/amazon/redshift/util/Gettable.java:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright (c) 2018, PostgreSQL Global Development Group
3 | * See the LICENSE file in the project root for more information.
4 | */
5 |
6 | package com.amazon.redshift.util;
7 |
/**
 * Minimal read-only key/value lookup abstraction.
 *
 * BUG FIX: the generic type parameters had been stripped (likely by text
 * extraction), leaving "V get(K key)" referencing undeclared types; the
 * declaration is restored so the file compiles.
 *
 * @param <K> key type
 * @param <V> value type
 */
public interface Gettable<K, V> {
  /**
   * @param key the lookup key
   * @return the value associated with {@code key}, or {@code null} if absent
   */
  V get(K key);
}
11 |
--------------------------------------------------------------------------------
/src/main/java/com/amazon/redshift/util/GettableHashMap.java:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright (c) 2018, PostgreSQL Global Development Group
3 | * See the LICENSE file in the project root for more information.
4 | */
5 |
6 | package com.amazon.redshift.util;
7 |
8 | import java.util.HashMap;
9 |
10 | public class GettableHashMap extends HashMap implements Gettable {
11 |
12 | }
13 |
--------------------------------------------------------------------------------
/src/main/java/com/amazon/redshift/util/IniFile.java:
--------------------------------------------------------------------------------
1 | package com.amazon.redshift.util;
2 |
3 | import java.io.BufferedReader;
4 | import java.io.FileReader;
5 | import java.io.IOException;
6 | import java.util.HashMap;
7 | import java.util.Map;
8 | import java.util.regex.Matcher;
9 | import java.util.regex.Pattern;
10 |
/**
 * Minimal INI file reader: parses "[section]" headers and "key=value" lines
 * into a section -> (key -> value) map. Lines outside any section and lines
 * matching neither pattern are ignored.
 *
 * BUG FIX: the generic type arguments of the sections map had been stripped
 * (likely by text extraction), leaving "Map&gt;" which does not compile; the
 * declaration is restored. The two patterns are also made static final so
 * they are compiled once instead of per instance.
 */
public class IniFile {

  // Matches "[section]" with optional surrounding whitespace; group 1 is the name.
  private static final Pattern SECTION_PATTERN = Pattern.compile( "\\s*\\[([^]]*)\\]\\s*" );
  // Matches "key=value"; group 1 is the key, group 2 the value (both trimmed later).
  private static final Pattern KEY_VAL_PATTERN = Pattern.compile( "\\s*([^=]*)=(.*)" );
  // section name -> (key -> value)
  private Map<String, Map<String, String>> sections = new HashMap<>();

  /**
   * Parses the given file immediately.
   *
   * @param path path of the INI file to read
   * @throws IOException if the file cannot be read
   */
  public IniFile( String path ) throws IOException {
    load( path );
  }

  /**
   * Reads the file line by line, accumulating key/value pairs under the most
   * recently seen section header. May be called again to merge another file.
   *
   * @param path path of the INI file to read
   * @throws IOException if the file cannot be read
   */
  public void load( String path ) throws IOException {
    try( BufferedReader br = new BufferedReader( new FileReader( path ))) {
      String line;
      String section = null;
      while(( line = br.readLine()) != null ) {
        Matcher m = SECTION_PATTERN.matcher( line );
        if( m.matches()) {
          section = m.group( 1 ).trim();
        }
        else if( section != null ) {
          m = KEY_VAL_PATTERN.matcher( line );
          if( m.matches()) {
            String key = m.group( 1 ).trim();
            String value = m.group( 2 ).trim();
            Map<String, String> kv = sections.get( section );
            if( kv == null ) {
              sections.put( section, kv = new HashMap<>());
            }
            kv.put( key, value );
          }
        }
      } // Loop
    }
  }

  /**
   * @param section section name
   * @return the key/value map for the section, or {@code null} if the section
   *     was never seen (callers must handle null)
   */
  public Map<String, String> getAllKeyVals( String section ) {
    return sections.get( section );
  }
}
51 |
--------------------------------------------------------------------------------
/src/main/java/com/amazon/redshift/util/JdbcBlackHole.java:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright (c) 2004, PostgreSQL Global Development Group
3 | * See the LICENSE file in the project root for more information.
4 | */
5 |
6 | package com.amazon.redshift.util;
7 |
8 | import java.sql.Connection;
9 | import java.sql.ResultSet;
10 | import java.sql.SQLException;
11 | import java.sql.Statement;
12 |
/**
 * Null-safe close helpers for JDBC resources that deliberately discard any
 * {@link SQLException} raised during close (hence the name).
 */
public class JdbcBlackHole {

  /** Closes the connection if non-null, swallowing any SQLException. */
  public static void close(Connection con) {
    if (con == null) {
      return;
    }
    try {
      con.close();
    } catch (SQLException ignored) {
      /* ignore for now */
    }
  }

  /** Closes the statement if non-null, swallowing any SQLException. */
  public static void close(Statement s) {
    if (s == null) {
      return;
    }
    try {
      s.close();
    } catch (SQLException ignored) {
      /* ignore for now */
    }
  }

  /** Closes the result set if non-null, swallowing any SQLException. */
  public static void close(ResultSet rs) {
    if (rs == null) {
      return;
    }
    try {
      rs.close();
    } catch (SQLException ignored) {
      /* ignore for now */
    }
  }
}
44 |
--------------------------------------------------------------------------------
/src/main/java/com/amazon/redshift/util/MD5Digest.java:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright (c) 2003, PostgreSQL Global Development Group
3 | * See the LICENSE file in the project root for more information.
4 | */
5 |
6 | package com.amazon.redshift.util;
7 |
8 | import java.security.MessageDigest;
9 | import java.security.NoSuchAlgorithmException;
10 |
11 | /**
12 | * MD5-based utility function to obfuscate passwords before network transmission.
13 | *
14 | * @author Jeremy Wohl
15 | */
/**
 * MD5-based utility function to obfuscate passwords before network transmission.
 *
 * @author Jeremy Wohl
 */
public class MD5Digest {

  /** Hex alphabet used when rendering digest bytes (lowercase). */
  private static final char[] HEX_DIGITS =
      {'0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'a', 'b', 'c', 'd', 'e', 'f'};

  private MD5Digest() {
  }

  /**
   * Encodes user/password/salt information in the following way: MD5(MD5(password + user) + salt).
   *
   * @param user The connecting user.
   * @param password The connecting user's password.
   * @param salt A four-salt sent by the server.
   * @return A 35-byte array, comprising the string "md5" and an MD5 digest.
   */
  public static byte[] encode(byte[] user, byte[] password, byte[] salt) {
    byte[] hexDigest = new byte[35];
    try {
      MessageDigest md = MessageDigest.getInstance("MD5");

      // Inner digest: MD5(password + user), rendered as 32 hex characters
      // at the start of hexDigest. digest() resets md for reuse below.
      md.update(password);
      md.update(user);
      bytesToHex(md.digest(), hexDigest, 0);

      // Outer digest: MD5(innerHex + salt), written after the 3-byte prefix.
      md.update(hexDigest, 0, 32);
      md.update(salt);
      bytesToHex(md.digest(), hexDigest, 3);

      hexDigest[0] = (byte) 'm';
      hexDigest[1] = (byte) 'd';
      hexDigest[2] = (byte) '5';
    } catch (NoSuchAlgorithmException e) {
      throw new IllegalStateException("Unable to encode password with MD5", e);
    }
    return hexDigest;
  }

  /*
   * Renders the first 16 bytes of the digest as 32 human-readable lowercase
   * hex characters into hex, starting at offset.
   */
  private static void bytesToHex(byte[] bytes, byte[] hex, int offset) {
    int pos = offset;
    for (int i = 0; i < 16; i++) {
      int c = bytes[i] & 0xFF;
      hex[pos++] = (byte) HEX_DIGITS[c >>> 4];
      hex[pos++] = (byte) HEX_DIGITS[c & 0xF];
    }
  }
}
78 |
--------------------------------------------------------------------------------
/src/main/java/com/amazon/redshift/util/ObjectFactory.java:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright (c) 2004, PostgreSQL Global Development Group
3 | * See the LICENSE file in the project root for more information.
4 | */
5 |
6 | package com.amazon.redshift.util;
7 |
8 | import java.lang.reflect.Constructor;
9 | import java.lang.reflect.InvocationTargetException;
10 | import java.util.Properties;
11 |
12 | /**
13 | * Helper class to instantiate objects. Note: the class is NOT public API, so it is subject
14 | * to change.
15 | */
/**
 * Helper class to instantiate objects. Note: the class is NOT public API, so it is subject
 * to change.
 *
 * BUG FIX: the generic declarations (&lt;T&gt;, Class&lt;T&gt;, Constructor&lt;? extends T&gt;,
 * Class&lt;? extends T&gt;) had been stripped (likely by text extraction), leaving code that
 * does not compile; they are restored below with no behavior change.
 */
public class ObjectFactory {

  /**
   * Instantiates a class using the appropriate constructor. If a constructor with a single
   * Properties parameter exists, it is used. Otherwise, if tryString is true a constructor with a
   * single String argument is searched; if that also fails (or tryString is false) a no-argument
   * constructor is tried.
   *
   * @param <T> expected base type of the instantiated object
   * @param expectedClass expected base class/interface of the named class
   * @param classname name of the class to instantiate
   * @param info parameter to pass as Properties
   * @param tryString whether to look for a single String argument constructor
   * @param stringarg parameter to pass as String
   * @return the instantiated class
   * @throws ClassNotFoundException if something goes wrong
   * @throws SecurityException if something goes wrong
   * @throws NoSuchMethodException if something goes wrong
   * @throws IllegalArgumentException if something goes wrong
   * @throws InstantiationException if something goes wrong
   * @throws IllegalAccessException if something goes wrong
   * @throws InvocationTargetException if something goes wrong
   */
  public static <T> T instantiate(Class<T> expectedClass, String classname, Properties info,
      boolean tryString, String stringarg)
      throws ClassNotFoundException, SecurityException, NoSuchMethodException,
      IllegalArgumentException, InstantiationException, IllegalAccessException,
      InvocationTargetException {
    Object[] args = {info};
    Constructor<? extends T> ctor = null;
    Class<? extends T> cls = Class.forName(classname).asSubclass(expectedClass);
    try {
      // Preferred: a (Properties) constructor.
      ctor = cls.getConstructor(Properties.class);
    } catch (NoSuchMethodException nsme) {
      if (tryString) {
        try {
          // Fallback 1: a (String) constructor.
          ctor = cls.getConstructor(String.class);
          args = new String[]{stringarg};
        } catch (NoSuchMethodException nsme2) {
          tryString = false;
        }
      }
      if (!tryString) {
        // Fallback 2: the no-argument constructor.
        ctor = cls.getConstructor((Class<?>[]) null);
        args = null;
      }
    }
    return ctor.newInstance(args);
  }

}
66 |
--------------------------------------------------------------------------------
/src/main/java/com/amazon/redshift/util/RedshiftBinaryObject.java:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright (c) 2011, PostgreSQL Global Development Group
3 | * See the LICENSE file in the project root for more information.
4 | */
5 |
6 | package com.amazon.redshift.util;
7 |
8 | import java.sql.SQLException;
9 |
10 | /**
11 | * RedshiftBinaryObject is an interface that classes extending {@link RedshiftObject} can use to take advantage of
12 | * more optimal binary encoding of the data type.
13 | */
public interface RedshiftBinaryObject {
/**
 * This method is called to set the value of this object.
 *
 * @param value data containing the binary representation of the value of the object
 * @param offset the offset in the byte array where object data starts
 * @throws SQLException thrown if value is invalid for this type
 */
void setByteValue(byte[] value, int offset) throws SQLException;

/**
 * This method is called to return the number of bytes needed to store this object in the binary
 * form required by com.amazon.redshift.
 *
 * @return the number of bytes needed to store this object
 */
int lengthInBytes();

/**
 * This method is called to store the value of the object, in the binary form required by
 * com.amazon.redshift.
 *
 * @param bytes the array to store the value, it is guaranteed to be at least
 *        {@link #lengthInBytes} in size.
 * @param offset the offset in the byte array where object must be stored
 */
void toBytes(byte[] bytes, int offset);
}
42 |
--------------------------------------------------------------------------------
/src/main/java/com/amazon/redshift/util/RedshiftByteTypes.java:
--------------------------------------------------------------------------------
1 | package com.amazon.redshift.util;
2 |
3 | import java.sql.SQLException;
4 |
5 | // Base class for VARBYTE and GEOGRAPHY bytes conversion
// Base class for VARBYTE and GEOGRAPHY bytes conversion
public class RedshiftByteTypes {

  /*
   * Converts a RS bytes raw value (i.e. the raw hex-encoded representation of the
   * varbyte/geography data type) into a java byte[]. A null input yields null.
   */
  public static byte[] toBytes(byte[] s) throws SQLException {
    return (s == null) ? null : toBytesFromHex(s);
  }

  /**
   * Renders raw bytes as an uppercase hex string, two characters per byte.
   */
  public static String convertToString(byte[] data) {
    final char[] digits = "0123456789ABCDEF".toCharArray();
    char[] rendered = new char[2 * data.length];
    for (int i = 0; i < data.length; i++) {
      int v = data[i] & 0xFF;
      rendered[2 * i] = digits[v >>> 4];
      rendered[2 * i + 1] = digits[v & 0x0F];
    }
    return new String(rendered);
  }

  // Decodes pairs of hex characters into bytes; a trailing odd nibble is dropped.
  private static byte[] toBytesFromHex(byte[] s) {
    byte[] decoded = new byte[s.length / 2];
    for (int i = 0; i < decoded.length; i++) {
      byte hi = gethex(s[2 * i]);
      byte lo = gethex(s[2 * i + 1]);
      // squid:S3034
      // Raw byte values should not be used in bitwise operations in combination with shifts
      decoded[i] = (byte) ((hi << 4) | (lo & 0xff));
    }
    return decoded;
  }

  // Maps one ASCII hex character ('0'-'9', 'a'-'f', 'A'-'F') to its 4-bit value.
  private static byte gethex(byte b) {
    if (b <= 57) {
      // '0'-'9' == 48-57
      return (byte) (b - 48);
    }
    if (b >= 97) {
      // 'a'-'f' == 97-102
      return (byte) (b - 97 + 10);
    }
    // 'A'-'F' == 65-70
    return (byte) (b - 65 + 10);
  }
}
60 |
--------------------------------------------------------------------------------
/src/main/java/com/amazon/redshift/util/RedshiftConstants.java:
--------------------------------------------------------------------------------
1 | /**
2 | * Copyright 2010-2023 Amazon.com, Inc. or its affiliates. All Rights Reserved.
3 | *
4 | * This file is licensed under the Apache License, Version 2.0 (the "License").
5 | * You may not use this file except in compliance with the License. A copy of
6 | * the License is located at
7 | *
8 | * http://aws.amazon.com/apache2.0/
9 | *
10 | * This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
11 | * CONDITIONS OF ANY KIND, either express or implied. See the License for the
12 | * specific language governing permissions and limitations under the License.
13 | */
14 |
15 | package com.amazon.redshift.util;
16 |
17 | /**
18 | * Class to contain all Redshift JDBC driver wide constants
19 | * Constants should be organized into logical groups with comments.
20 | */
public final class RedshiftConstants {

// Non-instantiable constants holder.
private RedshiftConstants() {
throw new AssertionError("RedshiftConstants class should not be instantiated.");
}

// Auth plugins names related constants: fully-qualified class names of the
// credential-provider plugins the driver can load by reflection.
public static final String BASIC_JWT_PLUGIN = "com.amazon.redshift.plugin.BasicJwtCredentialsProvider";
public static final String NATIVE_IDP_AZUREAD_BROWSER_PLUGIN = "com.amazon.redshift.plugin.BrowserAzureOAuth2CredentialsProvider";
public static final String NATIVE_IDP_OKTA_BROWSER_PLUGIN = "com.amazon.redshift.plugin.BrowserOktaSAMLCredentialsProvider";
public static final String NATIVE_IDP_OKTA_NON_BROWSER_PLUGIN = "com.amazon.redshift.plugin.BasicNativeSamlCredentialsProvider";
public static final String IDP_TOKEN_PLUGIN = "com.amazon.redshift.plugin.IdpTokenAuthPlugin";
public static final String IDC_PKCE_BROWSER_PLUGIN = "com.amazon.redshift.plugin.BrowserIdcAuthPlugin";

}
36 |
--------------------------------------------------------------------------------
/src/main/java/com/amazon/redshift/util/RedshiftException.java:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright (c) 2003, PostgreSQL Global Development Group
3 | * See the LICENSE file in the project root for more information.
4 | */
5 |
6 | package com.amazon.redshift.util;
7 |
8 | import java.sql.SQLException;
9 |
10 | import com.amazon.redshift.logger.RedshiftLogger;
11 |
12 | public class RedshiftException extends SQLException {
13 |
14 | /**
15 | *
16 | */
17 | private static final long serialVersionUID = 1L;
18 | private ServerErrorMessage serverError;
19 |
20 | public RedshiftException(String msg, RedshiftState state, Throwable cause, RedshiftLogger logger) {
21 | this(msg, state, cause);
22 | if(RedshiftLogger.isEnable())
23 | logger.logError(this);
24 | }
25 |
26 | public RedshiftException(String msg, RedshiftState state, Throwable cause) {
27 | super(msg, state == null ? null : state.getState(), cause);
28 | }
29 |
30 | public RedshiftException(String msg, RedshiftState state) {
31 | super(msg, state == null ? null : state.getState());
32 | }
33 |
34 | public RedshiftException(String msg, Throwable cause) {
35 | super(msg, null , cause);
36 | }
37 |
38 | public RedshiftException(String msg) {
39 | super(msg, "");
40 | }
41 |
42 | public RedshiftException(ServerErrorMessage serverError) {
43 | this(serverError, true);
44 | }
45 |
46 | public RedshiftException(ServerErrorMessage serverError, boolean detail) {
47 | super(detail ? serverError.getExternalErrorMessage() : serverError.getNonSensitiveErrorMessage(), serverError.getSQLState());
48 | this.serverError = serverError;
49 | }
50 |
51 | public ServerErrorMessage getServerErrorMessage() {
52 | return serverError;
53 | }
54 |
55 | public SQLException getSQLException() {
56 | return new SQLException(this.getMessage(),this.getSQLState(), this.getCause());
57 | }
58 | }
59 |
--------------------------------------------------------------------------------
/src/main/java/com/amazon/redshift/util/RedshiftGeography.java:
--------------------------------------------------------------------------------
1 | package com.amazon.redshift.util;
2 |
3 | // Right now most of methods in base class.
4 | // In future, if there are differences in bytes conversion of VARBYTE and GEOGRAPHY
5 | // then we can add more methods in this class.
// Right now most of methods in base class.
// In future, if there are differences in bytes conversion of VARBYTE and GEOGRAPHY
// then we can add more methods in this class.
// Marker subclass: GEOGRAPHY values currently share RedshiftByteTypes' hex conversion.
public class RedshiftGeography extends RedshiftByteTypes{
}
8 |
--------------------------------------------------------------------------------
/src/main/java/com/amazon/redshift/util/RedshiftJDBCMain.java:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright (c) 2004, PostgreSQL Global Development Group
3 | * See the LICENSE file in the project root for more information.
4 | */
5 |
6 | package com.amazon.redshift.util;
7 |
8 | import com.amazon.redshift.Driver;
9 |
public class RedshiftJDBCMain {

/**
 * Entry point reached when the driver jar is executed directly
 * (e.g. {@code java -jar}). Prints where the Driver class was loaded from
 * plus an explanatory message, then exits with status 1.
 *
 * @param args ignored
 */
public static void main(String[] args) {

// Resolve the jar/location the Driver class actually came from, to help
// users diagnose classpath issues.
java.net.URL url = Driver.class.getResource("/com/amazon/redshift/Driver.class");
System.out.printf("%n%s%n", com.amazon.redshift.util.DriverInfo.DRIVER_FULL_NAME);
System.out.printf("Found in: %s%n%n", url);

System.out.printf("The Redshift JDBC driver is not an executable Java program.%n%n"
+ "You must install it according to the JDBC driver installation "
+ "instructions for your application / container / appserver, "
+ "then use it by specifying a JDBC URL of the form %n jdbc:redshift://%n"
+ "or using an application specific method.%n%n"
+ "See the Redshift JDBC documentation: https://docs.aws.amazon.com/redshift/latest/mgmt/configure-jdbc-connection.html%n%n"
+ "This command has had no effect.%n");

// Non-zero exit: running the jar directly is always an error.
System.exit(1);
}
}
29 |
--------------------------------------------------------------------------------
/src/main/java/com/amazon/redshift/util/RedshiftMoney.java:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright (c) 2003, PostgreSQL Global Development Group
3 | * See the LICENSE file in the project root for more information.
4 | */
5 |
6 | package com.amazon.redshift.util;
7 |
8 | import java.io.Serializable;
9 | import java.sql.SQLException;
10 |
11 | /**
12 | * This implements a class that handles the Redshift money and cash types.
13 | */
14 | public class RedshiftMoney extends RedshiftObject implements Serializable, Cloneable {
15 | /*
16 | * The value of the field
17 | */
18 | public double val;
19 |
20 | /**
21 | * @param value of field
22 | */
23 | public RedshiftMoney(double value) {
24 | this();
25 | val = value;
26 | }
27 |
28 | public RedshiftMoney(String value) throws SQLException {
29 | this();
30 | setValue(value);
31 | }
32 |
33 | /*
34 | * Required by the driver
35 | */
36 | public RedshiftMoney() {
37 | setType("money");
38 | }
39 |
40 | public void setValue(String s) throws SQLException {
41 | try {
42 | String s1;
43 | boolean negative;
44 |
45 | negative = (s.charAt(0) == '(');
46 |
47 | // Remove any () (for negative) & currency symbol
48 | s1 = RedshiftTokenizer.removePara(s).substring(1);
49 |
50 | // Strip out any , in currency
51 | int pos = s1.indexOf(',');
52 | while (pos != -1) {
53 | s1 = s1.substring(0, pos) + s1.substring(pos + 1);
54 | pos = s1.indexOf(',');
55 | }
56 |
57 | val = Double.parseDouble(s1);
58 | val = negative ? -val : val;
59 |
60 | } catch (NumberFormatException e) {
61 | throw new RedshiftException(GT.tr("Conversion of money failed."),
62 | RedshiftState.NUMERIC_CONSTANT_OUT_OF_RANGE, e);
63 | }
64 | }
65 |
66 | @Override
67 | public int hashCode() {
68 | final int prime = 31;
69 | int result = super.hashCode();
70 | long temp;
71 | temp = Double.doubleToLongBits(val);
72 | result = prime * result + (int) (temp ^ (temp >>> 32));
73 | return result;
74 | }
75 |
76 | public boolean equals(Object obj) {
77 | if (obj instanceof RedshiftMoney) {
78 | RedshiftMoney p = (RedshiftMoney) obj;
79 | return val == p.val;
80 | }
81 | return false;
82 | }
83 |
84 | public String getValue() {
85 | if (val < 0) {
86 | return "-$" + (-val);
87 | } else {
88 | return "$" + val;
89 | }
90 | }
91 |
92 | @Override
93 | public Object clone() throws CloneNotSupportedException {
94 | // squid:S2157 "Cloneables" should implement "clone
95 | return super.clone();
96 | }
97 | }
98 |
--------------------------------------------------------------------------------
/src/main/java/com/amazon/redshift/util/RedshiftVarbyte.java:
--------------------------------------------------------------------------------
1 | package com.amazon.redshift.util;
2 |
3 | // Right now most of methods in base class.
4 | // In future, if there are differences in bytes conversion of VARBYTE and GEOGRAPHY
5 | // then we can add more methods in this class.
// Right now most of methods in base class.
// In future, if there are differences in bytes conversion of VARBYTE and GEOGRAPHY
// then we can add more methods in this class.
// Marker subclass: VARBYTE values currently share RedshiftByteTypes' hex conversion.
public class RedshiftVarbyte extends RedshiftByteTypes{
}
8 |
--------------------------------------------------------------------------------
/src/main/java/com/amazon/redshift/util/RedshiftWarning.java:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright (c) 2004, PostgreSQL Global Development Group
3 | * See the LICENSE file in the project root for more information.
4 | */
5 |
6 | package com.amazon.redshift.util;
7 |
8 | import java.sql.SQLWarning;
9 |
/**
 * SQLWarning wrapper that preserves the structured error message received
 * from the server alongside the standard warning fields.
 */
public class RedshiftWarning extends SQLWarning {

// Structured error received from the server; never null (set in constructor).
private ServerErrorMessage serverError;

public RedshiftWarning(ServerErrorMessage err) {
super(err.toString(), err.getSQLState());
this.serverError = err;
}

// Returns the server's message text rather than the full toString()
// captured by the super constructor above.
public String getMessage() {
return serverError.getMessage();
}

/**
 * @return the structured error message received from the server
 */
public ServerErrorMessage getServerErrorMessage() {
return serverError;
}
}
27 |
--------------------------------------------------------------------------------
/src/main/java/com/amazon/redshift/util/URLCoder.java:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright (c) 2018, PostgreSQL Global Development Group
3 | * See the LICENSE file in the project root for more information.
4 | */
5 |
6 | package com.amazon.redshift.util;
7 |
8 | import java.io.UnsupportedEncodingException;
9 | import java.net.URLDecoder;
10 | import java.net.URLEncoder;
11 |
12 | /**
13 | * This class helps with URL encoding and decoding. UTF-8 encoding is used by default to make
14 | * encoding consistent across the driver, and encoding might be changed via {@code
15 | * redshift.url.encoding} property
16 | *
17 | * Note: this should not be used outside of Redshift source, this is not a public API of the
18 | * driver.
19 | */
public final class URLCoder {
  // Encoding used for both directions; overridable via -Dredshift.url.encoding=...
  private static final String ENCODING_FOR_URL =
      System.getProperty("redshift.url.encoding", "UTF-8");

  /**
   * Decodes {@code x-www-form-urlencoded} string into Java string.
   *
   * @param encoded encoded value
   * @return decoded value
   * @see URLDecoder#decode(String, String)
   */
  public static String decode(String encoded) {
    try {
      return URLDecoder.decode(encoded, ENCODING_FOR_URL);
    } catch (UnsupportedEncodingException e) {
      throw new IllegalStateException(
          "Unable to decode URL entry via " + ENCODING_FOR_URL + ". This should not happen", e);
    }
  }

  /**
   * Encodes Java string into {@code x-www-form-urlencoded} format
   *
   * @param plain input value
   * @return encoded value
   * @see URLEncoder#encode(String, String)
   */
  public static String encode(String plain) {
    try {
      // BUG FIX: this previously hard-coded "UTF-8", ignoring the
      // redshift.url.encoding property that decode() honors (and that the
      // error message below already referenced). Using ENCODING_FOR_URL keeps
      // encode/decode symmetric under a non-default property value.
      return URLEncoder.encode(plain, ENCODING_FOR_URL);
    } catch (UnsupportedEncodingException e) {
      throw new IllegalStateException(
          "Unable to encode URL entry via " + ENCODING_FOR_URL + ". This should not happen", e);
    }
  }
}
56 |
--------------------------------------------------------------------------------
/src/main/java/com/amazon/redshift/xa/RedshiftXADataSource.java:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright (c) 2009, PostgreSQL Global Development Group
3 | * See the LICENSE file in the project root for more information.
4 | */
5 |
6 | package com.amazon.redshift.xa;
7 |
8 | import com.amazon.redshift.core.BaseConnection;
9 | import com.amazon.redshift.ds.common.BaseDataSource;
10 |
11 | import java.sql.Connection;
12 | import java.sql.SQLException;
13 |
14 | import javax.naming.Reference;
15 | import javax.sql.XAConnection;
16 | import javax.sql.XADataSource;
17 |
18 | /**
19 | * XA-enabled DataSource implementation.
20 | *
21 | * @author Heikki Linnakangas (heikki.linnakangas@iki.fi)
22 | */
23 | public class RedshiftXADataSource extends BaseDataSource implements XADataSource {
24 | /**
25 | * Gets a connection to the Redshift database. The database is identified by the DataSource
26 | * properties serverName, databaseName, and portNumber. The user to connect as is identified by
27 | * the DataSource properties user and password.
28 | *
29 | * @return A valid database connection.
30 | * @throws SQLException Occurs when the database connection cannot be established.
31 | */
32 | public XAConnection getXAConnection() throws SQLException {
33 | return getXAConnection(getUser(), getPassword());
34 | }
35 |
36 | /**
37 | * Gets a XA-enabled connection to the Redshift database. The database is identified by the
38 | * DataSource properties serverName, databaseName, and portNumber. The user to connect as is
39 | * identified by the arguments user and password, which override the DataSource properties by the
40 | * same name.
41 | *
42 | * @return A valid database connection.
43 | * @throws SQLException Occurs when the database connection cannot be established.
44 | */
45 | public XAConnection getXAConnection(String user, String password) throws SQLException {
46 | Connection con = super.getConnection(user, password);
47 | return new RedshiftXAConnection((BaseConnection) con);
48 | }
49 |
50 | public String getDescription() {
51 | return "XA-enabled DataSource from " + com.amazon.redshift.util.DriverInfo.DRIVER_FULL_NAME;
52 | }
53 |
54 | /**
55 | * Generates a reference using the appropriate object factory.
56 | */
57 | protected Reference createReference() {
58 | return new Reference(getClass().getName(), RedshiftXADataSourceFactory.class.getName(), null);
59 | }
60 |
61 | }
62 |
--------------------------------------------------------------------------------
/src/main/java/com/amazon/redshift/xa/RedshiftXADataSourceFactory.java:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright (c) 2007, PostgreSQL Global Development Group
3 | * See the LICENSE file in the project root for more information.
4 | */
5 |
6 | package com.amazon.redshift.xa;
7 |
8 | import com.amazon.redshift.ds.common.RedshiftObjectFactory;
9 |
10 | import java.util.Hashtable;
11 |
12 | import javax.naming.Context;
13 | import javax.naming.Name;
14 | import javax.naming.Reference;
15 |
16 | /**
17 | * An ObjectFactory implementation for RedshiftXADataSource-objects.
18 | */
19 |
20 | public class RedshiftXADataSourceFactory extends RedshiftObjectFactory {
21 | /*
22 | * All the other Redshift DataSource use RedshiftObjectFactory directly, but we can't do that with
23 | * RedshiftXADataSource because referencing RedshiftXADataSource from RedshiftObjectFactory would break
24 | * "JDBC2 Enterprise" edition build which doesn't include RedshiftXADataSource.
25 | */
26 |
27 | public Object getObjectInstance(Object obj, Name name, Context nameCtx,
28 | Hashtable, ?> environment) throws Exception {
29 | Reference ref = (Reference) obj;
30 | String className = ref.getClassName();
31 | if (className.equals("com.amazon.redshift.xa.RedshiftXADataSource")) {
32 | return loadXADataSource(ref);
33 | } else {
34 | return null;
35 | }
36 | }
37 |
38 | private Object loadXADataSource(Reference ref) {
39 | RedshiftXADataSource ds = new RedshiftXADataSource();
40 | return loadBaseDataSource(ds, ref);
41 | }
42 | }
43 |
--------------------------------------------------------------------------------
/src/main/java/com/amazon/redshift/xa/RedshiftXAException.java:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright (c) 2009, PostgreSQL Global Development Group
3 | * See the LICENSE file in the project root for more information.
4 | */
5 |
6 | package com.amazon.redshift.xa;
7 |
8 | import javax.transaction.xa.XAException;
9 |
/**
 * A convenience subclass of {@code XAException} which makes it easy to create an instance of
 * {@code XAException} with a human-readable message, a {@code Throwable} cause, and an XA
 * error code.
 *
 * @author Michael S. Allman
 */
public class RedshiftXAException extends XAException {
  /**
   * Creates an exception with a message and an XA error code.
   *
   * @param message   human-readable description of the failure
   * @param errorCode one of the {@code XAException.XA_*}/{@code XAER_*} codes
   */
  RedshiftXAException(String message, int errorCode) {
    super(message);
    this.errorCode = errorCode;
  }

  /**
   * Creates an exception with a message, a cause, and an XA error code.
   *
   * @param message   human-readable description of the failure
   * @param cause     the underlying throwable that triggered this exception
   * @param errorCode one of the {@code XAException.XA_*}/{@code XAER_*} codes
   */
  RedshiftXAException(String message, Throwable cause, int errorCode) {
    // Delegate message/code handling to the two-argument constructor.
    this(message, errorCode);
    initCause(cause);
  }

  /**
   * Creates an exception with only a cause and an XA error code (no message).
   *
   * @param cause     the underlying throwable that triggered this exception
   * @param errorCode one of the {@code XAException.XA_*}/{@code XAER_*} codes
   */
  RedshiftXAException(Throwable cause, int errorCode) {
    // XAException(int) stores the code in the public errorCode field itself.
    super(errorCode);
    initCause(cause);
  }
}
37 |
--------------------------------------------------------------------------------
/src/main/resources/META-INF/LICENSE:
--------------------------------------------------------------------------------
1 | Copyright (c) 1997, PostgreSQL Global Development Group
2 | All rights reserved.
3 |
4 | Redistribution and use in source and binary forms, with or without
5 | modification, are permitted provided that the following conditions are met:
6 |
7 | 1. Redistributions of source code must retain the above copyright notice,
8 | this list of conditions and the following disclaimer.
9 | 2. Redistributions in binary form must reproduce the above copyright notice,
10 | this list of conditions and the following disclaimer in the documentation
11 | and/or other materials provided with the distribution.
12 |
13 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
14 | AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
15 | IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
16 | ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
17 | LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
18 | CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
19 | SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
20 | INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
21 | CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
22 | ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
23 | POSSIBILITY OF SUCH DAMAGE.
24 |
--------------------------------------------------------------------------------
/src/main/resources/META-INF/services/java.sql.Driver:
--------------------------------------------------------------------------------
1 | com.amazon.redshift.Driver
2 |
--------------------------------------------------------------------------------
/src/main/resources/com/amazon/redshift/plugin/adfs.exe:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/aws/amazon-redshift-jdbc-driver/635b66e99d4ac70415c99d020ed2da330dcc0c6d/src/main/resources/com/amazon/redshift/plugin/adfs.exe
--------------------------------------------------------------------------------
/src/main/resources/redshift_jdbc_driver.properties:
--------------------------------------------------------------------------------
1 | version=${version}
--------------------------------------------------------------------------------
/src/main/test/java/.gitkeep:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/aws/amazon-redshift-jdbc-driver/635b66e99d4ac70415c99d020ed2da330dcc0c6d/src/main/test/java/.gitkeep
--------------------------------------------------------------------------------