├── .nvmrc ├── .github ├── CODEOWNERS ├── renovate.json ├── workflows │ ├── release-drafter.yml │ ├── auto-approve.yml │ ├── site.yml │ └── ci.yml └── release-drafter.yml ├── project ├── build.properties ├── plugins.sbt └── BuildHelper.scala ├── minio └── export │ ├── console.log │ └── dir1 │ ├── hello.txt │ └── user.csv ├── test-data └── bucket-1 │ ├── console.log │ └── dir1 │ ├── hello.txt │ └── user.csv ├── docs ├── package.json ├── sidebars.js ├── testing.md ├── credentials.md └── index.md ├── zio-s3 └── src │ ├── test │ └── scala │ │ └── zio │ │ └── s3 │ │ ├── S3LayerTest.scala │ │ ├── S3SettingsTest.scala │ │ ├── S3ProvidersTest.scala │ │ └── S3Test.scala │ └── main │ └── scala │ └── zio │ └── s3 │ ├── settings.scala │ ├── errors.scala │ ├── s3model.scala │ ├── providers.scala │ ├── s3options.scala │ ├── package.scala │ ├── S3.scala │ ├── Test.scala │ └── Live.scala ├── .scalafmt.conf ├── docker-compose.yml ├── README.md ├── CLA.md ├── .gitignore ├── LICENSE └── sbt /.nvmrc: -------------------------------------------------------------------------------- 1 | 20.9.0 2 | -------------------------------------------------------------------------------- /.github/CODEOWNERS: -------------------------------------------------------------------------------- 1 | * @zio/zio-s3 2 | -------------------------------------------------------------------------------- /project/build.properties: -------------------------------------------------------------------------------- 1 | sbt.version = 1.10.11 2 | -------------------------------------------------------------------------------- /minio/export/console.log: -------------------------------------------------------------------------------- 1 | this is a log file 2 | on multi line 3 | -------------------------------------------------------------------------------- /minio/export/dir1/hello.txt: -------------------------------------------------------------------------------- 1 | Hello ZIO s3 2 | this is a beautiful day -------------------------------------------------------------------------------- /test-data/bucket-1/console.log: -------------------------------------------------------------------------------- 1 | this is a log file 2 | on multi line 3 | -------------------------------------------------------------------------------- /test-data/bucket-1/dir1/hello.txt: -------------------------------------------------------------------------------- 1 | Hello ZIO s3 2 | this is a beautiful day -------------------------------------------------------------------------------- /docs/package.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "@zio.dev/zio-s3", 3 | "description": "ZIO S3 Documentation", 4 | "license": "Apache-2.0" 5 | } 6 | -------------------------------------------------------------------------------- /minio/export/dir1/user.csv: -------------------------------------------------------------------------------- 1 | John,Doe,120 jefferson st.,Riverside, NJ, 08075 2 | Jack,McGinnis,220 hobo Av.,Phila, PA,09119 3 | Pete,Douglas,10 wall street,New York, NY,09119 4 | Marie,White,20 time square,Bronx, NY,08220 5 | -------------------------------------------------------------------------------- /test-data/bucket-1/dir1/user.csv: -------------------------------------------------------------------------------- 1 | John,Doe,120 jefferson st.,Riverside, NJ, 08075 2 | Jack,McGinnis,220 hobo Av.,Phila, PA,09119 3 | Pete,Douglas,10 wall street,New York, NY,09119 4 | Marie,White,20 time square,Bronx, 
NY,08220 5 | -------------------------------------------------------------------------------- /.github/renovate.json: -------------------------------------------------------------------------------- 1 | { 2 | "automerge": true, 3 | "rebaseWhen": "conflicted", 4 | "labels": ["type: dependencies"], 5 | "packageRules": [ 6 | { 7 | "matchManagers": [ 8 | "sbt" 9 | ], 10 | "enabled": false 11 | } 12 | ] 13 | } 14 | -------------------------------------------------------------------------------- /.github/workflows/release-drafter.yml: -------------------------------------------------------------------------------- 1 | name: Release Drafter 2 | 3 | on: 4 | push: 5 | branches: ['series/2.x'] 6 | 7 | jobs: 8 | update_release_draft: 9 | runs-on: ubuntu-latest 10 | steps: 11 | - uses: release-drafter/release-drafter@v5 12 | env: 13 | GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} 14 | -------------------------------------------------------------------------------- /docs/sidebars.js: -------------------------------------------------------------------------------- 1 | const sidebars = { 2 | sidebar: [ 3 | { 4 | type: "category", 5 | label: "ZIO S3", 6 | collapsed: false, 7 | link: { type: "doc", id: "index" }, 8 | items: [ 9 | "credentials", 10 | "testing" 11 | ] 12 | } 13 | ] 14 | }; 15 | 16 | module.exports = sidebars; 17 | -------------------------------------------------------------------------------- /.github/workflows/auto-approve.yml: -------------------------------------------------------------------------------- 1 | name: Auto approve 2 | 3 | on: 4 | pull_request_target 5 | 6 | jobs: 7 | auto-approve: 8 | runs-on: ubuntu-latest 9 | steps: 10 | - uses: hmarr/auto-approve-action@v3.1.0 11 | if: github.actor == 'scala-steward' || github.actor == 'renovate[bot]' 12 | with: 13 | github-token: "${{ secrets.GITHUB_TOKEN }}" 14 | -------------------------------------------------------------------------------- /zio-s3/src/test/scala/zio/s3/S3LayerTest.scala: -------------------------------------------------------------------------------- 1 | package zio.s3 2 | 3 | import java.net.URI 4 | import software.amazon.awssdk.regions.Region 5 | import zio.test.Assertion._ 6 | import zio.test._ 7 | 8 | object S3LayerTest extends ZIOSpecDefault { 9 | 10 | override def spec: Spec[Any, Nothing] = 11 | suite("S3LayerSpec")( 12 | test("using ZIO[R with Scope, E, A] in liveZIO compiles") { 13 | typeCheck( 14 | """liveZIO(Region.CA_CENTRAL_1, providers.default, Some(URI.create("http://localhost:9000")))""" 15 | ).map(assert(_)(isRight)) 16 | } 17 | ) 18 | } 19 | -------------------------------------------------------------------------------- /.scalafmt.conf: -------------------------------------------------------------------------------- 1 | version = "2.6.1" 2 | 3 | align = most 4 | assumeStandardLibraryStripMargin = true 5 | danglingParentheses = true 6 | docstrings = JavaDoc 7 | lineEndings = unix 8 | maxColumn = 120 9 | 10 | continuationIndent { 11 | callSite = 2 12 | defnSite = 2 13 | } 14 | 15 | newlines { 16 | alwaysBeforeTopLevelStatements = true 17 | alwaysBeforeMultilineDef = true 18 | } 19 | 20 | spaces { 21 | afterKeywordBeforeParen = true 22 | afterSymbolicDefs = true 23 | inImportCurlyBraces = true 24 | } 25 | 26 | rewrite.rules = [ 27 | AsciiSortImports, 28 | AvoidInfix, 29 | PreferCurlyFors, 30 | RedundantBraces, 31 | RedundantParens, 32 | SortModifiers 33 | ] -------------------------------------------------------------------------------- /docs/testing.md: 
-------------------------------------------------------------------------------- 1 | --- 2 | id: testing 3 | title: "Testing" 4 | --- 5 | 6 | A stub implementation of S3 storage is provided for testing purposes; internally, it uses the filesystem to simulate S3 storage 7 | 8 | ```scala 9 | import zio._ 10 | import zio.nio.file.{Path => ZPath} 11 | import zio.s3._ 12 | 13 | // build the stub s3 Layer 14 | val stubS3: ZLayer[Any, Nothing, S3] = stub(ZPath("/tmp/s3-data")) 15 | 16 | // list all buckets available by using the S3 Stub Layer 17 | // will list all directories of `/tmp/s3-data` 18 | listBuckets.provideLayer(stubS3) 19 | ``` 20 | 21 | More information on how to use [ZLayer](https://zio.dev/docs/howto/howto_use_layers) 22 | -------------------------------------------------------------------------------- /.github/release-drafter.yml: -------------------------------------------------------------------------------- 1 | name-template: 'v$RESOLVED_VERSION' 2 | tag-template: 'v$RESOLVED_VERSION' 3 | template: | 4 | # What's Changed 5 | $CHANGES 6 | categories: 7 | - title: 'Breaking' 8 | label: 'type: breaking' 9 | - title: 'New' 10 | label: 'type: feature' 11 | - title: 'Bug Fixes' 12 | label: 'type: bug' 13 | - title: 'Maintenance' 14 | label: 'type: maintenance' 15 | - title: 'Documentation' 16 | label: 'type: docs' 17 | - title: 'Dependency Updates' 18 | label: 'type: dependencies' 19 | 20 | version-resolver: 21 | major: 22 | labels: 23 | - 'type: breaking' 24 | minor: 25 | labels: 26 | - 'type: feature' 27 | patch: 28 | labels: 29 | - 'type: bug' 30 | - 'type: maintenance' 31 | - 'type: docs' 32 | - 'type: dependencies' 33 | - 'type: security' 34 | 35 | exclude-labels: 36 | - 'skip-changelog' 37 | -------------------------------------------------------------------------------- /project/plugins.sbt: -------------------------------------------------------------------------------- 1 | addSbtPlugin("com.eed3si9n" % "sbt-unidoc" % "0.4.3") 2 | addSbtPlugin("ch.epfl.scala" % "sbt-bloop" % "1.6.0") 3 | addSbtPlugin("ch.epfl.scala" % "sbt-scalafix" % "0.14.3") 4 | addSbtPlugin("com.eed3si9n" % "sbt-buildinfo" % "0.13.1") 5 | addSbtPlugin("com.geirsson" % "sbt-ci-release" % "1.5.5") 6 | addSbtPlugin("com.github.cb372" % "sbt-explicit-dependencies" % "0.2.16") 7 | addSbtPlugin("org.portable-scala" % "sbt-crossproject" % "1.3.2") 8 | addSbtPlugin("de.heikoseeberger" % "sbt-header" % "5.6.5") 9 | addSbtPlugin("org.scalameta" % "sbt-mdoc" % "2.7.1") 10 | addSbtPlugin("org.scalameta" % "sbt-scalafmt" % "2.5.4") 11 | addSbtPlugin("dev.zio" % "zio-sbt-website" % "0.3.4") 12 | 13 | addDependencyTreePlugin 14 | -------------------------------------------------------------------------------- /zio-s3/src/test/scala/zio/s3/S3SettingsTest.scala: -------------------------------------------------------------------------------- 1 | package zio.s3 2 | 3 | import software.amazon.awssdk.auth.credentials.AwsBasicCredentials 4 | import software.amazon.awssdk.regions.Region 5 | import zio.s3.errors._ 6 | import zio.test._ 7 | 8 | object S3SettingsTest extends ZIOSpecDefault { 9 | 10 | def spec: Spec[Any, InvalidSettings] = 11 | suite("Settings")( 12 | test("invalid region") { 13 | for { 14 | failure <- S3Settings 15 | .from(Region.of("invalid"), AwsBasicCredentials.create("key", "secret")) 16 | .foldCause(_.failureOption.map(_.message).mkString, _ => "") 17 | } yield assertTrue(failure == "Invalid aws region provided : invalid") 18 | }, 19 | test("valid region") { 20 | for { 21 | success <- 
S3Settings.from(Region.US_EAST_2, AwsBasicCredentials.create("key", "secret")) 22 | } yield assertTrue( 23 | success.s3Region.region -> success.credentials == 24 | Region.US_EAST_2 -> AwsBasicCredentials.create("key", "secret") 25 | ) 26 | }, 27 | test("unsafe Region") { 28 | assertTrue(S3Region.unsafeFromString("blah").region == Region.of("blah")) 29 | } 30 | ) 31 | } 32 | -------------------------------------------------------------------------------- /docker-compose.yml: -------------------------------------------------------------------------------- 1 | version: "3.7" 2 | 3 | services: 4 | minio: 5 | image: quay.io/minio/minio 6 | ports: 7 | - "9000:9000" 8 | - "9001:9001" 9 | volumes: 10 | - ./minio/data:/data 11 | - ./minio/export:/export 12 | environment: 13 | - MINIO_ROOT_USER=TESTKEY 14 | - MINIO_ROOT_PASSWORD=TESTSECRET 15 | healthcheck: 16 | test: ["CMD", "curl", "-f", "http://localhost:9000/minio/health/live"] 17 | interval: 5s 18 | timeout: 5s 19 | retries: 5 20 | start_period: 10s 21 | command: server /data --console-address ":9001" 22 | 23 | mc: 24 | image: quay.io/minio/mc 25 | volumes: 26 | - ./minio/export:/export 27 | depends_on: 28 | minio: 29 | condition: service_healthy 30 | environment: 31 | - MINIO_ROOT_USER=TESTKEY 32 | - MINIO_ROOT_PASSWORD=TESTSECRET 33 | entrypoint: > 34 | /bin/sh -c " 35 | echo Waiting for minio service to be ready...; 36 | curl --retry 30 --retry-delay 2 -s -o /dev/null http://minio:9000/minio/health/live 37 | 38 | echo Minio is ready; 39 | /usr/bin/mc config host add my-minio http://minio:9000 $${MINIO_ROOT_USER} $${MINIO_ROOT_PASSWORD}; 40 | /usr/bin/mc mb -p my-minio/bucket-1; 41 | /usr/bin/mc mirror /export/ my-minio/bucket-1; 42 | /usr/bin/mc ls my-minio/bucket-1; 43 | " -------------------------------------------------------------------------------- /docs/credentials.md: -------------------------------------------------------------------------------- 1 | --- 2 | id: credentials 3 | title: "Credentials" 4 | --- 5 | 6 | zio-s3 exposes the credentials providers from the AWS SDK: https://docs.aws.amazon.com/sdk-for-java/v2/developer-guide/credentials.html 7 | If credentials cannot be found by one or more of the selected providers, the operation will fail with `InvalidCredentials` 8 | 9 | ```scala 10 | import software.amazon.awssdk.auth.credentials.AwsBasicCredentials 11 | import zio._ 12 | import software.amazon.awssdk.regions.Region 13 | import software.amazon.awssdk.services.s3.model.S3Exception 14 | import zio.s3._ 15 | import zio.s3.providers._ 16 | 17 | // build S3 Layer from basic credentials 18 | val s3: Layer[S3Exception, S3] = 19 | live(Region.AF_SOUTH_1, AwsBasicCredentials.create("key", "secret")) 20 | 21 | // build S3 Layer from System properties or Environment variables 22 | val s3: Layer[S3Exception, S3] = 23 | liveZIO(Region.AF_SOUTH_1, system <> env) 24 | 25 | // build S3 Layer from Instance profile credentials 26 | val s3: Layer[S3Exception, S3] = 27 | liveZIO(Region.AF_SOUTH_1, instanceProfile) 28 | 29 | // build S3 Layer from web identity token credentials with STS; the awssdk sts module is required on the classpath 30 | val s3: Layer[S3Exception, S3] = liveZIO(Region.AF_SOUTH_1, webIdentity) 31 | 32 | // build S3 Layer from default available credentials providers 33 | val s3: Layer[S3Exception, S3] = liveZIO(Region.AF_SOUTH_1, default) 34 | 35 | // use custom logic to fetch aws credentials 36 | val zcredentials: ZIO[R, S3Exception, AwsCredentials] = ??? // specific implementation to fetch credentials
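// For illustration only: a hypothetical way to implement zcredentials, reading two
// made-up environment variable names and mapping any failure to InvalidCredentials
// (available via zio.s3.errors._):
// val zcredentials: ZIO[Any, S3Exception, AwsCredentials] =
//   ZIO.attempt(AwsBasicCredentials.create(sys.env("MY_ACCESS_KEY"), sys.env("MY_SECRET_KEY")))
//     .mapError(e => InvalidCredentials(e.getMessage))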
 37 | val s3: ZLayer[Any, S3Exception, S3] = settings(Region.AF_SOUTH_1, zcredentials) >>> live 38 | ``` 39 | -------------------------------------------------------------------------------- /zio-s3/src/main/scala/zio/s3/settings.scala: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2017-2020 John A. De Goes and the ZIO Contributors 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License. 6 | * You may obtain a copy of the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | * 10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | * See the License for the specific language governing permissions and 14 | * limitations under the License. 15 | */ 16 | 17 | package zio.s3 18 | 19 | import software.amazon.awssdk.auth.credentials._ 20 | import software.amazon.awssdk.regions.Region 21 | import zio.{ IO, ZIO } 22 | import zio.s3.errors._ 23 | 24 | sealed abstract class S3Region(val region: Region) 25 | 26 | object S3Region { self => 27 | 28 | def from(region: Region): Either[InvalidSettings, S3Region] = 29 | region match { 30 | case r if Region.regions().contains(r) => Right(new S3Region(r) {}) 31 | case r => Left(InvalidSettings(s"Invalid aws region provided : ${r.id}")) 32 | } 33 | 34 | /** 35 | * Only use this to support another region of a different s3-compatible storage provider, such as OVH. 36 | * Your S3 region might be invalid and will result in a runtime error. 37 | * @param r unsafe region 38 | */ 39 | def unsafeFromString(r: String): S3Region = 40 | new S3Region(Region.of(r)) {} 41 | } 42 | 43 | final case class S3Settings(s3Region: S3Region, credentials: AwsCredentials) 44 | 45 | object S3Settings { 46 | 47 | def from(region: Region, credentials: AwsCredentials): IO[InvalidSettings, S3Settings] = 48 | ZIO.fromEither(S3Region.from(region)).map(S3Settings(_, credentials)) 49 | } 50 | -------------------------------------------------------------------------------- /zio-s3/src/main/scala/zio/s3/errors.scala: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2017-2020 John A. De Goes and the ZIO Contributors 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License. 6 | * You may obtain a copy of the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | * 10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | * See the License for the specific language governing permissions and 14 | * limitations under the License. 
15 | */ 16 | 17 | package zio.s3 18 | 19 | import software.amazon.awssdk.core.exception.{ SdkException, SdkServiceException } 20 | import software.amazon.awssdk.services.s3.model.S3Exception 21 | import zio.Cause 22 | 23 | import java.nio.charset.CharacterCodingException 24 | import scala.util.control.NonFatal 25 | 26 | object errors { 27 | 28 | final case class SdkError(error: SdkException) 29 | extends S3Exception(S3Exception.builder().message(error.getMessage).cause(error)) 30 | 31 | final case class InvalidCredentials(message: String) extends S3Exception(S3Exception.builder().message(message)) 32 | 33 | final case class InvalidSettings(message: String) extends S3Exception(S3Exception.builder().message(message)) 34 | 35 | final case class ConnectionError(message: String, cause: Throwable) 36 | extends S3Exception(S3Exception.builder().message(message)) 37 | 38 | final case class InvalidPartSize(message: String, size: Int) 39 | extends S3Exception(S3Exception.builder().message(message)) 40 | 41 | final case class DecodingException(cause: CharacterCodingException) 42 | extends S3Exception(S3Exception.builder().cause(cause)) 43 | 44 | object syntax { 45 | 46 | implicit class S3ExceptionOps(ex: Throwable) { 47 | 48 | def asS3Exception(): Cause[S3Exception] = 49 | ex match { 50 | case e: SdkServiceException => 51 | Cause.fail( 52 | S3Exception 53 | .builder() 54 | .statusCode(e.statusCode()) 55 | .requestId(e.requestId()) 56 | .message(e.getMessage) 57 | .cause(e) 58 | .build() 59 | .asInstanceOf[S3Exception] 60 | ) 61 | case NonFatal(e) => 62 | Cause.fail(S3Exception.builder().message(e.getMessage).cause(e).build().asInstanceOf[S3Exception]) 63 | case other => Cause.die(other) 64 | } 65 | } 66 | } 67 | } 68 | -------------------------------------------------------------------------------- /zio-s3/src/main/scala/zio/s3/s3model.scala: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2017-2020 John A. De Goes and the ZIO Contributors 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License. 6 | * You may obtain a copy of the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | * 10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | * See the License for the specific language governing permissions and 14 | * limitations under the License. 
15 | */ 16 | 17 | package zio.s3 18 | 19 | import software.amazon.awssdk.services.s3.model.{ Bucket, HeadObjectResponse, ListObjectsV2Response } 20 | import zio.Chunk 21 | 22 | import java.time.Instant 23 | import scala.jdk.CollectionConverters._ 24 | 25 | final case class S3Bucket(name: String, creationDate: Instant) 26 | 27 | object S3Bucket { 28 | type S3BucketListing = Chunk[S3Bucket] 29 | 30 | def fromBucket(bucket: Bucket): S3Bucket = 31 | new S3Bucket(bucket.name(), bucket.creationDate()) 32 | 33 | def fromBuckets(l: List[Bucket]): S3BucketListing = 34 | Chunk.fromIterable(l.map(fromBucket)) 35 | } 36 | 37 | final case class S3ObjectListing( 38 | bucketName: String, 39 | delimiter: Option[String], 40 | starAfter: Option[String], 41 | objectSummaries: Chunk[S3ObjectSummary], 42 | nextContinuationToken: Option[String], 43 | prefix: Option[String] 44 | ) 45 | 46 | object S3ObjectListing { 47 | 48 | def from(bucketName: String, nextContinuationToken: Option[String]): S3ObjectListing = 49 | S3ObjectListing(bucketName, None, None, Chunk.empty, nextContinuationToken, None) 50 | 51 | def fromResponse(r: ListObjectsV2Response): S3ObjectListing = 52 | S3ObjectListing( 53 | r.name(), 54 | Option(r.delimiter()), 55 | Option(r.startAfter()), 56 | Chunk 57 | .fromIterable(r.contents().asScala.toList) 58 | .map(o => S3ObjectSummary(r.name(), o.key(), o.lastModified(), o.size())), 59 | Option(r.nextContinuationToken()), 60 | Option(r.prefix()).collect { case x if x.nonEmpty => x } 61 | ) 62 | 63 | } 64 | 65 | final case class S3ObjectSummary(bucketName: String, key: String, lastModified: Instant, size: Long) 66 | 67 | /** 68 | * @param metadata the user-defined metadata without the "x-amz-meta-" prefix 69 | * @param contentType the content type of the object (application/json, application/zip, text/plain, ...) 
 70 | * @param contentLength the size of the object in bytes 71 | * @param eTag the etag for the response as hex string 72 | */ 73 | final case class ObjectMetadata( 74 | metadata: Map[String, String], 75 | contentType: String, 76 | contentLength: Long, 77 | eTag: String 78 | ) 79 | 80 | object ObjectMetadata { 81 | 82 | def fromResponse(r: HeadObjectResponse): ObjectMetadata = 83 | ObjectMetadata( 84 | r.metadata().asScala.toMap, 85 | r.contentType(), 86 | r.contentLength(), 87 | // the ETag from the response includes surrounding quotes, strip them 88 | r.eTag().replace("\"", "") 89 | ) 90 | } 91 | -------------------------------------------------------------------------------- /zio-s3/src/main/scala/zio/s3/providers.scala: -------------------------------------------------------------------------------- 1 | package zio.s3 2 | 3 | import software.amazon.awssdk.auth.credentials._ 4 | import zio.{ IO, Scope, UIO, ZIO } 5 | import zio.s3.errors._ 6 | 7 | object providers { 8 | 9 | def const(credential: AwsCredentials): UIO[AwsCredentialsProvider] = 10 | ZIO.succeed[AwsCredentialsProvider](() => credential) 11 | 12 | def basic(accessKeyId: String, secretAccessKey: String): UIO[AwsCredentialsProvider] = 13 | const(AwsBasicCredentials.create(accessKeyId, secretAccessKey)) 14 | 15 | def session(accessKeyId: String, secretAccessKey: String, sessionToken: String): UIO[AwsCredentialsProvider] = 16 | const(AwsSessionCredentials.create(accessKeyId, secretAccessKey, sessionToken)) 17 | 18 | val system: IO[InvalidCredentials, SystemPropertyCredentialsProvider] = 19 | ZIO 20 | .succeed(SystemPropertyCredentialsProvider.create()) 21 | .tap(c => ZIO.attemptBlocking(c.resolveCredentials())) 22 | .mapError(err => InvalidCredentials(err.getMessage)) 23 | 24 | val env: IO[InvalidCredentials, EnvironmentVariableCredentialsProvider] = 25 | ZIO 26 | .succeed(EnvironmentVariableCredentialsProvider.create()) 27 | .tap(c => 28 | ZIO 29 | .attemptBlocking(c.resolveCredentials()) 30 | .mapError(err => InvalidCredentials(err.getMessage)) 31 | ) 32 | 33 | val profile: ZIO[Scope, InvalidCredentials, ProfileCredentialsProvider] = 34 | profile(None) 35 | 36 | def profile(name: Option[String]): ZIO[Scope, InvalidCredentials, ProfileCredentialsProvider] = 37 | ZIO 38 | .fromAutoCloseable(ZIO.succeed(ProfileCredentialsProvider.create(name.orNull))) 39 | .tap(c => 40 | ZIO 41 | .attemptBlocking(c.resolveCredentials()) 42 | .mapError(err => InvalidCredentials(err.getMessage)) 43 | ) 44 | 45 | val container: ZIO[Scope, InvalidCredentials, ContainerCredentialsProvider] = 46 | ZIO 47 | .fromAutoCloseable( 48 | ZIO.succeed( 49 | ContainerCredentialsProvider 50 | .builder() 51 | .build() 52 | ) 53 | ) 54 | .tap(c => ZIO.attemptBlocking(c.resolveCredentials())) 55 | .mapError(err => InvalidCredentials(err.getMessage)) 56 | 57 | val instanceProfile: ZIO[Scope, InvalidCredentials, InstanceProfileCredentialsProvider] = 58 | ZIO 59 | .fromAutoCloseable( 60 | ZIO.succeed( 61 | InstanceProfileCredentialsProvider 62 | .create() 63 | ) 64 | ) 65 | .tap(c => ZIO.attemptBlocking(c.resolveCredentials())) 66 | .mapError(err => InvalidCredentials(err.getMessage)) 67 | 68 | /** 69 | * Use of this layer requires the awssdk sts module to be on the classpath; 70 | * zio-s3 depends on this library by default 71 | */ 72 | val webIdentity: ZIO[Scope, InvalidCredentials, WebIdentityTokenFileCredentialsProvider] = 73 | ZIO 74 | .succeed( 75 | WebIdentityTokenFileCredentialsProvider 76 | .create() 77 | ) 78 | .tap(c => ZIO.attemptBlocking(c.resolveCredentials())) 79 | .mapError(err => 
InvalidCredentials(err.getMessage)) 80 | 81 | /** 82 | * Use default chaining strategy to fetch credentials 83 | */ 84 | val default: ZIO[Scope, InvalidCredentials, AwsCredentialsProvider] = 85 | ZIO.fromAutoCloseable( 86 | ZIO.succeed(DefaultCredentialsProvider.create()) 87 | ) 88 | } 89 | -------------------------------------------------------------------------------- /zio-s3/src/main/scala/zio/s3/s3options.scala: -------------------------------------------------------------------------------- 1 | package zio.s3 2 | 3 | import software.amazon.awssdk.services.s3.model.ObjectCannedACL 4 | 5 | /** 6 | * The options for listing objects inside a bucket. 7 | * 8 | * @param prefix filter all object identifiers which start with this `prefix` 9 | * @param maxKeys max total number of objects, default value is 1000 elements 10 | * @param delimiter A delimiter is a character you use to group keys, default value is empty 11 | * @param starAfter Starts listing after this specified key. StartAfter can be any key in the bucket, default value is empty 12 | */ 13 | final case class ListObjectOptions( 14 | prefix: Option[String], 15 | maxKeys: Long, 16 | delimiter: Option[String], 17 | starAfter: Option[String] 18 | ) 19 | 20 | object ListObjectOptions { 21 | val default: ListObjectOptions = ListObjectOptions(None, MaxKeys.Max, None, None) 22 | 23 | def from(prefix: String, maxKeys: Long): ListObjectOptions = 24 | ListObjectOptions(Option(prefix), maxKeys, None, None) 25 | 26 | def fromMaxKeys(maxKeys: Long): ListObjectOptions = 27 | ListObjectOptions(None, maxKeys, None, None) 28 | 29 | def fromStartAfter(startAfter: String): ListObjectOptions = 30 | ListObjectOptions(None, MaxKeys.Max, None, Option(startAfter)) 31 | } 32 | 33 | object MaxKeys { 34 | val Max: Long = 1000L 35 | } 36 | 37 | /** 38 | * The options for the multipart upload and the put object request. 39 | * 40 | * @param metadata the user-defined metadata without the "x-amz-meta-" prefix 41 | * @param cannedAcl a canned acl, defaults to "private" 42 | * @param contentType the content type of the object (application/json, application/zip, text/plain, ...) 
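 *
 * A minimal sketch (illustrative, using the constructors defined below; the metadata key/value is made up):
 * {{{
 * val json     = UploadOptions.fromContentType("application/json")
 * val withMeta = UploadOptions.from(Map("origin" -> "docs"), "text/plain")
 * }}}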
 43 | */ 44 | final case class UploadOptions( 45 | metadata: Map[String, String], 46 | cannedAcl: ObjectCannedACL, 47 | contentType: Option[String] 48 | ) 49 | 50 | object UploadOptions { 51 | val default: UploadOptions = UploadOptions(Map.empty, ObjectCannedACL.PRIVATE, None) 52 | 53 | def from(metadata: Map[String, String], contentType: String): UploadOptions = 54 | UploadOptions(metadata, ObjectCannedACL.PRIVATE, Option(contentType)) 55 | 56 | def fromContentType(contentType: String): UploadOptions = 57 | UploadOptions(Map.empty, ObjectCannedACL.PRIVATE, Option(contentType)) 58 | 59 | def fromMetadata(metadata: Map[String, String]): UploadOptions = 60 | UploadOptions(metadata, ObjectCannedACL.PRIVATE, None) 61 | } 62 | 63 | /** 64 | * The upload options that are specific to multipart uploads 65 | * 66 | * @param uploadOptions [[UploadOptions]] 67 | * @param partSize the size of the part in bytes, the minimum is 5 MB 68 | */ 69 | final case class MultipartUploadOptions( 70 | uploadOptions: UploadOptions, 71 | partSize: Int 72 | ) 73 | 74 | object MultipartUploadOptions { 75 | val default: MultipartUploadOptions = MultipartUploadOptions(UploadOptions.default, PartSize.Min) 76 | 77 | def fromUploadOptions(options: UploadOptions): MultipartUploadOptions = 78 | MultipartUploadOptions(options, PartSize.Min) 79 | 80 | def fromPartSize(partSize: Int): MultipartUploadOptions = 81 | MultipartUploadOptions(UploadOptions.default, partSize) 82 | } 83 | 84 | object PartSize { 85 | final val Kilo: Int = 1024 86 | final val Mega: Int = 1024 * Kilo 87 | final val Giga: Int = 1024 * Mega 88 | 89 | // the minimum part size is 5 MB, required by the amazon api 90 | final val Min: Int = 5 * Mega 91 | } 92 | -------------------------------------------------------------------------------- /.github/workflows/site.yml: -------------------------------------------------------------------------------- 1 | # This file was autogenerated using `zio-sbt-website` via `sbt generateGithubWorkflow` 2 | # task and should be included in the git repository. Please do not edit it manually. 
 3 | 4 | name: Website 5 | 'on': 6 | workflow_dispatch: {} 7 | release: 8 | types: 9 | - published 10 | push: 11 | branches: 12 | - series/2.x 13 | pull_request: {} 14 | jobs: 15 | build: 16 | name: Build and Test 17 | runs-on: ubuntu-latest 18 | if: ${{ github.event_name == 'pull_request' }} 19 | steps: 20 | - name: Git Checkout 21 | uses: actions/checkout@v3.3.0 22 | with: 23 | fetch-depth: '0' 24 | - name: Setup Action 25 | uses: coursier/setup-action@v1 26 | with: 27 | jvm: temurin:21 28 | apps: sbt 29 | - name: Check if the README file is up to date 30 | run: sbt docs/checkReadme 31 | - name: Check artifacts build process 32 | run: sbt +publishLocal 33 | - name: Check website build process 34 | run: sbt docs/clean; sbt docs/buildWebsite 35 | publish-docs: 36 | name: Publish Docs 37 | runs-on: ubuntu-latest 38 | if: ${{ ((github.event_name == 'release') && (github.event.action == 'published')) || (github.event_name == 'workflow_dispatch') }} 39 | steps: 40 | - name: Git Checkout 41 | uses: actions/checkout@v3.3.0 42 | with: 43 | fetch-depth: '0' 44 | - name: Setup Action 45 | uses: coursier/setup-action@v1 46 | with: 47 | jvm: temurin:21 48 | apps: sbt 49 | - name: Setup NodeJs 50 | uses: actions/setup-node@v3 51 | with: 52 | node-version: 16.x 53 | registry-url: https://registry.npmjs.org 54 | - name: Publish Docs to NPM Registry 55 | run: sbt docs/publishToNpm 56 | env: 57 | NODE_AUTH_TOKEN: ${{ secrets.NPM_TOKEN }} 58 | generate-readme: 59 | name: Generate README 60 | runs-on: ubuntu-latest 61 | if: ${{ (github.event_name == 'push') || ((github.event_name == 'release') && (github.event.action == 'published')) }} 62 | steps: 63 | - name: Git Checkout 64 | uses: actions/checkout@v3.3.0 65 | with: 66 | ref: ${{ github.head_ref }} 67 | fetch-depth: '0' 68 | - name: Setup Action 69 | uses: coursier/setup-action@v1 70 | with: 71 | jvm: temurin:21 72 | apps: sbt 73 | - name: Generate Readme 74 | run: sbt docs/generateReadme 75 | - name: Commit Changes 76 | run: | 77 | git config --local user.email "github-actions[bot]@users.noreply.github.com" 78 | git config --local user.name "github-actions[bot]" 79 | git add README.md 80 | git commit -m "Update README.md" || echo "No changes to commit" 81 | - name: Create Pull Request 82 | uses: peter-evans/create-pull-request@v4.2.3 83 | with: 84 | body: |- 85 | Autogenerated changes after running the `sbt docs/generateReadme` command of the [zio-sbt-website](https://zio.dev/zio-sbt) plugin. 86 | 87 | I will automatically update the README.md file whenever there is a new change to README.md, e.g. 88 | - After each release, I will update the version in the installation section. 89 | - After any changes to the "docs/index.md" file, I will update the README.md file accordingly. 90 | branch: zio-sbt-website/update-readme 91 | commit-message: Update README.md 92 | delete-branch: true 93 | title: Update README.md 94 | -------------------------------------------------------------------------------- /.github/workflows/ci.yml: -------------------------------------------------------------------------------- 1 | name: CI 2 | 3 | env: 4 | JDK_JAVA_OPTIONS: -XX:+PrintCommandLineFlags # JDK_JAVA_OPTIONS is _the_ env. 
variable to use for modern Java 5 | JVM_OPTS: -XX:+PrintCommandLineFlags # for Java 8 only (sadly, it is not modern enough for JDK_JAVA_OPTIONS) 6 | 7 | on: 8 | pull_request: 9 | push: 10 | branches: ['series/2.x'] 11 | release: 12 | types: 13 | - published 14 | 15 | jobs: 16 | lint: 17 | runs-on: ubuntu-latest 18 | timeout-minutes: 30 19 | steps: 20 | - name: Checkout current branch 21 | uses: actions/checkout@v3.3.0 22 | with: 23 | fetch-depth: 0 24 | - name: Setup Action 25 | uses: coursier/setup-action@v1 26 | with: 27 | jvm: temurin:21 28 | apps: sbt 29 | - name: Cache scala dependencies 30 | uses: coursier/cache-action@v6 31 | - name: Lint code 32 | run: sbt check 33 | 34 | mdoc: 35 | runs-on: ubuntu-latest 36 | timeout-minutes: 60 37 | steps: 38 | - name: Checkout current branch 39 | uses: actions/checkout@v3.3.0 40 | - name: Setup Action 41 | uses: coursier/setup-action@v1 42 | with: 43 | jvm: temurin:21 44 | apps: sbt 45 | - name: Cache scala dependencies 46 | uses: coursier/cache-action@v6 47 | - name: Check Document Generation 48 | run: sbt docs/compileDocs 49 | 50 | test: 51 | runs-on: ubuntu-latest 52 | timeout-minutes: 30 53 | strategy: 54 | fail-fast: false 55 | matrix: 56 | java: ['11', '17', '21'] 57 | scala: ['2.12.20', '2.13.16', '3.3.6'] 58 | steps: 59 | - name: Checkout current branch 60 | uses: actions/checkout@v3.3.0 61 | with: 62 | fetch-depth: 0 63 | - name: Setup Action 64 | uses: coursier/setup-action@v1 65 | with: 66 | jvm: temurin:${{ matrix.java }} 67 | apps: sbt 68 | - name: Cache scala dependencies 69 | uses: coursier/cache-action@v6 70 | - name: Start containers 71 | run: | 72 | docker compose -f "docker-compose.yml" up -d --build 73 | - name: Test 74 | run: ./sbt ++${{ matrix.scala }} test 75 | 76 | testJvms: 77 | runs-on: ubuntu-latest 78 | timeout-minutes: 30 79 | strategy: 80 | fail-fast: false 81 | matrix: 82 | java: ['11', '17', '21'] 83 | steps: 84 | - name: Checkout current branch 85 | uses: actions/checkout@v3.3.0 86 | with: 87 | fetch-depth: 0 88 | - name: Setup Action 89 | uses: coursier/setup-action@v1 90 | with: 91 | jvm: temurin:${{ matrix.java }} 92 | apps: sbt 93 | - name: Cache scala dependencies 94 | uses: coursier/cache-action@v6 95 | - name: Start containers 96 | run: | 97 | docker compose -f "docker-compose.yml" up -d --build 98 | - name: Test 99 | run: ./sbt test 100 | 101 | ci: 102 | runs-on: ubuntu-latest 103 | needs: [lint, mdoc, test, testJvms] 104 | steps: 105 | - name: Report successful build 106 | run: echo "ci passed" 107 | 108 | publish: 109 | runs-on: ubuntu-latest 110 | timeout-minutes: 30 111 | needs: [ci] 112 | if: github.event_name != 'pull_request' 113 | steps: 114 | - name: Checkout current branch 115 | uses: actions/checkout@v3.3.0 116 | with: 117 | fetch-depth: 0 118 | - name: Setup Action 119 | uses: coursier/setup-action@v1 120 | with: 121 | jvm: temurin:21 122 | apps: sbt 123 | - name: Cache scala dependencies 124 | uses: coursier/cache-action@v6 125 | - name: Release artifacts 126 | run: sbt ci-release 127 | env: 128 | PGP_PASSPHRASE: ${{ secrets.PGP_PASSPHRASE }} 129 | PGP_SECRET: ${{ secrets.PGP_SECRET }} 130 | SONATYPE_PASSWORD: ${{ secrets.SONATYPE_PASSWORD }} 131 | SONATYPE_USERNAME: ${{ secrets.SONATYPE_USERNAME }} 132 | -------------------------------------------------------------------------------- /zio-s3/src/test/scala/zio/s3/S3ProvidersTest.scala: -------------------------------------------------------------------------------- 1 | package zio.s3 2 | 3 | import software.amazon.awssdk.auth.credentials.{ 
AwsBasicCredentials, AwsSessionCredentials } 4 | import software.amazon.awssdk.regions.Region 5 | import zio.s3.providers._ 6 | import zio.test.Assertion._ 7 | import zio.test.TestAspect._ 8 | import zio.test._ 9 | import zio.{ Scope, UIO, ZIO } 10 | 11 | object S3ProvidersTest extends ZIOSpecDefault { 12 | 13 | def setProps(props: (String, String)*): UIO[Unit] = 14 | ZIO.succeed { 15 | props.foreach { 16 | case (k, v) => 17 | System.setProperty(k, v) 18 | } 19 | } 20 | 21 | def unsetProps(keys: String*): UIO[Unit] = 22 | ZIO.succeed { 23 | keys.foreach(System.clearProperty) 24 | } 25 | 26 | def spec: Spec[TestEnvironment with Scope, Any] = 27 | suite("Providers")( 28 | test("basic credentials") { 29 | ZIO 30 | .scoped[Any](basic("k", "v").map(_.resolveCredentials())) 31 | .map(res => assertTrue(res == AwsBasicCredentials.create("k", "v"))) 32 | }, 33 | test("session credentials") { 34 | ZIO 35 | .scoped[Any](session("k", "v", "t").map(_.resolveCredentials())) 36 | .map(res => assertTrue(res == AwsSessionCredentials.create("k", "v", "t"))) 37 | }, 38 | test("basic credentials default fallback const") { 39 | ZIO 40 | .scoped[Any]((env <> basic("k", "v")).map(_.resolveCredentials())) 41 | .map(res => assertTrue(res == AwsBasicCredentials.create("k", "v"))) 42 | }, 43 | test("cred in system properties") { 44 | for { 45 | cred <- ZIO.scoped[Any](system.flatMap(p => ZIO.attempt(p.resolveCredentials()))) 46 | } yield assertTrue(cred == AwsBasicCredentials.create("k1", "s1")) 47 | } @@ flaky @@ around( 48 | setProps(("aws.accessKeyId", "k1"), ("aws.secretAccessKey", "s1")), 49 | unsetProps("aws.accessKeyId", "aws.secretAccessKey") 50 | ), 51 | test("no cred in system properties") { 52 | for { 53 | failure <- ZIO.scoped[Any](system).flip.map(_.getMessage) 54 | } yield assert(failure)(isNonEmptyString) 55 | } @@ around( 56 | unsetProps("aws.accessKeyId", "aws.secretAccessKey"), 57 | ZIO.unit 58 | ), 59 | test("no cred in environment properties") { 60 | for { 61 | failure <- ZIO.scoped[Any](env).flip.map(_.getMessage) 62 | } yield assert(failure)(isNonEmptyString) 63 | }, 64 | test("no cred in profile") { 65 | for { 66 | failure <- ZIO.scoped[Any](profile).flip.map(_.getMessage) 67 | } yield assert(failure)(isNonEmptyString) 68 | }, 69 | test("no cred in named profile") { 70 | for { 71 | failure <- ZIO.scoped[Any](profile(Some("name"))).flip.map(_.getMessage) 72 | } yield assert(failure)(isNonEmptyString) 73 | }, 74 | test("no cred in container") { 75 | for { 76 | failure <- ZIO.scoped[Any](container).flip.map(_.getMessage) 77 | } yield assert(failure)(isNonEmptyString) 78 | }, 79 | test("no cred in instance profile credentials") { 80 | for { 81 | failure <- ZIO.scoped[Any](instanceProfile).flip.map(_.getMessage) 82 | } yield assert(failure)(isNonEmptyString) 83 | }, 84 | test("no cred in webidentity credentials") { 85 | for { 86 | failure <- ZIO.scoped[Any](webIdentity).flip.map(_.getMessage) 87 | } yield assert(failure)(isNonEmptyString) 88 | }, 89 | test("settings from invalid creds") { 90 | for { 91 | failure <- ZIO 92 | .scoped[Any]( 93 | settings( 94 | Region.AF_SOUTH_1, 95 | ZIO.scoped[Any](system).map(_.resolveCredentials()) 96 | ).build 97 | ) 98 | .flip 99 | } yield assert(failure.getMessage)(isNonEmptyString) 100 | }, 101 | test("no cred when chain all providers") { 102 | for { 103 | failure <- ZIO.scoped[Any](default.flatMap(c => ZIO.attempt(c.resolveCredentials()))).flip.map(_.getMessage) 104 | } yield assert(failure)(isNonEmptyString) 105 | } 106 | ) @@ sequential 107 | } 108 | 
-------------------------------------------------------------------------------- /docs/index.md: -------------------------------------------------------------------------------- 1 | --- 2 | id: index 3 | title: "Introduction to ZIO S3" 4 | sidebar_label: "ZIO S3" 5 | --- 6 | 7 | [ZIO S3](https://github.com/zio/zio-s3) is a thin wrapper over S3 async client for ZIO. 8 | 9 | @PROJECT_BADGES@ 10 | 11 | ## Introduction 12 | 13 | ZIO-S3 is a thin wrapper over the s3 async java client. It exposes the main operations of the s3 java client. 14 | 15 | ```scala 16 | import software.amazon.awssdk.auth.credentials.AwsBasicCredentials 17 | import software.amazon.awssdk.regions.Region 18 | import zio.s3._ 19 | import zio.stream.{ZSink, ZStream} 20 | import software.amazon.awssdk.services.s3.model.S3Exception 21 | 22 | // list all buckets available 23 | listBuckets.provideLayer( 24 | live(Region.US_EAST_1, AwsBasicCredentials.create("accessKeyId", "secretAccessKey")) 25 | ) 26 | 27 | // list all objects of all buckets 28 | val l2: ZStream[S3, S3Exception, String] = (for { 29 | bucket <- ZStream.fromIterableZIO(listBuckets) 30 | obj <- listAllObjects(bucket.name) 31 | } yield obj.bucketName + "/" + obj.key).provideLayer( 32 | live(Region.US_EAST_1, AwsBasicCredentials.create("accessKeyId", "secretAccessKey")) 33 | ) 34 | ``` 35 | 36 | All available s3 combinators and operations are available in the package object `zio.s3`; you only need to `import zio.s3._` 37 | 38 | ## Installation 39 | 40 | In order to use this library, we need to add the following line in our `build.sbt` file: 41 | 42 | ```scala 43 | libraryDependencies += "dev.zio" %% "zio-s3" % "@VERSION@" 44 | ``` 45 | 46 | ## Example 1 47 | 48 | Let's try an example of creating a bucket and adding an object into it. To run this example, we need to run an instance of _Minio_, which is an S3-compatible object storage: 49 | 50 | ```bash 51 | docker run -p 9000:9000 -e MINIO_ROOT_USER=MyKey -e MINIO_ROOT_PASSWORD=MySecret minio/minio server --compat /data 52 | ``` 53 | 54 | In this example we create a bucket, add a JSON object to it, and then retrieve it: 55 | 56 | ```scala mdoc:compile-only 57 | import software.amazon.awssdk.auth.credentials.AwsBasicCredentials 58 | import software.amazon.awssdk.regions.Region 59 | import zio._ 60 | import zio.s3._ 61 | import zio.stream.{ZStream, ZPipeline} 62 | import zio.Chunk 63 | 64 | import java.net.URI 65 | 66 | object ZIOS3Example extends ZIOAppDefault { 67 | 68 | val myApp = for { 69 | _ <- createBucket("docs") 70 | json = Chunk.fromArray("""{ "id" : 1 , "name" : "A1" }""".getBytes) 71 | _ <- putObject( 72 | bucketName = "docs", 73 | key = "doc1", 74 | contentLength = json.length.toLong, 75 | content = ZStream.fromChunk(json), 76 | options = UploadOptions.fromContentType("application/json") 77 | ) 78 | _ <- getObject("docs", "doc1") 79 | .via(ZPipeline.utf8Decode) 80 | .foreach(Console.printLine(_)) 81 | } yield () 82 | 83 | def run = 84 | myApp 85 | .provide( 86 | live( 87 | Region.CA_CENTRAL_1, 88 | AwsBasicCredentials.create("MyKey", "MySecret"), 89 | Some(URI.create("http://localhost:9000")), 90 | forcePathStyle = Some(true) // Required for path-style S3 requests (MinIO by default uses them) 91 | ) 92 | ) 93 | } 94 | ``` 95 | 96 | ## Example 2 97 | 98 | ```scala mdoc:compile-only 99 | import software.amazon.awssdk.services.s3.model.S3Exception 100 | import zio._ 101 | import zio.stream.{ ZSink, ZStream } 102 | import zio.s3._ 103 | 104 | // upload 105 | val json: Chunk[Byte] = Chunk.fromArray("""{ "id" : 1 , "name" : "A1" 
}""".getBytes) 106 | val up: ZIO[S3, S3Exception, Unit] = putObject( 107 | "bucket-1", 108 | "user.json", 109 | json.length.toLong, 110 | ZStream.fromChunk(json), 111 | UploadOptions.fromContentType("application/json") 112 | ) 113 | 114 | // multipartUpload 115 | import java.io.FileInputStream 116 | import java.nio.file.Paths 117 | 118 | val is = ZStream.fromInputStream(new FileInputStream(Paths.get("/my/path/to/myfile.zip").toFile)) 119 | val proc2: ZIO[S3, S3Exception, Unit] = 120 | multipartUpload( 121 | "bucket-1", 122 | "upload/myfile.zip", 123 | is, 124 | MultipartUploadOptions.fromUploadOptions(UploadOptions.fromContentType("application/zip")) 125 | )(4) 126 | 127 | // download 128 | import java.io.OutputStream 129 | 130 | val os: OutputStream = ??? 131 | val proc3: ZIO[S3, Exception, Long] = getObject("bucket-1", "upload/myfile.zip").run(ZSink.fromOutputStream(os)) 132 | ``` 133 | 134 | ## Support any commands? 135 | 136 | If you need a method which is not wrapped by the library, you can have access to underlying S3 client in a safe manner by using 137 | 138 | ```scala 139 | import java.util.concurrent.CompletableFuture 140 | import zio.s3._ 141 | import software.amazon.awssdk.services.s3.S3AsyncClient 142 | 143 | def execute[T](f: S3AsyncClient => CompletableFuture[T]) 144 | ``` 145 | -------------------------------------------------------------------------------- /zio-s3/src/main/scala/zio/s3/package.scala: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2017-2020 John A. De Goes and the ZIO Contributors 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License. 6 | * You may obtain a copy of the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | * 10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | * See the License for the specific language governing permissions and 14 | * limitations under the License. 
 15 | */ 16 | 17 | package zio 18 | 19 | import software.amazon.awssdk.auth.credentials.{ AwsCredentials, AwsCredentialsProvider } 20 | import software.amazon.awssdk.regions.Region 21 | import software.amazon.awssdk.services.s3.S3AsyncClient 22 | import software.amazon.awssdk.services.s3.model.S3Exception 23 | import zio.nio.file.{ Path => ZPath } 24 | import zio.s3.S3Bucket.S3BucketListing 25 | import zio.s3.errors._ 26 | import zio.s3.providers.const 27 | import zio.stream.ZStream 28 | 29 | import java.net.URI 30 | import java.util.concurrent.CompletableFuture 31 | 32 | package object s3 { 33 | type S3Stream[A] = ZStream[S3, S3Exception, A] 34 | 35 | def settings[R](region: Region, cred: ZIO[R, S3Exception, AwsCredentials]): ZLayer[R, S3Exception, S3Settings] = 36 | ZLayer(cred.flatMap(S3Settings.from(region, _))) 37 | 38 | def live( 39 | region: Region, 40 | credentials: AwsCredentials, 41 | uriEndpoint: Option[URI] = None, 42 | forcePathStyle: Option[Boolean] = None 43 | ): Layer[S3Exception, S3] = 44 | liveZIO(region, const(credentials), uriEndpoint, forcePathStyle) 45 | 46 | def liveZIO[R]( 47 | region: Region, 48 | provider: RIO[R, AwsCredentialsProvider], 49 | uriEndpoint: Option[URI] = None, 50 | forcePathStyle: Option[Boolean] = None 51 | ): ZLayer[R, S3Exception, S3] = 52 | ZLayer.scoped[R]( 53 | ZIO 54 | .fromEither(S3Region.from(region)) 55 | .flatMap(Live.connect[R](_, provider, uriEndpoint, forcePathStyle)) 56 | ) 57 | 58 | val live: ZLayer[S3Settings, ConnectionError, S3] = ZLayer.scoped( 59 | ZIO.serviceWithZIO[S3Settings](s => Live.connect(s.s3Region, const(s.credentials), None)) 60 | ) 61 | 62 | def stub(path: ZPath): ZLayer[Any, Nothing, S3] = 63 | ZLayer.fromZIO(Test.connect(path)) 64 | 65 | def listAllObjects(bucketName: String): S3Stream[S3ObjectSummary] = 66 | ZStream.serviceWithStream[S3](_.listAllObjects(bucketName)) 67 | 68 | def listAllObjects(bucketName: String, options: ListObjectOptions): S3Stream[S3ObjectSummary] = 69 | ZStream.serviceWithStream[S3](_.listAllObjects(bucketName, options)) 70 | 71 | def paginate(initialListing: S3ObjectListing): S3Stream[S3ObjectListing] = 72 | ZStream.serviceWithStream[S3](_.paginate(initialListing)) 73 | 74 | def streamLines(bucketName: String, key: String): S3Stream[String] = 75 | ZStream.serviceWithStream[S3](_.streamLines(bucketName, key)) 76 | 77 | def createBucket(bucketName: String): ZIO[S3, S3Exception, Unit] = 78 | ZIO.serviceWithZIO(_.createBucket(bucketName)) 79 | 80 | def deleteBucket(bucketName: String): ZIO[S3, S3Exception, Unit] = 81 | ZIO.serviceWithZIO(_.deleteBucket(bucketName)) 82 | 83 | def isBucketExists(bucketName: String): ZIO[S3, S3Exception, Boolean] = 84 | ZIO.serviceWithZIO(_.isBucketExists(bucketName)) 85 | 86 | val listBuckets: ZIO[S3, S3Exception, S3BucketListing] = 87 | ZIO.serviceWithZIO(_.listBuckets) 88 | 89 | def deleteObject(bucketName: String, key: String): ZIO[S3, S3Exception, Unit] = 90 | ZIO.serviceWithZIO(_.deleteObject(bucketName, key)) 91 | 92 | def getObject(bucketName: String, key: String): ZStream[S3, S3Exception, Byte] = 93 | ZStream.serviceWithStream(_.getObject(bucketName, key)) 94 | 95 | def getObjectMetadata(bucketName: String, key: String): ZIO[S3, S3Exception, ObjectMetadata] = 96 | ZIO.serviceWithZIO(_.getObjectMetadata(bucketName, key)) 97 | 98 | /** 99 | * Same as listObjects, with default values: an empty prefix and a maximum of `1000` objects 100 | * 101 | * @param bucketName name of the bucket 102 | */ 103 | def listObjects(bucketName: String): 
ZIO[S3, S3Exception, S3ObjectListing] = 104 | ZIO.serviceWithZIO(_.listObjects(bucketName)) 105 | 106 | def listObjects(bucketName: String, options: ListObjectOptions): ZIO[S3, S3Exception, S3ObjectListing] = 107 | ZIO.serviceWithZIO(_.listObjects(bucketName, options)) 108 | 109 | def getNextObjects(listing: S3ObjectListing): ZIO[S3, S3Exception, S3ObjectListing] = 110 | ZIO.serviceWithZIO(_.getNextObjects(listing)) 111 | 112 | def putObject[R]( 113 | bucketName: String, 114 | key: String, 115 | contentLength: Long, 116 | content: ZStream[R, Throwable, Byte], 117 | options: UploadOptions = UploadOptions.default, 118 | contentMD5: Option[String] = None 119 | ): ZIO[S3 with R, S3Exception, Unit] = 120 | ZIO.serviceWithZIO[S3](_.putObject(bucketName, key, contentLength, content, options, contentMD5)) 121 | 122 | /** 123 | * Same as multipartUpload with default parallelism = 1 124 | * 125 | * @param bucketName name of the bucket 126 | * @param key unique object identifier 127 | * @param content object data 128 | * @param options the optional configurations of the multipart upload 129 | */ 130 | def multipartUpload[R]( 131 | bucketName: String, 132 | key: String, 133 | content: ZStream[R, Throwable, Byte], 134 | options: MultipartUploadOptions = MultipartUploadOptions.default 135 | )(parallelism: Int): ZIO[S3 with R, S3Exception, Unit] = 136 | ZIO.serviceWithZIO[S3](_.multipartUpload(bucketName, key, content, options)(parallelism)) 137 | 138 | def execute[T](f: S3AsyncClient => CompletableFuture[T]): ZIO[S3, S3Exception, T] = 139 | ZIO.serviceWithZIO(_.execute(f)) 140 | } 141 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | [//]: # (This file was autogenerated using `zio-sbt-website` plugin via `sbt generateReadme` command.) 2 | [//]: # (So please do not edit it manually. Instead, change "docs/index.md" file or sbt setting keys) 3 | [//]: # (e.g. "readmeDocumentation" and "readmeSupport".) 4 | 5 | # ZIO S3 6 | 7 | [ZIO S3](https://github.com/zio/zio-s3) is a thin wrapper over S3 async client for ZIO. 8 | 9 | [![Production Ready](https://img.shields.io/badge/Project%20Stage-Production%20Ready-brightgreen.svg)](https://github.com/zio/zio/wiki/Project-Stages) ![CI Badge](https://github.com/zio/zio-s3/workflows/CI/badge.svg) [![Sonatype Releases](https://img.shields.io/nexus/r/https/oss.sonatype.org/dev.zio/zio-s3_2.13.svg?label=Sonatype%20Release)](https://oss.sonatype.org/content/repositories/releases/dev/zio/zio-s3_2.13/) [![Sonatype Snapshots](https://img.shields.io/nexus/s/https/oss.sonatype.org/dev.zio/zio-s3_2.13.svg?label=Sonatype%20Snapshot)](https://oss.sonatype.org/content/repositories/snapshots/dev/zio/zio-s3_2.13/) [![javadoc](https://javadoc.io/badge2/dev.zio/zio-s3-docs_2.13/javadoc.svg)](https://javadoc.io/doc/dev.zio/zio-s3-docs_2.13) [![ZIO S3](https://img.shields.io/github/stars/zio/zio-s3?style=social)](https://github.com/zio/zio-s3) 10 | 11 | ## Introduction 12 | 13 | ZIO-S3 is a thin wrapper over the s3 async java client. It exposes the main operations of the s3 java client. 
 14 | 15 | ```scala 16 | import software.amazon.awssdk.auth.credentials.AwsBasicCredentials 17 | import software.amazon.awssdk.regions.Region 18 | import zio.s3._ 19 | import zio.stream.{ZSink, ZStream} 20 | import software.amazon.awssdk.services.s3.model.S3Exception 21 | 22 | // list all buckets available 23 | listBuckets.provideLayer( 24 | live(Region.US_EAST_1, AwsBasicCredentials.create("accessKeyId", "secretAccessKey")) 25 | ) 26 | 27 | // list all objects of all buckets 28 | val l2: ZStream[S3, S3Exception, String] = (for { 29 | bucket <- ZStream.fromIterableZIO(listBuckets) 30 | obj <- listAllObjects(bucket.name) 31 | } yield obj.bucketName + "/" + obj.key).provideLayer( 32 | live(Region.US_EAST_1, AwsBasicCredentials.create("accessKeyId", "secretAccessKey")) 33 | ) 34 | ``` 35 | 36 | All available s3 combinators and operations are available in the package object `zio.s3`; you only need to `import zio.s3._` 37 | 38 | ## Installation 39 | 40 | In order to use this library, we need to add the following line in our `build.sbt` file: 41 | 42 | ```scala 43 | libraryDependencies += "dev.zio" %% "zio-s3" % "0.4.3" 44 | ``` 45 | 46 | ## Example 1 47 | 48 | Let's try an example of creating a bucket and adding an object into it. To run this example, we need to run an instance of _Minio_, which is an S3-compatible object storage: 49 | 50 | ```bash 51 | docker run -p 9000:9000 -e MINIO_ROOT_USER=MyKey -e MINIO_ROOT_PASSWORD=MySecret minio/minio server --compat /data 52 | ``` 53 | 54 | In this example we create a bucket, add a JSON object to it, and then retrieve it: 55 | 56 | ```scala 57 | import software.amazon.awssdk.auth.credentials.AwsBasicCredentials 58 | import software.amazon.awssdk.regions.Region 59 | import zio._ 60 | import zio.s3._ 61 | import zio.stream.{ZStream, ZPipeline} 62 | import zio.Chunk 63 | 64 | import java.net.URI 65 | 66 | object ZIOS3Example extends ZIOAppDefault { 67 | 68 | val myApp = for { 69 | _ <- createBucket("docs") 70 | json = Chunk.fromArray("""{ "id" : 1 , "name" : "A1" }""".getBytes) 71 | _ <- putObject( 72 | bucketName = "docs", 73 | key = "doc1", 74 | contentLength = json.length.toLong, 75 | content = ZStream.fromChunk(json), 76 | options = UploadOptions.fromContentType("application/json") 77 | ) 78 | _ <- getObject("docs", "doc1") 79 | .via(ZPipeline.utf8Decode) 80 | .foreach(Console.printLine(_)) 81 | } yield () 82 | 83 | def run = 84 | myApp 85 | .provide( 86 | live( 87 | Region.CA_CENTRAL_1, 88 | AwsBasicCredentials.create("MyKey", "MySecret"), 89 | Some(URI.create("http://localhost:9000")), 90 | forcePathStyle = Some(true) // Required for path-style S3 requests (MinIO by default uses them) 91 | ) 92 | ) 93 | } 94 | ``` 95 | 96 | ## Example 2 97 | 98 | ```scala 99 | import software.amazon.awssdk.services.s3.model.S3Exception 100 | import zio._ 101 | import zio.stream.{ ZSink, ZStream } 102 | import zio.s3._ 103 | 104 | // upload 105 | val json: Chunk[Byte] = Chunk.fromArray("""{ "id" : 1 , "name" : "A1" }""".getBytes) 106 | val up: ZIO[S3, S3Exception, Unit] = putObject( 107 | "bucket-1", 108 | "user.json", 109 | json.length.toLong, 110 | ZStream.fromChunk(json), 111 | UploadOptions.fromContentType("application/json") 112 | ) 113 | 114 | // multipartUpload 115 | import java.io.FileInputStream 116 | import java.nio.file.Paths 117 | 118 | val is = ZStream.fromInputStream(new FileInputStream(Paths.get("/my/path/to/myfile.zip").toFile)) 119 | val proc2: ZIO[S3, S3Exception, Unit] = 120 | multipartUpload( 121 | "bucket-1", 122 | "upload/myfile.zip", 123 | is, 124 | 
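// note: fromUploadOptions keeps the default 5 MB minimum part size; the trailing (4) below sets the upload parallelism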
MultipartUploadOptions.fromUploadOptions(UploadOptions.fromContentType("application/zip")) 125 | )(4) 126 | 127 | // download 128 | import java.io.OutputStream 129 | 130 | val os: OutputStream = ??? 131 | val proc3: ZIO[S3, Exception, Long] = getObject("bucket-1", "upload/myfile.zip").run(ZSink.fromOutputStream(os)) 132 | ``` 133 | 134 | ## Support any commands? 135 | 136 | If you need a method which is not wrapped by the library, you can safely access the underlying S3 client by using 137 | 138 | ```scala 139 | import java.util.concurrent.CompletableFuture 140 | import zio.s3._ 141 | import software.amazon.awssdk.services.s3.S3AsyncClient 142 | 143 | def execute[T](f: S3AsyncClient => CompletableFuture[T]) 144 | ``` 145 | 146 | ## Documentation 147 | 148 | Learn more on the [ZIO S3 homepage](https://zio.dev/zio-s3/)! 149 | 150 | ## Contributing 151 | 152 | For the general guidelines, see ZIO [contributor's guide](https://zio.dev/about/contributing). 153 | 154 | ## Code of Conduct 155 | 156 | See the [Code of Conduct](https://zio.dev/about/code-of-conduct) 157 | 158 | ## Support 159 | 160 | Come chat with us on [![Badge-Discord]][Link-Discord]. 161 | 162 | [Badge-Discord]: https://img.shields.io/discord/629491597070827530?logo=discord "chat on discord" 163 | [Link-Discord]: https://discord.gg/2ccFBr4 "Discord" 164 | 165 | ## License 166 | 167 | [License](LICENSE) 168 | -------------------------------------------------------------------------------- /zio-s3/src/main/scala/zio/s3/S3.scala: -------------------------------------------------------------------------------- 1 | package zio.s3 2 | 3 | import software.amazon.awssdk.services.s3.S3AsyncClient 4 | import software.amazon.awssdk.services.s3.model.S3Exception 5 | import zio.s3.S3Bucket.S3BucketListing 6 | import zio.s3.errors.DecodingException 7 | import zio.stream.{ Stream, ZPipeline, ZStream } 8 | import zio.{ IO, ZIO } 9 | 10 | import java.nio.charset.CharacterCodingException 11 | import java.util.concurrent.CompletableFuture 12 | 13 | /** 14 | * The `S3` module provides access to amazon s3 storage. 15 | * All operations are async, since we rely on the amazon async client 16 | */ 17 | trait S3 { self => 18 | 19 | /** 20 | * Create a bucket 21 | * 22 | * @param bucketName name of the bucket 23 | */ 24 | def createBucket(bucketName: String): IO[S3Exception, Unit] 25 | 26 | /** 27 | * Delete a bucket; the operation fails if the bucket is not present 28 | * 29 | * @param bucketName name of the bucket 30 | */ 31 | def deleteBucket(bucketName: String): IO[S3Exception, Unit] 32 | 33 | /** 34 | * Check if bucket exists 35 | * 36 | * @param bucketName name of the bucket 37 | */ 38 | def isBucketExists(bucketName: String): IO[S3Exception, Boolean] 39 | 40 | /** 41 | * List all available buckets 42 | */ 43 | val listBuckets: IO[S3Exception, S3BucketListing] 44 | 45 | /** 46 | * Delete an object from a bucket; if it is not present, the operation will succeed 47 | * 48 | * @param bucketName name of the bucket 49 | * @param key object identifier to remove 50 | */ 51 | def deleteObject(bucketName: String, key: String): IO[S3Exception, Unit] 52 | 53 | /** 54 | * Read an object from a bucket; the operation fails if the object is not present 55 | * 56 | * @param bucketName name of the bucket 57 | * @param key object identifier to read 58 | * @return 59 | */ 60 | def getObject(bucketName: String, key: String): Stream[S3Exception, Byte] 61 | 62 | /** 63 | * Retrieves metadata from an object without returning the object itself. 
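 * A minimal usage sketch (illustrative; bucket and key are made up), using the accessor from the `zio.s3` package object:
 * {{{
 * // read the content length of an object without downloading it
 * val size: ZIO[S3, S3Exception, Long] = getObjectMetadata("my-bucket", "dir1/hello.txt").map(_.contentLength)
 * }}}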
64 | * This operation is useful if you're only interested in an object's metadata. 65 | * @param bucketName name of the bucket 66 | * @param key object identifier to read 67 | * @return the [[ObjectMetadata]] 68 | */ 69 | def getObjectMetadata(bucketName: String, key: String): IO[S3Exception, ObjectMetadata] 70 | 71 | /** 72 | * List all objects of a specific bucket 73 | * 74 | * @param bucketName name of the bucket 75 | */ 76 | def listObjects(bucketName: String): IO[S3Exception, S3ObjectListing] = 77 | listObjects(bucketName, ListObjectOptions.default) 78 | 79 | def listObjects(bucketName: String, options: ListObjectOptions): IO[S3Exception, S3ObjectListing] 80 | 81 | /** 82 | * Fetch the next object listing from a specific object listing. 83 | * 84 | * @param listing listing to use as a start 85 | */ 86 | def getNextObjects(listing: S3ObjectListing): IO[S3Exception, S3ObjectListing] 87 | 88 | /** 89 | * Store a data object into a specific bucket 90 | * 91 | * ==Example of creating a contentMD5 option== 92 | * 93 | * The md5 option is required when the target bucket is configured with object locking, otherwise 94 | * the AWS S3 API will not accept the [[putObject]] request. 95 | * 96 | * {{{ 97 | * import software.amazon.awssdk.utils.Md5Utils 98 | * import scala.util.Random 99 | * 100 | * val bytes = Random.nextString(65536).getBytes() 101 | * val contentMD5 = Some(Md5Utils.md5AsBase64(bytes)) 102 | * }}} 103 | * 104 | * @param bucketName name of the bucket 105 | * @param key unique object identifier 106 | * @param contentLength length of the data in bytes 107 | * @param content object data 108 | * @param contentMD5 a String Option containing the MD5 hash of the content encoded as base64 109 | * @return 110 | */ 111 | def putObject[R]( 112 | bucketName: String, 113 | key: String, 114 | contentLength: Long, 115 | content: ZStream[R, Throwable, Byte], 116 | options: UploadOptions = UploadOptions.default, 117 | contentMD5: Option[String] = None 118 | ): ZIO[R, S3Exception, Unit] 119 | 120 | /** 121 | * 122 | * 123 | * Store a data object into a specific bucket; the minimum part size is 5 MB for multipart upload (a restriction of the Amazon API) 124 | * 125 | * @param bucketName name of the bucket 126 | * @param key unique object identifier 127 | * @param content object data 128 | * @param options the optional configurations of the multipart upload 129 | * @param parallelism the number of parallel requests to upload chunks 130 | */ 131 | def multipartUpload[R]( 132 | bucketName: String, 133 | key: String, 134 | content: ZStream[R, Throwable, Byte], 135 | options: MultipartUploadOptions = MultipartUploadOptions.default 136 | )(parallelism: Int): ZIO[R, S3Exception, Unit] 137 | 138 | /** 139 | * Read an object by lines 140 | * 141 | * @param bucketName name of the bucket 142 | * @param key unique key of the object 143 | */ 144 | def streamLines(bucketName: String, key: String): Stream[S3Exception, String] = 145 | (self.getObject(bucketName, key) >>> ZPipeline.utf8Decode >>> ZPipeline.splitLines).refineOrDie { 146 | case ex: S3Exception => ex 147 | case ex: CharacterCodingException => DecodingException(ex) 148 | } 149 | 150 | /** 151 | * List all descendant objects of a bucket 152 | * Fetch all objects recursively, traversing all nested directories 153 | * 154 | * @param bucketName name of the bucket 155 | * 156 | * MaxKeys defaults to 1000 elements 157 | */ 158 | def listAllObjects(bucketName: String): Stream[S3Exception, S3ObjectSummary] = 159 |
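// delegates to the two-argument variant below, using ListObjectOptions.default (maxKeys = 1000)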
listAllObjects(bucketName, ListObjectOptions.default) 160 | 161 | def listAllObjects(bucketName: String, options: ListObjectOptions): Stream[S3Exception, S3ObjectSummary] = 162 | ZStream 163 | .fromZIO(self.listObjects(bucketName, options)) 164 | .flatMap( 165 | paginate(_).mapConcat(_.objectSummaries) 166 | ) 167 | 168 | /** 169 | * List all objects by traversing all nested directories 170 | * 171 | * @param initialListing object listing to start with 172 | * @return 173 | */ 174 | def paginate(initialListing: S3ObjectListing): Stream[S3Exception, S3ObjectListing] = 175 | ZStream.paginateZIO(initialListing) { 176 | case current @ S3ObjectListing(_, _, _, _, None, _) => ZIO.succeed(current -> None) 177 | case current => self.getNextObjects(current).map(next => current -> Some(next)) 178 | } 179 | 180 | /** 181 | * 182 | * Safely expose the underlying Amazon S3 async client 183 | * 184 | * @param f call any operation on the S3 async client 185 | * @tparam T value type to return 186 | */ 187 | def execute[T](f: S3AsyncClient => CompletableFuture[T]): IO[S3Exception, T] 188 | } 189 | -------------------------------------------------------------------------------- /CLA.md: -------------------------------------------------------------------------------- 1 | # ZIO Contributor License Agreement 2 | 3 | Thank you for your interest in contributing to the ZIO open source project. 4 | 5 | This contributor agreement ("Agreement") describes the terms and conditions under which you may Submit a Contribution to Us. By Submitting a Contribution to Us, you accept the terms and conditions in the Agreement. If you do not accept the terms and conditions in the Agreement, you must not Submit any Contribution to Us. 6 | 7 | This is a legally binding document, so please read it carefully before accepting the terms and conditions. If you accept this Agreement, the then-current version of this Agreement shall apply each time you Submit a Contribution. The Agreement may cover more than one software project managed by Us. 8 | 9 | ## 1. Definitions 10 | 11 | "We" or "Us" means Ziverge, Inc., and its duly appointed and authorized representatives. 12 | 13 | "You" means the individual or entity who Submits a Contribution to Us. 14 | 15 | "Contribution" means any work of authorship that is Submitted by You to Us in which You own or assert ownership of the Copyright. You may not Submit a Contribution if you do not own the Copyright in the entire work of authorship. 16 | 17 | "Copyright" means all rights protecting works of authorship owned or controlled by You, including copyright, moral and neighboring rights, as appropriate, for the full term of their existence including any extensions by You. 18 | 19 | "Material" means the work of authorship which is made available by Us to third parties. When this Agreement covers more than one software project, the Material means the work of authorship to which the Contribution was Submitted. After You Submit the Contribution, it may be included in the Material. 20 | 21 | "Submit" means any form of electronic, verbal, or written communication sent to Us or our representatives, including but not limited to electronic mailing lists, electronic mail, source code control systems, pull requests, and issue tracking systems that are managed by, or on behalf of, Us for the purpose of discussing and improving the Material, but excluding communication that is conspicuously marked or otherwise designated in writing by You as "Not a Contribution."
22 | 23 | "Submission Date" means the date on which You Submit a Contribution to Us. 24 | 25 | "Effective Date" means the earliest date You execute this Agreement by Submitting a Contribution to Us. 26 | 27 | ## 2. Grant of Rights 28 | 29 | ### 2.1 Copyright License 30 | 31 | 2.1.1. You retain ownership of the Copyright in Your Contribution and have the same rights to use or license the Contribution which You would have had without entering into the Agreement. 32 | 33 | 2.1.2. To the maximum extent permitted by the relevant law, You grant to Us a perpetual, worldwide, non-exclusive, transferable, royalty-free, irrevocable license under the Copyright covering the Contribution, with the right to sublicense such rights through multiple tiers of sublicensees, to reproduce, modify, display, perform and distribute the Contribution as part of the Material; provided that this license is conditioned upon compliance with Section 2.3. 34 | 35 | ### 2.2 Patent License 36 | 37 | For patent claims including, without limitation, method, process, and apparatus claims which You own, control or have the right to grant, now or in the future, You grant to Us a perpetual, worldwide, non-exclusive, transferable, royalty-free, irrevocable patent license, with the right to sublicense these rights to multiple tiers of sublicensees, to make, have made, use, sell, offer for sale, import and otherwise transfer the Contribution and the Contribution in combination with the Material (and portions of such combination). This license is granted only to the extent that the exercise of the licensed rights infringes such patent claims; and provided that this license is conditioned upon compliance with Section 2.3. 38 | 39 | ### 2.3 Outbound License 40 | 41 | Based on the grant of rights in Sections 2.1 and 2.2, if We include Your Contribution in a Material, We may license the Contribution under any license, including copyleft, permissive, commercial, or proprietary licenses. As a condition on the exercise of this right, We agree to also license the Contribution under the terms of the license or licenses which We are using for the Material on the Submission Date. 42 | 43 | ### 2.4 Moral Rights 44 | 45 | If moral rights apply to the Contribution, to the maximum extent permitted by law, You waive and agree not to assert such moral rights against Us or our successors in interest, or any of our licensees, either direct or indirect. 46 | 47 | ### 2.5 Our Rights 48 | 49 | You acknowledge that We are not obligated to use Your Contribution as part of the Material and may decide to include any Contribution We consider appropriate. 50 | 51 | ### 2.6 Reservation of Rights 52 | 53 | Any rights not expressly licensed under this section are expressly reserved by You. 54 | 55 | ## 3. Agreement 56 | 57 | You confirm that: 58 | 59 | a. You have the legal authority to enter into this Agreement. 60 | 61 | b. You own the Copyright and patent claims covering the Contribution which are required to grant the rights under Section 2. 62 | 63 | c. The grant of rights under Section 2 does not violate any grant of rights which You have made to third parties, including Your employer. If You are an employee, You have had Your employer approve this Agreement or sign the Entity version of this document. If You are less than eighteen years old, please have Your parents or guardian sign the Agreement. 64 | 65 | d. You have followed the instructions in, if You do not own the Copyright in the entire work of authorship Submitted. 66 | 67 | ## 4. 
Disclaimer 68 | 69 | EXCEPT FOR THE EXPRESS WARRANTIES IN SECTION 3, THE CONTRIBUTION IS PROVIDED "AS IS". MORE PARTICULARLY, ALL EXPRESS OR IMPLIED WARRANTIES INCLUDING, WITHOUT LIMITATION, ANY IMPLIED WARRANTY OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT ARE EXPRESSLY DISCLAIMED BY YOU TO US. TO THE EXTENT THAT ANY SUCH WARRANTIES CANNOT BE DISCLAIMED, SUCH WARRANTY IS LIMITED IN DURATION TO THE MINIMUM PERIOD PERMITTED BY LAW. 70 | 71 | ## 5. Consequential Damage Waiver 72 | 73 | TO THE MAXIMUM EXTENT PERMITTED BY APPLICABLE LAW, IN NO EVENT WILL YOU BE LIABLE FOR ANY LOSS OF PROFITS, LOSS OF ANTICIPATED SAVINGS, LOSS OF DATA, INDIRECT, SPECIAL, INCIDENTAL, CONSEQUENTIAL AND EXEMPLARY DAMAGES ARISING OUT OF THIS AGREEMENT REGARDLESS OF THE LEGAL OR EQUITABLE THEORY (CONTRACT, TORT OR OTHERWISE) UPON WHICH THE CLAIM IS BASED. 74 | 75 | ## 6. Miscellaneous 76 | 77 | 6.1. This Agreement will be governed by and construed in accordance with the laws of the state of Maryland, in the United States of America, excluding its conflicts of law provisions. Under certain circumstances, the governing law in this section might be superseded by the United Nations Convention on Contracts for the International Sale of Goods ("UN Convention") and the parties intend to avoid the application of the UN Convention to this Agreement and, thus, exclude the application of the UN Convention in its entirety to this Agreement. 78 | 79 | 6.2. This Agreement sets out the entire agreement between You and Us for Your Contributions to Us and overrides all other agreements or understandings. 80 | 81 | 6.3. If You or We assign the rights or obligations received through this Agreement to a third party, as a condition of the assignment, that third party must agree in writing to abide by all the rights and obligations in the Agreement. 82 | 83 | 6.4. The failure of either party to require performance by the other party of any provision of this Agreement in one situation shall not affect the right of a party to require such performance at any time in the future. A waiver of performance under a provision in one situation shall not be considered a waiver of the performance of the provision in the future or a waiver of the provision in its entirety. 84 | 85 | 6.5. If any provision of this Agreement is found void and unenforceable, such provision will be replaced to the extent possible with a provision that comes closest to the meaning of the original provision and which is enforceable. The terms and conditions set forth in this Agreement shall apply notwithstanding any failure of essential purpose of this Agreement or any limited remedy to the maximum extent possible under law. 
86 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Dropbox settings and caches 2 | .dropbox 3 | .dropbox.attr 4 | .dropbox.cache 5 | # -*- mode: gitignore; -*- 6 | *~ 7 | \#*\# 8 | /.emacs.desktop 9 | /.emacs.desktop.lock 10 | *.elc 11 | auto-save-list 12 | tramp 13 | .\#* 14 | 15 | # Org-mode 16 | .org-id-locations 17 | *_archive 18 | 19 | # flymake-mode 20 | *_flymake.* 21 | 22 | .bsp/ 23 | 24 | # eshell files 25 | /eshell/history 26 | /eshell/lastdir 27 | 28 | # elpa packages 29 | /elpa/ 30 | 31 | # reftex files 32 | *.rel 33 | 34 | # AUCTeX auto folder 35 | /auto/ 36 | 37 | # cask packages 38 | .cask/ 39 | dist/ 40 | 41 | # Flycheck 42 | flycheck_*.el 43 | 44 | # server auth directory 45 | /server/ 46 | 47 | # projectiles files 48 | .projectile 49 | 50 | # directory configuration 51 | .dir-locals.el 52 | *~ 53 | 54 | # temporary files which can be created if a process still has a handle open of a deleted file 55 | .fuse_hidden* 56 | 57 | # KDE directory preferences 58 | .directory 59 | 60 | # Linux trash folder which might appear on any partition or disk 61 | .Trash-* 62 | 63 | # .nfs files are created when an open file is removed but is still being accessed 64 | .nfs* 65 | # General 66 | .DS_Store 67 | .AppleDouble 68 | .LSOverride 69 | 70 | # Icon must end with two \r 71 | Icon 72 | 73 | # Thumbnails 74 | ._* 75 | 76 | # Files that might appear in the root of a volume 77 | .DocumentRevisions-V100 78 | .fseventsd 79 | .Spotlight-V100 80 | .TemporaryItems 81 | .Trashes 82 | .VolumeIcon.icns 83 | .com.apple.timemachine.donotpresent 84 | 85 | # Directories potentially created on remote AFP share 86 | .AppleDB 87 | .AppleDesktop 88 | Network Trash Folder 89 | Temporary Items 90 | .apdisk 91 | # Cache files for Sublime Text 92 | *.tmlanguage.cache 93 | *.tmPreferences.cache 94 | *.stTheme.cache 95 | 96 | # Workspace files are user-specific 97 | *.sublime-workspace 98 | 99 | # Project files should be checked into the repository, unless a significant 100 | # proportion of contributors will probably not be using Sublime Text 101 | # *.sublime-project 102 | 103 | # SFTP configuration file 104 | sftp-config.json 105 | 106 | # Package control specific files 107 | Package Control.last-run 108 | Package Control.ca-list 109 | Package Control.ca-bundle 110 | Package Control.system-ca-bundle 111 | Package Control.cache/ 112 | Package Control.ca-certs/ 113 | Package Control.merged-ca-bundle 114 | Package Control.user-ca-bundle 115 | oscrypto-ca-bundle.crt 116 | bh_unicode_properties.cache 117 | 118 | # Sublime-github package stores a github token in this file 119 | # https://packagecontrol.io/packages/sublime-github 120 | GitHub.sublime-settings 121 | # Ignore tags created by etags, ctags, gtags (GNU global) and cscope 122 | TAGS 123 | .TAGS 124 | !TAGS/ 125 | tags 126 | .tags 127 | !tags/ 128 | gtags.files 129 | GTAGS 130 | GRTAGS 131 | GPATH 132 | GSYMS 133 | cscope.files 134 | cscope.out 135 | cscope.in.out 136 | cscope.po.out 137 | 138 | *.tmproj 139 | *.tmproject 140 | tmtags 141 | # Swap 142 | [._]*.s[a-v][a-z] 143 | [._]*.sw[a-p] 144 | [._]s[a-rt-v][a-z] 145 | [._]ss[a-gi-z] 146 | [._]sw[a-p] 147 | 148 | # Session 149 | Session.vim 150 | 151 | # Temporary 152 | .netrwhist 153 | *~ 154 | # Auto-generated tag files 155 | tags 156 | # Persistent undo 157 | [._]*.un~ 158 | # Windows thumbnail cache files 159 | Thumbs.db 160 | ehthumbs.db 161 | ehthumbs_vista.db 162 
| 163 | # Dump file 164 | *.stackdump 165 | 166 | # Folder config file 167 | [Dd]esktop.ini 168 | 169 | # Recycle Bin used on file shares 170 | \$RECYCLE.BIN/ 171 | 172 | # Windows Installer files 173 | *.cab 174 | *.msi 175 | *.msix 176 | *.msm 177 | *.msp 178 | 179 | # Windows shortcuts 180 | *.lnk 181 | 182 | .metadata 183 | bin/ 184 | tmp/ 185 | *.tmp 186 | *.bak 187 | *.swp 188 | *~.nib 189 | local.properties 190 | .settings/ 191 | .loadpath 192 | .recommenders 193 | 194 | # External tool builders 195 | .externalToolBuilders/ 196 | 197 | # Locally stored "Eclipse launch configurations" 198 | *.launch 199 | 200 | # PyDev specific (Python IDE for Eclipse) 201 | *.pydevproject 202 | 203 | # CDT-specific (C/C++ Development Tooling) 204 | .cproject 205 | 206 | # CDT- autotools 207 | .autotools 208 | 209 | # Java annotation processor (APT) 210 | .factorypath 211 | 212 | # PDT-specific (PHP Development Tools) 213 | .buildpath 214 | 215 | # sbteclipse plugin 216 | .target 217 | 218 | # Tern plugin 219 | .tern-project 220 | 221 | # TeXlipse plugin 222 | .texlipse 223 | 224 | # STS (Spring Tool Suite) 225 | .springBeans 226 | 227 | # Code Recommenders 228 | .recommenders/ 229 | 230 | # Annotation Processing 231 | .apt_generated/ 232 | 233 | # Scala IDE specific (Scala & Java development for Eclipse) 234 | .cache-main 235 | .scala_dependencies 236 | .worksheet 237 | # Ensime specific 238 | .ensime 239 | .ensime_cache/ 240 | .ensime_lucene/ 241 | # default application storage directory used by the IDE Performance Cache feature 242 | .data/ 243 | 244 | # used for ADF styles caching 245 | temp/ 246 | 247 | # default output directories 248 | classes/ 249 | deploy/ 250 | javadoc/ 251 | 252 | # lock file, a part of Oracle Credential Store Framework 253 | cwallet.sso.lck# JEnv local Java version configuration file 254 | .java-version 255 | 256 | # Used by previous versions of JEnv 257 | .jenv-version 258 | # Covers JetBrains IDEs: IntelliJ, RubyMine, PhpStorm, AppCode, PyCharm, CLion, Android Studio and WebStorm 259 | # Reference: https://intellij-support.jetbrains.com/hc/en-us/articles/206544839 260 | 261 | # CMake 262 | cmake-build-*/ 263 | 264 | # File-based project format 265 | *.iws 266 | 267 | # IntelliJ 268 | out/ 269 | 270 | # mpeltonen/sbt-idea plugin 271 | .idea_modules/ 272 | 273 | # JIRA plugin 274 | atlassian-ide-plugin.xml 275 | 276 | # Crashlytics plugin (for Android Studio and IntelliJ) 277 | com_crashlytics_export_strings.xml 278 | crashlytics.properties 279 | crashlytics-build.properties 280 | fabric.properties 281 | 282 | # Editor-based Rest Client 283 | nbproject/private/ 284 | build/ 285 | nbbuild/ 286 | dist/ 287 | nbdist/ 288 | .nb-gradle/ 289 | # Built application files 290 | *.apk 291 | *.ap_ 292 | 293 | # Files for the ART/Dalvik VM 294 | *.dex 295 | 296 | # Java class files 297 | *.class 298 | 299 | # Generated files 300 | bin/ 301 | gen/ 302 | out/ 303 | 304 | # Gradle files 305 | .gradle/ 306 | build/ 307 | 308 | # Local configuration file (sdk path, etc) 309 | local.properties 310 | 311 | # Proguard folder generated by Eclipse 312 | proguard/ 313 | 314 | # Log Files 315 | *.log 316 | 317 | # Android Studio Navigation editor temp files 318 | .navigation/ 319 | 320 | # Android Studio captures folder 321 | captures/ 322 | 323 | # IntelliJ 324 | *.iml 325 | .idea 326 | 327 | # Keystore files 328 | # Uncomment the following line if you do not want to check your keystore files in. 
329 | #*.jks 330 | 331 | # External native build folder generated in Android Studio 2.2 and later 332 | .externalNativeBuild 333 | 334 | # Google Services (e.g. APIs or Firebase) 335 | google-services.json 336 | 337 | # Freeline 338 | freeline.py 339 | freeline/ 340 | freeline_project_description.json 341 | 342 | # fastlane 343 | fastlane/report.xml 344 | fastlane/Preview.html 345 | fastlane/screenshots 346 | fastlane/test_output 347 | fastlane/readme.md 348 | .gradle 349 | /build/ 350 | 351 | # Ignore Gradle GUI config 352 | gradle-app.setting 353 | 354 | # Avoid ignoring Gradle wrapper jar file (.jar files are usually ignored) 355 | !gradle-wrapper.jar 356 | 357 | # Cache of project 358 | .gradletasknamecache 359 | 360 | # # Work around https://youtrack.jetbrains.com/issue/IDEA-116898 361 | # gradle/wrapper/gradle-wrapper.properties 362 | # Compiled class file 363 | *.class 364 | 365 | # Log file 366 | *.log 367 | 368 | # BlueJ files 369 | *.ctxt 370 | 371 | # Mobile Tools for Java (J2ME) 372 | .mtj.tmp/ 373 | 374 | # Package Files # 375 | *.jar 376 | *.war 377 | *.nar 378 | *.ear 379 | *.zip 380 | *.tar.gz 381 | *.rar 382 | 383 | # virtual machine crash logs, see http://www.java.com/en/download/help/error_hotspot.xml 384 | hs_err_pid* 385 | target/ 386 | pom.xml.tag 387 | pom.xml.releaseBackup 388 | pom.xml.versionsBackup 389 | pom.xml.next 390 | release.properties 391 | dependency-reduced-pom.xml 392 | buildNumber.properties 393 | .mvn/timing.properties 394 | .mvn/wrapper/maven-wrapper.jar 395 | # Simple Build Tool 396 | # http://www.scala-sbt.org/release/docs/Getting-Started/Directories.html#configuring-version-control 397 | 398 | dist/* 399 | target/ 400 | lib_managed/ 401 | src_managed/ 402 | project/boot/ 403 | project/plugins/project/ 404 | .history 405 | .cache 406 | .lib/ 407 | *.class 408 | *.log 409 | 410 | .metals/ 411 | metals.sbt 412 | .bloop/ 413 | project/secret 414 | 415 | # mdoc 416 | website/node_modules 417 | website/build 418 | website/i18n/en.json 419 | website/static/api 420 | 421 | #minio 422 | .minio.sys/ 423 | 424 | .vscode/ 425 | /zio-s3/src/test/scala/zio/s3/S3App.scala 426 | -------------------------------------------------------------------------------- /project/BuildHelper.scala: -------------------------------------------------------------------------------- 1 | import sbt._ 2 | import Keys._ 3 | import explicitdeps.ExplicitDepsPlugin.autoImport._ 4 | import sbtcrossproject.CrossPlugin.autoImport._ 5 | import org.portablescala.sbtplatformdeps.PlatformDepsPlugin.autoImport._ 6 | import sbtbuildinfo._ 7 | import BuildInfoKeys._ 8 | import scalafix.sbt.ScalafixPlugin.autoImport._ 9 | 10 | object BuildHelper { 11 | val Scala212 = "2.12.20" 12 | val Scala213 = "2.13.16" 13 | val ScalaDotty = "3.3.6" 14 | 15 | private val stdOptions = Seq( 16 | "-deprecation", 17 | "-encoding", 18 | "UTF-8", 19 | "-feature", 20 | "-unchecked" 21 | ) 22 | 23 | private val std2xOptions = Seq( 24 | "-language:higherKinds", 25 | "-language:existentials", 26 | "-explaintypes", 27 | "-Yrangepos", 28 | "-Xlint:_,-missing-interpolator,-type-parameter-shadow", 29 | "-Ywarn-numeric-widen", 30 | "-Ywarn-value-discard" 31 | ) ++ customOptions 32 | 33 | private def optimizerOptions(optimize: Boolean) = 34 | if (optimize) 35 | Seq( 36 | "-opt:l:inline", 37 | "-opt-inline-from:zio.internal.**" 38 | ) 39 | else Nil 40 | 41 | private def propertyFlag(property: String, default: Boolean) = 42 | sys.props.get(property).map(_.toBoolean).getOrElse(default) 43 | 44 | private def customOptions = 45 | 
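// fatal warnings are enabled by default; pass -Dfatal.warnings=false to drop -Xfatal-warnings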
if (propertyFlag("fatal.warnings", true)) 46 | Seq("-Xfatal-warnings") 47 | else 48 | Nil 49 | 50 | def buildInfoSettings(packageName: String) = 51 | Seq( 52 | buildInfoKeys := Seq[BuildInfoKey](name, version, scalaVersion, sbtVersion, isSnapshot), 53 | buildInfoPackage := packageName, 54 | buildInfoObject := "BuildInfo" 55 | ) 56 | 57 | val dottySettings = Seq( 58 | // Keep this consistent with the version in .circleci/config.yml 59 | crossScalaVersions += ScalaDotty, 60 | scalacOptions ++= { 61 | if (scalaVersion.value == ScalaDotty) 62 | Seq("-noindent") 63 | else 64 | Seq() 65 | }, 66 | Compile / doc / sources := { 67 | val old = (Compile / doc / sources).value 68 | if (scalaVersion.value == ScalaDotty) 69 | Nil 70 | else 71 | old 72 | }, 73 | Test / parallelExecution := { 74 | val old = (Test / parallelExecution).value 75 | if (scalaVersion.value == ScalaDotty) 76 | false 77 | else 78 | old 79 | } 80 | ) 81 | 82 | def extraOptions(scalaVersion: String, optimize: Boolean) = 83 | CrossVersion.partialVersion(scalaVersion) match { 84 | case Some((3, _)) => 85 | Seq( 86 | "-language:implicitConversions", 87 | "-Xignore-scala2-macros" 88 | ) 89 | case Some((2, 13)) => 90 | Seq( 91 | "-Ywarn-unused:params,-implicits" 92 | ) ++ std2xOptions ++ optimizerOptions(optimize) 93 | case Some((2, 12)) => 94 | Seq( 95 | "-opt-warnings", 96 | "-Ywarn-extra-implicit", 97 | "-Ywarn-unused:_,imports", 98 | "-Ywarn-unused:imports", 99 | "-Ypartial-unification", 100 | "-Yno-adapted-args", 101 | "-Ywarn-inaccessible", 102 | "-Ywarn-infer-any", 103 | "-Ywarn-nullary-override", 104 | "-Ywarn-nullary-unit", 105 | "-Ywarn-unused:params,-implicits", 106 | "-Xfuture", 107 | "-Xsource:2.13", 108 | "-Xmax-classfile-name", 109 | "242" 110 | ) ++ std2xOptions ++ optimizerOptions(optimize) 111 | case _ => Seq.empty 112 | } 113 | 114 | def stdSettings(prjName: String) = 115 | Seq( 116 | name := s"$prjName", 117 | crossScalaVersions := Seq(Scala212, Scala213), 118 | ThisBuild / scalaVersion := Scala213, 119 | scalacOptions := stdOptions ++ extraOptions(scalaVersion.value, optimize = !isSnapshot.value), 120 | //semanticdbEnabled := scalaVersion.value != ScalaDotty, // enable SemanticDB 121 | //semanticdbOptions += "-P:semanticdb:synthetics:on", 122 | //semanticdbVersion := scalafixSemanticdb.revision, // use Scalafix compatible version 123 | ThisBuild / scalafixScalaBinaryVersion := CrossVersion.binaryScalaVersion(scalaVersion.value), 124 | ThisBuild / scalafixDependencies ++= List( 125 | "com.github.liancheng" %% "organize-imports" % "0.6.0", 126 | "com.github.vovapolu" %% "scaluzzi" % "0.1.23" 127 | ), 128 | Test / parallelExecution := true, 129 | incOptions ~= (_.withLogRecompileOnMacro(false)), 130 | autoAPIMappings := true, 131 | unusedCompileDependenciesFilter -= moduleFilter("org.scala-js", "scalajs-library"), 132 | Compile / unmanagedSourceDirectories ++= { 133 | CrossVersion.partialVersion(scalaVersion.value) match { 134 | case Some((2, x)) if x <= 11 => 135 | Seq( 136 | Seq(file(sourceDirectory.value.getPath + "/main/scala-2.11")), 137 | CrossType.Full.sharedSrcDir(baseDirectory.value, "main").toList.map(f => file(f.getPath + "-2.11")), 138 | CrossType.Full.sharedSrcDir(baseDirectory.value, "test").toList.map(f => file(f.getPath + "-2.11")), 139 | CrossType.Full.sharedSrcDir(baseDirectory.value, "main").toList.map(f => file(f.getPath + "-2.x")), 140 | CrossType.Full.sharedSrcDir(baseDirectory.value, "main").toList.map(f => file(f.getPath + "-2.11-2.12")) 141 | ).flatten 142 | case Some((2, x)) if x == 12 => 143 
| Seq( 144 | Seq(file(sourceDirectory.value.getPath + "/main/scala-2.12")), 145 | Seq(file(sourceDirectory.value.getPath + "/main/scala-2.12+")), 146 | CrossType.Full.sharedSrcDir(baseDirectory.value, "main").toList.map(f => file(f.getPath + "-2.12+")), 147 | CrossType.Full.sharedSrcDir(baseDirectory.value, "test").toList.map(f => file(f.getPath + "-2.12+")), 148 | CrossType.Full.sharedSrcDir(baseDirectory.value, "main").toList.map(f => file(f.getPath + "-2.x")), 149 | CrossType.Full.sharedSrcDir(baseDirectory.value, "main").toList.map(f => file(f.getPath + "-2.12-2.13")), 150 | CrossType.Full.sharedSrcDir(baseDirectory.value, "main").toList.map(f => file(f.getPath + "-2.11-2.12")) 151 | ).flatten 152 | case Some((2, x)) if x >= 13 => 153 | Seq( 154 | Seq(file(sourceDirectory.value.getPath + "/main/scala-2.12")), 155 | Seq(file(sourceDirectory.value.getPath + "/main/scala-2.12+")), 156 | CrossType.Full.sharedSrcDir(baseDirectory.value, "main").toList.map(f => file(f.getPath + "-2.12+")), 157 | CrossType.Full.sharedSrcDir(baseDirectory.value, "test").toList.map(f => file(f.getPath + "-2.12+")), 158 | CrossType.Full.sharedSrcDir(baseDirectory.value, "main").toList.map(f => file(f.getPath + "-2.x")), 159 | CrossType.Full.sharedSrcDir(baseDirectory.value, "main").toList.map(f => file(f.getPath + "-2.12-2.13")), 160 | CrossType.Full.sharedSrcDir(baseDirectory.value, "main").toList.map(f => file(f.getPath + "-2.13+")) 161 | ).flatten 162 | case Some((3, _)) => 163 | Seq( 164 | Seq(file(sourceDirectory.value.getPath + "/main/scala-2.12")), 165 | Seq(file(sourceDirectory.value.getPath + "/main/scala-2.12+")), 166 | CrossType.Full.sharedSrcDir(baseDirectory.value, "main").toList.map(f => file(f.getPath + "-2.12+")), 167 | CrossType.Full.sharedSrcDir(baseDirectory.value, "test").toList.map(f => file(f.getPath + "-2.12+")), 168 | CrossType.Full.sharedSrcDir(baseDirectory.value, "main").toList.map(f => file(f.getPath + "-dotty")), 169 | CrossType.Full.sharedSrcDir(baseDirectory.value, "main").toList.map(f => file(f.getPath + "-2.13+")) 170 | ).flatten 171 | case _ => 172 | Nil 173 | } 174 | }, 175 | Test / unmanagedSourceDirectories ++= { 176 | CrossVersion.partialVersion(scalaVersion.value) match { 177 | case Some((2, x)) if x >= 12 => 178 | Seq( 179 | Seq(file(sourceDirectory.value.getPath + "/test/scala-2.12")), 180 | Seq(file(sourceDirectory.value.getPath + "/test/scala-2.12+")), 181 | CrossType.Full.sharedSrcDir(baseDirectory.value, "test").toList.map(f => file(f.getPath + "-2.x")) 182 | ).flatten 183 | case Some((3, _)) => 184 | Seq( 185 | Seq(file(sourceDirectory.value.getPath + "/test/scala-2.12+")), 186 | CrossType.Full.sharedSrcDir(baseDirectory.value, "main").toList.map(f => file(f.getPath + "-2.12+")), 187 | CrossType.Full.sharedSrcDir(baseDirectory.value, "test").toList.map(f => file(f.getPath + "-dotty")) 188 | ).flatten 189 | case _ => 190 | Nil 191 | } 192 | 193 | } 194 | ) 195 | 196 | implicit class ModuleHelper(p: Project) { 197 | def module: Project = p.in(file(p.id)).settings(stdSettings(p.id)) 198 | } 199 | } 200 | -------------------------------------------------------------------------------- /zio-s3/src/main/scala/zio/s3/Test.scala: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2017-2020 John A. De Goes and the ZIO Contributors 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License. 
6 | * You may obtain a copy of the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | * 10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | * See the License for the specific language governing permissions and 14 | * limitations under the License. 15 | */ 16 | 17 | package zio.s3 18 | 19 | import software.amazon.awssdk.services.s3.S3AsyncClient 20 | import software.amazon.awssdk.services.s3.model.S3Exception 21 | import software.amazon.awssdk.utils.{ BinaryUtils, Md5Utils } 22 | import zio._ 23 | import zio.nio.channels.AsynchronousFileChannel 24 | import zio.nio.file.{ Files, Path => ZPath } 25 | import zio.s3.S3Bucket._ 26 | import zio.stream.{ Stream, ZStream } 27 | 28 | import java.io.{ FileInputStream, FileNotFoundException } 29 | import java.nio.charset.StandardCharsets 30 | import java.nio.file.{ NoSuchFileException, StandardOpenOption } 31 | import java.nio.file.attribute.BasicFileAttributes 32 | import java.util.UUID 33 | import java.util.concurrent.CompletableFuture 34 | 35 | /** 36 | * Stub service backed by a filesystem storage 37 | */ 38 | object Test { 39 | 40 | private def fileNotFound(err: Throwable): S3Exception = 41 | S3Exception 42 | .builder() 43 | .message("Key does not exist.") 44 | .cause(err) 45 | .statusCode(404) 46 | .build() 47 | .asInstanceOf[S3Exception] 48 | 49 | def connect(path: ZPath): UIO[S3] = { 50 | type ContentType = String 51 | type Metadata = Map[String, String] 52 | 53 | Ref.make(Map.empty[String, (ContentType, Metadata)]).map { refDb => 54 | new S3 { 55 | override def createBucket(bucketName: String): IO[S3Exception, Unit] = 56 | Files.createDirectory(path / bucketName).orDie 57 | 58 | override def deleteBucket(bucketName: String): IO[S3Exception, Unit] = 59 | Files.delete(path / bucketName).orDie 60 | 61 | override def isBucketExists(bucketName: String): IO[S3Exception, Boolean] = 62 | Files.exists(path / bucketName) 63 | 64 | override val listBuckets: IO[S3Exception, S3BucketListing] = 65 | Files 66 | .list(path) 67 | .filterZIO(p => Files.readAttributes[BasicFileAttributes](p).map(_.isDirectory)) 68 | .mapZIO { p => 69 | Files 70 | .readAttributes[BasicFileAttributes](p) 71 | .map(attr => S3Bucket(p.filename.toString, attr.creationTime().toInstant)) 72 | } 73 | .runCollect 74 | .orDie 75 | 76 | override def deleteObject(bucketName: String, key: String): IO[S3Exception, Unit] = 77 | Files.deleteIfExists(path / bucketName / key).orDie.unit 78 | 79 | override def getObject(bucketName: String, key: String): Stream[S3Exception, Byte] = 80 | ZStream 81 | .scoped[Any](ZIO.fromAutoCloseable(ZIO.attempt(new FileInputStream((path / bucketName / key).toFile)))) 82 | .flatMap(ZStream.fromInputStream(_, 2048)) 83 | .refineOrDie { 84 | case e: FileNotFoundException => fileNotFound(e) 85 | } 86 | 87 | override def getObjectMetadata(bucketName: String, key: String): IO[S3Exception, ObjectMetadata] = 88 | (for { 89 | res <- refDb.get.map(_.getOrElse(bucketName + key, "" -> Map.empty[String, String])) 90 | (contentType, metadata) = res 91 | contents <- Files 92 | .readAllBytes(path / bucketName / key) 93 | .catchAll(_ => ZIO.succeed(Chunk.fromArray("".getBytes))) 94 | file <- Files 95 | .readAttributes[BasicFileAttributes](path / bucketName / key) 96 | .map(p => 97 | ObjectMetadata( 98 | metadata, 99 | contentType, 100 | p.size(), 101 |
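// hex-encoded MD5 of the stubbed object's contents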
BinaryUtils.toHex(Md5Utils.computeMD5Hash(contents.asString(StandardCharsets.UTF_8).getBytes)) 102 | ) 103 | ) 104 | } yield file) 105 | .refineOrDie { 106 | case e: NoSuchFileException => fileNotFound(e) 107 | } 108 | 109 | override def listObjects( 110 | bucketName: String, 111 | options: ListObjectOptions 112 | ): IO[S3Exception, S3ObjectListing] = 113 | Files 114 | .find(path / bucketName) { 115 | case (_, fileAttr) => 116 | fileAttr.isRegularFile 117 | } 118 | .mapZIO { filePath => 119 | Files.readAttributes[BasicFileAttributes](filePath).map { attrs => 120 | attrs -> (path / bucketName).relativize(filePath).toString() 121 | } 122 | } 123 | .filter { 124 | case (_, relativePath) => 125 | options.prefix.fold(true)(relativePath.startsWith) 126 | } 127 | .filter { 128 | case (_, relativePath) => 129 | options.delimiter.fold(true) { delim => 130 | relativePath 131 | .stripPrefix(options.prefix.getOrElse("")) 132 | .stripSuffix(delim) 133 | .indexOf(delim) < 0 134 | } 135 | } 136 | .map { 137 | case (attr, relativePath) => 138 | S3ObjectSummary( 139 | bucketName, 140 | relativePath, 141 | attr.lastModifiedTime().toInstant, 142 | attr.size() 143 | ) 144 | } 145 | .runCollect 146 | .map( 147 | _.sortBy(_.key) 148 | .mapAccum(options.starAfter) { 149 | case (Some(startWith), o) => 150 | if (startWith.startsWith(o.key)) 151 | None -> Chunk.empty 152 | else 153 | Some(startWith) -> Chunk.empty 154 | case (_, o) => 155 | None -> Chunk(o) 156 | } 157 | ._2 158 | .flatten 159 | ) 160 | .map { 161 | case list if list.size > options.maxKeys => 162 | S3ObjectListing( 163 | bucketName, 164 | options.delimiter, 165 | options.starAfter, 166 | list.take(options.maxKeys.toInt), 167 | Some(UUID.randomUUID().toString), 168 | None 169 | ) 170 | case list => 171 | S3ObjectListing(bucketName, options.delimiter, options.starAfter, list, None, None) 172 | } 173 | .orDie 174 | 175 | override def getNextObjects(listing: S3ObjectListing): IO[S3Exception, S3ObjectListing] = 176 | listing.nextContinuationToken match { 177 | case Some(token) if token.nonEmpty => listObjects(listing.bucketName, ListObjectOptions.fromMaxKeys(100)) 178 | case _ => ZIO.dieMessage("Empty token is invalid") 179 | } 180 | 181 | override def putObject[R]( 182 | bucketName: String, 183 | key: String, 184 | contentLength: Long, 185 | content: ZStream[R, Throwable, Byte], 186 | options: UploadOptions, 187 | contentMD5: Option[String] = None 188 | ): ZIO[R, S3Exception, Unit] = 189 | (for { 190 | _ <- refDb.update(db => 191 | db + (bucketName + key -> (options.contentType 192 | .getOrElse("application/octet-stream") -> options.metadata)) 193 | ) 194 | filePath = path / bucketName / key 195 | _ <- filePath.parent 196 | .map(parentPath => Files.createDirectories(parentPath)) 197 | .getOrElse(ZIO.unit) 198 | 199 | _ <- ZIO.scoped[R]( 200 | AsynchronousFileChannel 201 | .open( 202 | filePath, 203 | StandardOpenOption.WRITE, 204 | StandardOpenOption.TRUNCATE_EXISTING, 205 | StandardOpenOption.CREATE 206 | ) 207 | .flatMap(channel => 208 | content 209 | .mapChunks(Chunk.succeed) 210 | .runFoldZIO(0L) { case (pos, c) => channel.writeChunk(c, pos).as(pos + c.length) } 211 | ) 212 | ) 213 | } yield ()).orDie 214 | 215 | override def execute[T](f: S3AsyncClient => CompletableFuture[T]): IO[S3Exception, T] = 216 | ZIO.dieMessage("Not implemented error - please don't call execute() in S3 Test mode") 217 | 218 | override def multipartUpload[R]( 219 | bucketName: String, 220 | key: String, 221 | content: ZStream[R, Throwable, Byte], 222 | options:
MultipartUploadOptions 223 | )(parallelism: Int): ZIO[R, S3Exception, Unit] = { 224 | val _contentType = options.uploadOptions.contentType.orElse(Some("binary/octet-stream")) 225 | 226 | for { 227 | _ <- ZIO.dieMessage(s"parallelism must be > 0. $parallelism is invalid").unless(parallelism > 0) 228 | _ <- 229 | ZIO 230 | .dieMessage( 231 | s"Invalid part size ${Math.floor(options.partSize.toDouble / PartSize.Mega.toDouble * 100d) / 100d} Mb, minimum size is ${PartSize.Min / PartSize.Mega} Mb" 232 | ) 233 | .unless(options.partSize >= PartSize.Min) 234 | _ <- putObject( 235 | bucketName, 236 | key, 237 | 0, 238 | content.rechunk(options.partSize), 239 | options.uploadOptions.copy(contentType = _contentType) 240 | ) 241 | } yield () 242 | } 243 | } 244 | } 245 | } 246 | } 247 | -------------------------------------------------------------------------------- /zio-s3/src/main/scala/zio/s3/Live.scala: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2017-2020 John A. De Goes and the ZIO Contributors 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License. 6 | * You may obtain a copy of the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | * 10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | * See the License for the specific language governing permissions and 14 | * limitations under the License. 15 | */ 16 | 17 | package zio.s3 18 | 19 | import software.amazon.awssdk.auth.credentials.AwsCredentialsProvider 20 | import software.amazon.awssdk.core.async.{ AsyncRequestBody, AsyncResponseTransformer, SdkPublisher } 21 | import software.amazon.awssdk.core.exception.SdkException 22 | import software.amazon.awssdk.services.s3.model._ 23 | import software.amazon.awssdk.services.s3.{ S3AsyncClient, S3AsyncClientBuilder } 24 | import zio._ 25 | import zio.interop.reactivestreams._ 26 | import zio.s3.Live.{ StreamAsyncResponseTransformer, StreamResponse } 27 | import zio.s3.S3Bucket.S3BucketListing 28 | import zio.s3.errors._ 29 | import zio.s3.errors.syntax._ 30 | import zio.stream.{ Stream, ZSink, ZStream } 31 | 32 | import java.net.URI 33 | import java.nio.ByteBuffer 34 | import java.util.concurrent.CompletableFuture 35 | import scala.jdk.CollectionConverters._ 36 | 37 | /** 38 | * Service used to wrap the unsafe Amazon S3 client and access S3 storage safely 39 | * 40 | * @param unsafeClient: Amazon Async S3 Client 41 | */ 42 | final class Live(unsafeClient: S3AsyncClient) extends S3 { 43 | 44 | override def createBucket(bucketName: String): IO[S3Exception, Unit] = 45 | execute(_.createBucket(CreateBucketRequest.builder().bucket(bucketName).build())).unit 46 | 47 | override def deleteBucket(bucketName: String): IO[S3Exception, Unit] = 48 | execute(_.deleteBucket(DeleteBucketRequest.builder().bucket(bucketName).build())).unit 49 | 50 | override def isBucketExists(bucketName: String): IO[S3Exception, Boolean] = 51 | execute(_.headBucket(HeadBucketRequest.builder().bucket(bucketName).build())) 52 | .as(true) 53 | .catchSome { 54 | case _: NoSuchBucketException => ZIO.succeed(false) 55 | } 56 | 57 | override val listBuckets: IO[S3Exception, S3BucketListing] = 58 | execute(_.listBuckets()) 59 | .map(r => S3Bucket.fromBuckets(r.buckets().asScala.toList)) 60 | 61 |
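// getObject streams the response body by bridging the SDK's reactive publisher into a ZStream via StreamAsyncResponseTransformer (defined below)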
override def getObject(bucketName: String, key: String): Stream[S3Exception, Byte] = 62 | ZStream 63 | .fromZIO( 64 | execute( 65 | _.getObject[StreamResponse]( 66 | GetObjectRequest.builder().bucket(bucketName).key(key).build(), 67 | StreamAsyncResponseTransformer(new CompletableFuture[StreamResponse]()) 68 | ) 69 | ) 70 | ) 71 | .flatMap(identity) 72 | .flattenChunks 73 | .mapErrorCause(_.flatMap(_.asS3Exception())) 74 | 75 | override def getObjectMetadata(bucketName: String, key: String): IO[S3Exception, ObjectMetadata] = 76 | execute(_.headObject(HeadObjectRequest.builder().bucket(bucketName).key(key).build())) 77 | .map(ObjectMetadata.fromResponse) 78 | 79 | override def deleteObject(bucketName: String, key: String): IO[S3Exception, Unit] = 80 | execute(_.deleteObject(DeleteObjectRequest.builder().bucket(bucketName).key(key).build())).unit 81 | 82 | override def listObjects(bucketName: String, options: ListObjectOptions): IO[S3Exception, S3ObjectListing] = 83 | execute( 84 | _.listObjectsV2( 85 | ListObjectsV2Request 86 | .builder() 87 | .maxKeys(options.maxKeys.intValue()) 88 | .bucket(bucketName) 89 | .delimiter(options.delimiter.orNull) 90 | .startAfter(options.starAfter.orNull) 91 | .prefix(options.prefix.orNull) 92 | .build() 93 | ) 94 | ).map(S3ObjectListing.fromResponse) 95 | 96 | override def getNextObjects(listing: S3ObjectListing): IO[S3Exception, S3ObjectListing] = 97 | listing.nextContinuationToken 98 | .fold[ZIO[Any, S3Exception, S3ObjectListing]]( 99 | ZIO.succeed(listing.copy(nextContinuationToken = None, objectSummaries = Chunk.empty)) 100 | ) { token => 101 | execute( 102 | _.listObjectsV2( 103 | ListObjectsV2Request 104 | .builder() 105 | .bucket(listing.bucketName) 106 | .continuationToken(token) 107 | .prefix(listing.prefix.orNull) 108 | .build() 109 | ) 110 | ).map(S3ObjectListing.fromResponse) 111 | } 112 | 113 | override def putObject[R]( 114 | bucketName: String, 115 | key: String, 116 | contentLength: Long, 117 | content: ZStream[R, Throwable, Byte], 118 | options: UploadOptions, 119 | contentMD5: Option[String] = None 120 | ): ZIO[R, S3Exception, Unit] = 121 | content 122 | .mapErrorCause(_.flatMap(_.asS3Exception())) 123 | .mapChunks(c => Chunk(ByteBuffer.wrap(c.toArray))) 124 | .toPublisher 125 | .flatMap { publisher => 126 | execute( 127 | _.putObject( 128 | { 129 | val builder = PutObjectRequest 130 | .builder() 131 | .bucket(bucketName) 132 | .contentLength(contentLength) 133 | .key(key) 134 | .metadata(options.metadata.asJava) 135 | .acl(options.cannedAcl) 136 | 137 | List( 138 | (b: PutObjectRequest.Builder) => 139 | options.contentType 140 | .fold(b)(b.contentType), 141 | (b: PutObjectRequest.Builder) => 142 | contentMD5 143 | .fold(b)(b.contentMD5) 144 | ).foldLeft(builder) { case (b, f) => f(b) }.build() 145 | }, 146 | AsyncRequestBody.fromPublisher(publisher) 147 | ) 148 | ) 149 | } 150 | .unit 151 | 152 | def multipartUpload[R]( 153 | bucketName: String, 154 | key: String, 155 | content: ZStream[R, Throwable, Byte], 156 | options: MultipartUploadOptions 157 | )(parallelism: Int): ZIO[R, S3Exception, Unit] = 158 | for { 159 | _ <- ZIO.dieMessage(s"parallelism must be > 0. 
$parallelism is invalid").unless(parallelism > 0) 160 | _ <- 161 | ZIO 162 | .dieMessage( 163 | s"Invalid part size ${Math.floor(options.partSize.toDouble / PartSize.Mega.toDouble * 100d) / 100d} Mb, minimum size is ${PartSize.Min / PartSize.Mega} Mb" 164 | ) 165 | .unless(options.partSize >= PartSize.Min) 166 | 167 | uploadId <- execute( 168 | _.createMultipartUpload { 169 | val builder = CreateMultipartUploadRequest 170 | .builder() 171 | .bucket(bucketName) 172 | .key(key) 173 | .metadata(options.uploadOptions.metadata.asJava) 174 | .acl(options.uploadOptions.cannedAcl) 175 | options.uploadOptions.contentType 176 | .fold(builder)(builder.contentType) 177 | .build() 178 | } 179 | ).map(_.uploadId()) 180 | 181 | parts <- ZStream 182 | .scoped[R]( 183 | content 184 | .rechunk(options.partSize) 185 | .mapChunks(Chunk.single) 186 | .peel(ZSink.head[Chunk[Byte]]) 187 | ) 188 | .flatMap { 189 | case (Some(head), rest) => ZStream(head) ++ rest 190 | case (None, _) => ZStream(Chunk.empty) 191 | } 192 | .zipWithIndex 193 | .mapZIOPar(parallelism) { 194 | case (chunk, partNumber) => 195 | execute( 196 | _.uploadPart( 197 | UploadPartRequest 198 | .builder() 199 | .bucket(bucketName) 200 | .key(key) 201 | .partNumber(partNumber.toInt + 1) 202 | .uploadId(uploadId) 203 | .contentLength(chunk.length.toLong) 204 | .build(), 205 | AsyncRequestBody.fromBytes(chunk.toArray) 206 | ) 207 | ).map(r => CompletedPart.builder().partNumber(partNumber.toInt + 1).eTag(r.eTag()).build()) 208 | } 209 | .runCollect 210 | .mapErrorCause(_.flatMap(_.asS3Exception())) 211 | 212 | _ <- execute( 213 | _.completeMultipartUpload( 214 | CompleteMultipartUploadRequest 215 | .builder() 216 | .bucket(bucketName) 217 | .key(key) 218 | .multipartUpload(CompletedMultipartUpload.builder().parts(parts.asJavaCollection).build()) 219 | .uploadId(uploadId) 220 | .build() 221 | ) 222 | ) 223 | } yield () 224 | 225 | def execute[T](f: S3AsyncClient => CompletableFuture[T]): ZIO[Any, S3Exception, T] = 226 | ZIO.fromCompletionStage(f(unsafeClient)).refineOrDie { 227 | case s3: S3Exception => s3 228 | case sdk: SdkException => SdkError(sdk) 229 | } 230 | } 231 | 232 | object Live { 233 | 234 | def connect[R]( 235 | region: S3Region, 236 | provider: RIO[R with Scope, AwsCredentialsProvider], 237 | uriEndpoint: Option[URI], 238 | forcePathStyle: Option[Boolean] = None 239 | ): ZIO[R with Scope, ConnectionError, S3] = 240 | for { 241 | credentials <- provider.mapError(e => ConnectionError(e.getMessage, e.getCause)) 242 | builder <- ZIO.succeed { 243 | val builder = S3AsyncClient 244 | .builder() 245 | .credentialsProvider(credentials) 246 | .region(region.region) 247 | uriEndpoint.foreach(builder.endpointOverride) 248 | forcePathStyle.foreach(builder.forcePathStyle(_)) 249 | builder 250 | } 251 | service <- connect(builder) 252 | } yield service 253 | 254 | def connect[R](builder: S3AsyncClientBuilder): ZIO[R with Scope, ConnectionError, S3] = 255 | ZIO 256 | .fromAutoCloseable(ZIO.attempt(builder.build())) 257 | .mapBoth(e => ConnectionError(e.getMessage, e.getCause), new Live(_)) 258 | 259 | type StreamResponse = ZStream[Any, Throwable, Chunk[Byte]] 260 | 261 | final private[s3] case class StreamAsyncResponseTransformer(cf: CompletableFuture[StreamResponse]) 262 | extends AsyncResponseTransformer[GetObjectResponse, StreamResponse] { 263 | override def prepare(): CompletableFuture[StreamResponse] = cf 264 | 265 | override def onResponse(response: GetObjectResponse): Unit = () 266 | 267 | override def onStream(publisher: 
SdkPublisher[ByteBuffer]): Unit = { 268 | cf.complete(publisher.toZIOStream().map(Chunk.fromByteBuffer)) 269 | () 270 | } 271 | 272 | override def exceptionOccurred(error: Throwable): Unit = { 273 | cf.completeExceptionally(error) 274 | () 275 | } 276 | } 277 | 278 | } 279 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | 2 | Apache License 3 | Version 2.0, January 2004 4 | http://www.apache.org/licenses/ 5 | 6 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 7 | 8 | 1. Definitions. 9 | 10 | "License" shall mean the terms and conditions for use, reproduction, 11 | and distribution as defined by Sections 1 through 9 of this document. 12 | 13 | "Licensor" shall mean the copyright owner or entity authorized by 14 | the copyright owner that is granting the License. 15 | 16 | "Legal Entity" shall mean the union of the acting entity and all 17 | other entities that control, are controlled by, or are under common 18 | control with that entity. For the purposes of this definition, 19 | "control" means (i) the power, direct or indirect, to cause the 20 | direction or management of such entity, whether by contract or 21 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 22 | outstanding shares, or (iii) beneficial ownership of such entity. 23 | 24 | "You" (or "Your") shall mean an individual or Legal Entity 25 | exercising permissions granted by this License. 26 | 27 | "Source" form shall mean the preferred form for making modifications, 28 | including but not limited to software source code, documentation 29 | source, and configuration files. 30 | 31 | "Object" form shall mean any form resulting from mechanical 32 | transformation or translation of a Source form, including but 33 | not limited to compiled object code, generated documentation, 34 | and conversions to other media types. 35 | 36 | "Work" shall mean the work of authorship, whether in Source or 37 | Object form, made available under the License, as indicated by a 38 | copyright notice that is included in or attached to the work 39 | (an example is provided in the Appendix below). 40 | 41 | "Derivative Works" shall mean any work, whether in Source or Object 42 | form, that is based on (or derived from) the Work and for which the 43 | editorial revisions, annotations, elaborations, or other modifications 44 | represent, as a whole, an original work of authorship. For the purposes 45 | of this License, Derivative Works shall not include works that remain 46 | separable from, or merely link (or bind by name) to the interfaces of, 47 | the Work and Derivative Works thereof. 48 | 49 | "Contribution" shall mean any work of authorship, including 50 | the original version of the Work and any modifications or additions 51 | to that Work or Derivative Works thereof, that is intentionally 52 | submitted to Licensor for inclusion in the Work by the copyright owner 53 | or by an individual or Legal Entity authorized to submit on behalf of 54 | the copyright owner. 
For the purposes of this definition, "submitted" 55 | means any form of electronic, verbal, or written communication sent 56 | to the Licensor or its representatives, including but not limited to 57 | communication on electronic mailing lists, source code control systems, 58 | and issue tracking systems that are managed by, or on behalf of, the 59 | Licensor for the purpose of discussing and improving the Work, but 60 | excluding communication that is conspicuously marked or otherwise 61 | designated in writing by the copyright owner as "Not a Contribution." 62 | 63 | "Contributor" shall mean Licensor and any individual or Legal Entity 64 | on behalf of whom a Contribution has been received by Licensor and 65 | subsequently incorporated within the Work. 66 | 67 | 2. Grant of Copyright License. Subject to the terms and conditions of 68 | this License, each Contributor hereby grants to You a perpetual, 69 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 70 | copyright license to reproduce, prepare Derivative Works of, 71 | publicly display, publicly perform, sublicense, and distribute the 72 | Work and such Derivative Works in Source or Object form. 73 | 74 | 3. Grant of Patent License. Subject to the terms and conditions of 75 | this License, each Contributor hereby grants to You a perpetual, 76 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 77 | (except as stated in this section) patent license to make, have made, 78 | use, offer to sell, sell, import, and otherwise transfer the Work, 79 | where such license applies only to those patent claims licensable 80 | by such Contributor that are necessarily infringed by their 81 | Contribution(s) alone or by combination of their Contribution(s) 82 | with the Work to which such Contribution(s) was submitted. If You 83 | institute patent litigation against any entity (including a 84 | cross-claim or counterclaim in a lawsuit) alleging that the Work 85 | or a Contribution incorporated within the Work constitutes direct 86 | or contributory patent infringement, then any patent licenses 87 | granted to You under this License for that Work shall terminate 88 | as of the date such litigation is filed. 89 | 90 | 4. Redistribution. 
You may reproduce and distribute copies of the 91 | Work or Derivative Works thereof in any medium, with or without 92 | modifications, and in Source or Object form, provided that You 93 | meet the following conditions: 94 | 95 | (a) You must give any other recipients of the Work or 96 | Derivative Works a copy of this License; and 97 | 98 | (b) You must cause any modified files to carry prominent notices 99 | stating that You changed the files; and 100 | 101 | (c) You must retain, in the Source form of any Derivative Works 102 | that You distribute, all copyright, patent, trademark, and 103 | attribution notices from the Source form of the Work, 104 | excluding those notices that do not pertain to any part of 105 | the Derivative Works; and 106 | 107 | (d) If the Work includes a "NOTICE" text file as part of its 108 | distribution, then any Derivative Works that You distribute must 109 | include a readable copy of the attribution notices contained 110 | within such NOTICE file, excluding those notices that do not 111 | pertain to any part of the Derivative Works, in at least one 112 | of the following places: within a NOTICE text file distributed 113 | as part of the Derivative Works; within the Source form or 114 | documentation, if provided along with the Derivative Works; or, 115 | within a display generated by the Derivative Works, if and 116 | wherever such third-party notices normally appear. The contents 117 | of the NOTICE file are for informational purposes only and 118 | do not modify the License. You may add Your own attribution 119 | notices within Derivative Works that You distribute, alongside 120 | or as an addendum to the NOTICE text from the Work, provided 121 | that such additional attribution notices cannot be construed 122 | as modifying the License. 123 | 124 | You may add Your own copyright statement to Your modifications and 125 | may provide additional or different license terms and conditions 126 | for use, reproduction, or distribution of Your modifications, or 127 | for any such Derivative Works as a whole, provided Your use, 128 | reproduction, and distribution of the Work otherwise complies with 129 | the conditions stated in this License. 130 | 131 | 5. Submission of Contributions. Unless You explicitly state otherwise, 132 | any Contribution intentionally submitted for inclusion in the Work 133 | by You to the Licensor shall be under the terms and conditions of 134 | this License, without any additional terms or conditions. 135 | Notwithstanding the above, nothing herein shall supersede or modify 136 | the terms of any separate license agreement you may have executed 137 | with Licensor regarding such Contributions. 138 | 139 | 6. Trademarks. This License does not grant permission to use the trade 140 | names, trademarks, service marks, or product names of the Licensor, 141 | except as required for reasonable and customary use in describing the 142 | origin of the Work and reproducing the content of the NOTICE file. 143 | 144 | 7. Disclaimer of Warranty. Unless required by applicable law or 145 | agreed to in writing, Licensor provides the Work (and each 146 | Contributor provides its Contributions) on an "AS IS" BASIS, 147 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 148 | implied, including, without limitation, any warranties or conditions 149 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 150 | PARTICULAR PURPOSE. 
You are solely responsible for determining the 151 | appropriateness of using or redistributing the Work and assume any 152 | risks associated with Your exercise of permissions under this License. 153 | 154 | 8. Limitation of Liability. In no event and under no legal theory, 155 | whether in tort (including negligence), contract, or otherwise, 156 | unless required by applicable law (such as deliberate and grossly 157 | negligent acts) or agreed to in writing, shall any Contributor be 158 | liable to You for damages, including any direct, indirect, special, 159 | incidental, or consequential damages of any character arising as a 160 | result of this License or out of the use or inability to use the 161 | Work (including but not limited to damages for loss of goodwill, 162 | work stoppage, computer failure or malfunction, or any and all 163 | other commercial damages or losses), even if such Contributor 164 | has been advised of the possibility of such damages. 165 | 166 | 9. Accepting Warranty or Additional Liability. While redistributing 167 | the Work or Derivative Works thereof, You may choose to offer, 168 | and charge a fee for, acceptance of support, warranty, indemnity, 169 | or other liability obligations and/or rights consistent with this 170 | License. However, in accepting such obligations, You may act only 171 | on Your own behalf and on Your sole responsibility, not on behalf 172 | of any other Contributor, and only if You agree to indemnify, 173 | defend, and hold each Contributor harmless for any liability 174 | incurred by, or claims asserted against, such Contributor by reason 175 | of your accepting any such warranty or additional liability. 176 | 177 | END OF TERMS AND CONDITIONS 178 | 179 | APPENDIX: How to apply the Apache License to your work. 180 | 181 | To apply the Apache License to your work, attach the following 182 | boilerplate notice, with the fields enclosed by brackets "[]" 183 | replaced with your own identifying information. (Don't include 184 | the brackets!) The text should be enclosed in the appropriate 185 | comment syntax for the file format. We also recommend that a 186 | file or class name and description of purpose be included on the 187 | same "printed page" as the copyright notice for easier 188 | identification within third-party archives. 189 | 190 | Copyright [yyyy] [name of copyright owner] 191 | 192 | Licensed under the Apache License, Version 2.0 (the "License"); 193 | you may not use this file except in compliance with the License. 194 | You may obtain a copy of the License at 195 | 196 | http://www.apache.org/licenses/LICENSE-2.0 197 | 198 | Unless required by applicable law or agreed to in writing, software 199 | distributed under the License is distributed on an "AS IS" BASIS, 200 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 201 | See the License for the specific language governing permissions and 202 | limitations under the License. 
-------------------------------------------------------------------------------- /zio-s3/src/test/scala/zio/s3/S3Test.scala: -------------------------------------------------------------------------------- 1 | package zio.s3 2 | 3 | import software.amazon.awssdk.auth.credentials.AwsBasicCredentials 4 | import software.amazon.awssdk.regions.Region 5 | import software.amazon.awssdk.services.s3.model.{ ObjectCannedACL, S3Exception } 6 | import software.amazon.awssdk.utils.{ BinaryUtils, Md5Utils } 7 | import zio.nio.file.{ Path => ZPath } 8 | import zio.stream.{ ZPipeline, ZStream } 9 | import zio.test.Assertion._ 10 | import zio.test.TestAspect.sequential 11 | import zio.test._ 12 | import zio.{ Chunk, Scope, ZLayer } 13 | 14 | import java.net.URI 15 | import java.util.UUID 16 | import scala.util.Random 17 | 18 | object S3LiveSpec extends ZIOSpecDefault { 19 | 20 | private val s3 = 21 | zio.s3 22 | .live( 23 | Region.CA_CENTRAL_1, 24 | AwsBasicCredentials.create("TESTKEY", "TESTSECRET"), 25 | Some(URI.create("http://127.0.0.1:9000")) 26 | ) 27 | .mapError(TestFailure.die) 28 | 29 | override def spec: Spec[TestEnvironment with Scope, Any] = 30 | S3Suite.spec("S3LiveSpec").provideLayerShared(s3) 31 | } 32 | 33 | object S3LiveHostnameSpec extends ZIOSpecDefault { 34 | 35 | private val s3 = 36 | zio.s3 37 | .live( 38 | Region.CA_CENTRAL_1, 39 | AwsBasicCredentials.create("TESTKEY", "TESTSECRET"), 40 | Some(URI.create("http://localhost:9000")), 41 | forcePathStyle = Some(true) 42 | ) 43 | .mapError(TestFailure.die) 44 | 45 | override def spec: Spec[TestEnvironment with Scope, Any] = 46 | S3Suite.spec("S3LiveHostnameSpec").provideLayerShared(s3) 47 | } 48 | 49 | object S3TestSpec extends ZIOSpecDefault { 50 | private val root = ZPath("../test-data") 51 | 52 | private val s3: ZLayer[Any, Nothing, S3] = zio.s3.stub(root) 53 | 54 | override def spec: Spec[TestEnvironment with Scope, Any] = 55 | S3Suite.spec("S3TestSpec").provideLayerShared(s3) 56 | } 57 | 58 | object InvalidS3LayerTestSpec extends ZIOSpecDefault { 59 | 60 | private val s3: ZLayer[Scope, S3Exception, S3] = 61 | zio.s3.liveZIO(Region.EU_CENTRAL_1, providers.default) 62 | 63 | override def spec: Spec[TestEnvironment with Scope, Any] = 64 | suite("InvalidS3LayerTest") { 65 | test("listBuckets") { 66 | listBuckets.provideLayer(s3).either.map(assert(_)(isLeft(isSubtype[S3Exception](anything)))) 67 | } 68 | } 69 | 70 | } 71 | 72 | object S3Suite { 73 | val bucketName = "bucket-1" 74 | 75 | private[this] def randomNEStream(): (Int, ZStream[Any, Nothing, Byte]) = { 76 | val size = PartSize.Min + Random.nextInt(100) 77 | val bytes = new Array[Byte](size) 78 | Random.nextBytes(bytes) 79 | (size, ZStream.fromChunks(Chunk.fromArray(bytes))) 80 | } 81 | 82 | def spec(label: String): Spec[S3, Exception] = 83 | suite(label)( 84 | test("listAllObjects") { 85 | for { 86 | list <- listAllObjects(bucketName).runCollect 87 | } yield assert(list.map(_.key))(hasSameElements(List("console.log", "dir1/hello.txt", "dir1/user.csv"))) 88 | }, 89 | test("list buckets") { 90 | for { 91 | buckets <- listBuckets 92 | } yield assertTrue(buckets.map(_.name) == Chunk.single(bucketName)) 93 | }, 94 | test("list objects") { 95 | for { 96 | succeed <- listObjects(bucketName) 97 | } yield assertTrue(succeed.bucketName == bucketName) && 98 | assert(succeed.objectSummaries.map(s => s.bucketName -> s.key))( 99 | hasSameElements( 100 | List( 101 | (bucketName, "console.log"), 102 | (bucketName, "dir1/hello.txt"), 103 | (bucketName, "dir1/user.csv") 104 | ) 105 | ) 106 | ) 107 
| },
108 | test("list objects with prefix") {
109 | for {
110 | succeed <- listObjects(bucketName, ListObjectOptions.from("console", 10))
111 | } yield assert(succeed)(
112 | hasField("bucketName", (l: S3ObjectListing) => l.bucketName, equalTo(bucketName)) &&
113 | hasField(
114 | "objectSummaries",
115 | (l: S3ObjectListing) => l.objectSummaries.map(o => o.bucketName -> o.key),
116 | equalTo(Chunk.single((bucketName, "console.log")))
117 | )
118 | )
119 | },
120 | test("list objects with path-like prefix") {
121 | for {
122 | succeed <- listObjects(bucketName, ListObjectOptions.from("dir1", 10))
123 | } yield assert(succeed.objectSummaries.map(_.key))(
124 | hasSameElements(List("dir1/hello.txt", "dir1/user.csv"))
125 | )
126 | },
127 | test("list objects with non-matching prefix") {
128 | for {
129 | succeed <- listObjects(bucketName, ListObjectOptions.from("blah", 10))
130 | } yield assertTrue(succeed.bucketName -> succeed.objectSummaries == bucketName -> Chunk.empty)
131 | },
132 | test("list objects with prefix and delimiter") {
133 | for {
134 | succeed <- listObjects(bucketName, ListObjectOptions(Some("dir1/"), 10, Some("/"), None))
135 | } yield assertTrue(
136 | succeed.bucketName -> succeed.objectSummaries.map(_.key) ==
137 | bucketName -> Chunk("dir1/hello.txt", "dir1/user.csv")
138 | )
139 | },
140 | test("list objects with delimiter") {
141 | for {
142 | succeed <- listObjects(bucketName, ListObjectOptions(None, 10, Some("/"), None))
143 | } yield assertTrue(
144 | succeed.bucketName -> succeed.objectSummaries.map(_.key) ==
145 | bucketName -> Chunk("console.log")
146 | )
147 | },
148 | test("list objects with startAfter dir1/hello.txt") {
149 | for {
150 | succeed <- listObjects(bucketName, ListObjectOptions.fromStartAfter("dir1/hello.txt"))
151 | } yield assertTrue(
152 | succeed.bucketName -> succeed.objectSummaries.map(_.key).sorted == bucketName -> Chunk("dir1/user.csv")
153 | )
154 | },
155 | test("create bucket") {
156 | val bucketTmp = UUID.randomUUID().toString
157 | for {
158 | succeed <- createBucket(bucketTmp)
159 | _ <- deleteBucket(bucketTmp)
160 | } yield assert(succeed)(isUnit)
161 | },
162 | test("create bucket with empty name fails") {
163 |
164 | for {
165 | succeed <- createBucket("")
166 | .foldCause(_ => false, _ => true)
167 | } yield assertTrue(!succeed)
168 | },
169 | test("create bucket that already exists") {
170 | for {
171 | succeed <- createBucket(bucketName)
172 | .foldCause(_ => false, _ => true)
173 | } yield assertTrue(!succeed)
174 | },
175 | test("delete bucket") {
176 | val bucketTmp = UUID.randomUUID().toString
177 |
178 | for {
179 | _ <- createBucket(bucketTmp)
180 | succeed <- deleteBucket(bucketTmp)
181 | } yield assert(succeed)(isUnit)
182 | },
183 | test("delete bucket that doesn't exist") {
184 | for {
185 | succeed <- deleteBucket(UUID.randomUUID().toString).foldCause(_ => false, _ => true)
186 | } yield assertTrue(!succeed)
187 | },
188 | test("exists bucket") {
189 | for {
190 | succeed <- isBucketExists(bucketName)
191 | } yield assertTrue(succeed)
192 |
193 | },
194 | test("exists bucket - invalid identifier") {
195 | for {
196 | succeed <- isBucketExists(UUID.randomUUID().toString)
197 | } yield assertTrue(!succeed)
198 | },
199 | test("delete object - invalid identifier") {
200 | for {
201 | succeed <- deleteObject(bucketName, UUID.randomUUID().toString)
202 | } yield assert(succeed)(isUnit)
203 | },
204 | test("get object") {
205 | for {
206 | content <- getObject(bucketName, "dir1/hello.txt")
207 | .via(ZPipeline.utf8Decode)
208 | .runCollect
209
| contentString = content.mkString 210 | } yield assertTrue( 211 | contentString == 212 | """|Hello ZIO s3 213 | |this is a beautiful day""".stripMargin 214 | ) 215 | }, 216 | test("get object - invalid identifier") { 217 | for { 218 | succeed <- getObject(bucketName, UUID.randomUUID().toString) 219 | .via(ZPipeline.utf8Decode) 220 | .runCollect 221 | .refineToOrDie[S3Exception] 222 | .fold(ex => ex.statusCode() == 404, _ => false) 223 | } yield assertTrue(succeed) 224 | }, 225 | test("get object metadata - invalid identifier") { 226 | for { 227 | succeed <- getObjectMetadata(bucketName, UUID.randomUUID().toString) 228 | .refineToOrDie[S3Exception] 229 | .fold(ex => ex.statusCode() == 404, _ => false) 230 | } yield assertTrue(succeed) 231 | }, 232 | test("get nextObjects") { 233 | for { 234 | token <- listObjects(bucketName, ListObjectOptions.fromMaxKeys(1)).map(_.nextContinuationToken) 235 | listing <- getNextObjects(S3ObjectListing.from(bucketName, token)) 236 | } yield assertTrue(listing.objectSummaries.nonEmpty) 237 | }, 238 | test("get nextObjects - invalid token") { 239 | for { 240 | succeed <- getNextObjects(S3ObjectListing.from(bucketName, Some(""))).foldCause(_ => false, _ => true) 241 | } yield assertTrue(!succeed) 242 | 243 | }, 244 | test("put object") { 245 | val bytes = Random.nextString(65536).getBytes() 246 | val c = Chunk.fromArray(bytes) 247 | val contentLength = c.length.toLong 248 | val data = ZStream.fromChunks(c).rechunk(5) 249 | val tmpKey = Random.alphanumeric.take(10).mkString 250 | 251 | for { 252 | _ <- putObject( 253 | bucketName, 254 | tmpKey, 255 | contentLength, 256 | data, 257 | UploadOptions.default 258 | ) 259 | objectContentLength <- getObjectMetadata(bucketName, tmpKey).map(_.contentLength) <* 260 | deleteObject(bucketName, tmpKey) 261 | } yield assertTrue(objectContentLength == contentLength) 262 | 263 | }, 264 | test("multipart object") { 265 | val text = 266 | """Lorem ipsum dolor sit amet, consectetur adipiscing elit. 267 | |Donec semper eros quis felis scelerisque, quis lobortis felis cursus. 268 | |Nulla vulputate arcu nec luctus lobortis. 269 | |Duis non est posuere, feugiat augue et, tincidunt magna. 270 | |Etiam tempor dolor at lorem volutpat, at efficitur purus sagittis. 271 | |Curabitur sed nibh nec libero viverra posuere. 272 | |Aenean ullamcorper tortor ac ligula rutrum, euismod pulvinar justo faucibus. 273 | |Mauris dictum ligula ut lacus pellentesque porta. 274 | |Etiam molestie dolor ac purus consectetur, eget pellentesque mauris bibendum. 275 | |Sed at purus volutpat, tempor elit id, maximus neque. 276 | |Quisque pellentesque velit sed lectus placerat cursus. 277 | |Vestibulum quis urna non nibh ornare elementum. 278 | |Aenean a massa feugiat, fringilla dui eget, ultrices velit. 
279 | |Aliquam pellentesque felis eget mi tincidunt dapibus vel at turpis.""".stripMargin
280 |
281 | val data = ZStream.fromChunks(Chunk.fromArray(text.getBytes))
282 | val tmpKey = Random.alphanumeric.take(10).mkString
283 |
284 | for {
285 | _ <- multipartUpload(bucketName, tmpKey, data)(1)
286 | contentLength <- getObjectMetadata(bucketName, tmpKey).map(_.contentLength) <*
287 | deleteObject(bucketName, tmpKey)
288 | } yield assertTrue(contentLength > 0L)
289 | },
290 | test("multipart with parallelism = 1") {
291 | val (dataLength, data) = randomNEStream()
292 | val tmpKey = Random.alphanumeric.take(10).mkString
293 |
294 | for {
295 | _ <- multipartUpload(bucketName, tmpKey, data)(1)
296 | contentLength <- getObjectMetadata(bucketName, tmpKey).map(_.contentLength) <*
297 | deleteObject(bucketName, tmpKey)
298 | } yield assertTrue(contentLength == dataLength.toLong)
299 | },
300 | test("multipart with invalid parallelism value 0") {
301 | val data = ZStream.empty
302 | val tmpKey = Random.alphanumeric.take(10).mkString
303 | val io = multipartUpload(bucketName, tmpKey, data)(0)
304 | io.exit.map(assert(_)(dies(hasMessage(equalTo("parallelism must be > 0. 0 is invalid")))))
305 | },
306 | test("multipart with invalid partSize value 0") {
307 | val tmpKey = Random.alphanumeric.take(10).mkString
308 | val invalidOption = MultipartUploadOptions.fromPartSize(0)
309 | val io = multipartUpload(bucketName, tmpKey, ZStream.empty, invalidOption)(1)
310 | io.exit.map(assert(_)(dies(hasMessage(equalTo("Invalid part size 0.0 Mb, minimum size is 5 Mb")))))
311 | },
312 | test("multipart object when the content is empty") {
313 | val data = ZStream.empty
314 | val tmpKey = Random.alphanumeric.take(10).mkString
315 |
316 | for {
317 | _ <- multipartUpload(bucketName, tmpKey, data)(1)
318 | contentLength <- getObjectMetadata(bucketName, tmpKey).map(_.contentLength) <*
319 | deleteObject(bucketName, tmpKey)
320 | } yield assertTrue(contentLength == 0L)
321 | },
322 | test("multipart object when the content type is not provided") {
323 | val (_, data) = randomNEStream()
324 | val tmpKey = Random.alphanumeric.take(10).mkString
325 |
326 | for {
327 | _ <- multipartUpload(bucketName, tmpKey, data)(4)
328 | contentType <- getObjectMetadata(bucketName, tmpKey).map(_.contentType) <*
329 | deleteObject(bucketName, tmpKey)
330 | } yield assertTrue(contentType == "binary/octet-stream")
331 | },
332 | test("multipart object when there is a content type and metadata") {
333 | val metadata = Map("key1" -> "value1")
334 | val (_, data) = randomNEStream()
335 | val tmpKey = Random.alphanumeric.take(10).mkString
336 |
337 | for {
338 | _ <- multipartUpload(
339 | bucketName,
340 | tmpKey,
341 | data,
342 | MultipartUploadOptions.fromUploadOptions(
343 | UploadOptions(metadata, ObjectCannedACL.PRIVATE, Some("application/json"))
344 | )
345 | )(4)
346 | objectMetadata <- getObjectMetadata(bucketName, tmpKey) <* deleteObject(bucketName, tmpKey)
347 | } yield assertTrue(objectMetadata.contentType == "application/json") &&
348 | assertTrue(objectMetadata.metadata.map { case (k, v) => k.toLowerCase -> v } == Map("key1" -> "value1"))
349 | },
350 | test("multipart object when the chunk size and parallelism are customized") {
351 | val (dataSize, data) = randomNEStream()
352 | val tmpKey = Random.alphanumeric.take(10).mkString
353 |
354 | for {
355 | _ <- multipartUpload(bucketName, tmpKey, data, MultipartUploadOptions.fromPartSize(10 * PartSize.Mega))(4)
356 | contentLength <- getObjectMetadata(bucketName,
tmpKey).map(_.contentLength) <* 357 | deleteObject(bucketName, tmpKey) 358 | } yield assertTrue(contentLength == dataSize.toLong) 359 | }, 360 | test("stream lines") { 361 | 362 | for { 363 | list <- streamLines(bucketName, "dir1/user.csv").runCollect 364 | } yield assertTrue(list.headOption.get == "John,Doe,120 jefferson st.,Riverside, NJ, 08075") && 365 | assertTrue(list.lastOption.get == "Marie,White,20 time square,Bronx, NY,08220") 366 | }, 367 | test("stream lines - invalid key") { 368 | for { 369 | succeed <- streamLines(bucketName, "blah").runCollect.fold(_ => false, _ => true) 370 | } yield assertTrue(!succeed) 371 | }, 372 | test("put object when the content type is not provided") { 373 | val (dataSize, data) = randomNEStream() 374 | val tmpKey = Random.alphanumeric.take(10).mkString 375 | 376 | for { 377 | _ <- putObject(bucketName, tmpKey, dataSize.toLong, data) 378 | contentLength <- getObjectMetadata(bucketName, tmpKey).map(_.contentLength) <* 379 | deleteObject(bucketName, tmpKey) 380 | } yield assertTrue(dataSize.toLong == contentLength) 381 | }, 382 | test("put object when there is a content type and metadata") { 383 | val _metadata = Map("key1" -> "value1") 384 | val (dataSize, data) = randomNEStream() 385 | val tmpKey = Random.alphanumeric.take(10).mkString 386 | 387 | for { 388 | _ <- putObject( 389 | bucketName, 390 | tmpKey, 391 | dataSize.toLong, 392 | data, 393 | UploadOptions.from(_metadata, "application/json") 394 | ) 395 | objectMetadata <- getObjectMetadata(bucketName, tmpKey) <* deleteObject(bucketName, tmpKey) 396 | } yield assertTrue(objectMetadata.contentType == "application/json") && 397 | assertTrue(objectMetadata.metadata.map { case (k, v) => k.toLowerCase -> v } == Map("key1" -> "value1")) 398 | }, 399 | test("put object when there is a contentMD5 option, content type and metadata") { 400 | val bytes = Random.nextString(65536).getBytes() 401 | val _metadata = Map("key1" -> "value1") 402 | val md5Base64 = Md5Utils.md5AsBase64(bytes) 403 | val c = Chunk.fromArray(bytes) 404 | val contentLength = c.length.toLong 405 | val data = ZStream.fromChunks(c).rechunk(5) 406 | val tmpKey = Random.alphanumeric.take(10).mkString 407 | 408 | for { 409 | _ <- putObject( 410 | bucketName, 411 | tmpKey, 412 | contentLength, 413 | data, 414 | UploadOptions.from(_metadata, "application/json"), 415 | Some(md5Base64) 416 | ) 417 | metadata <- getObjectMetadata(bucketName, tmpKey) <* 418 | deleteObject(bucketName, tmpKey) 419 | actualMD5 = BinaryUtils.toBase64(BinaryUtils.fromHex(metadata.eTag)) 420 | } yield assertTrue( 421 | metadata.contentLength == contentLength, 422 | actualMD5 == md5Base64, 423 | metadata.contentType == "application/json", 424 | metadata.metadata.map { case (k, v) => k.toLowerCase -> v } == Map("key1" -> "value1") 425 | ) 426 | 427 | } 428 | ) @@ sequential 429 | 430 | } 431 | -------------------------------------------------------------------------------- /sbt: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | # 3 | # A more capable sbt runner, coincidentally also called sbt. 
4 | # Author: Paul Phillips 5 | # https://github.com/paulp/sbt-extras 6 | 7 | set -o pipefail 8 | 9 | declare -r sbt_release_version="1.3.4" 10 | declare -r sbt_unreleased_version="1.3.4" 11 | 12 | declare -r latest_213="2.13.1" 13 | declare -r latest_212="2.12.10" 14 | declare -r latest_211="2.11.12" 15 | declare -r latest_210="2.10.7" 16 | declare -r latest_29="2.9.3" 17 | declare -r latest_28="2.8.2" 18 | 19 | declare -r buildProps="project/build.properties" 20 | 21 | declare -r sbt_launch_ivy_release_repo="https://repo.typesafe.com/typesafe/ivy-releases" 22 | declare -r sbt_launch_ivy_snapshot_repo="https://repo.scala-sbt.org/scalasbt/ivy-snapshots" 23 | declare -r sbt_launch_mvn_release_repo="https://repo.scala-sbt.org/scalasbt/maven-releases" 24 | declare -r sbt_launch_mvn_snapshot_repo="https://repo.scala-sbt.org/scalasbt/maven-snapshots" 25 | 26 | declare -r default_jvm_opts_common="-Xms512m -Xss2m -XX:MaxInlineLevel=18" 27 | declare -r noshare_opts="-Dsbt.global.base=project/.sbtboot -Dsbt.boot.directory=project/.boot -Dsbt.ivy.home=project/.ivy" 28 | 29 | declare sbt_jar sbt_dir sbt_create sbt_version sbt_script sbt_new 30 | declare sbt_explicit_version 31 | declare verbose noshare batch trace_level 32 | 33 | declare java_cmd="java" 34 | declare sbt_launch_dir="$HOME/.sbt/launchers" 35 | declare sbt_launch_repo 36 | 37 | # pull -J and -D options to give to java. 38 | declare -a java_args scalac_args sbt_commands residual_args 39 | 40 | # args to jvm/sbt via files or environment variables 41 | declare -a extra_jvm_opts extra_sbt_opts 42 | 43 | echoerr() { echo >&2 "$@"; } 44 | vlog() { [[ -n "$verbose" ]] && echoerr "$@"; } 45 | die() { 46 | echo "Aborting: $*" 47 | exit 1 48 | } 49 | 50 | setTrapExit() { 51 | # save stty and trap exit, to ensure echo is re-enabled if we are interrupted. 52 | SBT_STTY="$(stty -g 2>/dev/null)" 53 | export SBT_STTY 54 | 55 | # restore stty settings (echo in particular) 56 | onSbtRunnerExit() { 57 | [ -t 0 ] || return 58 | vlog "" 59 | vlog "restoring stty: $SBT_STTY" 60 | stty "$SBT_STTY" 61 | } 62 | 63 | vlog "saving stty: $SBT_STTY" 64 | trap onSbtRunnerExit EXIT 65 | } 66 | 67 | # this seems to cover the bases on OSX, and someone will 68 | # have to tell me about the others. 69 | get_script_path() { 70 | local path="$1" 71 | [[ -L "$path" ]] || { 72 | echo "$path" 73 | return 74 | } 75 | 76 | local -r target="$(readlink "$path")" 77 | if [[ "${target:0:1}" == "/" ]]; then 78 | echo "$target" 79 | else 80 | echo "${path%/*}/$target" 81 | fi 82 | } 83 | 84 | script_path="$(get_script_path "${BASH_SOURCE[0]}")" 85 | declare -r script_path 86 | script_name="${script_path##*/}" 87 | declare -r script_name 88 | 89 | init_default_option_file() { 90 | local overriding_var="${!1}" 91 | local default_file="$2" 92 | if [[ ! 
-r "$default_file" && "$overriding_var" =~ ^@(.*)$ ]]; then 93 | local envvar_file="${BASH_REMATCH[1]}" 94 | if [[ -r "$envvar_file" ]]; then 95 | default_file="$envvar_file" 96 | fi 97 | fi 98 | echo "$default_file" 99 | } 100 | 101 | sbt_opts_file="$(init_default_option_file SBT_OPTS .sbtopts)" 102 | jvm_opts_file="$(init_default_option_file JVM_OPTS .jvmopts)" 103 | 104 | build_props_sbt() { 105 | [[ -r "$buildProps" ]] && 106 | grep '^sbt\.version' "$buildProps" | tr '=\r' ' ' | awk '{ print $2; }' 107 | } 108 | 109 | set_sbt_version() { 110 | sbt_version="${sbt_explicit_version:-$(build_props_sbt)}" 111 | [[ -n "$sbt_version" ]] || sbt_version=$sbt_release_version 112 | export sbt_version 113 | } 114 | 115 | url_base() { 116 | local version="$1" 117 | 118 | case "$version" in 119 | 0.7.*) echo "https://storage.googleapis.com/google-code-archive-downloads/v2/code.google.com/simple-build-tool" ;; 120 | 0.10.*) echo "$sbt_launch_ivy_release_repo" ;; 121 | 0.11.[12]) echo "$sbt_launch_ivy_release_repo" ;; 122 | 0.*-[0-9][0-9][0-9][0-9][0-9][0-9][0-9][0-9]-[0-9][0-9][0-9][0-9][0-9][0-9]) # ie "*-yyyymmdd-hhMMss" 123 | echo "$sbt_launch_ivy_snapshot_repo" ;; 124 | 0.*) echo "$sbt_launch_ivy_release_repo" ;; 125 | *-[0-9][0-9][0-9][0-9][0-9][0-9][0-9][0-9]T[0-9][0-9][0-9][0-9][0-9][0-9]) # ie "*-yyyymmddThhMMss" 126 | echo "$sbt_launch_mvn_snapshot_repo" ;; 127 | *) echo "$sbt_launch_mvn_release_repo" ;; 128 | esac 129 | } 130 | 131 | make_url() { 132 | local version="$1" 133 | 134 | local base="${sbt_launch_repo:-$(url_base "$version")}" 135 | 136 | case "$version" in 137 | 0.7.*) echo "$base/sbt-launch-0.7.7.jar" ;; 138 | 0.10.*) echo "$base/org.scala-tools.sbt/sbt-launch/$version/sbt-launch.jar" ;; 139 | 0.11.[12]) echo "$base/org.scala-tools.sbt/sbt-launch/$version/sbt-launch.jar" ;; 140 | 0.*) echo "$base/org.scala-sbt/sbt-launch/$version/sbt-launch.jar" ;; 141 | *) echo "$base/org/scala-sbt/sbt-launch/$version/sbt-launch-${version}.jar" ;; 142 | esac 143 | } 144 | 145 | addJava() { 146 | vlog "[addJava] arg = '$1'" 147 | java_args+=("$1") 148 | } 149 | addSbt() { 150 | vlog "[addSbt] arg = '$1'" 151 | sbt_commands+=("$1") 152 | } 153 | addScalac() { 154 | vlog "[addScalac] arg = '$1'" 155 | scalac_args+=("$1") 156 | } 157 | addResidual() { 158 | vlog "[residual] arg = '$1'" 159 | residual_args+=("$1") 160 | } 161 | 162 | addResolver() { addSbt "set resolvers += $1"; } 163 | 164 | addDebugger() { addJava "-Xdebug" && addJava "-Xrunjdwp:transport=dt_socket,server=y,suspend=n,address=$1"; } 165 | 166 | setThisBuild() { 167 | vlog "[addBuild] args = '$*'" 168 | local key="$1" && shift 169 | addSbt "set $key in ThisBuild := $*" 170 | } 171 | setScalaVersion() { 172 | [[ "$1" == *"-SNAPSHOT" ]] && addResolver 'Resolver.sonatypeRepo("snapshots")' 173 | addSbt "++ $1" 174 | } 175 | setJavaHome() { 176 | java_cmd="$1/bin/java" 177 | setThisBuild javaHome "_root_.scala.Some(file(\"$1\"))" 178 | export JAVA_HOME="$1" 179 | export JDK_HOME="$1" 180 | export PATH="$JAVA_HOME/bin:$PATH" 181 | } 182 | 183 | getJavaVersion() { 184 | local -r str=$("$1" -version 2>&1 | grep -E -e '(java|openjdk) version' | awk '{ print $3 }' | tr -d '"') 185 | 186 | # java -version on java8 says 1.8.x 187 | # but on 9 and 10 it's 9.x.y and 10.x.y. 
188 | if [[ "$str" =~ ^1\.([0-9]+)(\..*)?$ ]]; then 189 | echo "${BASH_REMATCH[1]}" 190 | elif [[ "$str" =~ ^([0-9]+)(\..*)?$ ]]; then 191 | echo "${BASH_REMATCH[1]}" 192 | elif [[ -n "$str" ]]; then 193 | echoerr "Can't parse java version from: $str" 194 | fi 195 | } 196 | 197 | checkJava() { 198 | # Warn if there is a Java version mismatch between PATH and JAVA_HOME/JDK_HOME 199 | 200 | [[ -n "$JAVA_HOME" && -e "$JAVA_HOME/bin/java" ]] && java="$JAVA_HOME/bin/java" 201 | [[ -n "$JDK_HOME" && -e "$JDK_HOME/lib/tools.jar" ]] && java="$JDK_HOME/bin/java" 202 | 203 | if [[ -n "$java" ]]; then 204 | pathJavaVersion=$(getJavaVersion java) 205 | homeJavaVersion=$(getJavaVersion "$java") 206 | if [[ "$pathJavaVersion" != "$homeJavaVersion" ]]; then 207 | echoerr "Warning: Java version mismatch between PATH and JAVA_HOME/JDK_HOME, sbt will use the one in PATH" 208 | echoerr " Either: fix your PATH, remove JAVA_HOME/JDK_HOME or use -java-home" 209 | echoerr " java version from PATH: $pathJavaVersion" 210 | echoerr " java version from JAVA_HOME/JDK_HOME: $homeJavaVersion" 211 | fi 212 | fi 213 | } 214 | 215 | java_version() { 216 | local -r version=$(getJavaVersion "$java_cmd") 217 | vlog "Detected Java version: $version" 218 | echo "$version" 219 | } 220 | 221 | # MaxPermSize critical on pre-8 JVMs but incurs noisy warning on 8+ 222 | default_jvm_opts() { 223 | local -r v="$(java_version)" 224 | if [[ $v -ge 10 ]]; then 225 | echo "$default_jvm_opts_common -XX:+UnlockExperimentalVMOptions -XX:+UseJVMCICompiler" 226 | elif [[ $v -ge 8 ]]; then 227 | echo "$default_jvm_opts_common" 228 | else 229 | echo "-XX:MaxPermSize=384m $default_jvm_opts_common" 230 | fi 231 | } 232 | 233 | build_props_scala() { 234 | if [[ -r "$buildProps" ]]; then 235 | versionLine="$(grep '^build.scala.versions' "$buildProps")" 236 | versionString="${versionLine##build.scala.versions=}" 237 | echo "${versionString%% .*}" 238 | fi 239 | } 240 | 241 | execRunner() { 242 | # print the arguments one to a line, quoting any containing spaces 243 | vlog "# Executing command line:" && { 244 | for arg; do 245 | if [[ -n "$arg" ]]; then 246 | if printf "%s\n" "$arg" | grep -q ' '; then 247 | printf >&2 "\"%s\"\n" "$arg" 248 | else 249 | printf >&2 "%s\n" "$arg" 250 | fi 251 | fi 252 | done 253 | vlog "" 254 | } 255 | 256 | setTrapExit 257 | 258 | if [[ -n "$batch" ]]; then 259 | "$@" /dev/null 2>&1; then 281 | curl --fail --silent --location "$url" --output "$jar" 282 | elif command -v wget >/dev/null 2>&1; then 283 | wget -q -O "$jar" "$url" 284 | fi 285 | } && [[ -r "$jar" ]] 286 | } 287 | 288 | acquire_sbt_jar() { 289 | { 290 | sbt_jar="$(jar_file "$sbt_version")" 291 | [[ -r "$sbt_jar" ]] 292 | } || { 293 | sbt_jar="$HOME/.ivy2/local/org.scala-sbt/sbt-launch/$sbt_version/jars/sbt-launch.jar" 294 | [[ -r "$sbt_jar" ]] 295 | } || { 296 | sbt_jar="$(jar_file "$sbt_version")" 297 | jar_url="$(make_url "$sbt_version")" 298 | 299 | echoerr "Downloading sbt launcher for ${sbt_version}:" 300 | echoerr " From ${jar_url}" 301 | echoerr " To ${sbt_jar}" 302 | 303 | download_url "${jar_url}" "${sbt_jar}" 304 | 305 | case "${sbt_version}" in 306 | 0.*) 307 | vlog "SBT versions < 1.0 do not have published MD5 checksums, skipping check" 308 | echo "" 309 | ;; 310 | *) verify_sbt_jar "${sbt_jar}" ;; 311 | esac 312 | } 313 | } 314 | 315 | verify_sbt_jar() { 316 | local jar="${1}" 317 | local md5="${jar}.md5" 318 | md5url="$(make_url "${sbt_version}").md5" 319 | 320 | echoerr "Downloading sbt launcher ${sbt_version} md5 hash:" 321 | echoerr " From 
${md5url}" 322 | echoerr " To ${md5}" 323 | 324 | download_url "${md5url}" "${md5}" >/dev/null 2>&1 325 | 326 | if command -v md5sum >/dev/null 2>&1; then 327 | if echo "$(cat "${md5}") ${jar}" | md5sum -c -; then 328 | rm -rf "${md5}" 329 | return 0 330 | else 331 | echoerr "Checksum does not match" 332 | return 1 333 | fi 334 | elif command -v md5 >/dev/null 2>&1; then 335 | if [ "$(md5 -q "${jar}")" == "$(cat "${md5}")" ]; then 336 | rm -rf "${md5}" 337 | return 0 338 | else 339 | echoerr "Checksum does not match" 340 | return 1 341 | fi 342 | elif command -v openssl >/dev/null 2>&1; then 343 | if [ "$(openssl md5 -r "${jar}" | awk '{print $1}')" == "$(cat "${md5}")" ]; then 344 | rm -rf "${md5}" 345 | return 0 346 | else 347 | echoerr "Checksum does not match" 348 | return 1 349 | fi 350 | else 351 | echoerr "Could not find an MD5 command" 352 | return 1 353 | fi 354 | } 355 | 356 | usage() { 357 | set_sbt_version 358 | cat < display stack traces with a max of frames (default: -1, traces suppressed) 371 | -debug-inc enable debugging log for the incremental compiler 372 | -no-colors disable ANSI color codes 373 | -sbt-create start sbt even if current directory contains no sbt project 374 | -sbt-dir path to global settings/plugins directory (default: ~/.sbt/) 375 | -sbt-boot path to shared boot directory (default: ~/.sbt/boot in 0.11+) 376 | -ivy path to local Ivy repository (default: ~/.ivy2) 377 | -no-share use all local caches; no sharing 378 | -offline put sbt in offline mode 379 | -jvm-debug Turn on JVM debugging, open at the given port. 380 | -batch Disable interactive mode 381 | -prompt Set the sbt prompt; in expr, 's' is the State and 'e' is Extracted 382 | -script Run the specified file as a scala script 383 | 384 | # sbt version (default: sbt.version from $buildProps if present, otherwise $sbt_release_version) 385 | -sbt-version use the specified version of sbt (default: $sbt_release_version) 386 | -sbt-force-latest force the use of the latest release of sbt: $sbt_release_version 387 | -sbt-dev use the latest pre-release version of sbt: $sbt_unreleased_version 388 | -sbt-jar use the specified jar as the sbt launcher 389 | -sbt-launch-dir directory to hold sbt launchers (default: $sbt_launch_dir) 390 | -sbt-launch-repo repo url for downloading sbt launcher jar (default: $(url_base "$sbt_version")) 391 | 392 | # scala version (default: as chosen by sbt) 393 | -28 use $latest_28 394 | -29 use $latest_29 395 | -210 use $latest_210 396 | -211 use $latest_211 397 | -212 use $latest_212 398 | -213 use $latest_213 399 | -scala-home use the scala build at the specified directory 400 | -scala-version use the specified version of scala 401 | -binary-version use the specified scala version when searching for dependencies 402 | 403 | # java version (default: java from PATH, currently $(java -version 2>&1 | grep version)) 404 | -java-home alternate JAVA_HOME 405 | 406 | # passing options to the jvm - note it does NOT use JAVA_OPTS due to pollution 407 | # The default set is used if JVM_OPTS is unset and no -jvm-opts file is found 408 | $(default_jvm_opts) 409 | JVM_OPTS environment variable holding either the jvm args directly, or 410 | the reference to a file containing jvm args if given path is prepended by '@' (e.g. '@/etc/jvmopts') 411 | Note: "@"-file is overridden by local '.jvmopts' or '-jvm-opts' argument. 
412 | -jvm-opts <path> file containing jvm args (if not given, .jvmopts in project root is used if present)
413 | -Dkey=val pass -Dkey=val directly to the jvm
414 | -J-X pass option -X directly to the jvm (-J is stripped)
415 |
416 | # passing options to sbt, OR to this runner
417 | SBT_OPTS environment variable holding either the sbt args directly, or
418 | the reference to a file containing sbt args if given path is prepended by '@' (e.g. '@/etc/sbtopts')
419 | Note: "@"-file is overridden by local '.sbtopts' or '-sbt-opts' argument.
420 | -sbt-opts <path> file containing sbt args (if not given, .sbtopts in project root is used if present)
421 | -S-X add -X to sbt's scalacOptions (-S is stripped)
422 | EOM
423 | exit 0
424 | }
425 |
426 | process_args() {
427 | require_arg() {
428 | local type="$1"
429 | local opt="$2"
430 | local arg="$3"
431 |
432 | if [[ -z "$arg" ]] || [[ "${arg:0:1}" == "-" ]]; then
433 | die "$opt requires <$type> argument"
434 | fi
435 | }
436 | while [[ $# -gt 0 ]]; do
437 | case "$1" in
438 | -h | -help) usage ;;
439 | -v) verbose=true && shift ;;
440 | -d) addSbt "--debug" && shift ;;
441 | -w) addSbt "--warn" && shift ;;
442 | -q) addSbt "--error" && shift ;;
443 | -x) shift ;; # currently unused
444 | -trace) require_arg integer "$1" "$2" && trace_level="$2" && shift 2 ;;
445 | -debug-inc) addJava "-Dxsbt.inc.debug=true" && shift ;;
446 |
447 | -no-colors) addJava "-Dsbt.log.noformat=true" && shift ;;
448 | -sbt-create) sbt_create=true && shift ;;
449 | -sbt-dir) require_arg path "$1" "$2" && sbt_dir="$2" && shift 2 ;;
450 | -sbt-boot) require_arg path "$1" "$2" && addJava "-Dsbt.boot.directory=$2" && shift 2 ;;
451 | -ivy) require_arg path "$1" "$2" && addJava "-Dsbt.ivy.home=$2" && shift 2 ;;
452 | -no-share) noshare=true && shift ;;
453 | -offline) addSbt "set offline in Global := true" && shift ;;
454 | -jvm-debug) require_arg port "$1" "$2" && addDebugger "$2" && shift 2 ;;
455 | -batch) batch=true && shift ;;
456 | -prompt) require_arg "expr" "$1" "$2" && setThisBuild shellPrompt "(s => { val e = Project.extract(s) ; $2 })" && shift 2 ;;
457 | -script) require_arg file "$1" "$2" && sbt_script="$2" && addJava "-Dsbt.main.class=sbt.ScriptMain" && shift 2 ;;
458 |
459 | -sbt-version) require_arg version "$1" "$2" && sbt_explicit_version="$2" && shift 2 ;;
460 | -sbt-force-latest) sbt_explicit_version="$sbt_release_version" && shift ;;
461 | -sbt-dev) sbt_explicit_version="$sbt_unreleased_version" && shift ;;
462 | -sbt-jar) require_arg path "$1" "$2" && sbt_jar="$2" && shift 2 ;;
463 | -sbt-launch-dir) require_arg path "$1" "$2" && sbt_launch_dir="$2" && shift 2 ;;
464 | -sbt-launch-repo) require_arg path "$1" "$2" && sbt_launch_repo="$2" && shift 2 ;;
465 |
466 | -28) setScalaVersion "$latest_28" && shift ;;
467 | -29) setScalaVersion "$latest_29" && shift ;;
468 | -210) setScalaVersion "$latest_210" && shift ;;
469 | -211) setScalaVersion "$latest_211" && shift ;;
470 | -212) setScalaVersion "$latest_212" && shift ;;
471 | -213) setScalaVersion "$latest_213" && shift ;;
472 |
473 | -scala-version) require_arg version "$1" "$2" && setScalaVersion "$2" && shift 2 ;;
474 | -binary-version) require_arg version "$1" "$2" && setThisBuild scalaBinaryVersion "\"$2\"" && shift 2 ;;
475 | -scala-home) require_arg path "$1" "$2" && setThisBuild scalaHome "_root_.scala.Some(file(\"$2\"))" && shift 2 ;;
476 | -java-home) require_arg path "$1" "$2" && setJavaHome "$2" && shift 2 ;;
477 | -sbt-opts) require_arg path "$1" "$2" && sbt_opts_file="$2" && shift 2 ;;
478 | -jvm-opts)
require_arg path "$1" "$2" && jvm_opts_file="$2" && shift 2 ;; 479 | 480 | -D*) addJava "$1" && shift ;; 481 | -J*) addJava "${1:2}" && shift ;; 482 | -S*) addScalac "${1:2}" && shift ;; 483 | 484 | new) sbt_new=true && : ${sbt_explicit_version:=$sbt_release_version} && addResidual "$1" && shift ;; 485 | 486 | *) addResidual "$1" && shift ;; 487 | esac 488 | done 489 | } 490 | 491 | # process the direct command line arguments 492 | process_args "$@" 493 | 494 | # skip #-styled comments and blank lines 495 | readConfigFile() { 496 | local end=false 497 | until $end; do 498 | read -r || end=true 499 | [[ $REPLY =~ ^# ]] || [[ -z $REPLY ]] || echo "$REPLY" 500 | done <"$1" 501 | } 502 | 503 | # if there are file/environment sbt_opts, process again so we 504 | # can supply args to this runner 505 | if [[ -r "$sbt_opts_file" ]]; then 506 | vlog "Using sbt options defined in file $sbt_opts_file" 507 | while read -r opt; do extra_sbt_opts+=("$opt"); done < <(readConfigFile "$sbt_opts_file") 508 | elif [[ -n "$SBT_OPTS" && ! ("$SBT_OPTS" =~ ^@.*) ]]; then 509 | vlog "Using sbt options defined in variable \$SBT_OPTS" 510 | IFS=" " read -r -a extra_sbt_opts <<<"$SBT_OPTS" 511 | else 512 | vlog "No extra sbt options have been defined" 513 | fi 514 | 515 | [[ -n "${extra_sbt_opts[*]}" ]] && process_args "${extra_sbt_opts[@]}" 516 | 517 | # reset "$@" to the residual args 518 | set -- "${residual_args[@]}" 519 | argumentCount=$# 520 | 521 | # set sbt version 522 | set_sbt_version 523 | 524 | checkJava 525 | 526 | # only exists in 0.12+ 527 | setTraceLevel() { 528 | case "$sbt_version" in 529 | "0.7."* | "0.10."* | "0.11."*) echoerr "Cannot set trace level in sbt version $sbt_version" ;; 530 | *) setThisBuild traceLevel "$trace_level" ;; 531 | esac 532 | } 533 | 534 | # set scalacOptions if we were given any -S opts 535 | [[ ${#scalac_args[@]} -eq 0 ]] || addSbt "set scalacOptions in ThisBuild += \"${scalac_args[*]}\"" 536 | 537 | [[ -n "$sbt_explicit_version" && -z "$sbt_new" ]] && addJava "-Dsbt.version=$sbt_explicit_version" 538 | vlog "Detected sbt version $sbt_version" 539 | 540 | if [[ -n "$sbt_script" ]]; then 541 | residual_args=("$sbt_script" "${residual_args[@]}") 542 | else 543 | # no args - alert them there's stuff in here 544 | ((argumentCount > 0)) || { 545 | vlog "Starting $script_name: invoke with -help for other options" 546 | residual_args=(shell) 547 | } 548 | fi 549 | 550 | # verify this is an sbt dir, -create was given or user attempts to run a scala script 551 | [[ -r ./build.sbt || -d ./project || -n "$sbt_create" || -n "$sbt_script" || -n "$sbt_new" ]] || { 552 | cat <