├── project
├── build.properties
└── plugins.sbt
├── src
├── test
│ ├── resources
│ │ ├── reference.conf
│ │ ├── test.conf
│ │ └── logback-test.xml
│ ├── scala
│ │ └── io
│ │ │ └── findify
│ │ │ └── s3mock
│ │ │ ├── awscli
│ │ │ ├── AWSCliTest.scala
│ │ │ ├── PutBucketTest.scala
│ │ │ └── GetObjectTest.scala
│ │ │ ├── TypesafeConfigTest.scala
│ │ │ ├── ListBucketEmptyWorkdirTest.scala
│ │ │ ├── PutBucketTest.scala
│ │ │ ├── ListBucketsTest.scala
│ │ │ ├── alpakka
│ │ │ ├── MultipartUploadTest.scala
│ │ │ ├── AlpakkaExample.scala
│ │ │ ├── ListBucketTest.scala
│ │ │ └── GetObjectTest.scala
│ │ │ ├── MapMetadataStoreTest.scala
│ │ │ ├── JavaExampleTest.scala
│ │ │ ├── ChunkBufferTest.scala
│ │ │ ├── S3ChunkedProtocolTest.scala
│ │ │ ├── transfermanager
│ │ │ └── PutGetTest.scala
│ │ │ ├── CopyObjectTest.scala
│ │ │ ├── GetPutObjectWithMetadataTest.scala
│ │ │ ├── DeleteTest.scala
│ │ │ ├── S3MockTest.scala
│ │ │ ├── MultipartUploadTest.scala
│ │ │ ├── GetPutObjectTest.scala
│ │ │ └── ListBucketTest.scala
│ └── java
│ │ └── io
│ │ └── findify
│ │ └── s3mock
│ │ └── example
│ │ ├── JavaExample.java
│ │ └── JavaBuilderExample.java
└── main
│ ├── resources
│ └── application.conf
│ └── scala
│ └── io
│ └── findify
│ └── s3mock
│ ├── response
│ ├── CreateBucket.scala
│ ├── CopyObjectResult.scala
│ ├── InitiateMultipartUploadResult.scala
│ ├── CompleteMultipartUploadResult.scala
│ ├── DeleteObjectsResponse.scala
│ ├── ListAllMyBuckets.scala
│ └── ListBucket.scala
│ ├── error
│ ├── InternalErrorException.scala
│ ├── NoSuchBucketException.scala
│ └── NoSuchKeyException.scala
│ ├── request
│ ├── DeleteObjectsRequest.scala
│ ├── CreateBucketConfiguration.scala
│ └── CompleteMultipartUploadPart.scala
│ ├── Main.scala
│ ├── provider
│ ├── metadata
│ │ ├── MetadataStore.scala
│ │ ├── InMemoryMetadataStore.scala
│ │ └── MapMetadataStore.scala
│ ├── Provider.scala
│ ├── FileProvider.scala
│ └── InMemoryProvider.scala
│ ├── route
│ ├── ListBuckets.scala
│ ├── CreateBucket.scala
│ ├── DeleteObject.scala
│ ├── DeleteBucket.scala
│ ├── PutObjectMultipartStart.scala
│ ├── ListBucket.scala
│ ├── DeleteObjects.scala
│ ├── PutObjectMultipartComplete.scala
│ ├── PutObjectMultipart.scala
│ ├── CopyObject.scala
│ ├── GetObject.scala
│ └── PutObject.scala
│ ├── S3ChunkedProtocolStage.scala
│ └── S3Mock.scala
├── .travis.yml
├── .gitignore
├── LICENCE.md
├── CHANGELOG.md
└── README.md
/project/build.properties:
--------------------------------------------------------------------------------
1 | sbt.version = 0.13.15
--------------------------------------------------------------------------------
/src/test/resources/reference.conf:
--------------------------------------------------------------------------------
1 | foo {
2 | default: 10
3 | }
--------------------------------------------------------------------------------
/src/test/resources/test.conf:
--------------------------------------------------------------------------------
1 | foo {
2 | testConfig: "test"
3 | }
--------------------------------------------------------------------------------
/.travis.yml:
--------------------------------------------------------------------------------
1 | language: scala
2 | scala:
3 | - 2.12.2
4 | jdk:
5 | - oraclejdk8
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | .idea
2 | project/project
3 | project/target
4 | target
5 | release.sh
--------------------------------------------------------------------------------
/src/main/resources/application.conf:
--------------------------------------------------------------------------------
1 | akka.http.parsing.illegal-header-warnings = off
2 |
3 | akka.http.server.parsing.max-content-length = 512 M
4 | akka.http.client.parsing.max-content-length = 512 M
--------------------------------------------------------------------------------
/src/main/scala/io/findify/s3mock/response/CreateBucket.scala:
--------------------------------------------------------------------------------
package io.findify.s3mock.response

/**
  * Created by shutty on 8/10/16.
  */
/** Result of a bucket-creation call: carries just the created bucket's name. */
case class CreateBucket(name:String)
7 |
--------------------------------------------------------------------------------
/src/main/scala/io/findify/s3mock/error/InternalErrorException.scala:
--------------------------------------------------------------------------------
package io.findify.s3mock.error


/** Wraps an unexpected failure so it can be rendered as an S3-style error response.
  *
  * Fix: the XML literal was lost from this file (tags stripped); reconstructed
  * from the surviving text and the S3 error-response schema.
  *
  * @param throwable the underlying cause, surfaced in the XML Message element
  */
case class InternalErrorException(throwable: Throwable) extends Exception(s"Internal server error", throwable) {
  /** S3 error document returned with HTTP 500. */
  def toXML =
    <Error>
      <Code>InternalError</Code>
      <Message>{throwable.getMessage}</Message>
    </Error>
}
11 |
--------------------------------------------------------------------------------
/src/main/scala/io/findify/s3mock/request/DeleteObjectsRequest.scala:
--------------------------------------------------------------------------------
package io.findify.s3mock.request

/**
  * Created by shutty on 3/13/17.
  */

/** Parsed body of an S3 multi-object delete request: the keys to remove. */
case class DeleteObjectsRequest(objects: Seq[String])

object DeleteObjectsRequest {
  /** Extract the text of every Object/Key pair from the request document. */
  def apply(node: scala.xml.Node) = {
    val keys = (node \ "Object").map(objectNode => (objectNode \ "Key").text)
    new DeleteObjectsRequest(keys)
  }
}
15 |
--------------------------------------------------------------------------------
/src/main/scala/io/findify/s3mock/Main.scala:
--------------------------------------------------------------------------------
package io.findify.s3mock

import better.files.File
import io.findify.s3mock.provider.FileProvider

/**
  * Created by shutty on 8/9/16.
  */
// Standalone entry point: boots an S3 mock on port 8001 backed by a fresh
// temporary directory (prefix "s3mock") on the local filesystem.
object Main {
  def main(args: Array[String]): Unit = {
    val server = new S3Mock(8001, new FileProvider(File.newTemporaryDirectory(prefix = "s3mock").pathAsString))
    // NOTE(review): assumes `start` is non-blocking and the server keeps the
    // JVM alive on its own threads — confirm in S3Mock
    server.start
  }
}
15 |
--------------------------------------------------------------------------------
/src/main/scala/io/findify/s3mock/error/NoSuchBucketException.scala:
--------------------------------------------------------------------------------
package io.findify.s3mock.error

/**
  * Created by shutty on 8/11/16.
  */
/** Raised when a request references a bucket that does not exist.
  *
  * Fix: the XML literal was lost from this file (tags stripped); reconstructed
  * from the surviving text and the S3 "NoSuchBucket" error schema.
  *
  * @param bucket the missing bucket name
  */
case class NoSuchBucketException(bucket:String) extends Exception(s"bucket does not exist: s3://$bucket") {
  /** S3 "NoSuchBucket" error document, returned with HTTP 404. */
  def toXML =
    <Error>
      <Code>NoSuchBucket</Code>
      <Message>The specified bucket does not exist</Message>
      <Resource>{bucket}</Resource>
    </Error>
}
13 |
--------------------------------------------------------------------------------
/src/main/scala/io/findify/s3mock/request/CreateBucketConfiguration.scala:
--------------------------------------------------------------------------------
package io.findify.s3mock.request

/**
  * Created by shutty on 8/10/16.
  */
/** Optional region constraint carried in the PUT-bucket request body. */
case class CreateBucketConfiguration(locationConstraint:Option[String])

object CreateBucketConfiguration {
  /** Parse a CreateBucketConfiguration document.
    *
    * Bug fix: the original `xml.find(_.label == "locationConstraint")` only
    * inspected the root node itself (a Node is a NodeSeq of one) and used the
    * wrong-case label — the S3 API child element is `LocationConstraint` —
    * so the region was always None. Select the child element instead.
    */
  def apply(xml:scala.xml.Node) = {
    val region = (xml \ "LocationConstraint").headOption.map(_.text)
    new CreateBucketConfiguration(region)
  }
}
14 |
--------------------------------------------------------------------------------
/src/main/scala/io/findify/s3mock/response/CopyObjectResult.scala:
--------------------------------------------------------------------------------
package io.findify.s3mock.response

import akka.http.scaladsl.model.DateTime


/**
  * Created by shutty on 12/3/16.
  */
/** Response body for S3 CopyObject: last-modified timestamp and the copy's ETag.
  *
  * Fix: the XML literal was lost from this file (tags stripped); reconstructed
  * from the surviving text and the S3 CopyObject response schema.
  */
case class CopyObjectResult(lastModified: DateTime, etag: String) {
  // DateTime.toString carries no zone designator, hence the literal trailing "Z";
  // S3 returns the ETag wrapped in double quotes.
  def toXML =
    <CopyObjectResult>
      <LastModified>{lastModified.toString}Z</LastModified>
      <ETag>"{etag}"</ETag>
    </CopyObjectResult>
}
16 |
--------------------------------------------------------------------------------
/project/plugins.sbt:
--------------------------------------------------------------------------------
1 | logLevel := Level.Warn
2 |
3 | addSbtPlugin("com.timushev.sbt" % "sbt-updates" % "0.3.0")
4 | addSbtPlugin("net.virtual-void" % "sbt-dependency-graph" % "0.8.2")
5 | addSbtPlugin("org.xerial.sbt" % "sbt-sonatype" % "1.1")
6 | addSbtPlugin("com.jsuereth" % "sbt-pgp" % "1.0.1")
7 | addSbtPlugin("com.github.gseitz" % "sbt-release" % "1.0.4")
8 | addSbtPlugin("se.marcuslonnberg" % "sbt-docker" % "1.4.1")
9 | addSbtPlugin("com.eed3si9n" % "sbt-assembly" % "0.14.5")
10 |
--------------------------------------------------------------------------------
/src/main/scala/io/findify/s3mock/error/NoSuchKeyException.scala:
--------------------------------------------------------------------------------
package io.findify.s3mock.error

/**
  * Created by shutty on 8/11/16.
  */
/** Raised when a key lookup misses inside an existing bucket.
  *
  * Fix: the XML literal was lost from this file (tags stripped); reconstructed
  * from the surviving text and the S3 "NoSuchKey" error schema.
  *
  * @param bucket the bucket that was searched
  * @param key the missing object key
  */
case class NoSuchKeyException(bucket:String, key:String) extends Exception(s"key does not exist: s3://$bucket/$key") {
  /** S3 "NoSuchKey" error document, returned with HTTP 404. */
  def toXML =
    <Error>
      <Code>NoSuchKey</Code>
      <Message>The resource you requested does not exist</Message>
      <Resource>/{bucket}/{key}</Resource>
    </Error>
}
14 |
--------------------------------------------------------------------------------
/src/test/scala/io/findify/s3mock/awscli/AWSCliTest.scala:
--------------------------------------------------------------------------------
package io.findify.s3mock.awscli

import akka.actor.ActorSystem
import akka.http.scaladsl.Http
import akka.stream.ActorMaterializer
import io.findify.s3mock.S3MockTest

/**
  * Created by shutty on 8/28/16.
  */
// Base trait for tests that talk to the mock the way the AWS CLI does:
// raw HTTP requests instead of the AWS SDK client.
trait AWSCliTest extends S3MockTest {
  // dedicated actor system/materializer for the raw-HTTP client used by subclasses
  implicit val system = ActorSystem.create("awscli")
  implicit val mat = ActorMaterializer()
  val http = Http(system)
}
16 |
--------------------------------------------------------------------------------
/src/main/scala/io/findify/s3mock/provider/metadata/MetadataStore.scala:
--------------------------------------------------------------------------------
package io.findify.s3mock.provider.metadata

import com.amazonaws.services.s3.model.ObjectMetadata

/**
  * Created by shutty on 3/13/17.
  */
/** Storage abstraction for per-object S3 metadata, addressed by (bucket, key). */
trait MetadataStore {
  /** Store (or overwrite) the metadata for one object. */
  def put(bucket: String, key: String, meta: ObjectMetadata): Unit
  /** Look up metadata; None when the bucket or key is unknown. */
  def get(bucket: String, key: String): Option[ObjectMetadata]
  /** Drop the metadata of a single object. */
  def delete(bucket: String, key: String): Unit
  /** Drop all metadata belonging to a bucket. */
  def remove(bucket: String): Unit
}
14 |
--------------------------------------------------------------------------------
/src/test/resources/logback-test.xml:
--------------------------------------------------------------------------------
<configuration>

    <appender name="STDOUT" class="ch.qos.logback.core.ConsoleAppender">
        <encoder>
            <pattern>%d{HH:mm:ss.SSS} %-5level %logger{36} - %msg%n</pattern>
        </encoder>
    </appender>

    <!-- NOTE(review): surrounding markup was stripped from this dump; skeleton
         reconstructed around the surviving pattern line — confirm root level -->
    <root level="INFO">
        <appender-ref ref="STDOUT" />
    </root>

</configuration>
--------------------------------------------------------------------------------
/src/test/scala/io/findify/s3mock/TypesafeConfigTest.scala:
--------------------------------------------------------------------------------
package io.findify.s3mock

import com.typesafe.config.ConfigFactory

/*
a repro for
https://github.com/findify/s3mock/issues/56
Not yet fixed :(
*/

class TypesafeConfigTest extends S3MockTest {
  override def behaviour(fixture: => Fixture) = {

    // "ignore" keeps the repro in the suite without failing the build
    it should "load typesafe config files" ignore {
      // NOTE(review): ConfigFactory.parseResources expects a classpath-relative
      // name; the leading "/" likely makes the lookup return an empty config —
      // confirm whether that is part of the repro or a separate bug
      val conf = ConfigFactory.parseResources("/test.conf")
      conf.getString("foo.testConfig") shouldBe "test"
    }
  }
}
20 |
--------------------------------------------------------------------------------
/src/main/scala/io/findify/s3mock/response/InitiateMultipartUploadResult.scala:
--------------------------------------------------------------------------------
package io.findify.s3mock.response

import java.net.URLDecoder

/**
  * Created by shutty on 8/10/16.
  */
/** Response for S3 InitiateMultipartUpload: echoes bucket/key and hands out the upload id.
  *
  * Fix: the XML literal was lost from this file (tags stripped); reconstructed
  * from the surviving text and the S3 InitiateMultipartUpload response schema.
  */
case class InitiateMultipartUploadResult(bucket:String, key:String, uploadId:String) {
  def toXML =
    <InitiateMultipartUploadResult>
      <Bucket>{bucket}</Bucket>
      <Key>{/* the key is the still URLencoded path */URLDecoder.decode(key, "UTF-8") }</Key>
      <UploadId>{uploadId}</UploadId>
    </InitiateMultipartUploadResult>
}
16 |
--------------------------------------------------------------------------------
/src/main/scala/io/findify/s3mock/response/CompleteMultipartUploadResult.scala:
--------------------------------------------------------------------------------
package io.findify.s3mock.response

import java.net.URLDecoder

/**
  * Created by shutty on 8/10/16.
  */
/** Response for S3 CompleteMultipartUpload: final location, bucket, key and ETag.
  *
  * Fix: the XML literal was lost from this file (tags stripped); reconstructed
  * from the surviving text and the S3 CompleteMultipartUpload response schema.
  */
case class CompleteMultipartUploadResult(bucket:String, key:String, etag:String) {
  def toXML =
    <CompleteMultipartUploadResult>
      <Location>http://s3.amazonaws.com/{bucket}/{key}</Location>
      <Bucket>{bucket}</Bucket>
      <Key>{/* the key is the still URLencoded path */URLDecoder.decode(key, "UTF-8") }</Key>
      <ETag>"{etag}"</ETag>
    </CompleteMultipartUploadResult>
}
17 |
--------------------------------------------------------------------------------
/src/main/scala/io/findify/s3mock/response/DeleteObjectsResponse.scala:
--------------------------------------------------------------------------------
package io.findify.s3mock.response

/**
  * Created by shutty on 3/13/17.
  */
/** Response body for S3 multi-object delete: keys removed plus keys that failed.
  *
  * Fix: the XML literal was lost from this file (tags stripped); reconstructed
  * from the surviving text ("InternalError", "Cannot delete") and the S3
  * DeleteObjects response schema (one Error element per failed key).
  */
case class DeleteObjectsResponse(deleted: Seq[String], error: Seq[String]) {
  def toXML = {
    <DeleteResult>
      { deleted.map(d => <Deleted><Key>{d}</Key></Deleted>) }
      { if (error.nonEmpty) {
          error.map(e =>
            <Error>
              <Key>{e}</Key>
              <Code>InternalError</Code>
              <Message>Cannot delete</Message>
            </Error>)
        }
      }
    </DeleteResult>
  }
}
22 |
--------------------------------------------------------------------------------
/src/test/scala/io/findify/s3mock/ListBucketEmptyWorkdirTest.scala:
--------------------------------------------------------------------------------
package io.findify.s3mock

import scala.collection.JavaConverters._

/**
  * Created by shutty on 8/30/16.
  */
/** Listing a bucket without a prefix must return every stored key. */
class ListBucketEmptyWorkdirTest extends S3MockTest {
  override def behaviour(fixture: => Fixture) = {
    val client = fixture.client
    it should "list bucket with empty prefix" in {
      client.createBucket("list")
      client.putObject("list", "foo1", "xxx")
      client.putObject("list", "foo2", "xxx")
      val summaries = client.listObjects("list").getObjectSummaries.asScala.toList
      summaries.map(_.getKey).forall(_.startsWith("foo")) shouldBe true
    }
  }
}
20 |
--------------------------------------------------------------------------------
/src/main/scala/io/findify/s3mock/route/ListBuckets.scala:
--------------------------------------------------------------------------------
package io.findify.s3mock.route

import akka.http.scaladsl.model._
import akka.http.scaladsl.server.Directives._
import com.typesafe.scalalogging.LazyLogging
import io.findify.s3mock.provider.Provider

/**
  * Created by shutty on 8/19/16.
  */
/** GET / — render every bucket known to the provider as an XML listing. */
case class ListBuckets(implicit provider:Provider) extends LazyLogging {
  def route() = get {
    complete {
      logger.debug("listing all buckets")
      val body = provider.listBuckets.toXML.toString
      HttpResponse(
        status = StatusCodes.OK,
        entity = HttpEntity(ContentType(MediaTypes.`application/xml`, HttpCharsets.`UTF-8`), body)
      )
    }
  }
}
22 |
--------------------------------------------------------------------------------
/src/test/scala/io/findify/s3mock/PutBucketTest.scala:
--------------------------------------------------------------------------------
package io.findify.s3mock
import scala.collection.JavaConverters._
/**
  * Created by shutty on 8/10/16.
  */
/** Bucket-creation happy paths, with and without an explicit region.
  *
  * Fix: replaced the deprecated implicit `scala.collection.JavaConversions`
  * with explicit `JavaConverters`/`asScala` calls (same behavior, no hidden
  * implicit conversions).
  */
class PutBucketTest extends S3MockTest {
  override def behaviour(fixture: => Fixture) = {
    val s3 = fixture.client
    it should "create buckets" in {
      s3.listBuckets().isEmpty shouldBe true
      s3.createBucket("hello").getName shouldBe "hello"
      s3.listBuckets().asScala.exists(_.getName == "hello") shouldBe true
    }
    it should "create buckets with region" in {
      s3.createBucket("hello2", "us-west-1")
      s3.listBuckets().asScala.exists(_.getName == "hello2") shouldBe true
    }
  }
}
20 |
--------------------------------------------------------------------------------
/src/main/scala/io/findify/s3mock/request/CompleteMultipartUploadPart.scala:
--------------------------------------------------------------------------------
package io.findify.s3mock.request

/**
  * Created by shutty on 8/10/16.
  */
/** One part entry of a CompleteMultipartUpload request. */
case class CompleteMultipartUploadPart(partNumber:Int, etag:String)
/** Full CompleteMultipartUpload request: the ordered part list. */
case class CompleteMultipartUpload(parts:List[CompleteMultipartUploadPart])

object CompleteMultipartUploadPart {
  /** Parse a single Part element into its number and ETag. */
  def apply(node: scala.xml.Node) = {
    val number = (node \ "PartNumber").text.toInt
    val tag = (node \ "ETag").text
    new CompleteMultipartUploadPart(partNumber = number, etag = tag)
  }
}

object CompleteMultipartUpload {
  /** Collect every Part child of the request document. */
  def apply(node:scala.xml.Node) =
    new CompleteMultipartUpload(
      parts = (node \ "Part").map(CompleteMultipartUploadPart(_)).toList
    )
}
--------------------------------------------------------------------------------
/src/test/scala/io/findify/s3mock/awscli/PutBucketTest.scala:
--------------------------------------------------------------------------------
package io.findify.s3mock.awscli

import akka.http.scaladsl.model.{HttpMethods, HttpRequest}
import scala.concurrent.Await
import scala.concurrent.duration._
import scala.collection.JavaConverters._

/**
  * Created by shutty on 8/28/16.
  */
/** Emulates `aws s3 mb`: a bare HTTP PUT on the bucket path must create it.
  *
  * Fix: replaced the deprecated implicit `JavaConversions` with an explicit
  * `asScala`, and dropped the unused `response` local (Await already forces
  * the request to complete).
  */
class PutBucketTest extends AWSCliTest {
  override def behaviour(fixture: => Fixture) = {
    val s3 = fixture.client
    val port = fixture.port
    it should "create bucket with AWS CLI" in {
      Await.result(http.singleRequest(HttpRequest(method = HttpMethods.PUT, uri = s"http://127.0.0.1:$port/awscli")), 10.seconds)
      s3.listBuckets().asScala.exists(_.getName == "awscli") shouldBe true
    }
  }
}
21 |
--------------------------------------------------------------------------------
/src/main/scala/io/findify/s3mock/response/ListAllMyBuckets.scala:
--------------------------------------------------------------------------------
package io.findify.s3mock.response

import akka.http.scaladsl.model.DateTime


/**
  * Created by shutty on 8/9/16.
  */
/** A single bucket entry in the ListAllMyBuckets response. */
case class Bucket(name:String, creationDate:DateTime)
/** Response document for GET / : the owner plus every existing bucket.
  *
  * Fix: the XML literal was lost from this file (tags stripped); reconstructed
  * from the surviving text and the S3 ListBuckets response schema.
  */
case class ListAllMyBuckets(ownerName:String, ownerUUID:String, buckets:List[Bucket]) {
  def toXML =
    <ListAllMyBucketsResult>
      <Owner>
        <ID>{ownerUUID}</ID>
        <DisplayName>{ownerName}</DisplayName>
      </Owner>
      <Buckets>
        {
          buckets.map(bucket =>
            <Bucket>
              <Name>{bucket.name}</Name>
              <CreationDate>{bucket.creationDate.toString}Z</CreationDate>
            </Bucket>)
        }
      </Buckets>
    </ListAllMyBucketsResult>
}
28 |
--------------------------------------------------------------------------------
/src/test/scala/io/findify/s3mock/ListBucketsTest.scala:
--------------------------------------------------------------------------------
package io.findify.s3mock
/**
  * Created by shutty on 8/9/16.
  */
import akka.http.scaladsl.Http
import akka.http.scaladsl.model._

import scala.collection.JavaConverters._
import scala.concurrent.Await
import scala.concurrent.duration._
class ListBucketsTest extends S3MockTest {
  override def behaviour(fixture: => Fixture) = {
    val s3 = fixture.client
    it should "list empty buckets" in {
      // a fresh fixture starts with no buckets at all
      s3.listBuckets().isEmpty shouldBe true
    }

    it should "have correct xml content-type for bucket list" in {
      implicit val sys = fixture.system
      implicit val mat = fixture.mat
      // raw HTTP GET / — the SDK client would hide the content-type header
      val response = Await.result(Http().singleRequest(HttpRequest(
        method = HttpMethods.GET,
        uri = Uri(s"http://localhost:${fixture.port}/")
      )), 5.seconds)
      response.entity.contentType shouldBe ContentType(MediaTypes.`application/xml`, HttpCharsets.`UTF-8`)
    }
  }
}
29 |
--------------------------------------------------------------------------------
/src/test/scala/io/findify/s3mock/alpakka/MultipartUploadTest.scala:
--------------------------------------------------------------------------------
package io.findify.s3mock.alpakka

import akka.stream.scaladsl.Source
import akka.util.ByteString
import io.findify.s3mock.S3MockTest

import scala.concurrent.Await
import scala.concurrent.duration._

/**
  * Created by shutty on 5/19/17.
  */
// Exercises the multipart-upload path through alpakka's streaming S3 client.
class MultipartUploadTest extends S3MockTest {

  override def behaviour(fixture: => Fixture) = {
    val s3 = fixture.client
    implicit val sys = fixture.system
    implicit val mat = fixture.mat


    it should "upload multipart files" in {
      s3.createBucket("alpakka1")

      // a single-chunk source still goes through the multipart protocol
      val result = Await.result(Source.single(ByteString("testcontent1"))
        .runWith(fixture.alpakka.multipartUpload("alpakka1", "test1")), 5.seconds)

      result.bucket shouldBe "alpakka1"
      result.key shouldBe "test1"

      // verify via the plain SDK client that the bytes landed intact
      getContent(s3.getObject("alpakka1", "test1")) shouldBe "testcontent1"
    }


  }
}
36 |
--------------------------------------------------------------------------------
/src/test/scala/io/findify/s3mock/alpakka/AlpakkaExample.scala:
--------------------------------------------------------------------------------
package io.findify.s3mock.alpakka

import akka.actor.ActorSystem
import akka.stream.ActorMaterializer
import akka.stream.alpakka.s3.scaladsl.S3Client
import akka.stream.scaladsl.Sink
import akka.util.ByteString
import com.typesafe.config.ConfigFactory

import scala.collection.JavaConverters._

// Example of pointing alpakka's S3 connector at the mock via proxy settings.
// NOTE(review): this is a `class` carrying a `main`, so it is not runnable as
// a JVM entry point, and `contents` (a Future) is never awaited — presumably
// illustrative only; confirm before relying on it.
class AlpakkaExample {
  def main(args: Array[String]): Unit = {
    // route all S3 traffic through localhost:8001, plain HTTP, path-style URLs
    val config = ConfigFactory.parseMap(Map(
      "akka.stream.alpakka.s3.proxy.host" -> "localhost",
      "akka.stream.alpakka.s3.proxy.port" -> 8001,
      "akka.stream.alpakka.s3.proxy.secure" -> false,
      "akka.stream.alpakka.s3.path-style-access" -> true
    ).asJava)
    implicit val system = ActorSystem.create("test", config)
    implicit val mat = ActorMaterializer()
    import system.dispatcher
    val s3a = S3Client()
    // stream the object, concatenate the chunks, decode as UTF-8
    val contents = s3a.download("bucket", "key").runWith(Sink.reduce[ByteString](_ ++ _)).map(_.utf8String)
  }
}
27 |
--------------------------------------------------------------------------------
/src/main/scala/io/findify/s3mock/route/CreateBucket.scala:
--------------------------------------------------------------------------------
package io.findify.s3mock.route

import akka.http.scaladsl.model.headers.Location
import akka.http.scaladsl.model.{HttpResponse, StatusCodes}
import akka.http.scaladsl.server.Directives._
import com.typesafe.scalalogging.LazyLogging
import io.findify.s3mock.provider.Provider
import io.findify.s3mock.request.CreateBucketConfiguration

/**
  * Created by shutty on 8/19/16.
  */
/** PUT /&lt;bucket&gt; — create a bucket, optionally with a region constraint in the body. */
case class CreateBucket(implicit provider:Provider) extends LazyLogging {
  def route(bucket:String) = put {
    entity(as[String]) { xml =>
      complete {
        logger.info(s"PUT bucket $bucket")
        // empty body = no location constraint; otherwise parse the XML payload
        val conf = if (xml.isEmpty) new CreateBucketConfiguration(None) else CreateBucketConfiguration(scala.xml.XML.loadString(xml).head)
        val result = provider.createBucket(bucket, conf)
        // echo the bucket path back in a Location header, as S3 does
        HttpResponse(StatusCodes.OK).withHeaders(Location(s"/${result.name}"))
      }
    } ~ {
      // fallback branch when the entity cannot be read as a String
      complete {
        "ok"
      }
    }
  }
}
29 |
--------------------------------------------------------------------------------
/LICENCE.md:
--------------------------------------------------------------------------------
1 | # The MIT License (MIT)
2 |
3 | Copyright (c) 2016 Findify AB
4 |
5 | Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
6 |
7 | The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
8 |
9 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
10 |
--------------------------------------------------------------------------------
/src/main/scala/io/findify/s3mock/provider/metadata/InMemoryMetadataStore.scala:
--------------------------------------------------------------------------------
package io.findify.s3mock.provider.metadata

import com.amazonaws.services.s3.model.ObjectMetadata

import scala.collection.concurrent.TrieMap
import scala.collection.mutable

/** MetadataStore kept entirely in memory: a concurrent map of bucket name
  * to a per-bucket concurrent map of key to metadata. */
class InMemoryMetadataStore extends MetadataStore {

  // bucket name -> (object key -> metadata)
  private val bucketMetadata = new TrieMap[String, mutable.Map[String, ObjectMetadata]]

  /** Store metadata, lazily creating the per-bucket map on first use. */
  override def put(bucket: String, key: String, meta: ObjectMetadata): Unit = {
    bucketMetadata
      .getOrElseUpdate(bucket, new TrieMap[String, ObjectMetadata]())
      .put(key, meta)
  }

  /** None when either the bucket or the key is unknown. */
  override def get(bucket: String, key: String): Option[ObjectMetadata] =
    for {
      keys <- bucketMetadata.get(bucket)
      meta <- keys.get(key)
    } yield meta

  /** Remove one key's metadata; no-op when the bucket or key is absent. */
  override def delete(bucket: String, key: String): Unit =
    bucketMetadata.get(bucket).foreach(_.remove(key))

  /** Drop the whole bucket's metadata map. */
  override def remove(bucket: String): Unit = bucketMetadata.remove(bucket)
}
28 |
--------------------------------------------------------------------------------
/src/test/scala/io/findify/s3mock/MapMetadataStoreTest.scala:
--------------------------------------------------------------------------------
package io.findify.s3mock

import java.util

import com.amazonaws.services.s3.model.ObjectMetadata
import io.findify.s3mock.provider.metadata.{InMemoryMetadataStore, MapMetadataStore, MetadataStore}
import org.scalatest.{BeforeAndAfterAll, FlatSpec, Matchers}
/**
  * Created by shutty on 3/13/17.
  */
// Runs the same shared behaviour against both MetadataStore implementations.
class MapMetadataStoreTest extends FlatSpec with Matchers with BeforeAndAfterAll {

  for (metadataStore <- List((new MapMetadataStore("/tmp/s3"), "MapMetadataStore"),
    (new InMemoryMetadataStore, "InMemoryMetadataStore"))) {
    metadataStore._2 should behave like mdStoreBehaviour(metadataStore._1)
  }

  // shared behaviour: put followed by get must round-trip user metadata
  def mdStoreBehaviour(mm: => MetadataStore) = {
    it should "save md to a fresh store" in {
      val meta = new ObjectMetadata()
      val user = new util.HashMap[String, String]()
      user.put("foo", "bar")
      meta.setUserMetadata(user)
      mm.put("foo", "bar", meta)
      val m2 = mm.get("foo", "bar").get
      m2.getUserMetadata shouldBe meta.getUserMetadata
    }
  }
}
30 |
--------------------------------------------------------------------------------
/src/main/scala/io/findify/s3mock/route/DeleteObject.scala:
--------------------------------------------------------------------------------
package io.findify.s3mock.route

import akka.http.scaladsl.model.{HttpResponse, StatusCodes}
import akka.http.scaladsl.server.Directives._
import com.typesafe.scalalogging.LazyLogging
import io.findify.s3mock.error.NoSuchKeyException
import io.findify.s3mock.provider.Provider

import scala.util.{Failure, Success, Try}

/**
  * Created by shutty on 8/20/16.
  */
/** DELETE /&lt;bucket&gt;/&lt;path&gt; — remove a single object. */
case class DeleteObject(implicit provider: Provider) extends LazyLogging {
  def route(bucket:String, path:String) = delete {
    complete {
      Try(provider.deleteObject(bucket, path)) match {
        case Success(_) =>
          logger.info(s"deleted object $bucket/$path")
          // S3 answers 204 No Content on a successful delete
          HttpResponse(StatusCodes.NoContent)
        case Failure(NoSuchKeyException(_, _)) =>
          logger.info(s"cannot delete object $bucket/$path: no such key")
          HttpResponse(StatusCodes.NotFound)
        case Failure(ex) =>
          // any other provider failure is also mapped to 404 (not 500) here
          logger.error(s"cannot delete object $bucket/$path", ex)
          HttpResponse(StatusCodes.NotFound)
      }

    }
  }
}
32 |
--------------------------------------------------------------------------------
/src/test/java/io/findify/s3mock/example/JavaExample.java:
--------------------------------------------------------------------------------
package io.findify.s3mock.example;

import com.amazonaws.auth.AWSStaticCredentialsProvider;
import com.amazonaws.auth.AnonymousAWSCredentials;
import com.amazonaws.client.builder.AwsClientBuilder;
import com.amazonaws.services.s3.AmazonS3;
import com.amazonaws.services.s3.AmazonS3Client;
import com.amazonaws.services.s3.AmazonS3ClientBuilder;
import io.findify.s3mock.S3Mock;

/**
 * Created by shutty on 8/12/16.
 */
// Minimal end-to-end example: start the mock on port 8001 with a filesystem
// backend at /tmp/s3, then talk to it through the standard AWS SDK client.
public class JavaExample {
    public static void main(String[] args) {
        S3Mock api = S3Mock.create(8001, "/tmp/s3");
        api.start();

        AmazonS3 client = AmazonS3ClientBuilder
                .standard()
                // path-style keeps the bucket in the URL path, not the hostname
                .withPathStyleAccessEnabled(true)
                .withEndpointConfiguration(new AwsClientBuilder.EndpointConfiguration("http://localhost:8001", "us-east-1"))
                .withCredentials(new AWSStaticCredentialsProvider(new AnonymousAWSCredentials()))
                .build();
        client.createBucket("testbucket");
        client.putObject("testbucket", "file/name", "contents");
    }
}
29 |
--------------------------------------------------------------------------------
/src/main/scala/io/findify/s3mock/route/DeleteBucket.scala:
--------------------------------------------------------------------------------
package io.findify.s3mock.route

import akka.http.scaladsl.model.{HttpResponse, StatusCodes}
import akka.http.scaladsl.server.Directives._
import com.typesafe.scalalogging.LazyLogging
import io.findify.s3mock.error.{InternalErrorException, NoSuchBucketException}
import io.findify.s3mock.provider.Provider

import scala.util.{Failure, Success, Try}

/**
  * Created by shutty on 8/19/16.
  */
/** DELETE /&lt;bucket&gt; — remove a bucket. */
case class DeleteBucket(implicit provider:Provider) extends LazyLogging {
  def route(bucket:String) = delete {
    complete {
      Try(provider.deleteBucket(bucket)) match {
        case Success(_) =>
          logger.debug(s"DELETE bucket $bucket: ok")
          // S3 answers 204 No Content on successful bucket deletion
          HttpResponse(StatusCodes.NoContent)
        case Failure(e: NoSuchBucketException) =>
          logger.error(s"DELETE bucket $bucket failed: no such bucket")
          HttpResponse(
            StatusCodes.NotFound,
            entity = e.toXML.toString()
          )
        case Failure(t) =>
          // unexpected provider failure: wrapped into an S3-style InternalError body
          HttpResponse(
            StatusCodes.InternalServerError,
            entity = InternalErrorException(t).toXML.toString()
          )
      }
    }
  }
}
36 |
--------------------------------------------------------------------------------
/src/test/scala/io/findify/s3mock/JavaExampleTest.scala:
--------------------------------------------------------------------------------
package io.findify.s3mock

import com.amazonaws.auth.BasicAWSCredentials
import com.amazonaws.services.s3.AmazonS3Client
import scala.collection.JavaConverters._
import scala.io.Source

/**
  * Created by shutty on 8/19/16.
  */
/** Round-trips objects with both anonymous and basic credentials.
  *
  * Fix: replaced the deprecated implicit `scala.collection.JavaConversions`
  * with an explicit `JavaConverters`/`asScala` call (same behavior).
  */
class JavaExampleTest extends S3MockTest {
  override def behaviour(fixture: => Fixture) = {
    val s3 = fixture.client
    val port = fixture.port
    it should "upload files with anonymous credentials" in {
      s3.createBucket("getput").getName shouldBe "getput"
      s3.listBuckets().asScala.exists(_.getName == "getput") shouldBe true
      s3.putObject("getput", "foo", "bar")
      val result = Source.fromInputStream(s3.getObject("getput", "foo").getObjectContent, "UTF-8").mkString
      result shouldBe "bar"
    }

    it should "upload files with basic credentials" in {
      // credentials are not validated by the mock, but exercise the signing path
      val s3b = new AmazonS3Client(new BasicAWSCredentials("foo", "bar"))
      s3b.setEndpoint(s"http://127.0.0.1:$port")
      s3b.putObject("getput", "foo2", "bar2")
      val result = Source.fromInputStream(s3b.getObject("getput", "foo2").getObjectContent, "UTF-8").mkString
      result shouldBe "bar2"
    }
  }
}
33 |
34 |
--------------------------------------------------------------------------------
/src/test/java/io/findify/s3mock/example/JavaBuilderExample.java:
--------------------------------------------------------------------------------
1 | package io.findify.s3mock.example;
2 |
3 | import com.amazonaws.auth.AWSStaticCredentialsProvider;
4 | import com.amazonaws.auth.AnonymousAWSCredentials;
5 | import com.amazonaws.client.builder.AwsClientBuilder;
6 | import com.amazonaws.services.s3.AmazonS3;
7 | import com.amazonaws.services.s3.AmazonS3Builder;
8 | import com.amazonaws.services.s3.AmazonS3Client;
9 | import com.amazonaws.services.s3.AmazonS3ClientBuilder;
10 | import io.findify.s3mock.S3Mock;
11 |
12 | /**
13 | * Created by shutty on 5/23/17.
14 | */
15 | public class JavaBuilderExample {
16 | public static void main(String[] args) {
17 | S3Mock api = new S3Mock.Builder().withPort(8001).withInMemoryBackend().build();
18 | api.start();
19 | AmazonS3 client = AmazonS3ClientBuilder
20 | .standard()
21 | .withPathStyleAccessEnabled(true)
22 | .withEndpointConfiguration(new AwsClientBuilder.EndpointConfiguration("http://localhost:8001", "us-east-1"))
23 | .withCredentials(new AWSStaticCredentialsProvider(new AnonymousAWSCredentials()))
24 | .build();
25 | client.createBucket("testbucket");
26 | client.putObject("testbucket", "file/name", "contents");
27 | }
28 |
29 | }
30 |
--------------------------------------------------------------------------------
/src/main/scala/io/findify/s3mock/response/ListBucket.scala:
--------------------------------------------------------------------------------
1 | package io.findify.s3mock.response
2 |
3 | import akka.http.scaladsl.model.DateTime
4 |
5 |
6 | /**
7 | * Created by shutty on 8/9/16.
8 | */
/** One object entry of a bucket listing: key, last-modified time, md5 checksum, size in bytes and storage class. */
case class Content(key:String, lastModified:DateTime, md5:String, size:Long, storageClass:String)
/** Model of the S3 "GET Bucket (List Objects)" response, rendered to XML by [[toXML]]. */
case class ListBucket(bucket:String, prefix: Option[String], delimiter: Option[String], commonPrefixes: List[String], contents:List[Content], isTruncated: Boolean) {
  // NOTE(review): the XML element tags of this literal appear to have been stripped
  // from this view of the file (only the interpolated expressions survive). The lines
  // below mirror the original one-for-one — confirm against version control before editing.
  def toXML =

    {bucket}
    { prefix.map(p => {p} ) }
    { delimiter.map(d => {d}) }
    { if (commonPrefixes.nonEmpty) {commonPrefixes.map(cp => {cp})} }
    {contents.length}
    1000
    {isTruncated}
    {contents.map(content =>

      {content.key}
      {content.lastModified.toString}Z
      {content.md5}
      {content.size}
      {content.storageClass}

    )}

}
31 |
--------------------------------------------------------------------------------
/src/test/scala/io/findify/s3mock/alpakka/ListBucketTest.scala:
--------------------------------------------------------------------------------
1 | package io.findify.s3mock.alpakka
2 |
3 | import akka.stream.scaladsl.Sink
4 | import akka.util.ByteString
5 | import io.findify.s3mock.S3MockTest
6 | import scala.concurrent.duration._
7 | import scala.concurrent.Await
8 |
class ListBucketTest extends S3MockTest {
  override def behaviour(fixture: => Fixture) = {
    val s3 = fixture.client
    implicit val sys = fixture.system
    implicit val mat = fixture.mat

    // Drain an alpakka listBucket source into an in-memory sequence.
    def listAll(bucket: String, prefix: Option[String]) =
      Await.result(fixture.alpakka.listBucket(bucket, prefix).runWith(Sink.seq), 5.second)

    it should "list objects via alpakka" in {
      s3.createBucket("alpakkalist")
      Seq("test1", "test2", "test3").foreach(key => s3.putObject("alpakkalist", key, "foobar"))
      val listed = listAll("alpakkalist", None)
      listed.size shouldBe 3
      listed.map(_.key) shouldBe Seq("test1", "test2", "test3")
    }

    it should "list objects with prefix" in {
      s3.createBucket("alpakkalist2")
      Seq("test1", "test2", "xtest3").foreach(key => s3.putObject("alpakkalist2", key, "foobar"))
      val listed = listAll("alpakkalist2", Some("test"))
      listed.size shouldBe 2
      listed.map(_.key) shouldBe Seq("test1", "test2")
    }
  }
}
37 |
--------------------------------------------------------------------------------
/src/test/scala/io/findify/s3mock/ChunkBufferTest.scala:
--------------------------------------------------------------------------------
1 | package io.findify.s3mock
2 |
3 | import akka.util.ByteString
4 | import org.scalatest.{FlatSpec, Matchers}
5 |
6 | /**
7 | * Created by shutty on 8/11/16.
8 | */
class ChunkBufferTest extends FlatSpec with Matchers {
  // 64-char hex chunk signature shared by all fixtures below.
  private val signature = "1234567890123456789012345678901234567890123456789012345678901234"

  "chunk buffer" should "detect header" in {
    val cb = new ChunkBuffer()
    cb.addChunk(ByteString(s"3;chunk-signature=$signature\r\nfoo\r\n"))
    cb.readHeader shouldBe Some(Header(3, 84, signature))
  }

  it should "fail on non-complete header" in {
    val cb = new ChunkBuffer()
    // Signature truncated to 60 chars and no trailing CRLF: header must not parse.
    cb.addChunk(ByteString("3;chunk-signature=" + signature.take(60)))
    cb.readHeader shouldBe None
  }

  it should "pull complete chunks" in {
    val cb = new ChunkBuffer()
    cb.addChunk(ByteString(s"3;chunk-signature=$signature\r\nfoo\r\n"))
    val header = cb.readHeader.get
    cb.pullChunk(header) shouldBe Some(ByteString("foo"))
  }

  it should "ignore incomplete chunks" in {
    val cb = new ChunkBuffer()
    // Payload cut short ("fo" of 3 declared bytes): nothing should be pulled.
    cb.addChunk(ByteString(s"3;chunk-signature=$signature\r\nfo"))
    val header = cb.readHeader.get
    cb.pullChunk(header) shouldBe None
  }
}
35 |
--------------------------------------------------------------------------------
/src/test/scala/io/findify/s3mock/S3ChunkedProtocolTest.scala:
--------------------------------------------------------------------------------
1 | package io.findify.s3mock
2 |
3 | import akka.actor.ActorSystem
4 | import akka.stream.ActorMaterializer
5 | import akka.stream.scaladsl.{Sink, Source}
6 | import akka.util.ByteString
7 | import org.scalatest.{FlatSpec, Matchers}
8 | import scala.concurrent.duration._
9 | import scala.concurrent.Await
10 |
11 | /**
12 | * Created by shutty on 8/11/16.
13 | */
class S3ChunkedProtocolTest extends FlatSpec with Matchers {
  implicit val system = ActorSystem.create("test")
  implicit val mat = ActorMaterializer()

  // Shared 64-char hex chunk signature.
  private val sig = "1234567890123456789012345678901234567890123456789012345678901234"

  // Feed the raw protocol string through the stage in 10-byte slices and
  // collect the decoded payload as one string.
  private def decode(raw: String): String = {
    val frames = raw.grouped(10).map(ByteString(_)).toList
    val decoded = Source(frames).via(new S3ChunkedProtocolStage).map(_.utf8String).runWith(Sink.seq)
    Await.result(decoded, 10.seconds).mkString
  }

  "s3 chunk protocol" should "work with simple ins" in {
    decode(s"3;chunk-signature=$sig\r\nfoo\r\n3;chunk-signature=$sig\r\nbar\r\n") shouldBe "foobar"
  }

  it should "not drop \\r\\n chars" in {
    decode(s"5;chunk-signature=$sig\r\nfoo\r\n\r\n3;chunk-signature=$sig\r\nbar\r\n") shouldBe "foo\r\nbar"
  }
}
29 |
--------------------------------------------------------------------------------
/src/test/scala/io/findify/s3mock/transfermanager/PutGetTest.scala:
--------------------------------------------------------------------------------
1 | package io.findify.s3mock.transfermanager
2 |
3 | import java.io.{ByteArrayInputStream, File, FileInputStream}
4 |
5 | import com.amazonaws.services.s3.model.ObjectMetadata
6 | import io.findify.s3mock.S3MockTest
7 |
8 | import scala.io.Source
9 |
10 | /**
11 | * Created by shutty on 11/23/16.
12 | */
class PutGetTest extends S3MockTest {
  override def behaviour(fixture: => Fixture) = {
    val s3 = fixture.client
    val tm = fixture.tm

    it should "put files with TransferManager" in {
      s3.createBucket("tm1")
      val payload = new ByteArrayInputStream("hello".getBytes)
      val outcome = tm.upload("tm1", "hello1", payload, new ObjectMetadata()).waitForUploadResult()
      outcome.getKey shouldBe "hello1"
    }

    it should "download files with TransferManager" in {
      val target = File.createTempFile("hello1", ".s3mock")
      tm.download("tm1", "hello1", target).waitForCompletion()
      Source.fromInputStream(new FileInputStream(target), "UTF-8").mkString shouldBe "hello"
    }

    it should "copy file with TransferManager" in {
      val copyResult = tm.copy("tm1", "hello1", "tm1", "hello2").waitForCopyResult()
      copyResult.getDestinationKey shouldBe "hello2"
      getContent(s3.getObject("tm1", "hello2")) shouldBe "hello"
    }
  }
}
42 |
--------------------------------------------------------------------------------
/src/main/scala/io/findify/s3mock/provider/Provider.scala:
--------------------------------------------------------------------------------
1 | package io.findify.s3mock.provider
2 |
3 | import com.amazonaws.services.s3.model.ObjectMetadata
4 | import io.findify.s3mock.provider.metadata.MetadataStore
5 | import io.findify.s3mock.request.{CompleteMultipartUpload, CreateBucketConfiguration}
6 | import io.findify.s3mock.response._
7 |
8 |
/** Payload returned by [[Provider.getObject]]: the raw object bytes plus the stored metadata, if any. */
case class GetObjectData(bytes: Array[Byte], metadata: Option[ObjectMetadata])
10 |
/**
  * Interface for provider implementations.
  *
  * A Provider is the storage backend behind the mock's HTTP routes; the project
  * ships file-system and in-memory implementations.
  */
trait Provider {
  /** Store for per-object metadata associated with this provider. */
  def metadataStore: MetadataStore
  /** List all buckets known to the backend. */
  def listBuckets:ListAllMyBuckets
  /** List a bucket's objects, optionally filtered by prefix/delimiter and capped at maxkeys. */
  def listBucket(bucket:String, prefix:Option[String], delimiter: Option[String], maxkeys: Option[Int]):ListBucket
  /** Create a bucket with the given configuration. */
  def createBucket(name:String, bucketConfig:CreateBucketConfiguration):CreateBucket
  /** Store object bytes and metadata under bucket/key. */
  def putObject(bucket:String, key:String, data:Array[Byte], metadata: ObjectMetadata):Unit
  /** Fetch an object's bytes and (optional) stored metadata. */
  def getObject(bucket:String, key:String): GetObjectData
  /** Begin a multipart upload for bucket/key. */
  def putObjectMultipartStart(bucket:String, key:String):InitiateMultipartUploadResult
  /** Store one numbered part of an in-flight multipart upload. */
  def putObjectMultipartPart(bucket:String, key:String, partNumber:Int, uploadId:String, data:Array[Byte]):Unit
  /** Assemble previously uploaded parts into the final object. */
  def putObjectMultipartComplete(bucket:String, key:String, uploadId:String, request:CompleteMultipartUpload):CompleteMultipartUploadResult
  /** Delete a single object. */
  def deleteObject(bucket:String, key:String):Unit
  /** Delete a bucket. */
  def deleteBucket(bucket:String):Unit
  /** Copy an object; when newMeta is given it replaces the source metadata. */
  def copyObject(sourceBucket: String, sourceKey: String, destBucket: String, destKey: String, newMeta: Option[ObjectMetadata] = None): CopyObjectResult
}
28 |
29 |
30 |
--------------------------------------------------------------------------------
/src/main/scala/io/findify/s3mock/route/PutObjectMultipartStart.scala:
--------------------------------------------------------------------------------
1 | package io.findify.s3mock.route
2 |
3 | import java.nio.charset.StandardCharsets
4 |
5 | import akka.http.scaladsl.model._
6 | import akka.http.scaladsl.server.Directives._
7 | import com.typesafe.scalalogging.LazyLogging
8 | import io.findify.s3mock.error.{InternalErrorException, NoSuchBucketException}
9 | import io.findify.s3mock.provider.Provider
10 |
11 | import scala.util.{Failure, Success, Try}
12 |
13 | /**
14 | * Created by shutty on 8/20/16.
15 | */
case class PutObjectMultipartStart(implicit provider:Provider) extends LazyLogging {
  /** POST /&lt;bucket&gt;/&lt;key&gt;?uploads — initiate a multipart upload.
    *
    * Returns the provider's InitiateMultipartUploadResult as XML on success,
    * 404 when the bucket is missing, 500 on any other failure.
    */
  def route(bucket:String, path:String) = post {
    parameter('uploads) { mp =>
      complete {
        logger.info(s"multipart upload start to $bucket/$path")
        Try(provider.putObjectMultipartStart(bucket, path)) match {
          case Success(result) =>
            // NOTE(review): the XML body is served as application/octet-stream here,
            // while sibling routes use application/xml — confirm whether this is deliberate.
            HttpResponse(
              StatusCodes.OK,
              entity = HttpEntity(
                ContentTypes.`application/octet-stream`, result.toXML.toString().getBytes(StandardCharsets.UTF_8)
              )
            )
          case Failure(e: NoSuchBucketException) =>
            // Fix: failures previously produced responses without any log line,
            // unlike the other route classes; log before answering.
            logger.error(s"multipart upload start to $bucket/$path failed: no such bucket")
            HttpResponse(
              StatusCodes.NotFound,
              entity = e.toXML.toString()
            )
          case Failure(t) =>
            logger.error(s"multipart upload start to $bucket/$path failed", t)
            HttpResponse(
              StatusCodes.InternalServerError,
              entity = InternalErrorException(t).toXML.toString()
            )
        }
      }
    }
  }
}
44 |
--------------------------------------------------------------------------------
/src/test/scala/io/findify/s3mock/alpakka/GetObjectTest.scala:
--------------------------------------------------------------------------------
1 | package io.findify.s3mock.alpakka
2 |
3 | import akka.actor.ActorSystem
4 | import akka.http.scaladsl.model.headers.ByteRange
5 | import akka.stream.ActorMaterializer
6 | import akka.stream.alpakka.s3.auth.AWSCredentials
7 | import akka.stream.alpakka.s3.scaladsl.S3Client
8 | import akka.stream.scaladsl.Sink
9 | import akka.util.ByteString
10 | import com.typesafe.config.ConfigFactory
11 | import io.findify.s3mock.S3MockTest
12 |
13 | import scala.concurrent.duration._
14 | import scala.collection.JavaConverters._
15 | import scala.concurrent.Await
16 |
17 | /**
18 | * Created by shutty on 5/19/17.
19 | */
class GetObjectTest extends S3MockTest {

  override def behaviour(fixture: => Fixture) = {
    val s3 = fixture.client
    implicit val sys = fixture.system
    implicit val mat = fixture.mat

    // Concatenate streamed chunks and decode them as UTF-8.
    def slurp(chunks: Seq[ByteString]): String = chunks.fold(ByteString(""))(_ ++ _).utf8String

    it should "get objects via alpakka" in {
      s3.createBucket("alpakka1")
      s3.putObject("alpakka1", "test1", "foobar")
      val chunks = Await.result(fixture.alpakka.download("alpakka1", "test1").runWith(Sink.seq), 5.second)
      slurp(chunks) shouldBe "foobar"
    }

    it should "get by range" in {
      s3.createBucket("alpakka2")
      s3.putObject("alpakka2", "test2", "foobar")
      val chunks = Await.result(fixture.alpakka.download("alpakka2", "test2", ByteRange(1, 4)).runWith(Sink.seq), 5.second)
      slurp(chunks) shouldBe "ooba"
    }
  }
}
45 |
--------------------------------------------------------------------------------
/src/main/scala/io/findify/s3mock/route/ListBucket.scala:
--------------------------------------------------------------------------------
1 | package io.findify.s3mock.route
2 |
3 | import akka.http.scaladsl.model._
4 | import akka.http.scaladsl.server.Directives._
5 | import com.typesafe.scalalogging.LazyLogging
6 | import io.findify.s3mock.error.{InternalErrorException, NoSuchBucketException}
7 | import io.findify.s3mock.provider.Provider
8 |
9 | import scala.util.{Failure, Success, Try}
10 |
11 | /**
12 | * Created by shutty on 8/19/16.
13 | */
case class ListBucket(implicit provider:Provider) extends LazyLogging {
  // All three outcomes of this route serve XML with the same content type.
  private val xmlType = ContentType(MediaTypes.`application/xml`, HttpCharsets.`UTF-8`)

  private def xmlResponse(status: StatusCode, body: String): HttpResponse =
    HttpResponse(status, entity = HttpEntity(xmlType, body))

  /** GET /&lt;bucket&gt;: list a bucket's contents, honouring prefix/delimiter/max-keys. */
  def route(bucket:String) = get {
    parameter('prefix?, 'delimiter?, Symbol("max-keys")?) { (prefix, delimiter, maxkeys) =>
      complete {
        logger.info(s"listing bucket $bucket with prefix=$prefix, delimiter=$delimiter")
        Try(provider.listBucket(bucket, prefix, delimiter, maxkeys.map(_.toInt))) match {
          case Success(listing) =>
            xmlResponse(StatusCodes.OK, listing.toXML.toString)
          case Failure(missing: NoSuchBucketException) =>
            xmlResponse(StatusCodes.NotFound, missing.toXML.toString)
          case Failure(other) =>
            xmlResponse(StatusCodes.InternalServerError, InternalErrorException(other).toXML.toString)
        }
      }
    }
  }
}
39 |
--------------------------------------------------------------------------------
/src/main/scala/io/findify/s3mock/route/DeleteObjects.scala:
--------------------------------------------------------------------------------
1 | package io.findify.s3mock.route
2 |
3 | import akka.http.scaladsl.model.{HttpResponse, StatusCodes}
4 | import akka.http.scaladsl.server.Directives.{path, _}
5 | import com.typesafe.scalalogging.LazyLogging
6 | import io.findify.s3mock.error.NoSuchKeyException
7 | import io.findify.s3mock.provider.Provider
8 | import io.findify.s3mock.request.DeleteObjectsRequest
9 | import io.findify.s3mock.response.DeleteObjectsResponse
10 |
11 | import scala.util.{Failure, Success, Try}
12 |
13 | /**
14 | * Created by shutty on 3/13/17.
15 | */
case class DeleteObjects (implicit provider: Provider) extends LazyLogging {
  /** POST /&lt;bucket&gt;?delete — multi-object delete. Parses the request XML,
    * attempts each key, and reports deleted/errored keys in one XML response. */
  def route(bucket:String) = post {
    parameter('delete) { d =>
      entity(as[String]) { xml => {
        complete {
          val request = DeleteObjectsRequest(scala.xml.XML.loadString(xml).head)
          val empty = DeleteObjectsResponse(Nil, Nil)
          // `key` (not `path`) avoids shadowing the imported `path` directive.
          val response = request.objects.foldLeft(empty) { (acc, key) =>
            Try(provider.deleteObject(bucket, key)) match {
              case Success(_) =>
                logger.info(s"deleted object $bucket/$key")
                acc.copy(deleted = key +: acc.deleted)
              case Failure(NoSuchKeyException(_, _)) =>
                logger.info(s"cannot delete object $bucket/$key: no such key")
                acc.copy(error = key +: acc.error)
              case Failure(ex) =>
                logger.error(s"cannot delete object $bucket/$key", ex)
                acc.copy(error = key +: acc.error)
            }
          }
          HttpResponse(StatusCodes.OK, entity = response.toXML.toString())
        }
      }}
    }
  }
}
42 |
--------------------------------------------------------------------------------
/src/main/scala/io/findify/s3mock/route/PutObjectMultipartComplete.scala:
--------------------------------------------------------------------------------
1 | package io.findify.s3mock.route
2 |
3 | import akka.http.scaladsl.model._
4 | import akka.http.scaladsl.server.Directives._
5 | import com.typesafe.scalalogging.LazyLogging
6 | import io.findify.s3mock.error.{InternalErrorException, NoSuchBucketException}
7 | import io.findify.s3mock.provider.Provider
8 | import io.findify.s3mock.request.CompleteMultipartUpload
9 |
10 | import scala.util.{Failure, Success, Try}
11 |
12 | /**
13 | * Created by shutty on 8/20/16.
14 | */
case class PutObjectMultipartComplete(implicit provider:Provider) extends LazyLogging {
  /** POST /&lt;bucket&gt;/&lt;key&gt;?uploadId=… — complete a multipart upload.
    *
    * Parses the CompleteMultipartUpload XML body, asks the provider to assemble
    * the parts, and answers with the result XML; 404 when the bucket is missing,
    * 500 on any other failure.
    */
  def route(bucket:String, path:String) = post {
    parameter('uploadId) { uploadId =>
      entity(as[String]) { xml =>
        complete {
          logger.info(s"multipart upload completed for $bucket/$path, id = $uploadId")
          val request = CompleteMultipartUpload(scala.xml.XML.loadString(xml).head)
          Try(provider.putObjectMultipartComplete(bucket, path, uploadId, request)) match {
            case Success(response) =>
              HttpResponse(
                StatusCodes.OK,
                entity = HttpEntity(
                  ContentType(MediaTypes.`application/xml`, HttpCharsets.`UTF-8`),
                  response.toXML.toString()
                )
              )
            case Failure(e: NoSuchBucketException) =>
              // Fix: failure branches previously answered without any log line,
              // unlike the other route classes; log before responding.
              logger.error(s"multipart upload complete for $bucket/$path failed: no such bucket")
              HttpResponse(
                StatusCodes.NotFound,
                entity = e.toXML.toString()
              )
            case Failure(t) =>
              logger.error(s"multipart upload complete for $bucket/$path failed", t)
              HttpResponse(
                StatusCodes.InternalServerError,
                entity = InternalErrorException(t).toXML.toString()
              )
          }
        }
      }
    }
  }
}
47 |
--------------------------------------------------------------------------------
/CHANGELOG.md:
--------------------------------------------------------------------------------
1 | 0.2.4
2 | =======
3 | * pom -> jar dependency type doc fix
4 | * support alpakka multipart uploads
5 | * support alpakka listObjects ([#66](https://github.com/findify/s3mock/issues/66))
* fix bug with etag on FileProvider being always "0" ([#70](https://github.com/findify/s3mock/issues/70))
* fix last-modified header always being equal to "1970-01-01 00:00:00" ([#65](https://github.com/findify/s3mock/issues/65))
8 | * wrong content-type for listObjects ([#60](https://github.com/findify/s3mock/issues/60))
* deleteObjects broken on aws s3 sdk 2.0 ([#71](https://github.com/findify/s3mock/issues/71))
10 | * docker image for non-jvm tests
11 |
12 | 0.2.3
13 | =======
14 | * windows compatibility in FileProvider ([#28](https://github.com/findify/s3mock/issues/28))
15 | * Max Keys not respected when calling list objects (V2) ([#47](https://github.com/findify/s3mock/issues/47))
16 | * getETag from getObjectMetadata returns null ([#48](https://github.com/findify/s3mock/issues/48))
17 | * update to akka 2.5.2, akka-http 10.0.7
18 | * fix concurrent requests causing weird locking issues on FileProvider ([#52](https://github.com/findify/s3mock/issues/52))
* fix warnings in GetObject about incorrect headers ([#54](https://github.com/findify/s3mock/issues/54))
20 |
21 | 0.2.2
22 | =======
23 | * More convenient and traditional Java API with Builder-style instance creation
24 | * Docs update for alpakka usage
25 | * Javadocs for all public API methods
26 | * use latest aws-java-sdk-s3 library
27 |
28 | 0.2.1
29 | =======
30 | * Bump akka to 2.5.1
31 | * fix issue when DeleteObjects response was malformed for multi-object deletes
32 | * alpakka support test case
33 | * fix subpath get/delete issues [#45](https://github.com/findify/s3mock/issues/45)
34 |
35 | 0.2.0
36 | =======
37 | * Support for ranged get requests ([#39](https://github.com/findify/s3mock/pull/39))
38 | * In-memory backend ([#37](https://github.com/findify/s3mock/pull/37))
39 | * Bugfix: ObjectListing#getCommonPrefixes order is not alphabetical ([#41](https://github.com/findify/s3mock/issues/41))
40 | * Akka 2.5.0 support
--------------------------------------------------------------------------------
/src/main/scala/io/findify/s3mock/provider/metadata/MapMetadataStore.scala:
--------------------------------------------------------------------------------
1 | package io.findify.s3mock.provider.metadata
2 |
3 | import java.io.{ByteArrayInputStream, ByteArrayOutputStream, ObjectInputStream, ObjectOutputStream}
4 |
5 | import better.files.File
6 | import com.amazonaws.services.s3.model.ObjectMetadata
7 | import org.iq80.leveldb.Options
8 | import org.iq80.leveldb.impl.Iq80DBFactory._
9 | import org.iq80.leveldb._
10 | import org.iq80.leveldb.impl.Iq80DBFactory
11 |
12 | import scala.collection.mutable
13 |
14 | /**
15 | * Created by shutty on 3/13/17.
16 | */
class MapMetadataStore(path: String) extends MetadataStore {
  // Open LevelDB handles, one per bucket. Access is guarded by `synchronized`
  // on this instance (see load/remove).
  val bucketMetadata = mutable.Map[String,DB]()

  /** Persist `meta` for bucket/key (Java serialization of ObjectMetadata). */
  override def put(bucket: String, key: String, meta: ObjectMetadata): Unit = {
    val map = load(path, bucket)
    map.put(bytes(key), meta2bytes(meta))
  }

  /** Read metadata back, or None when nothing is stored for the key. */
  override def get(bucket: String, key: String): Option[ObjectMetadata] = {
    val map = load(path, bucket)
    Option(map.get(bytes(key))).map(bytes2meta)
  }

  /** Drop metadata for a single key. */
  override def delete(bucket: String, key: String): Unit = {
    val map = load(path, bucket)
    map.delete(bytes(key))
  }

  /** Close and remove the whole per-bucket store, including its on-disk files.
    *
    * Fix: this method used to read and mutate `bucketMetadata` without the monitor
    * that `load` holds, racing with concurrent `load` calls on the mutable map;
    * it now takes the same lock.
    */
  override def remove(bucket: String): Unit = synchronized {
    bucketMetadata.get(bucket).foreach { db =>
      db.close()
      bucketMetadata.remove(bucket)
    }
    val file = File(s"$path/$bucket.metadata")
    if (file.exists) file.delete()
  }

  // Get (or lazily open) the LevelDB database backing `bucket`.
  private def load(path: String, bucket: String): DB = synchronized {
    bucketMetadata.get(bucket) match {
      case Some(db) => db
      case None =>
        val options = new Options()
        options.createIfMissing(true)
        val db = Iq80DBFactory.factory.open(File(s"$path/$bucket.metadata").toJava, options)
        bucketMetadata.put(bucket, db)
        db
    }
  }

  // Java-serialize an ObjectMetadata into a byte array for storage.
  private def meta2bytes(meta: ObjectMetadata) = {
    val out = new ByteArrayOutputStream()
    val stream = new ObjectOutputStream(out)
    stream.writeObject(meta)
    stream.close()
    out.toByteArray
  }

  // Inverse of meta2bytes.
  private def bytes2meta(bytes: Array[Byte]): ObjectMetadata = {
    val in = new ByteArrayInputStream(bytes)
    val stream = new ObjectInputStream(in)
    stream.readObject().asInstanceOf[ObjectMetadata]
  }
}
70 |
--------------------------------------------------------------------------------
/src/test/scala/io/findify/s3mock/CopyObjectTest.scala:
--------------------------------------------------------------------------------
1 | package io.findify.s3mock
2 |
3 | import java.io.ByteArrayInputStream
4 | import java.nio.charset.StandardCharsets
5 | import java.util
6 |
7 | import com.amazonaws.services.s3.model.{CopyObjectRequest, ObjectMetadata, PutObjectRequest}
8 |
9 | /**
10 | * Created by shutty on 3/13/17.
11 | */
class CopyObjectTest extends S3MockTest {
  override def behaviour(fixture: => Fixture) = {
    val s3 = fixture.client

    it should "copy an object even if destdir does not exist" in {
      s3.createBucket("bucket-1")
      s3.createBucket("bucket-2")
      s3.putObject("bucket-1", "test.txt", "contents")
      s3.copyObject("bucket-1", "test.txt", "bucket-2", "folder/test.txt")
      getContent(s3.getObject("bucket-2", "folder/test.txt")) shouldBe "contents"
    }

    it should "copy an object with metadata" in {
      s3.createBucket("bucket-3")
      val userMeta = new util.HashMap[String, String]()
      userMeta.put("a", "b")
      val sourceMeta = new ObjectMetadata()
      sourceMeta.setUserMetadata(userMeta)
      val payload = new ByteArrayInputStream(Array(61.toByte, 62.toByte, 63.toByte))
      s3.putObject(new PutObjectRequest("bucket-3", "test.txt", payload, sourceMeta))
      s3.copyObject("bucket-3", "test.txt", "bucket-3", "test2.txt")
      s3.getObject("bucket-3", "test2.txt").getObjectMetadata.getUserMetadata.get("a") shouldBe "b"
    }

    it should "copy an object with new metadata" in {
      s3.createBucket("test-bucket")

      val originalMeta = new ObjectMetadata
      originalMeta.addUserMetadata("key1", "value1")
      originalMeta.addUserMetadata("key2", "value2")
      val body = new ByteArrayInputStream("test".getBytes(StandardCharsets.UTF_8))
      s3.putObject(new PutObjectRequest("test-bucket", "test.txt", body, originalMeta))

      // Copy with replacement metadata: the copy must carry only the new keys.
      val replacementMeta = new ObjectMetadata
      replacementMeta.addUserMetadata("new-key1", "new-value1")
      replacementMeta.addUserMetadata("new-key2", "new-value2")
      val copyRequest = new CopyObjectRequest("test-bucket", "test.txt", "test-bucket", "test2.txt").withNewObjectMetadata(replacementMeta)
      s3.copyObject(copyRequest)

      val copied = s3.getObject("test-bucket", "test2.txt").getObjectMetadata.getUserMetadata
      copied.size shouldBe 2
      copied.get("new-key1") shouldBe "new-value1"
      copied.get("new-key2") shouldBe "new-value2"
    }
  }
}
58 |
--------------------------------------------------------------------------------
/src/test/scala/io/findify/s3mock/awscli/GetObjectTest.scala:
--------------------------------------------------------------------------------
1 | package io.findify.s3mock.awscli
2 |
3 | import akka.http.scaladsl.model.{HttpMethods, HttpRequest}
4 | import akka.stream.scaladsl.Sink
5 | import akka.util.ByteString
6 | import com.amazonaws.services.s3.model.AmazonS3Exception
7 |
8 | import scala.concurrent.Await
9 | import scala.concurrent.duration._
10 |
11 | /**
12 | * Created by shutty on 8/28/16.
13 | */
class GetObjectTest extends AWSCliTest {
  override def behaviour(fixture: => Fixture) = {
    val s3 = fixture.client
    val port = fixture.port

    it should "receive LastModified header with AWS CLI" in {
      s3.createBucket("awscli-lm")
      s3.putObject("awscli-lm", "foo", "bar")
      val rsp = Await.result(http.singleRequest(HttpRequest(method = HttpMethods.GET, uri = s"http://127.0.0.1:$port/awscli-lm/foo")), 10.seconds)
      rsp.headers.find(_.is("last-modified")).map(_.value()) shouldBe Some("Thu, 01 Jan 1970 00:00:00 GMT")
      rsp.entity.contentLengthOption shouldBe Some(3)
    }

    it should "deal with HEAD requests with AWS CLI" in {
      s3.createBucket("awscli-head")
      s3.putObject("awscli-head", "foo2", "bar")
      val rsp = Await.result(http.singleRequest(HttpRequest(method = HttpMethods.HEAD, uri = s"http://127.0.0.1:$port/awscli-head/foo2")), 10.seconds)
      rsp.headers.find(_.is("last-modified")).map(_.value()) shouldBe Some("Thu, 01 Jan 1970 00:00:00 GMT")
      rsp.entity.contentLengthOption shouldBe Some(3)
      // HEAD advertises the content length but must carry an empty body.
      val body = Await.result(rsp.entity.dataBytes.fold(ByteString(""))(_ ++ _).runWith(Sink.head), 10.seconds)
      body.utf8String shouldBe ""
    }

    it should "deal with metadata requests with AWS CLI" in {
      s3.createBucket("awscli-head2")
      s3.putObject("awscli-head2", "foo", "bar")
      s3.getObjectMetadata("awscli-head2", "foo").getContentLength shouldBe 3
    }

    it should "respond with status 404 if key does not exist with AWS CLI" in {
      s3.createBucket("awscli")
      val exc = intercept[AmazonS3Exception] {
        s3.getObject("awscli", "doesnotexist")
      }
      exc.getStatusCode shouldBe 404
      exc.getErrorCode shouldBe "NoSuchKey"
    }

    it should "respond with status 404 if bucket does not exist with AWS CLI" in {
      val exc = intercept[AmazonS3Exception] {
        s3.getObject("awscli-404", "doesnotexist")
      }
      exc.getStatusCode shouldBe 404
      exc.getErrorCode shouldBe "NoSuchBucket"
    }
  }
}
58 |
--------------------------------------------------------------------------------
/src/test/scala/io/findify/s3mock/GetPutObjectWithMetadataTest.scala:
--------------------------------------------------------------------------------
1 | package io.findify.s3mock
2 |
3 | import java.io.ByteArrayInputStream
4 |
5 | import com.amazonaws.services.s3.model.{ObjectMetadata, S3Object}
6 |
7 | import scala.collection.JavaConversions._
8 |
9 | /**
10 | * Created by shutty on 8/10/16.
11 | */
class GetPutObjectWithMetadataTest extends S3MockTest {
  override def behaviour(fixture: => Fixture): Unit = {
    val s3 = fixture.client

    // Shared fixture for the three tests below: (re)creates the "getput" bucket,
    // uploads "bar" under `key` with the given content type plus one user-metadata
    // entry, and returns the object read back from the mock.
    def putAndFetch(key: String, contentType: String): S3Object = {
      s3.createBucket("getput").getName shouldBe "getput"
      s3.listBuckets().exists(_.getName == "getput") shouldBe true

      val body = new ByteArrayInputStream("bar".getBytes("UTF-8"))
      val metadata: ObjectMetadata = new ObjectMetadata()
      metadata.setContentType(contentType)
      metadata.setUserMetadata(Map("metamaic" -> "maic"))

      s3.putObject("getput", key, body, metadata)
      s3.getObject("getput", key)
    }

    it should "put object with metadata" in {
      val obj = putAndFetch("foo", "application/json")
      obj.getObjectMetadata.getContentType shouldBe "application/json"
      getContent(obj) shouldBe "bar"
    }

    it should "put object with metadata, but skip unvalid content-type" in {
      val obj = putAndFetch("foo", "application")
      obj.getObjectMetadata.getContentType shouldBe "application/octet-stream"
      getContent(obj) shouldBe "bar"
    }

    it should "put object in subdirs with metadata, but skip unvalid content-type" in {
      val obj = putAndFetch("foo1/bar", "application")
      obj.getObjectMetadata.getContentType shouldBe "application/octet-stream"
      getContent(obj) shouldBe "bar"
    }
  }
}
69 |
--------------------------------------------------------------------------------
/src/main/scala/io/findify/s3mock/S3ChunkedProtocolStage.scala:
--------------------------------------------------------------------------------
1 | package io.findify.s3mock
2 |
3 | import akka.stream._
4 | import akka.stream.stage.{GraphStage, GraphStageLogic, InHandler, OutHandler}
5 | import akka.util.ByteString
6 | import com.typesafe.scalalogging.LazyLogging
7 |
8 | /**
9 | * Created by shutty on 8/11/16.
10 | */
11 | case class Header(chunkSize:Int, headerSize:Int, sig:String)
12 |
/**
  * Accumulates raw bytes of an AWS S3 "aws-chunked" upload body and extracts
  * decoded payload chunks from it.
  *
  * Wire format of one chunk: `<hex size>;chunk-signature=<64 hex chars>\r\n<payload>\r\n`.
  */
class ChunkBuffer extends LazyLogging {
  // Characters allowed in the hex-encoded chunk-size prefix.
  val hexChars = "0123456789abcdef".getBytes.toSet
  var size = -1
  var buffer = ByteString("")

  /** Append newly received bytes to the internal buffer. */
  def addChunk(data:ByteString) = buffer = buffer ++ data

  /**
    * Try to parse a chunk header at the start of the buffer without consuming
    * any bytes. Returns None when the buffered data does not (yet) contain a
    * complete, well-formed header.
    */
  def readHeader:Option[Header] = {
    val headerBuffer = buffer.take(90)
    val size = headerBuffer.takeWhile(hexChars.contains)
    val sig = headerBuffer.drop(size.length).take(83)
    // ";chunk-signature=" (17 bytes) + 64 hex chars + "\r\n" (2 bytes) == 83.
    // size must be non-empty: Integer.parseInt("", 16) would throw
    // NumberFormatException, and an empty size prefix is not a valid header.
    if (size.nonEmpty && (size.length <= 8) && (sig.length == 83) && sig.startsWith(";chunk-signature=") && sig.endsWith("\r\n")) {
      val header = Header(Integer.parseInt(size.utf8String, 16), size.length + 83, sig.drop(17).dropRight(2).utf8String)
      logger.debug(s"read header: $header")
      Some(header)
    } else {
      logger.debug("cannot read header")
      None
    }
  }

  /**
    * Consume one complete chunk (header + payload + trailing CRLF) from the
    * buffer and return its payload, or None when the payload has not fully
    * arrived yet.
    */
  def pullChunk(header:Header):Option[ByteString] = {
    // +2 accounts for the CRLF terminating the chunk payload.
    if (buffer.length >= header.headerSize + header.chunkSize + 2) {
      buffer = buffer.drop(header.headerSize)
      val chunk = buffer.take(header.chunkSize)
      buffer = buffer.drop(header.chunkSize + 2)
      logger.debug(s"pulled chunk, size=${header.chunkSize}")
      Some(chunk)
    } else {
      logger.debug(s"not enough data to pull chunk: chunkSize = ${header.chunkSize}, bufferSize = ${buffer.length}")
      None
    }
  }
}
44 |
/** Akka Streams stage that strips the AWS "aws-chunked" framing from an
  * incoming ByteString stream, emitting only the decoded chunk payloads. */
class S3ChunkedProtocolStage extends GraphStage[FlowShape[ByteString,ByteString]] {
  val out = Outlet[ByteString]("s3.out")
  val in = Inlet[ByteString]("s3.in")
  override val shape = FlowShape(in, out)

  override def createLogic(inheritedAttributes: Attributes) = new GraphStageLogic(shape) {
    // Per-materialization buffer: accumulates raw bytes between pushes.
    val buffer = new ChunkBuffer()

    setHandler(in, new InHandler {
      override def onPush() = {
        buffer.addChunk(grab(in))
        // Emit at most one decoded chunk per upstream push; if a full chunk
        // is not yet available, ask upstream for more data instead.
        buffer.readHeader match {
          case Some(header) => buffer.pullChunk(header) match {
            case Some(chunk) => push(out, chunk)
            case None => pull(in)
          }
          case None => pull(in)
        }
      }

      override def onUpstreamFinish() = {
        // Flush whatever complete chunk is still buffered, then complete.
        // NOTE(review): only a single trailing chunk is drained here — if more
        // than one full chunk remains buffered at EOF, the rest appears to be
        // dropped; confirm whether upstream framing makes that impossible.
        buffer.readHeader match {
          case Some(header) => buffer.pullChunk(header) match {
            case Some(chunk) =>
              push(out, chunk)
              complete(out)
            case None =>
              complete(out)
          }
          case None =>
            complete(out)
        }
      }
    })
    setHandler(out, new OutHandler {
      override def onPull() = {
        // Downstream demand always translates into upstream demand.
        pull(in)
      }
    })
  }

}
87 |
--------------------------------------------------------------------------------
/src/main/scala/io/findify/s3mock/route/PutObjectMultipart.scala:
--------------------------------------------------------------------------------
1 | package io.findify.s3mock.route
2 |
3 | import akka.NotUsed
4 | import akka.http.scaladsl.model._
5 | import akka.http.scaladsl.model.headers.ETag
6 | import akka.http.scaladsl.server.Directives._
7 | import akka.stream.scaladsl.{Flow, Sink}
8 | import akka.stream.{FlowShape, Graph, Materializer}
9 | import akka.util.ByteString
10 | import com.typesafe.scalalogging.LazyLogging
11 | import io.findify.s3mock.S3ChunkedProtocolStage
12 | import io.findify.s3mock.error.{InternalErrorException, NoSuchBucketException}
13 | import io.findify.s3mock.provider.Provider
14 | import org.apache.commons.codec.digest.DigestUtils
15 |
16 | import scala.util.{Failure, Success, Try}
17 |
18 | /**
19 | * Created by shutty on 8/19/16.
20 | */
/**
  * Route for uploading one part of a multipart upload:
  * `PUT/POST /bucket/key?partNumber=N&uploadId=ID`.
  */
case class PutObjectMultipart(implicit provider: Provider, mat: Materializer) extends LazyLogging {

  type EntityDecoder = Graph[FlowShape[ByteString, ByteString], NotUsed]

  // Pass-through decoder for plain (non AWS-chunked) request bodies.
  // Renamed from defaultEntityEncoder: it is a decoder (see EntityDecoder type),
  // and the identity map was redundant.
  private val defaultEntityDecoder = Flow[ByteString]

  def route(bucket: String, path: String) = parameter('partNumber, 'uploadId) { (partNumber: String, uploadId: String) =>
    put {
      logger.debug(s"put multipart object bucket=$bucket path=$path")
      // Presence of x-amz-decoded-content-length means the body uses the AWS
      // signed-chunk protocol and must be de-chunked first; the header value
      // itself is not needed.
      headerValueByName("x-amz-decoded-content-length") { _ =>
        completeRequest(bucket, path, partNumber.toInt, uploadId, new S3ChunkedProtocolStage)
      } ~ completeRequest(bucket, path, partNumber.toInt, uploadId)
    } ~ post {
      logger.debug(s"post multipart object bucket=$bucket path=$path")
      completeRequest(bucket, path, partNumber.toInt, uploadId)
    }
  }

  /**
    * Drain the request body through `entityDecoder`, store it as one upload
    * part, and respond with the part's MD5 as its ETag. Maps
    * NoSuchBucketException to 404 and any other failure to 500.
    */
  def completeRequest(bucket: String,
                      path: String,
                      partNumber: Int,
                      uploadId: String,
                      entityDecoder: EntityDecoder = defaultEntityDecoder) =
    extractRequest { request =>
      complete {
        val result = request.entity.dataBytes
          .via(entityDecoder)
          .fold(ByteString(""))(_ ++ _)
          .map(data => {
            // partNumber is already an Int here; the previous .toInt was a no-op.
            Try(provider.putObjectMultipartPart(bucket, path, partNumber, uploadId, data.toArray)) match {
              case Success(()) =>
                HttpResponse(
                  StatusCodes.OK,
                  entity = HttpEntity( ContentType(MediaTypes.`application/xml`, HttpCharsets.`UTF-8`), "")
                ).withHeaders(ETag(DigestUtils.md5Hex(data.toArray)))
              case Failure(e: NoSuchBucketException) =>
                HttpResponse(
                  StatusCodes.NotFound,
                  entity = e.toXML.toString()
                )
              case Failure(t) =>
                HttpResponse(
                  StatusCodes.InternalServerError,
                  entity = InternalErrorException(t).toXML.toString()
                )
            }
          }).runWith(Sink.head[HttpResponse])
        result
      }
    }

}
73 |
--------------------------------------------------------------------------------
/src/main/scala/io/findify/s3mock/route/CopyObject.scala:
--------------------------------------------------------------------------------
1 | package io.findify.s3mock.route
2 |
3 | import java.util
4 |
5 | import akka.http.scaladsl.model.{HttpRequest, HttpResponse, StatusCodes}
6 | import akka.http.scaladsl.server.Directives._
7 | import com.amazonaws.services.s3.model.ObjectMetadata
8 | import com.typesafe.scalalogging.LazyLogging
9 | import io.findify.s3mock.error.{InternalErrorException, NoSuchBucketException, NoSuchKeyException}
10 | import io.findify.s3mock.provider.Provider
11 |
12 | import scala.collection.JavaConverters._
13 | import scala.util.{Failure, Success, Try}
14 |
15 | /**
16 | * Created by shutty on 11/23/16.
17 | */
/**
  * Object-copy route: `PUT /destBucket/destKey` with an `x-amz-copy-source`
  * header naming the source object.
  */
case class CopyObject(implicit provider: Provider) extends LazyLogging {
  /** Split "/bucket/some/key" into (bucket, "some/key"); None when there is no bucket component. */
  def split(path: String):Option[(String,String)] = {
    val noFirstSlash = path.replaceAll("^/+", "")
    val result = noFirstSlash.split("/").toList match {
      case bucket :: tail => Some(bucket -> tail.mkString("/"))
      case _ => None
    }
    result
  }

  /**
    * Build replacement metadata from the request when the client asked for
    * REPLACE semantics; None means "copy metadata from the source object".
    */
  def extractMetadata(req: HttpRequest): Option[ObjectMetadata] = {
    req.headers.find(_.lowercaseName() == "x-amz-metadata-directive").map(_.value()) match {
      case Some("REPLACE") =>
        val user = new util.HashMap[String,String]()
        req.headers.filter(_.name().startsWith("x-amz-meta-")).map(h => h.name().replaceAll("x-amz-meta-", "") -> h.value()).foreach { case (k,v) => user.put(k,v) }
        val contentType = req.entity.contentType.value
        val meta = new ObjectMetadata()
        meta.setUserMetadata(user)
        meta.setContentType(contentType)
        Some(meta)
      // COPY, an absent header, or any unrecognized directive value all fall
      // back to copying the source metadata. The previous match covered only
      // Some("COPY") | None and threw a MatchError on any other value.
      case _ => None
    }
  }
  def route(destBucket:String, destKey:String) = put {
    headerValueByName("x-amz-copy-source") { source =>
      extractRequest { req =>
        complete {
          val meta = extractMetadata(req)
          split(source) match {
            case Some((sourceBucket, sourceKey)) =>
              Try(provider.copyObject(sourceBucket, sourceKey, destBucket, destKey, meta)) match {
                case Success(result) =>
                  logger.info(s"copied object $sourceBucket/$sourceKey")
                  HttpResponse(status = StatusCodes.OK, entity = result.toXML.toString())
                case Failure(e: NoSuchKeyException) =>
                  logger.info(s"cannot copy object $sourceBucket/$sourceKey: no such key")
                  HttpResponse(
                    StatusCodes.NotFound,
                    entity = e.toXML.toString()
                  )
                case Failure(e: NoSuchBucketException) =>
                  logger.info(s"cannot copy object $sourceBucket/$sourceKey: no such bucket")
                  HttpResponse(
                    StatusCodes.NotFound,
                    entity = e.toXML.toString()
                  )
                case Failure(t) =>
                  logger.error(s"cannot copy object $sourceBucket/$sourceKey: $t", t)
                  HttpResponse(
                    StatusCodes.InternalServerError,
                    entity = InternalErrorException(t).toXML.toString()
                  )
              }
            case None =>
              // x-amz-copy-source did not contain a bucket component.
              logger.error(s"cannot copy object $source")
              HttpResponse(StatusCodes.NotFound)
          }
        }
      }
    }
  }
}
80 |
--------------------------------------------------------------------------------
/src/test/scala/io/findify/s3mock/DeleteTest.scala:
--------------------------------------------------------------------------------
1 | package io.findify.s3mock
2 |
3 | import akka.http.scaladsl.Http
4 | import akka.http.scaladsl.model._
5 | import com.amazonaws.services.s3.model.{AmazonS3Exception, DeleteObjectsRequest}
6 |
7 | import scala.collection.JavaConverters._
8 | import scala.concurrent.Await
9 | import scala.util.Try
10 | import scala.concurrent.duration._
11 |
12 | /**
13 | * Created by shutty on 8/11/16.
14 | */
/** Exercises bucket/object deletion, including multi-object delete, against both fixtures. */
class DeleteTest extends S3MockTest {
  override def behaviour(fixture: => Fixture) = {
    val s3 = fixture.client
    it should "delete a bucket" in {
      s3.createBucket("del")
      s3.listBuckets().asScala.exists(_.getName == "del") shouldBe true
      s3.deleteBucket("del")
      s3.listBuckets().asScala.exists(_.getName == "del") shouldBe false
    }

    it should "return 404 for non existent buckets when deleting" in {
      Try(s3.deleteBucket("nodel")).isFailure shouldBe true
    }

    it should "delete an object" in {
      s3.createBucket("delobj")
      s3.putObject("delobj", "somefile", "foo")
      s3.listObjects("delobj", "somefile").getObjectSummaries.asScala.exists(_.getKey == "somefile") shouldBe true
      s3.deleteObject("delobj", "somefile")
      s3.listObjects("delobj", "somefile").getObjectSummaries.asScala.exists(_.getKey == "somefile") shouldBe false
    }

    it should "return 404 for non-existent keys when deleting" in {
      Try(s3.deleteObject("nodel", "xxx")).isFailure shouldBe true
    }

    it should "produce NoSuchBucket if bucket does not exist when deleting" in {
      val exc = intercept[AmazonS3Exception] {
        s3.deleteBucket("aws-404")
      }
      exc.getStatusCode shouldBe 404
      exc.getErrorCode shouldBe "NoSuchBucket"
    }

    it should "delete multiple objects at once" in {
      s3.createBucket("delobj2")
      s3.putObject("delobj2", "somefile1", "foo1")
      s3.putObject("delobj2", "somefile2", "foo2")
      s3.listObjects("delobj2", "somefile").getObjectSummaries.size() shouldBe 2
      val del = s3.deleteObjects(new DeleteObjectsRequest("delobj2").withKeys("somefile1", "somefile2"))
      del.getDeletedObjects.size() shouldBe 2
      s3.listObjects("delobj2", "somefile").getObjectSummaries.size() shouldBe 0
    }

    it should "do nothing in case for deleting a subpath" in {
      s3.createBucket("delobj3")
      s3.putObject("delobj3", "some/path/foo1", "foo1")
      s3.putObject("delobj3", "some/path/foo2", "foo2")
      val del = s3.deleteObject("delobj3", "some/path")
      s3.listObjects("delobj3", "some/path/").getObjectSummaries.size() shouldBe 2
    }

    it should "work with aws sdk 2.0 style multi-object delete" in {
      implicit val mat = fixture.mat
      s3.createBucket("owntracks")
      s3.putObject("owntracks", "data/2017-07-31/10:34.json", "foo")
      s3.putObject("owntracks", "data/2017-07-31/16:23.json", "bar")
      // S3 multi-object Delete payload listing both keys. This was previously an
      // empty string, so the POST deleted nothing and the final assertion that
      // the bucket is empty could not hold.
      val requestData =
        """<?xml version="1.0" encoding="UTF-8"?>
          |<Delete>
          |  <Object><Key>data/2017-07-31/10:34.json</Key></Object>
          |  <Object><Key>data/2017-07-31/16:23.json</Key></Object>
          |</Delete>""".stripMargin
      val response = Await.result(Http(fixture.system).singleRequest(HttpRequest(
        method = HttpMethods.POST,
        uri = s"http://localhost:${fixture.port}/owntracks?delete",
        entity = HttpEntity(ContentType(MediaTypes.`application/xml`, HttpCharsets.`UTF-8`), requestData)
      )), 10.seconds)
      s3.listObjects("owntracks").getObjectSummaries.isEmpty shouldBe true
    }
  }
}
82 |
--------------------------------------------------------------------------------
/src/main/scala/io/findify/s3mock/S3Mock.scala:
--------------------------------------------------------------------------------
1 | package io.findify.s3mock
2 |
3 | import akka.actor.ActorSystem
4 | import akka.http.scaladsl.Http
5 | import akka.http.scaladsl.model.{HttpResponse, StatusCodes}
6 | import akka.http.scaladsl.server.Directives._
7 | import akka.stream.ActorMaterializer
8 | import com.typesafe.scalalogging.LazyLogging
9 | import io.findify.s3mock.provider.{FileProvider, InMemoryProvider, Provider}
10 | import io.findify.s3mock.route._
11 | import scala.concurrent.Await
12 | import scala.concurrent.duration.Duration
13 |
14 | /**
15 | * Create s3mock instance, the hard mode.
16 | * @param port port to bind to
17 | * @param provider backend to use. There are currently two of them implemented, FileProvider and InMemoryProvider
18 | * @param system actor system to use. By default, create an own one.
19 | */
class S3Mock(port:Int, provider:Provider)(implicit system:ActorSystem = ActorSystem.create("s3mock")) extends LazyLogging {
  implicit val p = provider
  private var bind:Http.ServerBinding = _

  /** Start the HTTP server, blocking until it is bound; returns the binding. */
  def start = {
    implicit val mat = ActorMaterializer()
    val http = Http(system)
    // Bucket-level operations are reachable both as "/bucket/" and "/bucket";
    // build the sub-route once instead of duplicating the concat for each shape.
    def bucketRoutes(bucket: String) = concat(
      ListBucket().route(bucket),
      CreateBucket().route(bucket),
      DeleteBucket().route(bucket),
      DeleteObjects().route(bucket)
    )
    val route =
      pathPrefix(Segment) { bucket =>
        pathSingleSlash {
          bucketRoutes(bucket)
        } ~ pathEnd {
          bucketRoutes(bucket)
        } ~ parameterMap { params =>
          // Everything below the bucket segment is an object key.
          path(RemainingPath) { key =>
            concat(
              GetObject().route(bucket, key.toString(), params),
              CopyObject().route(bucket, key.toString()),
              PutObjectMultipart().route(bucket, key.toString()),
              PutObjectMultipartStart().route(bucket, key.toString()),
              PutObjectMultipartComplete().route(bucket, key.toString()),
              PutObject().route(bucket, key.toString()),
              DeleteObject().route(bucket, key.toString())
            )
          }
        }
      } ~ ListBuckets().route() ~ extractRequest { request =>
        // Catch-all: log and answer 501 for anything the mock does not implement.
        complete {
          logger.error(s"method not implemented: ${request.method.value} ${request.uri.toString}")
          HttpResponse(status = StatusCodes.NotImplemented)
        }
      }

    bind = Await.result(http.bindAndHandle(route, "0.0.0.0", port), Duration.Inf)
    logger.info(s"bound to 0.0.0.0:$port")
    bind
  }

  /**
   * Stop s3mock instance. For file-based working mode, it will not clean the mounted folder.
   */
  def stop = Await.result(bind.unbind(), Duration.Inf)

}
74 |
object S3Mock {
  /** Create an in-memory s3mock instance bound to the given port. */
  def apply(port: Int): S3Mock = new S3Mock(port, new InMemoryProvider)

  /** Create a file-backed s3mock instance serving `dir` on the given port. */
  def apply(port:Int, dir:String) = new S3Mock(port, new FileProvider(dir))

  /**
   * Create an in-memory s3mock instance (Java API).
   * @param port a port to bind to.
   * @return s3mock instance
   */
  def create(port:Int) = apply(port)

  /**
   * Create a file-based s3mock instance (Java API).
   * @param port port to bind to
   * @param dir directory to mount as a collection of buckets. First-level directories will be treated as buckets, their contents - as keys.
   * @return
   */
  def create(port:Int, dir:String) = apply(port, dir)

  /**
   * Fluent builder for the Java API. Defaults: port 8001, in-memory backend.
   */
  class Builder {
    private var boundPort: Int = 8001
    private var backend: Provider = new InMemoryProvider()

    /**
     * Set port to bind to.
     * @param port port number
     * @return this builder
     */
    def withPort(port: Int): Builder = { boundPort = port; this }

    /**
     * Use the in-memory backend.
     * @return this builder
     */
    def withInMemoryBackend(): Builder = { backend = new InMemoryProvider(); this }

    /**
     * Use the file-based backend.
     * @param path Directory to mount
     * @return this builder
     */
    def withFileBackend(path: String): Builder = { backend = new FileProvider(path); this }

    /**
     * Build the s3mock instance from the accumulated settings.
     * @return a new, not-yet-started S3Mock
     */
    def build(): S3Mock = new S3Mock(boundPort, backend)
  }
}
137 |
138 |
--------------------------------------------------------------------------------
/src/test/scala/io/findify/s3mock/S3MockTest.scala:
--------------------------------------------------------------------------------
1 | package io.findify.s3mock
2 |
3 | import akka.actor.ActorSystem
4 | import akka.stream.alpakka.s3.S3Settings
5 | import akka.stream.alpakka.s3.auth.BasicCredentials
6 | import akka.stream.{ActorMaterializer, Materializer}
7 | import akka.stream.alpakka.s3.scaladsl.S3Client
8 | import better.files.File
9 | import com.amazonaws.auth.{AWSStaticCredentialsProvider, AnonymousAWSCredentials, BasicAWSCredentials, DefaultAWSCredentialsProviderChain}
10 | import com.amazonaws.client.builder.AwsClientBuilder.EndpointConfiguration
11 | import com.amazonaws.services.s3.{AmazonS3, AmazonS3Client, AmazonS3ClientBuilder}
12 | import com.amazonaws.services.s3.model.S3Object
13 | import com.amazonaws.services.s3.transfer.{TransferManager, TransferManagerBuilder}
14 | import com.typesafe.config.{Config, ConfigFactory}
15 | import io.findify.s3mock.provider.{FileProvider, InMemoryProvider}
16 |
17 | import scala.collection.JavaConverters._
18 | import org.scalatest.{BeforeAndAfterAll, FlatSpec, Matchers}
19 |
20 | import scala.concurrent.Await
21 | import scala.concurrent.duration.Duration
22 | import scala.io.Source
23 |
24 | /**
25 | * Created by shutty on 8/9/16.
26 | */
/**
 * Shared test harness: starts one file-backed and one in-memory S3Mock server
 * and runs each suite's `behaviour` against both fixtures.
 */
trait S3MockTest extends FlatSpec with Matchers with BeforeAndAfterAll {
  private val workDir = File.newTemporaryDirectory().pathAsString
  private val fileBasedPort = 8001
  private val fileSystemConfig = configFor("localhost", fileBasedPort)
  private val fileSystem = ActorSystem.create("testfile", fileSystemConfig)
  private val fileMat = ActorMaterializer()(fileSystem)
  private val fileBasedS3 = clientFor("localhost", fileBasedPort)
  private val fileBasedServer = new S3Mock(fileBasedPort, new FileProvider(workDir))
  private val fileBasedTransferManager: TransferManager = TransferManagerBuilder.standard().withS3Client(fileBasedS3).build()
  private val fileBasedAlpakkaClient = new S3Client(S3Settings(fileSystemConfig))(fileSystem, fileMat)

  private val inMemoryPort = 8002
  private val inMemoryConfig = configFor("localhost", inMemoryPort)
  private val inMemorySystem = ActorSystem.create("testram", inMemoryConfig)
  private val inMemoryMat = ActorMaterializer()(inMemorySystem)
  private val inMemoryS3 = clientFor("localhost", inMemoryPort)
  private val inMemoryServer = new S3Mock(inMemoryPort, new InMemoryProvider)
  private val inMemoryTransferManager: TransferManager = TransferManagerBuilder.standard().withS3Client(inMemoryS3).build()
  private val inMemoryBasedAlpakkaClient = new S3Client(S3Settings(inMemoryConfig))(inMemorySystem, inMemoryMat)

  /** Everything a suite needs to talk to one running mock server. */
  case class Fixture(server: S3Mock, client: AmazonS3, tm: TransferManager, name: String, port: Int, alpakka: S3Client, system: ActorSystem, mat: Materializer)
  val fixtures = List(
    Fixture(fileBasedServer, fileBasedS3, fileBasedTransferManager, "file based S3Mock", fileBasedPort, fileBasedAlpakkaClient, fileSystem, fileMat),
    Fixture(inMemoryServer, inMemoryS3, inMemoryTransferManager, "in-memory S3Mock", inMemoryPort, inMemoryBasedAlpakkaClient, inMemorySystem, inMemoryMat)
  )

  /** Implemented by each suite; invoked once per fixture. */
  def behaviour(fixture: => Fixture) : Unit

  for (fixture <- fixtures) {
    fixture.name should behave like behaviour(fixture)
  }

  override def beforeAll = {
    if (!File(workDir).exists) File(workDir).createDirectory()
    fileBasedServer.start
    inMemoryServer.start
    super.beforeAll
  }
  override def afterAll = {
    super.afterAll
    inMemoryServer.stop
    fileBasedServer.stop
    // Shut down BOTH transfer managers; previously only the in-memory one was
    // closed, leaking the file-based manager's client thread pool.
    inMemoryTransferManager.shutdownNow()
    fileBasedTransferManager.shutdownNow()
    Await.result(fileSystem.terminate(), Duration.Inf)
    Await.result(inMemorySystem.terminate(), Duration.Inf)
    File(workDir).delete()
  }

  /** Read an S3Object's content fully as a UTF-8 string. */
  def getContent(s3Object: S3Object): String = Source.fromInputStream(s3Object.getObjectContent, "UTF-8").mkString

  /** AWS SDK client pointed at a local mock endpoint (path-style, anonymous credentials). */
  def clientFor(host: String, port: Int): AmazonS3 = {
    val endpoint = new EndpointConfiguration(s"http://$host:$port", "us-east-1")
    AmazonS3ClientBuilder.standard()
      .withPathStyleAccessEnabled(true)
      .withCredentials(new AWSStaticCredentialsProvider(new AnonymousAWSCredentials()))
      .withEndpointConfiguration(endpoint)
      .build()
  }

  /** Alpakka S3 settings routing all traffic through the local mock as a proxy. */
  def configFor(host: String, port: Int): Config = {
    ConfigFactory.parseMap(Map(
      "akka.stream.alpakka.s3.proxy.host" -> host,
      "akka.stream.alpakka.s3.proxy.port" -> port,
      "akka.stream.alpakka.s3.proxy.secure" -> false,
      "akka.stream.alpakka.s3.path-style-access" -> true,
      "akka.stream.alpakka.s3.aws.access-key-id" -> "foo",
      "akka.stream.alpakka.s3.aws.secret-access-key" -> "bar",
      "akka.stream.alpakka.s3.aws.default-region" -> "us-east-1",
      "akka.stream.alpakka.s3.buffer" -> "memory",
      "akka.stream.alpakka.s3.disk-buffer-path" -> ""
    ).asJava)

  }

}
101 |
102 |
--------------------------------------------------------------------------------
/src/test/scala/io/findify/s3mock/MultipartUploadTest.scala:
--------------------------------------------------------------------------------
1 | package io.findify.s3mock
2 |
3 | import java.io.ByteArrayInputStream
4 | import java.nio.charset.Charset
5 |
6 | import akka.actor.ActorSystem
7 | import akka.http.scaladsl.Http
8 | import akka.http.scaladsl.model.{HttpMethods, HttpRequest}
9 | import akka.stream.ActorMaterializer
10 | import akka.stream.scaladsl.Sink
11 | import akka.util.ByteString
12 | import com.amazonaws.services.s3.model.{AmazonS3Exception, CompleteMultipartUploadRequest, InitiateMultipartUploadRequest, UploadPartRequest}
13 | import org.apache.commons.codec.digest.DigestUtils
14 |
15 | import scala.collection.JavaConverters._
16 | import scala.concurrent.duration._
17 | import scala.concurrent.Await
18 | import scala.util.Random
19 |
20 | /**
21 | * Created by shutty on 8/10/16.
22 | */
/** Exercises the multipart-upload protocol over raw HTTP and via the AWS Java SDK. */
class MultipartUploadTest extends S3MockTest {
  override def behaviour(fixture: => Fixture) = {
    implicit val system = ActorSystem.create("test")
    implicit val mat = ActorMaterializer()
    val http = Http(system)
    val s3 = fixture.client
    val port = fixture.port

    it should "upload multipart files" in {
      s3.createBucket("getput")
      val response1 = Await.result(http.singleRequest(HttpRequest(method = HttpMethods.POST, uri = s"http://127.0.0.1:$port/getput/foo2?uploads")), 10.minutes)
      val data = Await.result(response1.entity.dataBytes.fold(ByteString(""))(_ ++ _).runWith(Sink.head), 10.seconds)
      val uploadId = (scala.xml.XML.loadString(data.utf8String) \ "UploadId").text
      val response2 = Await.result(http.singleRequest(HttpRequest(method = HttpMethods.POST, uri = s"http://127.0.0.1:$port/getput/foo2?partNumber=1&uploadId=$uploadId", entity = "foo")), 10.minutes)
      response2.status.intValue() shouldBe 200
      val response3 = Await.result(http.singleRequest(HttpRequest(method = HttpMethods.POST, uri = s"http://127.0.0.1:$port/getput/foo2?partNumber=2&uploadId=$uploadId", entity = "boo")), 10.minutes)
      response3.status.intValue() shouldBe 200
      // CompleteMultipartUpload payload listing both parts; the XML tags had
      // been stripped from this string, leaving it malformed. ETag values are
      // placeholders — presumably ignored by the mock; verify against provider.
      val commit = """<CompleteMultipartUpload>
        |  <Part>
        |    <PartNumber>1</PartNumber>
        |    <ETag>ETag</ETag>
        |  </Part>
        |  <Part>
        |    <PartNumber>2</PartNumber>
        |    <ETag>ETag</ETag>
        |  </Part>
        |</CompleteMultipartUpload>""".stripMargin
      val response4 = Await.result(http.singleRequest(HttpRequest(method = HttpMethods.POST, uri = s"http://127.0.0.1:$port/getput/foo2?uploadId=$uploadId", entity = commit)), 10.minutes)
      response4.status.intValue() shouldBe 200

      getContent(s3.getObject("getput", "foo2")) shouldBe "fooboo"
    }

    it should "work with java sdk" in {
      s3.createBucket("getput")
      val init = s3.initiateMultipartUpload(new InitiateMultipartUploadRequest("getput", "foo4"))
      val p1 = s3.uploadPart(new UploadPartRequest().withBucketName("getput").withPartSize(10).withKey("foo4").withPartNumber(1).withUploadId(init.getUploadId).withInputStream(new ByteArrayInputStream("hellohello".getBytes())))
      val p2 = s3.uploadPart(new UploadPartRequest().withBucketName("getput").withPartSize(10).withKey("foo4").withPartNumber(2).withUploadId(init.getUploadId).withInputStream(new ByteArrayInputStream("worldworld".getBytes())))
      val result = s3.completeMultipartUpload(new CompleteMultipartUploadRequest("getput", "foo4", init.getUploadId, List(p1.getPartETag, p2.getPartETag).asJava))
      result.getKey shouldBe "foo4"
      getContent(s3.getObject("getput", "foo4")) shouldBe "hellohelloworldworld"
    }
    it should "work with large blobs" in {
      val init = s3.initiateMultipartUpload(new InitiateMultipartUploadRequest("getput", "fooLarge"))
      // 201 random 10KB parts; the reassembled object must hash identically to
      // the concatenation of the uploaded blobs.
      val blobs = for (i <- 0 to 200) yield {
        val blob1 = new Array[Byte](10000)
        Random.nextBytes(blob1)
        val p1 = s3.uploadPart(new UploadPartRequest().withBucketName("getput").withPartSize(blob1.length).withKey("fooLarge").withPartNumber(i).withUploadId(init.getUploadId).withInputStream(new ByteArrayInputStream(blob1)))
        blob1 -> p1.getPartETag
      }
      val result = s3.completeMultipartUpload(new CompleteMultipartUploadRequest("getput", "fooLarge", init.getUploadId, blobs.map(_._2).asJava))
      result.getKey shouldBe "fooLarge"
      DigestUtils.md5Hex(s3.getObject("getput", "fooLarge").getObjectContent) shouldBe DigestUtils.md5Hex(blobs.map(_._1).fold(Array[Byte]())(_ ++ _))
    }


    it should "produce NoSuchBucket if bucket does not exist" in {
      val exc = intercept[AmazonS3Exception] {
        val init = s3.initiateMultipartUpload(new InitiateMultipartUploadRequest("aws-404", "foo4"))
        val p1 = s3.uploadPart(new UploadPartRequest().withBucketName("aws-404").withPartSize(10).withKey("foo4").withPartNumber(1).withUploadId(init.getUploadId).withInputStream(new ByteArrayInputStream("hellohello".getBytes())))
      }
      exc.getStatusCode shouldBe 404
      exc.getErrorCode shouldBe "NoSuchBucket"
    }
  }
}
89 |
--------------------------------------------------------------------------------
/src/main/scala/io/findify/s3mock/route/GetObject.scala:
--------------------------------------------------------------------------------
1 | package io.findify.s3mock.route
2 |
3 | import java.io.StringWriter
4 | import java.net.URLDecoder
5 | import java.util.Date
6 |
7 | import akka.http.scaladsl.model.HttpEntity.Strict
8 | import akka.http.scaladsl.model._
9 | import akka.http.scaladsl.model.headers.{RawHeader, `Last-Modified`}
10 | import akka.http.scaladsl.server.Directives._
11 | import com.amazonaws.services.s3.Headers
12 | import com.amazonaws.services.s3.model.ObjectMetadata
13 | import com.amazonaws.util.DateUtils
14 | import com.typesafe.scalalogging.LazyLogging
15 | import io.findify.s3mock.error.{InternalErrorException, NoSuchBucketException, NoSuchKeyException}
16 | import io.findify.s3mock.provider.{GetObjectData, Provider}
17 |
18 | import scala.collection.JavaConverters._
19 | import scala.util.{Failure, Success, Try}
20 |
21 | /**
22 | * Created by shutty on 8/19/16.
23 | */
/** GET-object route: serves object bytes with metadata headers, plus the
  * `?tagging` sub-resource. */
case class GetObject(implicit provider: Provider) extends LazyLogging {
  def route(bucket: String, path: String, params: Map[String, String]) = get {

    withRangeSupport {
      complete {
        logger.debug(s"get object: bucket=$bucket, path=$path")

        Try(provider.getObject(bucket, path)) match {
          case Success(GetObjectData(data, metaOption)) =>
            metaOption match {
              case Some(meta) =>
                // Fall back to an untyped entity when the stored content type
                // does not parse as a media type.
                val entity: Strict = ContentType.parse(meta.getContentType) match {
                  case Right(value) => HttpEntity(value, data)
                  case Left(error) => HttpEntity(data)
                }

                // "?tagging" requests the object's tag set instead of its body.
                if (params.contains("tagging")) {
                  handleTaggingRequest(meta)
                } else {
                  HttpResponse(
                    status = StatusCodes.OK,
                    entity = entity,
                    headers = `Last-Modified`(DateTime(1970, 1, 1)) :: metadataToHeaderList(meta)
                  )
                }

              case None =>
                // No stored metadata: serve raw bytes with a fixed Last-Modified.
                HttpResponse(
                  status = StatusCodes.OK,
                  entity = HttpEntity(data),
                  headers = List(`Last-Modified`(DateTime(1970, 1, 1)))
                )
            }
          case Failure(e: NoSuchKeyException) =>
            HttpResponse(
              StatusCodes.NotFound,
              entity = e.toXML.toString()
            )
          case Failure(e: NoSuchBucketException) =>
            HttpResponse(
              StatusCodes.NotFound,
              entity = e.toXML.toString()
            )
          case Failure(t) =>
            logger.error("Oops: ", t)
            HttpResponse(
              StatusCodes.InternalServerError,
              entity = InternalErrorException(t).toXML.toString()
            )
        }
      }
    }
  }



  // Builds the GetObjectTagging XML response from the "x-amz-tagging" raw
  // metadata entry ("k1=v1&k2=v2", URL-encoded).
  // NOTE(review): the scala-xml literals in this method appear to have been
  // stripped from this view of the file (e.g. `var root =` with no right-hand
  // side) — the original sources contain <Tagging>/<TagSet>/<Tag> literals
  // here. Do not edit this method without recovering the original literals.
  protected def handleTaggingRequest(meta: ObjectMetadata): HttpResponse = {
    var root =
    var tagset =

    var w = new StringWriter()

    if (meta.getRawMetadata.containsKey("x-amz-tagging")){
      var doc =


      {
        // Split "k=v&k2=v2" pairs and URL-decode each key and value.
        meta.getRawMetadata.get("x-amz-tagging").asInstanceOf[String].split("&").map(
          (rawTag: String) => {
            rawTag.split("=", 2).map(
              (part: String) => URLDecoder.decode(part, "UTF-8")
            )
          }).map(
          (kv: Array[String]) =>

            {kv(0)}
            {kv(1)}
          )
      }




      xml.XML.write(w, doc, "UTF-8", true, null)
    } else {
      // No tags stored: emit an empty tag set document.
      var doc =
      xml.XML.write(w, doc, "UTF-8", true, null)
    }

    meta.setContentType("application/xml; charset=utf-8")
    HttpResponse(
      status = StatusCodes.OK,
      entity = w.toString,
      headers = `Last-Modified`(DateTime(1970, 1, 1)) :: metadataToHeaderList(meta)
    )
  }

  // Headers that must not be copied verbatim from stored raw metadata.
  val headerBlacklist = Set("content-type", "connection")

  // Converts stored object metadata (raw entries, Expires, user metadata,
  // content MD5 as ETag) into HTTP response headers, skipping blacklisted ones.
  protected def metadataToHeaderList(metadata: ObjectMetadata): List[HttpHeader] = {
    val headers = Option(metadata.getRawMetadata)
      .map(_.asScala.toMap)
      .map(_.map {
        case (_, date: Date) =>
          `Last-Modified`(DateTime(new org.joda.time.DateTime(date).getMillis))
        case (key, value) =>
          RawHeader(key, value.toString)
      }.toList)
      .toList.flatten
      .filterNot(header => headerBlacklist.contains(header.lowercaseName))

    val httpExpires = Option(metadata.getHttpExpiresDate).map(date => RawHeader(Headers.EXPIRES, DateUtils.formatRFC822Date(date)))

    val userHeaders = Option(metadata.getUserMetadata)
      .map(_.asScala.toMap)
      .map(_.map { case (key, value) => {
        val name = Option(key).map(_.trim).getOrElse("")
        val hvalue = Option(value).map(_.trim).getOrElse("")
        RawHeader(Headers.S3_USER_METADATA_PREFIX + name, hvalue)
      }}.toList)
      .toList
      .flatten

    headers ++ httpExpires.toList ++ userHeaders ++ Option(metadata.getContentMD5).map(md5 => RawHeader(Headers.ETAG, md5))
  }
}
149 |
--------------------------------------------------------------------------------
/src/main/scala/io/findify/s3mock/route/PutObject.scala:
--------------------------------------------------------------------------------
1 | package io.findify.s3mock.route
2 |
3 | import java.lang.Iterable
4 | import java.util
5 |
6 | import akka.http.javadsl.model.HttpHeader
7 | import akka.http.scaladsl.model.{HttpRequest, HttpResponse, StatusCodes}
8 | import akka.http.scaladsl.server.Directives._
9 | import akka.stream.Materializer
10 | import akka.stream.scaladsl.Sink
11 | import akka.util.ByteString
12 | import com.amazonaws.AmazonClientException
13 | import com.amazonaws.services.s3.Headers
14 | import com.amazonaws.services.s3.internal.ServiceUtils
15 | import com.amazonaws.services.s3.model.ObjectMetadata
16 | import com.amazonaws.util.{DateUtils, StringUtils}
17 | import com.typesafe.scalalogging.LazyLogging
18 | import io.findify.s3mock.S3ChunkedProtocolStage
19 | import io.findify.s3mock.error.{InternalErrorException, NoSuchBucketException}
20 | import io.findify.s3mock.provider.Provider
21 | import org.apache.commons.codec.digest.DigestUtils
22 |
23 | import scala.collection.JavaConverters._
24 | import scala.util.{Failure, Success, Try}
25 |
/**
  * Created by shutty on 8/20/16.
  *
  * Route handling object uploads: plain PUT/POST bodies and AWS
  * signature-v4 "aws-chunked" (signed) uploads.
  */
case class PutObject(implicit provider:Provider, mat:Materializer) extends LazyLogging {
  /** PUT with an `authorization` header is treated as a signed (chunked)
    * upload; PUT without it, or POST, stores the body verbatim. */
  def route(bucket:String, path:String) = put {
    headerValueByName("authorization") { _ =>
      completeSigned(bucket, path)
    } ~ completePlain(bucket, path)
  } ~ post {
    completePlain(bucket, path)
  }

  /** Decodes the aws-chunked body framing, then stores the object. */
  def completeSigned(bucket:String, path:String) = extractRequest { request =>
    complete {
      logger.info(s"put object $bucket/$path (signed)")
      request.entity.dataBytes
        .via(new S3ChunkedProtocolStage)
        .fold(ByteString(""))(_ ++ _)
        .map(data => storeObject(bucket, path, request, data))
        .runWith(Sink.head[HttpResponse])
    }
  }

  /** Stores the raw request body as-is (unsigned upload). */
  def completePlain(bucket:String, path:String) = extractRequest { request =>
    complete {
      logger.info(s"put object $bucket/$path (unsigned)")
      request.entity.dataBytes
        .fold(ByteString(""))(_ ++ _)
        .map(data => storeObject(bucket, path, request, data))
        .runWith(Sink.head[HttpResponse])
    }
  }

  /** Persists the accumulated bytes via the provider and maps failures to
    * S3-style XML error responses. Shared by signed and plain uploads
    * (was duplicated verbatim in both handlers). */
  private def storeObject(bucket: String, path: String, request: HttpRequest, data: ByteString): HttpResponse = {
    val bytes = data.toArray
    val metadata = populateObjectMetadata(request, bytes)
    Try(provider.putObject(bucket, path, bytes, metadata)) match {
      case Success(()) => HttpResponse(StatusCodes.OK)
      case Failure(e: NoSuchBucketException) =>
        HttpResponse(
          StatusCodes.NotFound,
          entity = e.toXML.toString()
        )
      case Failure(t) =>
        HttpResponse(
          StatusCodes.InternalServerError,
          entity = InternalErrorException(t).toXML.toString()
        )
    }
  }

  /** Builds ObjectMetadata from request headers, mirroring the AWS SDK's
    * own header parsing, and records the body's MD5 as the content MD5.
    * (Removed: an `ignoredHeaders` set that was built but never consulted,
    * and a no-op `metadata.getRawMetadata` call whose result was discarded.) */
  private def populateObjectMetadata(request: HttpRequest, bytes: Array[Byte]): ObjectMetadata = {
    val metadata = new ObjectMetadata()
    val headers: Iterable[HttpHeader] = request.getHeaders()
    for (header <- headers.asScala) {
      var key: String = header.name()
      if (StringUtils.beginsWithIgnoreCase(key, Headers.S3_USER_METADATA_PREFIX)) {
        // user metadata: strip the x-amz-meta- prefix before storing
        key = key.substring(Headers.S3_USER_METADATA_PREFIX.length)
        metadata.addUserMetadata(key, header.value())
      }
      else if (key.equalsIgnoreCase(Headers.LAST_MODIFIED)) try
        metadata.setHeader(key, ServiceUtils.parseRfc822Date(header.value()))
      catch {
        case pe: Exception => logger.warn("Unable to parse last modified date: " + header.value(), pe)
      }
      else if (key.equalsIgnoreCase(Headers.CONTENT_LENGTH)) try
        metadata.setHeader(key, java.lang.Long.parseLong(header.value()))
      catch {
        case nfe: NumberFormatException => throw new AmazonClientException("Unable to parse content length. Header 'Content-Length' has corrupted data" + nfe.getMessage, nfe)
      }
      else if (key.equalsIgnoreCase(Headers.ETAG)) metadata.setHeader(key, ServiceUtils.removeQuotes(header.value()))
      else if (key.equalsIgnoreCase(Headers.EXPIRES)) try
        metadata.setHttpExpiresDate(DateUtils.parseRFC822Date(header.value()))
      catch {
        case pe: Exception => logger.warn("Unable to parse http expiration date: " + header.value(), pe)
      }
      else if (key.equalsIgnoreCase(Headers.S3_PARTS_COUNT)) try
        metadata.setHeader(key, header.value().toInt)
      catch {
        case nfe: NumberFormatException => throw new AmazonClientException("Unable to parse part count. Header x-amz-mp-parts-count has corrupted data" + nfe.getMessage, nfe)
      }
      else metadata.setHeader(key, header.value())
    }

    // fall back to the entity's content type when no header supplied one
    if (metadata.getContentType == null) {
      metadata.setContentType(request.entity.getContentType.toString)
    }
    metadata.setContentMD5(DigestUtils.md5Hex(bytes))
    metadata
  }

}
157 |
--------------------------------------------------------------------------------
/src/test/scala/io/findify/s3mock/GetPutObjectTest.scala:
--------------------------------------------------------------------------------
1 | package io.findify.s3mock
2 |
3 | import java.io.ByteArrayInputStream
4 | import java.util
5 |
6 | import akka.actor.ActorSystem
7 | import akka.http.scaladsl.Http
8 | import akka.http.scaladsl.model.{HttpMethods, HttpRequest}
9 | import akka.stream.ActorMaterializer
10 | import com.amazonaws.services.s3.model._
11 | import com.amazonaws.util.IOUtils
12 |
13 | import scala.collection.JavaConversions._
14 | import scala.concurrent.Await
15 | import scala.concurrent.duration._
16 | import scala.util.{Random, Try}
17 |
/**
  * Created by shutty on 8/10/16.
  *
  * End-to-end GET/PUT behaviour tests run against each provider fixture.
  */
class GetPutObjectTest extends S3MockTest {
  override def behaviour(fixture: => Fixture) = {
    val s3 = fixture.client
    val port = fixture.port
    it should "put object" in {
      s3.createBucket("getput").getName shouldBe "getput"
      s3.listBuckets().exists(_.getName == "getput") shouldBe true
      s3.putObject("getput", "foo", "bar")
      val result = getContent(s3.getObject("getput", "foo"))
      result shouldBe "bar"
    }
    it should "be able to post data" in {
      implicit val system = ActorSystem.create("test")
      implicit val mat = ActorMaterializer()
      val http = Http(system)
      if (!s3.listBuckets().exists(_.getName == "getput")) s3.createBucket("getput")
      val response = Await.result(http.singleRequest(HttpRequest(method = HttpMethods.POST, uri = s"http://127.0.0.1:$port/getput/foo2", entity = "bar")), 10.seconds)
      getContent(s3.getObject("getput", "foo2")) shouldBe "bar"
    }
    it should "put objects in subdirs" in {
      s3.putObject("getput", "foo1/foo2/foo3", "bar")
      val result = getContent(s3.getObject("getput", "foo1/foo2/foo3"))
      result shouldBe "bar"
    }
    it should "not drop \\r\\n symbols" in {
      s3.putObject("getput", "foorn", "bar\r\nbaz")
      val result = getContent(s3.getObject("getput", "foorn"))
      result shouldBe "bar\r\nbaz"
    }
    it should "put & get large binary blobs" in {
      val blob = Random.nextString(1024000).getBytes("UTF-8")
      s3.putObject("getput", "foolarge", new ByteArrayInputStream(blob), new ObjectMetadata())
      val result = getContent(s3.getObject("getput", "foolarge")).getBytes("UTF-8")
      result shouldBe blob
    }

    it should "store tags and spit them back on get tagging requests" in {
      s3.createBucket("tbucket")
      s3.putObject(
        new PutObjectRequest("tbucket", "taggedobj", new ByteArrayInputStream("content".getBytes("UTF-8")), new ObjectMetadata)
          .withTagging(new ObjectTagging(List(new Tag("key1", "val1"), new Tag("key=&interesting", "value=something&stragne"))))
      )
      // vals: neither binding is ever reassigned
      val tagging = s3.getObjectTagging(new GetObjectTaggingRequest("tbucket", "taggedobj")).getTagSet
      val tagMap = new util.HashMap[String, String]()
      for (tag <- tagging) {
        tagMap.put(tag.getKey, tag.getValue)
      }
      tagMap.size() shouldBe 2
      tagMap.get("key1") shouldBe "val1"
      tagMap.get("key=&interesting") shouldBe "value=something&stragne"
    }
    it should "be OK with retrieving tags for un-tagged objects" in {
      s3.putObject("tbucket", "taggedobj", "some-content")
      val tagging = s3.getObjectTagging(new GetObjectTaggingRequest("tbucket", "taggedobj")).getTagSet
      tagging.size() shouldBe 0
    }

    it should "produce NoSuchBucket if bucket does not exist when GETting" in {
      val exc = intercept[AmazonS3Exception] {
        s3.getObject("aws-404", "foo")
      }
      exc.getStatusCode shouldBe 404
      exc.getErrorCode shouldBe "NoSuchBucket"
    }

    it should "produce NoSuchBucket if bucket does not exist when PUTting" in {
      val exc = intercept[AmazonS3Exception] {
        s3.putObject("aws-404", "foo", "content")
      }
      exc.getStatusCode shouldBe 404
      exc.getErrorCode shouldBe "NoSuchBucket"
    }

    it should "work with large files" in {
      val huge = Random.nextString(10 * 1024 * 1024)
      s3.putObject("getput", "foobig", huge)
      val result = getContent(s3.getObject("getput", "foobig"))
      result shouldBe huge
    }

    it should "work with dot-files" in {
      s3.createBucket("dot")
      s3.listBuckets().exists(_.getName == "dot") shouldBe true
      s3.putObject("dot", "foo", "bar")
      s3.putObject("dot", ".foo", "bar")
      val result = s3.listObjects("dot").getObjectSummaries.toList.map(_.getKey)
      result shouldBe List(".foo", "foo")
    }
    it should "support ranged get requests" in {
      val data = new Array[Byte](1000)
      Random.nextBytes(data)

      val bucket = "rangedbuck"
      val key = "data"

      s3.createBucket(bucket)
      s3.putObject(bucket, key, new ByteArrayInputStream(data), new ObjectMetadata())

      val (startByte, endByte) = (5L, 55L)
      val getObjectRequest = new GetObjectRequest(bucket, key)
      getObjectRequest.setRange(startByte, endByte)

      // S3 byte ranges are inclusive on both ends
      val sliceOfData = data.slice(startByte.toInt, endByte.toInt + 1)
      val retrievedData = IOUtils.toByteArray(s3.getObject(getObjectRequest).getObjectContent)

      retrievedData shouldEqual sliceOfData
    }

    it should "return 404 on subpath request" in {
      s3.createBucket("subpath")
      s3.putObject("subpath", "some/path/example", "bar")
      val noSlash = Try(s3.getObject("subpath", "some/path"))
      noSlash.failed.get.asInstanceOf[AmazonS3Exception].getStatusCode shouldBe 404
      val withSlash = Try(s3.getObject("subpath", "some/path/"))
      withSlash.failed.get.asInstanceOf[AmazonS3Exception].getStatusCode shouldBe 404
    }

    // this trick is not possible on POSIX-compliant file systems:
    // So the test will always fail in file-based provider
    it should "be possible to store /key and /key/bar objects at the same time" ignore {
      s3.createBucket("prefix")
      s3.putObject("prefix", "some/path", "bar")
      s3.putObject("prefix", "some", "bar")
      val noSlash = Try(s3.getObject("prefix", "some/path"))
      val withSlash = Try(s3.getObject("prefix", "some"))
      val br = 1
    }

    it should "have etag in metadata" in {
      s3.createBucket("etag")
      s3.putObject("etag", "file/name", "contents")
      val data = s3.getObjectMetadata("etag", "file/name")
      data.getETag shouldBe "98bf7d8c15784f0a3d63204441e1e2aa"
    }

    it should "not fail concurrent requests" in {
      s3.createBucket("concurrent")
      s3.putObject("concurrent", "file/name", "contents")
      val results = Range(1, 100).par.map(_ => IOUtils.toString(s3.getObject("concurrent", "file/name").getObjectContent)).toList
      results.forall(_ == "contents") shouldBe true
    }
  }

}
167 |
168 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # S3 mock library for Java/Scala
2 |
[![Build Status](https://travis-ci.org/findify/s3mock.svg?branch=master)](https://travis-ci.org/findify/s3mock)
[![Maven Central](https://maven-badges.herokuapp.com/maven-central/io.findify/s3mock_2.12/badge.svg)](https://maven-badges.herokuapp.com/maven-central/io.findify/s3mock_2.12)
5 |
6 | s3mock is a web service implementing AWS S3 API, which can be used for local testing of your code using S3
7 | but without hitting real S3 endpoints.
8 |
9 | Implemented API methods:
10 | * list buckets
11 | * list objects (all & by prefix)
12 | * create bucket
13 | * delete bucket
14 | * put object (via PUT, POST, multipart and chunked uploads are also supported)
15 | * copy object
16 | * get object
17 | * delete object
18 | * batch delete
19 |
20 | Not supported features (these might be implemented later):
* authentication: s3mock will accept any credentials without checking their validity or signature
22 | * bucket policy, ACL, versioning
23 | * object ACL
24 | * posix-incompatible key structure with file-based provider, for example keys `/some.dir/file.txt` and `/some.dir` in the same bucket
25 |
26 | ## Installation
27 |
28 | s3mock package is available for Scala 2.11/2.12 (on Java 8). To install using SBT, add these
29 | statements to your `build.sbt`:
30 |
31 | libraryDependencies += "io.findify" %% "s3mock" % "0.2.4" % "test",
32 |
33 | On maven, update your `pom.xml` in the following way:
34 | ```xml
<!-- add this entry to the <dependencies> section -->
<dependency>
    <groupId>io.findify</groupId>
    <artifactId>s3mock_2.12</artifactId>
    <version>0.2.4</version>
    <scope>test</scope>
</dependency>
42 | ```
43 |
44 | S3Mock is also available as a [docker container](https://hub.docker.com/r/findify/s3mock/) for out-of-jvm testing:
45 | ```bash
46 | docker run -p 8001:8001 findify/s3mock:latest
47 | ```
48 |
49 | ## Usage
50 |
51 | Just point your s3 client to a localhost, enable path-style access, and it should work out of the box.
52 |
53 | There are two working modes for s3mock:
54 | * File-based: it will map a local directory as a collection of s3 buckets. This mode can be useful when you need to have a bucket with some pre-loaded data (and too lazy to re-upload everything on each run).
55 | * In-memory: keep everything in RAM. All the data you've uploaded to s3mock will be wiped completely on shutdown.
56 |
57 | Java:
58 | ```java
59 | import com.amazonaws.auth.AWSStaticCredentialsProvider;
60 | import com.amazonaws.auth.AnonymousAWSCredentials;
61 | import com.amazonaws.client.builder.AwsClientBuilder;
62 | import com.amazonaws.services.s3.AmazonS3;
63 | import com.amazonaws.services.s3.AmazonS3Builder;
64 | import com.amazonaws.services.s3.AmazonS3Client;
65 | import com.amazonaws.services.s3.AmazonS3ClientBuilder;
66 | import io.findify.s3mock.S3Mock;
67 |
68 | /*
69 | S3Mock.create(8001, "/tmp/s3");
70 | */
71 | S3Mock api = new S3Mock.Builder().withPort(8001).withInMemoryBackend().build();
72 | api.start();
73 |
74 | /* AWS S3 client setup.
75 | * withPathStyleAccessEnabled(true) trick is required to overcome S3 default
76 | * DNS-based bucket access scheme
77 | * resulting in attempts to connect to addresses like "bucketname.localhost"
78 | * which requires specific DNS setup.
79 | */
80 | EndpointConfiguration endpoint = new EndpointConfiguration("http://localhost:8001", "us-west-2");
81 | AmazonS3Client client = AmazonS3ClientBuilder
82 | .standard()
83 | .withPathStyleAccessEnabled(true)
84 | .withEndpointConfiguration(endpoint)
85 | .withCredentials(new AWSStaticCredentialsProvider(new AnonymousAWSCredentials()))
86 | .build();
87 |
88 | client.createBucket("testbucket");
89 | client.putObject("testbucket", "file/name", "contents");
90 | api.stop();
91 | ```
92 |
93 | Scala with AWS S3 SDK:
94 | ```scala
95 | import com.amazonaws.auth.AWSStaticCredentialsProvider
96 | import com.amazonaws.auth.AnonymousAWSCredentials
97 | import com.amazonaws.client.builder.AwsClientBuilder
98 | import com.amazonaws.services.s3.AmazonS3
99 | import com.amazonaws.services.s3.AmazonS3Builder
100 | import com.amazonaws.services.s3.AmazonS3Client
101 | import com.amazonaws.services.s3.AmazonS3ClientBuilder
102 | import io.findify.s3mock.S3Mock
103 |
104 |
105 | /** Create and start S3 API mock. */
106 | val api = S3Mock(port = 8001, dir = "/tmp/s3")
107 | api.start
108 |
109 | /* AWS S3 client setup.
110 | * withPathStyleAccessEnabled(true) trick is required to overcome S3 default
111 | * DNS-based bucket access scheme
112 | * resulting in attempts to connect to addresses like "bucketname.localhost"
113 | * which requires specific DNS setup.
114 | */
115 | val endpoint = new EndpointConfiguration("http://localhost:8001", "us-west-2")
116 | val client = AmazonS3ClientBuilder
117 | .standard
118 | .withPathStyleAccessEnabled(true)
119 | .withEndpointConfiguration(endpoint)
120 | .withCredentials(new AWSStaticCredentialsProvider(new AnonymousAWSCredentials()))
121 | .build
122 |
123 | /** Use it as usual. */
124 | client.createBucket("foo")
125 | client.putObject("foo", "bar", "baz")
126 | ```
127 |
128 | Scala with Alpakka 0.8:
129 | ```scala
130 | import akka.actor.ActorSystem
131 | import akka.stream.ActorMaterializer
132 | import akka.stream.alpakka.s3.scaladsl.S3Client
133 | import akka.stream.scaladsl.Sink
134 | import com.typesafe.config.ConfigFactory
135 | import scala.collection.JavaConverters._
136 |
137 | val config = ConfigFactory.parseMap(Map(
138 | "akka.stream.alpakka.s3.proxy.host" -> "localhost",
139 | "akka.stream.alpakka.s3.proxy.port" -> 8001,
140 | "akka.stream.alpakka.s3.proxy.secure" -> false,
141 | "akka.stream.alpakka.s3.path-style-access" -> true
142 | ).asJava)
143 | implicit val system = ActorSystem.create("test", config)
144 | implicit val mat = ActorMaterializer()
145 | import system.dispatcher
146 | val s3a = S3Client()
147 | val contents = s3a.download("bucket", "key").runWith(Sink.reduce[ByteString](_ ++ _)).map(_.utf8String)
148 |
149 | ```
150 |
151 | ## License
152 |
153 | The MIT License (MIT)
154 |
155 | Copyright (c) 2016 Findify AB
156 |
157 | Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
158 |
159 | The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
160 |
161 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
162 |
--------------------------------------------------------------------------------
/src/main/scala/io/findify/s3mock/provider/FileProvider.scala:
--------------------------------------------------------------------------------
1 | package io.findify.s3mock.provider
2 | import java.util.UUID
3 | import java.io.{FileInputStream, File => JFile}
4 |
5 | import akka.http.scaladsl.model.DateTime
6 | import better.files.File
7 | import better.files.File.OpenOptions
8 | import com.amazonaws.services.s3.model.ObjectMetadata
9 | import com.typesafe.scalalogging.LazyLogging
10 | import io.findify.s3mock.error.{NoSuchBucketException, NoSuchKeyException}
11 | import io.findify.s3mock.provider.metadata.{MapMetadataStore, MetadataStore}
12 | import io.findify.s3mock.request.{CompleteMultipartUpload, CreateBucketConfiguration}
13 | import io.findify.s3mock.response._
14 | import org.apache.commons.codec.digest.DigestUtils
15 |
16 | import scala.util.Random
17 |
/**
  * Created by shutty on 8/9/16.
  *
  * Provider backed by a local directory: each bucket is a folder under
  * `dir`, each key a file inside it. Multipart uploads are staged under
  * a hidden `.mp` directory until completed.
  */
class FileProvider(dir:String) extends Provider with LazyLogging {
  val workDir = File(dir)
  if (!workDir.exists) workDir.createDirectories()

  private val meta = new MapMetadataStore(dir)

  override def metadataStore: MetadataStore = meta

  /** Every directory directly under `dir` is reported as a bucket. */
  override def listBuckets: ListAllMyBuckets = {
    val buckets = File(dir).list.map(f => Bucket(fromOs(f.name), DateTime(f.lastModifiedTime.toEpochMilli))).toList
    logger.debug(s"listing buckets: ${buckets.map(_.name)}")
    ListAllMyBuckets("root", UUID.randomUUID().toString, buckets)
  }

  /** Lists keys in a bucket filtered by prefix, rolling keys up into common
    * prefixes when a delimiter is given (S3 "directory" emulation).
    * @throws NoSuchBucketException when the bucket directory does not exist */
  override def listBucket(bucket: String, prefix: Option[String], delimiter: Option[String], maxkeys: Option[Int]) = {
    // first delimiter-bounded segment of `dir` after prefix `p`, if any
    def commonPrefix(dir: String, p: String, d: String): Option[String] = {
      dir.indexOf(d, p.length) match {
        case -1 => None
        case pos => Some(p + dir.substring(p.length, pos) + d)
      }
    }
    val prefixNoLeadingSlash = prefix.getOrElse("").dropWhile(_ == '/')
    val bucketFile = File(s"$dir/$bucket/")
    if (!bucketFile.exists) throw NoSuchBucketException(bucket)
    val bucketFileString = fromOs(bucketFile.toString)
    val bucketFiles = bucketFile.listRecursively.filter(f => {
      val fString = fromOs(f.toString).drop(bucketFileString.length).dropWhile(_ == '/')
      fString.startsWith(prefixNoLeadingSlash) && !f.isDirectory
    })
    val files = bucketFiles.map(f => {
      // fix: close the stream after hashing — it was previously leaked
      val stream = new FileInputStream(f.toJava)
      val md5 = try DigestUtils.md5Hex(stream) finally stream.close()
      Content(fromOs(f.toString).drop(bucketFileString.length+1).dropWhile(_ == '/'), DateTime(f.lastModifiedTime.toEpochMilli), md5, f.size, "STANDARD")
    }).toList
    logger.debug(s"listing bucket contents: ${files.map(_.key)}")
    val commonPrefixes = delimiter match {
      case Some(del) => files.flatMap(f => commonPrefix(f.key, prefixNoLeadingSlash, del)).distinct.sorted
      case None => Nil
    }
    val filteredFiles = files.filterNot(f => commonPrefixes.exists(p => f.key.startsWith(p)))
    val count = maxkeys.getOrElse(Int.MaxValue)
    val result = filteredFiles.sortBy(_.key)
    ListBucket(bucket, prefix, delimiter, commonPrefixes, result.take(count), isTruncated = result.size>count)
  }

  /** Creates the bucket directory; idempotent if it already exists. */
  override def createBucket(name:String, bucketConfig:CreateBucketConfiguration) = {
    val bucket = File(s"$dir/$name")
    if (!bucket.exists) bucket.createDirectory()
    logger.debug(s"creating bucket $name")
    CreateBucket(name)
  }

  /** Writes object bytes to disk (creating parent dirs) and stores metadata. */
  override def putObject(bucket:String, key:String, data:Array[Byte], objectMetadata: ObjectMetadata): Unit = {
    val bucketFile = File(s"$dir/$bucket")
    val file = File(s"$dir/$bucket/$key")
    if (!bucketFile.exists) throw NoSuchBucketException(bucket)
    file.createIfNotExists(createParents = true)
    logger.debug(s"writing file for s3://$bucket/$key to $dir/$bucket/$key, bytes = ${data.length}")
    file.writeByteArray(data)(OpenOptions.default)
    objectMetadata.setLastModified(org.joda.time.DateTime.now().toDate)
    metadataStore.put(bucket, key, objectMetadata)
  }

  /** Reads an object's bytes and metadata.
    * @throws NoSuchBucketException / NoSuchKeyException when missing */
  override def getObject(bucket:String, key:String): GetObjectData = {
    val bucketFile = File(s"$dir/$bucket")
    val file = File(s"$dir/$bucket/$key")
    logger.debug(s"reading object for s3://$bucket/$key")
    if (!bucketFile.exists) throw NoSuchBucketException(bucket)
    if (!file.exists) throw NoSuchKeyException(bucket, key)
    if (file.isDirectory) throw NoSuchKeyException(bucket, key)
    val meta = metadataStore.get(bucket, key)
    GetObjectData(file.byteArray, meta)
  }

  /** Begins a multipart upload: allocates an id and a staging directory. */
  override def putObjectMultipartStart(bucket:String, key:String):InitiateMultipartUploadResult = {
    val id = Math.abs(Random.nextLong()).toString
    val bucketFile = File(s"$dir/$bucket")
    if (!bucketFile.exists) throw NoSuchBucketException(bucket)
    File(s"$dir/.mp/$bucket/$key/$id/.keep").createIfNotExists(createParents = true)
    logger.debug(s"starting multipart upload for s3://$bucket/$key")
    InitiateMultipartUploadResult(bucket, key, id)
  }

  /** Stages one multipart chunk under the upload's temp directory. */
  override def putObjectMultipartPart(bucket:String, key:String, partNumber:Int, uploadId:String, data:Array[Byte]) = {
    val bucketFile = File(s"$dir/$bucket")
    if (!bucketFile.exists) throw NoSuchBucketException(bucket)
    val file = File(s"$dir/.mp/$bucket/$key/$uploadId/$partNumber")
    logger.debug(s"uploading multipart chunk $partNumber for s3://$bucket/$key")
    file.writeByteArray(data)(OpenOptions.default)
  }

  /** Concatenates the staged chunks (in the order given by the request) into
    * the final object file and removes the staging directory. */
  override def putObjectMultipartComplete(bucket:String, key:String, uploadId:String, request:CompleteMultipartUpload) = {
    val bucketFile = File(s"$dir/$bucket")
    if (!bucketFile.exists) throw NoSuchBucketException(bucket)
    val files = request.parts.map(part => File(s"$dir/.mp/$bucket/$key/$uploadId/${part.partNumber}"))
    val parts = files.map(f => f.byteArray)
    val file = File(s"$dir/$bucket/$key")
    file.createIfNotExists(createParents = true)
    val data = parts.fold(Array[Byte]())(_ ++ _)
    file.writeBytes(data.toIterator)
    File(s"$dir/.mp/$bucket/$key").delete()
    logger.debug(s"completed multipart upload for s3://$bucket/$key")
    CompleteMultipartUploadResult(bucket, key, file.md5)
  }

  /** Copies an object (and its metadata, unless `newMeta` overrides it). */
  override def copyObject(sourceBucket: String, sourceKey: String, destBucket: String, destKey: String, newMeta: Option[ObjectMetadata] = None): CopyObjectResult = {
    val sourceBucketFile = File(s"$dir/$sourceBucket")
    val destBucketFile = File(s"$dir/$destBucket")
    if (!sourceBucketFile.exists) throw NoSuchBucketException(sourceBucket)
    if (!destBucketFile.exists) throw NoSuchBucketException(destBucket)
    val sourceFile = File(s"$dir/$sourceBucket/$sourceKey")
    val destFile = File(s"$dir/$destBucket/$destKey")
    destFile.createIfNotExists(createParents = true)
    sourceFile.copyTo(destFile, overwrite = true)
    logger.debug(s"Copied s3://$sourceBucket/$sourceKey to s3://$destBucket/$destKey")
    val sourceMeta = newMeta.orElse(metadataStore.get(sourceBucket, sourceKey))
    sourceMeta.foreach(meta => metadataStore.put(destBucket, destKey, meta))
    CopyObjectResult(DateTime(sourceFile.lastModifiedTime.toEpochMilli), destFile.md5)
  }

  /** Deletes an object file and its metadata; directories are left alone. */
  override def deleteObject(bucket:String, key:String): Unit = {
    val file = File(s"$dir/$bucket/$key")
    logger.debug(s"deleting object s3://$bucket/$key")
    if (!file.exists) throw NoSuchKeyException(bucket, key)
    if (!file.isDirectory) {
      file.delete()
      metadataStore.delete(bucket, key)
    }
  }

  /** Deletes the bucket directory (recursively) and its metadata. */
  override def deleteBucket(bucket:String): Unit = {
    val bucketFile = File(s"$dir/$bucket")
    logger.debug(s"deleting bucket s3://$bucket")
    if (!bucketFile.exists) throw NoSuchBucketException(bucket)
    bucketFile.delete()
    metadataStore.remove(bucket)
  }

  /** Replace the os separator with a '/' */
  private def fromOs(path: String): String = {
    path.replace(JFile.separatorChar, '/')
  }

}
161 |
--------------------------------------------------------------------------------
/src/main/scala/io/findify/s3mock/provider/InMemoryProvider.scala:
--------------------------------------------------------------------------------
1 | package io.findify.s3mock.provider
2 |
3 | import java.time.Instant
4 | import java.util.{Date, UUID}
5 |
6 | import akka.http.scaladsl.model.DateTime
7 | import com.amazonaws.services.s3.model.ObjectMetadata
8 | import com.typesafe.scalalogging.LazyLogging
9 | import io.findify.s3mock.error.{NoSuchBucketException, NoSuchKeyException}
10 | import io.findify.s3mock.provider.metadata.{InMemoryMetadataStore, MetadataStore}
11 | import io.findify.s3mock.request.{CompleteMultipartUpload, CreateBucketConfiguration}
12 | import io.findify.s3mock.response._
13 | import org.apache.commons.codec.digest.DigestUtils
14 |
15 | import scala.collection.concurrent.TrieMap
16 | import scala.collection.mutable
17 | import scala.util.Random
18 |
19 | class InMemoryProvider extends Provider with LazyLogging {
20 | private val mdStore = new InMemoryMetadataStore
21 | private val bucketDataStore = new TrieMap[String, BucketContents]
22 | private val multipartTempStore = new TrieMap[String, mutable.SortedSet[MultipartChunk]]
23 |
  // In-memory contents of one bucket: creation time plus a mutable map of key -> stored object.
  private case class BucketContents(creationTime: DateTime, keysInBucket: mutable.Map[String, KeyContents])

  // A stored object: payload bytes and the time they were last written.
  private case class KeyContents(lastModificationTime: DateTime, data: Array[Byte])

  // One uploaded multipart chunk; ordered by part number so chunks concatenate in part order.
  private case class MultipartChunk(partNo: Int, data: Array[Byte]) extends Ordered[MultipartChunk] {
    override def compare(that: MultipartChunk): Int = partNo compareTo that.partNo
  }
31 |
32 | override def metadataStore: MetadataStore = mdStore
33 |
34 | override def listBuckets: ListAllMyBuckets = {
35 | val buckets = bucketDataStore map { case (name, data) => Bucket(name, data.creationTime) }
36 | logger.debug(s"listing buckets: ${buckets.map(_.name)}")
37 | ListAllMyBuckets("root", UUID.randomUUID().toString, buckets.toList)
38 | }
39 |
40 | override def listBucket(bucket: String, prefix: Option[String], delimiter: Option[String], maxkeys: Option[Int]): ListBucket = {
41 | def commonPrefix(dir: String, p: String, d: String): Option[String] = {
42 | dir.indexOf(d, p.length) match {
43 | case -1 => None
44 | case pos => Some(p + dir.substring(p.length, pos) + d)
45 | }
46 | }
47 |
48 | val prefix2 = prefix.getOrElse("")
49 | bucketDataStore.get(bucket) match {
50 | case Some(bucketContent) =>
51 | val matchingKeys = bucketContent.keysInBucket.filterKeys(_.startsWith(prefix2))
52 | val matchResults = matchingKeys map { case (name, content) =>
53 | Content(name, content.lastModificationTime, DigestUtils.md5Hex(content.data), content.data.length, "STANDARD")
54 | }
55 | logger.debug(s"listing bucket contents: ${matchResults.map(_.key)}")
56 | val commonPrefixes = delimiter match {
57 | case Some(del) => matchResults.flatMap(f => commonPrefix(f.key, prefix2, del)).toList.sorted.distinct
58 | case None => Nil
59 | }
60 | val filteredFiles: List[Content] = matchResults.filterNot(f => commonPrefixes.exists(p => f.key.startsWith(p))).toList
61 | val count = maxkeys.getOrElse(Int.MaxValue)
62 | val result = filteredFiles.sortBy(_.key)
63 | ListBucket(bucket, prefix, delimiter, commonPrefixes, result.take(count).take(count), isTruncated = result.size>count)
64 | case None => throw NoSuchBucketException(bucket)
65 | }
66 | }
67 |
/**
  * Creates a bucket with the given name. If the bucket already exists it is left
  * untouched (putIfAbsent keeps the existing contents) and the call still succeeds.
  */
override def createBucket(name: String, bucketConfig: CreateBucketConfiguration): CreateBucket = {
  val emptyBucket = BucketContents(DateTime.now, new TrieMap)
  bucketDataStore.putIfAbsent(name, emptyBucket)
  logger.debug(s"creating bucket $name")
  CreateBucket(name)
}
73 |
/**
  * Stores an object's bytes in the bucket and records its metadata,
  * stamping the metadata with the current time as last-modified.
  *
  * @throws NoSuchBucketException if the bucket does not exist
  */
override def putObject(bucket: String, key: String, data: Array[Byte], objectMetadata: ObjectMetadata): Unit = {
  val bucketContent = bucketDataStore.getOrElse(bucket, throw NoSuchBucketException(bucket))
  logger.debug(s"putting object for s3://$bucket/$key, bytes = ${data.length}")
  bucketContent.keysInBucket.put(key, KeyContents(DateTime.now, data))
  objectMetadata.setLastModified(org.joda.time.DateTime.now().toDate)
  metadataStore.put(bucket, key, objectMetadata)
}
84 |
/**
  * Reads an object's bytes and its stored metadata (if any).
  *
  * @throws NoSuchBucketException if the bucket does not exist
  * @throws NoSuchKeyException    if the key does not exist in the bucket
  */
override def getObject(bucket: String, key: String): GetObjectData = {
  bucketDataStore.get(bucket) match {
    case Some(bucketContent) => bucketContent.keysInBucket.get(key) match {
      case Some(keyContent) =>
        // Fixed log typo: "s://" -> "s3://", matching every other operation's messages.
        logger.debug(s"reading object for s3://$bucket/$key")
        val meta = metadataStore.get(bucket, key)
        GetObjectData(keyContent.data, meta)
      case None => throw NoSuchKeyException(bucket, key)
    }
    case None => throw NoSuchBucketException(bucket)
  }
}
97 |
/**
  * Begins a multipart upload: allocates a temporary chunk set under a fresh,
  * non-negative numeric upload id and returns it to the caller.
  *
  * @throws NoSuchBucketException if the bucket does not exist
  */
override def putObjectMultipartStart(bucket: String, key: String): InitiateMultipartUploadResult = {
  bucketDataStore.get(bucket) match {
    case Some(_) =>
      // Mask the sign bit instead of Math.abs: Math.abs(Long.MinValue) is still negative,
      // which would have produced an id with a leading '-'.
      val id = (Random.nextLong() & Long.MaxValue).toString
      multipartTempStore.putIfAbsent(id, new mutable.TreeSet)
      logger.debug(s"starting multipart upload for s3://$bucket/$key")
      InitiateMultipartUploadResult(bucket, key, id)
    case None => throw NoSuchBucketException(bucket)
  }
}
108 |
/**
  * Stores one chunk of an in-flight multipart upload, creating the chunk set on demand.
  *
  * NOTE(review): chunks live in a TreeSet — presumably ordered by part number; if so,
  * re-uploading the same part number keeps the first payload. Confirm against
  * MultipartChunk's Ordering before relying on part overwrite.
  *
  * @throws NoSuchBucketException if the bucket does not exist
  */
override def putObjectMultipartPart(bucket: String, key: String, partNumber: Int, uploadId: String, data: Array[Byte]): Unit = {
  if (!bucketDataStore.contains(bucket)) throw NoSuchBucketException(bucket)
  logger.debug(s"uploading multipart chunk $partNumber for s3://$bucket/$key")
  val chunks = multipartTempStore.getOrElseUpdate(uploadId, new mutable.TreeSet)
  chunks.add(MultipartChunk(partNumber, data))
}
117 |
/**
  * Finalizes a multipart upload: concatenates all uploaded chunks in part order,
  * stores the assembled object under the key and discards the temporary chunks.
  *
  * @throws NoSuchBucketException   if the bucket does not exist
  * @throws NoSuchElementException  if the uploadId is unknown (direct map apply)
  */
override def putObjectMultipartComplete(bucket: String, key: String, uploadId: String, request: CompleteMultipartUpload): CompleteMultipartUploadResult = {
  bucketDataStore.get(bucket) match {
    case Some(bucketContent) =>
      // Array.concat is linear in the total payload; the previous fold(_ ++ _)
      // re-copied the accumulated prefix for every chunk (quadratic).
      val completeBytes = Array.concat(multipartTempStore(uploadId).toSeq.map(_.data): _*)
      bucketContent.keysInBucket.put(key, KeyContents(DateTime.now, completeBytes))
      multipartTempStore.remove(uploadId)
      logger.debug(s"completed multipart upload for s3://$bucket/$key")
      CompleteMultipartUploadResult(bucket, key, DigestUtils.md5Hex(completeBytes))
    case None => throw NoSuchBucketException(bucket)
  }
}
129 |
/**
  * Copies an object's bytes (deep copy) from one bucket/key to another.
  * Metadata for the destination comes from newMeta when given, otherwise from
  * the source object's stored metadata; if neither exists, none is written.
  *
  * @throws NoSuchBucketException if either bucket does not exist (source checked first)
  * @throws NoSuchKeyException    if the source key does not exist
  */
override def copyObject(sourceBucket: String, sourceKey: String, destBucket: String, destKey: String, newMeta: Option[ObjectMetadata] = None): CopyObjectResult = {
  (bucketDataStore.get(sourceBucket), bucketDataStore.get(destBucket)) match {
    case (None, _) => throw NoSuchBucketException(sourceBucket)
    case (_, None) => throw NoSuchBucketException(destBucket)
    case (Some(src), Some(dst)) =>
      val srcContent = src.keysInBucket.getOrElse(sourceKey, throw NoSuchKeyException(sourceBucket, sourceKey))
      val copiedAt = DateTime.now
      dst.keysInBucket.put(destKey, KeyContents(copiedAt, srcContent.data.clone))
      logger.debug(s"Copied s3://$sourceBucket/$sourceKey to s3://$destBucket/$destKey")
      newMeta.orElse(metadataStore.get(sourceBucket, sourceKey))
        .foreach(meta => metadataStore.put(destBucket, destKey, meta))
      CopyObjectResult(copiedAt, DigestUtils.md5Hex(srcContent.data))
  }
}
147 |
/**
  * Deletes a single object and its metadata. If the key does not exist but is a
  * prefix of existing keys, the call is a deliberate no-op (S3 has no recursive
  * delete-by-prefix); only a key that matches nothing at all raises NoSuchKey.
  *
  * @throws NoSuchBucketException if the bucket does not exist
  * @throws NoSuchKeyException    if the key matches no object and no prefix
  */
override def deleteObject(bucket: String, key: String): Unit = {
  bucketDataStore.get(bucket) match {
    case Some(bucketContent) => bucketContent.keysInBucket.get(key) match {
      case Some(_) =>
        // Fixed log typo: "s://" -> "s3://".
        logger.debug(s"deleting object s3://$bucket/$key")
        bucketContent.keysInBucket.remove(key)
        metadataStore.delete(bucket, key)
      case None => bucketContent.keysInBucket.keys.find(_.startsWith(key)) match {
        case Some(_) =>
          logger.debug(s"recursive delete by prefix is not supported by S3")
          // Fixed: was `Unit` — that references the companion object, not the unit value.
          ()
        case None =>
          logger.warn(s"key does not exist")
          throw NoSuchKeyException(bucket, key)
      }
    }
    case None => throw NoSuchBucketException(bucket)
  }
}
167 |
/**
  * Deletes a bucket, all of its objects, and the bucket's metadata.
  *
  * @throws NoSuchBucketException if the bucket does not exist
  */
override def deleteBucket(bucket: String): Unit = {
  bucketDataStore.get(bucket) match {
    case Some(_) =>
      // Fixed log typo: "s://" -> "s3://".
      logger.debug(s"deleting bucket s3://$bucket")
      bucketDataStore.remove(bucket)
      metadataStore.remove(bucket)
    case None => throw NoSuchBucketException(bucket)
  }
}
177 | }
178 |
--------------------------------------------------------------------------------
/src/test/scala/io/findify/s3mock/ListBucketTest.scala:
--------------------------------------------------------------------------------
1 | package io.findify.s3mock
2 |
3 | import java.util
4 | import java.util.Date
5 |
6 | import com.amazonaws.services.s3.AmazonS3ClientBuilder
7 | import com.amazonaws.services.s3.model.{AmazonS3Exception, ListObjectsRequest, ListObjectsV2Request, S3ObjectSummary}
8 | import org.joda.time.DateTime
9 |
10 | import scala.collection.JavaConversions._
11 | import scala.collection.JavaConverters._
12 |
/**
  * Tests for bucket listing: prefixes, delimiters, common-prefix roll-up, ordering,
  * max-keys truncation, etags and last-modified timestamps.
  *
  * Fixed: object-summary lists are now converted with explicit `.asScala`
  * (JavaConverters) instead of relying on the deprecated implicit
  * `scala.collection.JavaConversions` wildcard.
  *
  * Created by shutty on 8/9/16.
  */
class ListBucketTest extends S3MockTest {
  override def behaviour(fixture: => Fixture) = {
    val s3 = fixture.client
    it should "list bucket" in {
      s3.createBucket("foo")
      s3.listObjects("foo").getObjectSummaries.isEmpty shouldBe true
    }
    it should "list bucket with prefix" in {
      s3.createBucket("list")
      s3.putObject("list", "foo1", "xxx")
      s3.putObject("list", "foo2", "xxx")
      s3.putObject("list", "xfoo3", "xxx")
      val list = s3.listObjects("list", "foo").getObjectSummaries.asScala.toList
      list.map(_.getKey).forall(_.startsWith("foo")) shouldBe true
    }
    it should "list objects in subfolders with prefix" in {
      s3.createBucket("list2")
      s3.putObject("list2", "one/foo1/1", "xxx")
      s3.putObject("list2", "one/foo2/2", "xxx")
      s3.putObject("list2", "one/foo2/3", "xxx")
      s3.putObject("list2", "one/foo2/4", "xxx")
      s3.putObject("list2", "one/xfoo3", "xxx")
      val ol = s3.listObjects("list2", "one/f").getObjectSummaries.asScala.toList
      ol.size shouldBe 4
      ol.map(_.getKey).forall(_.startsWith("one/foo")) shouldBe true
    }
    it should "return empty list if prefix is incorrect" in {
      s3.createBucket("list3")
      s3.putObject("list3", "one/foo1", "xxx")
      s3.putObject("list3", "one/foo2", "xxx")
      s3.putObject("list3", "one/xfoo3", "xxx")
      s3.listObjects("list3", "qaz/qax").getObjectSummaries.asScala.isEmpty shouldBe true
    }
    it should "return keys with valid keys (when no prefix given)" in {
      s3.createBucket("list4")
      s3.putObject("list4", "one", "xxx")
      val summaries: util.List[S3ObjectSummary] = s3.listObjects("list4").getObjectSummaries
      summaries.size() shouldBe 1
      val summary = summaries.get(0)
      summary.getBucketName shouldBe "list4"
      summary.getKey shouldBe "one"
      summary.getSize shouldBe 3
      summary.getStorageClass shouldBe "STANDARD"

      // The key returned by a listing must be directly usable in getObject.
      val returnedKey = summaries.asScala.last.getKey
      s3.getObject("list4", returnedKey).getKey shouldBe "one"
    }

    it should "produce NoSuchBucket if bucket does not exist" in {
      val exc = intercept[AmazonS3Exception] {
        s3.listObjects("aws-404", "qaz/qax")
      }
      exc.getStatusCode shouldBe 404
      exc.getErrorCode shouldBe "NoSuchBucket"
    }

    it should "obey delimiters && prefixes v1" in {
      s3.createBucket("list5")
      s3.putObject("list5", "sample.jpg", "xxx")
      s3.putObject("list5", "photos/2006/January/sample.jpg", "yyy")
      s3.putObject("list5", "photos/2006/February/sample2.jpg", "zzz")
      s3.putObject("list5", "photos/2006/February/sample3.jpg", "zzz")
      s3.putObject("list5", "photos/2006/February/sample4.jpg", "zzz")
      val req1 = new ListObjectsRequest()
      req1.setBucketName("list5")
      req1.setDelimiter("/")
      val list1 = s3.listObjects(req1)
      val summaries1 = list1.getObjectSummaries.asScala.map(_.getKey).toList
      list1.getCommonPrefixes.asScala.toList shouldBe List("photos/")
      summaries1 shouldBe List("sample.jpg")
    }
    it should "obey delimiters && prefixes v2" in {
      s3.createBucket("list5")
      s3.putObject("list5", "sample.jpg", "xxx")
      s3.putObject("list5", "photos/2006/January/sample.jpg", "yyy")
      s3.putObject("list5", "photos/2006/February/sample2.jpg", "zzz")
      s3.putObject("list5", "photos/2006/February/sample3.jpg", "zzz")
      s3.putObject("list5", "photos/2006/February/sample4.jpg", "zzz")
      val req2 = new ListObjectsRequest()
      req2.setBucketName("list5")
      req2.setDelimiter("/")
      req2.setPrefix("photos/2006/")
      val list2 = s3.listObjects(req2)
      val summaries2 = list2.getObjectSummaries.asScala.map(_.getKey).toList
      list2.getCommonPrefixes.asScala.toList shouldBe List("photos/2006/February/", "photos/2006/January/")
      summaries2 shouldBe Nil
    }

    // Kept ignored: runs against real S3 via default credentials, not the mock.
    it should "obey delimiters && prefixes v2 (matching real s3)" ignore {
      val s3 = AmazonS3ClientBuilder.defaultClient()
      s3.createBucket("findify-merlin")
      s3.putObject("findify-merlin", "sample.jpg", "xxx")
      s3.putObject("findify-merlin", "photos/2006/January/sample.jpg", "yyy")
      s3.putObject("findify-merlin", "photos/2006/February/sample2.jpg", "zzz")
      s3.putObject("findify-merlin", "photos/2006/February/sample3.jpg", "zzz")
      s3.putObject("findify-merlin", "photos/2006/February/sample4.jpg", "zzz")
      val req2 = new ListObjectsRequest()
      req2.setBucketName("findify-merlin")
      req2.setDelimiter("/")
      req2.setPrefix("photos/")
      val list2 = s3.listObjects(req2)
      val summaries2 = list2.getObjectSummaries.asScala.map(_.getKey).toList
      list2.getCommonPrefixes.asScala.toList shouldBe List("photos/2006/")
      summaries2 shouldBe Nil
    }

    it should "obey delimiters && prefixes v3" in {
      s3.createBucket("list5")
      s3.putObject("list5", "dev/someEvent/2017/03/13/00/_SUCCESS", "xxx")
      s3.putObject("list5", "dev/someEvent/2017/03/13/01/_SUCCESS", "yyy")
      s3.putObject("list5", "dev/someEvent/2016/12/31/23/_SUCCESS", "zzz")
      val req2 = new ListObjectsRequest()
      req2.setBucketName("list5")
      req2.setDelimiter("/")
      req2.setPrefix("dev/")
      val list2 = s3.listObjects(req2)
      val summaries2 = list2.getObjectSummaries.asScala.map(_.getKey).toList
      list2.getCommonPrefixes.asScala.toList shouldBe List("dev/someEvent/")
      summaries2 shouldBe Nil
    }

    it should "list objects in lexicographical order" in {
      s3.createBucket("list6")
      s3.putObject("list6", "b", "xx")
      s3.putObject("list6", "a", "xx")
      s3.putObject("list6", "0", "xx")
      val list = s3.listObjects("list6")
      list.getObjectSummaries.asScala.map(_.getKey).toList shouldBe List("0", "a", "b")
    }

    it should "getCommonPrefixes should return return objects sorted lexicographically" in {
      s3.createBucket("list7")
      s3.putObject("list7", "dev/10/2017/03/13/00/_SUCCESS", "xxx")
      s3.putObject("list7", "dev/10/2017/03/13/01/_SUCCESS", "xxx")
      s3.putObject("list7", "dev/20/2017/03/13/00/_SUCCESS", "xxx")
      s3.putObject("list7", "dev/20/2017/03/13/01/_SUCCESS", "xxx")
      s3.putObject("list7", "dev/30/2017/03/13/00/_SUCCESS", "xxx")
      s3.putObject("list7", "dev/30/2017/03/13/01/_SUCCESS", "xxx")
      s3.putObject("list7", "dev/40/2017/03/13/00/_SUCCESS", "xxx")
      s3.putObject("list7", "dev/40/2017/03/13/01/_SUCCESS", "xxx")
      s3.putObject("list7", "dev/50/2017/03/13/00/_SUCCESS", "xxx")
      s3.putObject("list7", "dev/50/2017/03/13/01/_SUCCESS", "xxx")
      val req2 = new ListObjectsRequest()
      req2.setBucketName("list7")
      req2.setDelimiter("/")
      req2.setPrefix("dev/")
      val list2 = s3.listObjects(req2)
      val summaries2 = list2.getObjectSummaries.asScala.map(_.getKey).toList
      list2.getCommonPrefixes.asScala.toList shouldBe List("dev/10/", "dev/20/", "dev/30/", "dev/40/", "dev/50/")
      summaries2 shouldBe Nil
    }

    it should "obey delimiters && prefixes when prefix equals to files name" in {
      s3.createBucket("list8")
      s3.putObject("list8", "dev/someEvent/2017/03/13/00/_SUCCESS", "xxx")
      val req2 = new ListObjectsRequest()
      req2.setBucketName("list8")
      req2.setDelimiter("/")
      req2.setPrefix("dev/someEvent/2017/03/13/00/_SUCCESS")
      val list2 = s3.listObjects(req2)
      list2.getObjectSummaries.size shouldEqual 1
      list2.getObjectSummaries.asScala.head.getKey shouldEqual "dev/someEvent/2017/03/13/00/_SUCCESS"
    }

    it should "obey withMaxKeys" in {
      s3.createBucket("list7k")
      s3.putObject("list7k", "b", "xx")
      s3.putObject("list7k", "a", "xx")
      s3.putObject("list7k", "c", "xx")
      val request = new ListObjectsV2Request().withBucketName("list7k").withMaxKeys(2)
      val list = s3.listObjectsV2(request)
      list.getObjectSummaries.asScala.map(_.getKey).toList shouldBe List("a", "b")
      list.isTruncated shouldBe true
    }

    it should "have correct etags" in {
      s3.createBucket("list9")
      s3.putObject("list9", "foo1", "xxx")
      s3.putObject("list9", "foo2", "yyy")
      val list = s3.listObjects("list9", "foo").getObjectSummaries.asScala.toList
      list.find(_.getKey == "foo1").map(_.getETag) shouldBe Some("f561aaf6ef0bf14d4208bb46a4ccb3ad")
    }

    it should "set correct last-modified header" in {
      s3.createBucket("list10")
      s3.putObject("list10", "foo", "xxx")
      val list = s3.listObjects("list10").getObjectSummaries.asScala.toList
      list.find(_.getKey == "foo").map(_.getLastModified.after(DateTime.now().minusMinutes(1).toDate)) shouldBe Some(true)
    }
  }
}
209 |
--------------------------------------------------------------------------------