├── .gitignore
├── .travis.yml
├── CHANGELOG.md
├── LICENCE.md
├── README.md
├── build.sbt
├── project
├── build.properties
└── plugins.sbt
└── src
├── main
├── resources
│ └── application.conf
└── scala
│ └── io
│ └── findify
│ └── s3mock
│ ├── Main.scala
│ ├── S3ChunkedProtocolStage.scala
│ ├── S3Mock.scala
│ ├── error
│ ├── InternalErrorException.scala
│ ├── NoSuchBucketException.scala
│ └── NoSuchKeyException.scala
│ ├── provider
│ ├── FileProvider.scala
│ ├── InMemoryProvider.scala
│ ├── Provider.scala
│ └── metadata
│ │ ├── InMemoryMetadataStore.scala
│ │ ├── MapMetadataStore.scala
│ │ └── MetadataStore.scala
│ ├── request
│ ├── CompleteMultipartUploadPart.scala
│ ├── CreateBucketConfiguration.scala
│ └── DeleteObjectsRequest.scala
│ ├── response
│ ├── CompleteMultipartUploadResult.scala
│ ├── CopyObjectResult.scala
│ ├── CreateBucket.scala
│ ├── DeleteObjectsResponse.scala
│ ├── InitiateMultipartUploadResult.scala
│ ├── ListAllMyBuckets.scala
│ └── ListBucket.scala
│ └── route
│ ├── CopyObject.scala
│ ├── CopyObjectMultipart.scala
│ ├── CreateBucket.scala
│ ├── DeleteBucket.scala
│ ├── DeleteObject.scala
│ ├── DeleteObjects.scala
│ ├── GetObject.scala
│ ├── ListBucket.scala
│ ├── ListBuckets.scala
│ ├── MetadataUtil.scala
│ ├── PutObject.scala
│ ├── PutObjectMultipart.scala
│ ├── PutObjectMultipartComplete.scala
│ └── PutObjectMultipartStart.scala
└── test
├── java
└── io
│ └── findify
│ └── s3mock
│ └── example
│ ├── JavaBuilderExample.java
│ └── JavaExample.java
├── resources
├── logback-test.xml
├── reference.conf
└── test.conf
├── scala-2.11
└── scala
│ └── collection
│ └── parallel
│ └── CollectionConverters.scala
├── scala-2.12
└── scala
│ └── collection
│ └── parallel
│ └── CollectionConverters.scala
└── scala
└── io
└── findify
└── s3mock
├── ChunkBufferTest.scala
├── CopyObjectTest.scala
├── CorrectShutdownTest.scala
├── DeleteTest.scala
├── GetPutObjectTest.scala
├── GetPutObjectWithMetadataTest.scala
├── JavaExampleTest.scala
├── ListBucketEmptyWorkdirTest.scala
├── ListBucketTest.scala
├── ListBucketsTest.scala
├── MapMetadataStoreTest.scala
├── MultipartCopyTest.scala
├── MultipartUploadTest.scala
├── PutBucketTest.scala
├── S3ChunkedProtocolTest.scala
├── S3MockTest.scala
├── TypesafeConfigTest.scala
├── alpakka
├── AlpakkaExample.scala
├── GetObjectTest.scala
├── ListBucketTest.scala
└── MultipartUploadTest.scala
├── awscli
├── AWSCliTest.scala
├── GetObjectTest.scala
└── PutBucketTest.scala
└── transfermanager
└── PutGetTest.scala
/.gitignore:
--------------------------------------------------------------------------------
1 | .idea
2 | project/project
3 | project/target
4 | target
5 | release.sh
--------------------------------------------------------------------------------
/.travis.yml:
--------------------------------------------------------------------------------
1 | language: scala
2 | scala:
3 | - 2.13.1
4 | jdk:
5 | - oraclejdk11
6 |
--------------------------------------------------------------------------------
/CHANGELOG.md:
--------------------------------------------------------------------------------
1 | 0.2.6
2 | =======
3 | * scala 2.13 support
4 | * java 11 support
5 | * follow symlinks on local disk
6 | * DeleteObjects should encode object key like single-object APIs
7 | * allow = in copy paths
8 | * update readme for alpakka 1.0
10 |
11 | 0.2.5
12 | =======
13 | * multipart copy support
14 | * fix issue with % char on multipart alpakka uploads
15 | * add JAXB module on java 9 docker container
16 | * fix OverlappingFileLockException in GetObject ([#83](https://github.com/findify/s3mock/issues/83))
17 | * docker: allow serving a volume-mounted directory
18 | * fix s3mock not correctly shutting down during internal ActorSystem being active ([#67](https://github.com/findify/s3mock/issues/67))
19 | * bump better-files dependency to 3.x ([#17](https://github.com/findify/s3mock/issues/17))
20 | * migrate build to sbt 1.1, use akka-http 10.1.x
21 |
22 | 0.2.4
23 | =======
24 | * pom -> jar dependency type doc fix
25 | * support alpakka multipart uploads
26 | * support alpakka listObjects ([#66](https://github.com/findify/s3mock/issues/66))
27 | * fix bug with etag on FileProvider being always "0" ([#70](https://github.com/findify/s3mock/issues/70))
28 | * fix last-modified header always being equal to "1970-01-01 00:00:00" ([#65](https://github.com/findify/s3mock/issues/65))
29 | * wrong content-type for listObjects ([#60](https://github.com/findify/s3mock/issues/60))
30 | * deleteObjects broken on aws s3 sdk 2.0 ([#71](https://github.com/findify/s3mock/issues/71))
31 | * docker image for non-jvm tests
32 |
33 | 0.2.3
34 | =======
35 | * windows compatibility in FileProvider ([#28](https://github.com/findify/s3mock/issues/28))
36 | * Max Keys not respected when calling list objects (V2) ([#47](https://github.com/findify/s3mock/issues/47))
37 | * getETag from getObjectMetadata returns null ([#48](https://github.com/findify/s3mock/issues/48))
38 | * update to akka 2.5.2, akka-http 10.0.7
39 | * fix concurrent requests causing weird locking issues on FileProvider ([#52](https://github.com/findify/s3mock/issues/52))
40 | * fix warnings in GetObject about incorrect headers ([#54](https://github.com/findify/s3mock/issues/54))
41 |
42 | 0.2.2
43 | =======
44 | * More convenient and traditional Java API with Builder-style instance creation
45 | * Docs update for alpakka usage
46 | * Javadocs for all public API methods
47 | * use latest aws-java-sdk-s3 library
48 |
49 | 0.2.1
50 | =======
51 | * Bump akka to 2.5.1
52 | * fix issue when DeleteObjects response was malformed for multi-object deletes
53 | * alpakka support test case
54 | * fix subpath get/delete issues [#45](https://github.com/findify/s3mock/issues/45)
55 |
56 | 0.2.0
57 | =======
58 | * Support for ranged get requests ([#39](https://github.com/findify/s3mock/pull/39))
59 | * In-memory backend ([#37](https://github.com/findify/s3mock/pull/37))
60 | * Bugfix: ObjectListing#getCommonPrefixes order is not alphabetical ([#41](https://github.com/findify/s3mock/issues/41))
61 | * Akka 2.5.0 support
--------------------------------------------------------------------------------
/LICENCE.md:
--------------------------------------------------------------------------------
1 | # The MIT License (MIT)
2 |
3 | Copyright (c) 2016 Findify AB
4 |
5 | Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
6 |
7 | The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
8 |
9 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
10 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # S3 mock library for Java/Scala
2 |
3 | [](https://travis-ci.org/findify/s3mock)
4 | [](https://maven-badges.herokuapp.com/maven-central/io.findify/s3mock_2.12)
5 |
6 | s3mock is a web service implementing AWS S3 API, which can be used for local testing of your code using S3
7 | but without hitting real S3 endpoints.
8 |
9 | Implemented API methods:
10 | * list buckets
11 | * list objects (all & by prefix)
12 | * create bucket
13 | * delete bucket
14 | * put object (via PUT, POST, multipart and chunked uploads are also supported)
15 | * copy object
16 | * get object
17 | * delete object
18 | * batch delete
19 |
20 | Not supported features (these might be implemented later):
21 | * authentication: s3mock will accept any credentials without validity or signature checking
22 | * bucket policy, ACL, versioning
23 | * object ACL
24 | * posix-incompatible key structure with file-based provider, for example keys `/some.dir/file.txt` and `/some.dir` in the same bucket
25 |
26 | ## Installation
27 |
28 | s3mock package is available for Scala 2.11/2.12/2.13 (on Java 8/11). To install using SBT, add these
29 | statements to your `build.sbt`:
30 |
31 | libraryDependencies += "io.findify" %% "s3mock" % "0.2.6" % "test"
32 |
33 | On maven, update your `pom.xml` in the following way:
34 | ```xml
35 | <!-- add this entry to your <dependencies> section -->
36 | <dependency>
37 |     <groupId>io.findify</groupId>
38 |     <artifactId>s3mock_2.13</artifactId>
39 |     <version>0.2.6</version>
40 |     <scope>test</scope>
41 | </dependency>
42 | ```
43 |
44 | S3Mock is also available as a [docker container](https://hub.docker.com/r/findify/s3mock/) for out-of-jvm testing:
45 | ```bash
46 | docker run -p 8001:8001 findify/s3mock:latest
47 | ```
48 |
49 | To mount a directory containing the prepared content, mount the volume and set the `S3MOCK_DATA_DIR` environment variable:
50 | ```bash
51 | docker run -p 8001:8001 -v /host/path/to/s3mock/:/tmp/s3mock/ -e "S3MOCK_DATA_DIR=/tmp/s3mock" findify/s3mock:latest
52 | ```
53 |
54 | ## Usage
55 |
56 | Just point your s3 client to a localhost, enable path-style access, and it should work out of the box.
57 |
58 | There are two working modes for s3mock:
59 | * File-based: it will map a local directory as a collection of s3 buckets. This mode can be useful when you need to have a bucket with some pre-loaded data (and too lazy to re-upload everything on each run).
60 | * In-memory: keep everything in RAM. All the data you've uploaded to s3mock will be wiped completely on shutdown.
61 |
62 | Java:
63 | ```java
64 | import com.amazonaws.auth.AWSStaticCredentialsProvider;
65 | import com.amazonaws.auth.AnonymousAWSCredentials;
66 | import com.amazonaws.client.builder.AwsClientBuilder;
67 | import com.amazonaws.client.builder.AwsClientBuilder.EndpointConfiguration;
68 | import com.amazonaws.services.s3.AmazonS3;
69 | import com.amazonaws.services.s3.AmazonS3Builder;
70 | import com.amazonaws.services.s3.AmazonS3Client;
71 | import com.amazonaws.services.s3.AmazonS3ClientBuilder;
72 | import io.findify.s3mock.S3Mock;
73 |
74 | /*
75 | S3Mock.create(8001, "/tmp/s3");
76 | */
77 | S3Mock api = new S3Mock.Builder().withPort(8001).withInMemoryBackend().build();
78 | api.start();
79 |
80 | /* AWS S3 client setup.
81 | * withPathStyleAccessEnabled(true) trick is required to overcome S3 default
82 | * DNS-based bucket access scheme
83 | * resulting in attempts to connect to addresses like "bucketname.localhost"
84 | * which requires specific DNS setup.
85 | */
86 | EndpointConfiguration endpoint = new EndpointConfiguration("http://localhost:8001", "us-west-2");
87 | AmazonS3Client client = AmazonS3ClientBuilder
88 | .standard()
89 | .withPathStyleAccessEnabled(true)
90 | .withEndpointConfiguration(endpoint)
91 | .withCredentials(new AWSStaticCredentialsProvider(new AnonymousAWSCredentials()))
92 | .build();
93 |
94 | client.createBucket("testbucket");
95 | client.putObject("testbucket", "file/name", "contents");
96 | api.shutdown(); // kills the underlying actor system. Use api.stop() to just unbind the port.
97 | ```
98 |
99 | Scala with AWS S3 SDK:
100 | ```scala
101 | import com.amazonaws.auth.AWSStaticCredentialsProvider
102 | import com.amazonaws.auth.AnonymousAWSCredentials
103 | import com.amazonaws.client.builder.AwsClientBuilder
104 | import com.amazonaws.client.builder.AwsClientBuilder.EndpointConfiguration;
105 | import com.amazonaws.services.s3.AmazonS3
106 | import com.amazonaws.services.s3.AmazonS3Builder
107 | import com.amazonaws.services.s3.AmazonS3Client
108 | import com.amazonaws.services.s3.AmazonS3ClientBuilder
109 | import io.findify.s3mock.S3Mock
110 |
111 |
112 | /** Create and start S3 API mock. */
113 | val api = S3Mock(port = 8001, dir = "/tmp/s3")
114 | api.start
115 |
116 | /* AWS S3 client setup.
117 | * withPathStyleAccessEnabled(true) trick is required to overcome S3 default
118 | * DNS-based bucket access scheme
119 | * resulting in attempts to connect to addresses like "bucketname.localhost"
120 | * which requires specific DNS setup.
121 | */
122 | val endpoint = new EndpointConfiguration("http://localhost:8001", "us-west-2")
123 | val client = AmazonS3ClientBuilder
124 | .standard
125 | .withPathStyleAccessEnabled(true)
126 | .withEndpointConfiguration(endpoint)
127 | .withCredentials(new AWSStaticCredentialsProvider(new AnonymousAWSCredentials()))
128 | .build
129 |
130 | /** Use it as usual. */
131 | client.createBucket("foo")
132 | client.putObject("foo", "bar", "baz")
133 | api.shutdown() // this one terminates the actor system. Use api.stop() to just unbind the service without messing with the ActorSystem
134 | ```
135 |
136 | Scala with Alpakka 1.0.0:
137 | ```scala
138 | import akka.actor.ActorSystem
139 | import akka.stream.ActorMaterializer
140 | import akka.stream.alpakka.s3.scaladsl.S3Client
141 | import akka.stream.scaladsl.Sink
142 | import com.typesafe.config.ConfigFactory
143 | import akka.util.ByteString; import scala.collection.JavaConverters._
144 |
145 | val config = ConfigFactory.parseMap(Map(
146 | "alpakka.s3.proxy.host" -> "localhost",
147 | "alpakka.s3.proxy.port" -> 8001,
148 | "alpakka.s3.proxy.secure" -> false,
149 | "alpakka.s3.path-style-access" -> true,
150 | "alpakka.s3.aws.credentials.provider" -> "static",
151 | "alpakka.s3.aws.credentials.access-key-id" -> "foo",
152 | "alpakka.s3.aws.credentials.secret-access-key" -> "bar",
153 | "alpakka.s3.aws.region.provider" -> "static",
154 | "alpakka.s3.aws.region.default-region" -> "us-east-1"
155 | ).asJava)
156 | implicit val system = ActorSystem.create("test", config)
157 | implicit val mat = ActorMaterializer()
158 | import system.dispatcher
159 | val s3a = S3Client()
160 | val contents = s3a.download("bucket", "key")._1.runWith(Sink.reduce[ByteString](_ ++ _)).map(_.utf8String)
161 |
162 | ```
163 |
164 | ## License
165 |
166 | The MIT License (MIT)
167 |
168 | Copyright (c) 2016 Findify AB
169 |
170 | Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
171 |
172 | The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
173 |
174 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
175 |
--------------------------------------------------------------------------------
/build.sbt:
--------------------------------------------------------------------------------
1 | name := "s3mock"
2 |
3 | version := "0.2.6"
4 |
5 | organization := "io.findify"
6 |
7 | scalaVersion in ThisBuild := "2.13.2"
8 |
9 | crossScalaVersions in ThisBuild := Seq("2.11.12", "2.12.10","2.13.2")
10 |
// Single source of truth for the akka-stream/testkit version; akka-http is versioned separately below.
11 | val akkaVersion = "2.5.31"
12 |
13 | licenses := Seq("MIT" -> url("https://opensource.org/licenses/MIT"))
14 |
15 | homepage := Some(url("https://github.com/findify/s3mock"))
16 |
17 | libraryDependencies ++= Seq(
18 | "com.typesafe.akka" %% "akka-stream" % akkaVersion,
19 | "com.typesafe.akka" %% "akka-http" % "10.1.12",
20 | "com.typesafe.akka" %% "akka-stream-testkit" % akkaVersion % "test",
21 | "org.scala-lang.modules" %% "scala-xml" % "1.3.0",
22 | "org.scala-lang.modules" %% "scala-collection-compat" % "2.1.6",
23 | "com.github.pathikrit" %% "better-files" % "3.9.1",
24 | "com.typesafe.scala-logging" %% "scala-logging" % "3.9.2",
25 | "com.amazonaws" % "aws-java-sdk-s3" % "1.11.294",
26 | "org.scalatest" %% "scalatest" % "3.0.8" % "test",
27 | "ch.qos.logback" % "logback-classic" % "1.2.3" % "test",
28 | "org.iq80.leveldb" % "leveldb" % "0.12",
29 | "com.lightbend.akka" %% "akka-stream-alpakka-s3" % "1.1.2" % "test",
// Explicit JAXB modules: per CHANGELOG 0.2.5 these were added for the Java 9+ docker container,
// where JAXB is no longer bundled with the JDK.
30 | "javax.xml.bind" % "jaxb-api" % "2.3.0",
31 | "com.sun.xml.bind" % "jaxb-core" % "2.3.0",
32 | "com.sun.xml.bind" % "jaxb-impl" % "2.3.0"
33 | )
34 |
// Parallel collections live in a separate module from Scala 2.13 on, so only add them there.
35 | libraryDependencies ++= {
36 | CrossVersion.partialVersion(scalaVersion.value) match {
37 | case Some((2, major)) if major >= 13 =>
38 | Seq("org.scala-lang.modules" %% "scala-parallel-collections" % "0.2.0" % "test")
39 | case _ =>
40 | Seq()
41 | }
42 | }
43 |
// Tests bind real network ports, so they must not run concurrently.
44 | parallelExecution in Test := false
45 |
46 | publishMavenStyle := true
47 |
48 | publishTo := {
49 | val nexus = "https://oss.sonatype.org/"
50 | if (isSnapshot.value)
51 | Some("snapshots" at nexus + "content/repositories/snapshots")
52 | else
53 | Some("releases" at nexus + "service/local/staging/deploy/maven2")
54 | }
55 |
// NOTE(review): the XML element tags of this pomExtra literal (scm/developers markup)
// appear to have been stripped by the text extraction that produced this listing —
// restore the literal from version control before treating this file as buildable.
56 | pomExtra := (
57 |
58 | git@github.com:findify/s3mock.git
59 | scm:git:git@github.com:findify/s3mock.git
60 |
61 |
62 |
63 | romangrebennikov
64 | Roman Grebennikov
65 | http://www.dfdx.me
66 |
67 | )
68 |
69 | enablePlugins(DockerPlugin)
70 | assemblyJarName in assembly := "s3mock.jar"
71 | mainClass in assembly := Some("io.findify.s3mock.Main")
// Do not run tests as part of the assembly task.
72 | test in assembly := {}
73 |
74 | dockerfile in docker := new Dockerfile {
75 | from("adoptopenjdk/openjdk11:jre-11.0.7_10-debian")
76 | expose(8001)
77 | add(assembly.value, "/app/s3mock.jar")
// NOTE(review): `--add-opens` is placed after `-jar`; the JVM launcher normally treats
// the argument following `-jar` as the jar file name — verify this entrypoint actually
// starts, or move the flags before `-jar`.
78 | entryPoint(
79 | "java",
80 | "-Xmx128m",
81 | "-jar",
82 | "--add-opens",
83 | "java.base/jdk.internal.ref=ALL-UNNAMED",
84 | "/app/s3mock.jar"
85 | )
86 | }
// Tag the image with both the sanitized version string and "latest".
87 | imageNames in docker := Seq(
88 | ImageName(s"findify/s3mock:${version.value.replaceAll("\\+", "_")}"),
89 | ImageName(s"findify/s3mock:latest")
90 | )
91 |
// NOTE(review): this overrides the publishTo setting defined above — in sbt the last
// assignment wins, so the sonatype bundle destination is the effective one.
92 | publishTo := sonatypePublishToBundle.value
--------------------------------------------------------------------------------
/project/build.properties:
--------------------------------------------------------------------------------
1 | sbt.version = 1.3.10
--------------------------------------------------------------------------------
/project/plugins.sbt:
--------------------------------------------------------------------------------
1 | logLevel := Level.Warn
2 |
3 | addSbtPlugin("com.timushev.sbt" % "sbt-updates" % "0.5.1")
4 | addSbtPlugin("net.virtual-void" % "sbt-dependency-graph" % "0.9.2")
5 | addSbtPlugin("org.xerial.sbt" % "sbt-sonatype" % "3.9.2")
6 | addSbtPlugin("com.jsuereth" % "sbt-pgp" % "2.0.1")
7 | addSbtPlugin("com.github.gseitz" % "sbt-release" % "1.0.13")
8 | addSbtPlugin("com.eed3si9n" % "sbt-assembly" % "0.14.10")
9 | addSbtPlugin("se.marcuslonnberg" % "sbt-docker" % "1.5.0")
10 |
--------------------------------------------------------------------------------
/src/main/resources/application.conf:
--------------------------------------------------------------------------------
1 | akka.http.parsing.illegal-header-warnings = off
2 |
3 | akka.http.server.parsing.max-content-length = 512 M
4 | akka.http.client.parsing.max-content-length = 512 M
--------------------------------------------------------------------------------
/src/main/scala/io/findify/s3mock/Main.scala:
--------------------------------------------------------------------------------
1 | package io.findify.s3mock
2 |
3 | import better.files.File
4 | import io.findify.s3mock.provider.FileProvider
5 |
6 | /**
7 | * Created by shutty on 8/9/16.
8 | */
9 | object Main {
// Standalone entry point (referenced by `mainClass in assembly` in build.sbt, and thus
// used by the docker image): serves a file-backed S3 mock on port 8001. The data root
// comes from S3MOCK_DATA_DIR when set, otherwise a fresh temporary directory per run.
10 | def main(args: Array[String]): Unit = {
11 | val server = new S3Mock(8001, new FileProvider(sys.env.getOrElse("S3MOCK_DATA_DIR", File.newTemporaryDirectory(prefix = "s3mock").pathAsString)))
12 | server.start
13 | }
14 | }
15 |
--------------------------------------------------------------------------------
/src/main/scala/io/findify/s3mock/S3ChunkedProtocolStage.scala:
--------------------------------------------------------------------------------
1 | package io.findify.s3mock
2 |
3 | import akka.stream._
4 | import akka.stream.stage.{GraphStage, GraphStageLogic, InHandler, OutHandler}
5 | import akka.util.ByteString
6 | import com.typesafe.scalalogging.LazyLogging
7 |
8 | /**
9 | * Created by shutty on 8/11/16.
10 | */
// Header of one AWS SigV4 streaming ("aws-chunked") chunk:
//   chunkSize  - payload size in bytes, parsed from the leading hex digits
//   headerSize - byte length of the whole header line (hex digits + signature part)
//   sig        - hex chunk signature carried after ";chunk-signature="
11 | case class Header(chunkSize:Int, headerSize:Int, sig:String)
12 |
// Accumulates raw request bytes and incrementally decodes chunks of the form
//   <hex-size>;chunk-signature=<64-hex-chars>\r\n<payload>\r\n
13 | class ChunkBuffer extends LazyLogging {
14 | val hexChars = "0123456789abcdef".getBytes.toSet
15 | var size = -1 // NOTE(review): appears unused — readHeader shadows it with a local `size`
16 | var buffer = ByteString("")
// Append newly received bytes to the pending buffer.
17 | def addChunk(data:ByteString) = buffer = buffer ++ data
// Try to parse a chunk header at the head of the buffer WITHOUT consuming it.
// Returns None until enough bytes have arrived (or if the data is not a valid header).
18 | def readHeader:Option[Header] = {
19 | val headerBuffer = buffer.take(90) // max header: up to 8 hex size digits + 83-byte signature part
20 | val size = headerBuffer.takeWhile(hexChars.contains)
// 83 = ";chunk-signature=" (17 bytes) + 64 hex signature chars + trailing "\r\n" (2 bytes)
21 | val sig = headerBuffer.drop(size.length).take(83)
22 | if ((size.length <= 8) && (sig.length == 83) && sig.startsWith(";chunk-signature=") && sig.endsWith("\r\n")) {
23 | val header = Header(Integer.parseInt(size.utf8String, 16), size.length + 83, sig.drop(17).dropRight(2).utf8String)
24 | logger.debug(s"read header: $header")
25 | Some(header)
26 | } else {
27 | logger.debug("cannot read header")
28 | None
29 | }
30 | }
// Consume one complete chunk (header + payload + trailing CRLF) and return the payload.
// Consumes nothing and returns None until the whole chunk has been buffered.
31 | def pullChunk(header:Header):Option[ByteString] = {
32 | if (buffer.length >= header.headerSize + header.chunkSize + 2) {
33 | buffer = buffer.drop(header.headerSize)
34 | val chunk = buffer.take(header.chunkSize)
35 | buffer = buffer.drop(header.chunkSize + 2) // +2 skips the "\r\n" chunk terminator
36 | logger.debug(s"pulled chunk, size=${header.chunkSize}")
37 | Some(chunk)
38 | } else {
39 | logger.debug(s"not enough data to pull chunk: chunkSize = ${header.chunkSize}, bufferSize = ${buffer.length}")
40 | None
41 | }
42 | }
43 | }
44 |
// One-in/one-out stream stage that strips AWS "aws-chunked" framing from an upload
// body, emitting only the decoded chunk payloads downstream.
45 | class S3ChunkedProtocolStage extends GraphStage[FlowShape[ByteString,ByteString]] {
46 | val out = Outlet[ByteString]("s3.out")
47 | val in = Inlet[ByteString]("s3.in")
48 | override val shape = FlowShape(in, out)
49 |
50 | override def createLogic(inheritedAttributes: Attributes) = new GraphStageLogic(shape) {
51 | val buffer = new ChunkBuffer()
52 |
53 | setHandler(in, new InHandler {
// Buffer the incoming bytes; emit one decoded chunk when complete, otherwise
// request more data from upstream.
54 | override def onPush() = {
55 | buffer.addChunk(grab(in))
56 | buffer.readHeader match {
57 | case Some(header) => buffer.pullChunk(header) match {
58 | case Some(chunk) => push(out, chunk)
59 | case None => pull(in)
60 | }
61 | case None => pull(in)
62 | }
63 | }
64 |
// On EOF: flush the last buffered chunk (if any) and complete downstream.
// NOTE(review): at most one chunk is flushed here; any further complete chunks
// still sitting in the buffer would be dropped — confirm upstream always pushes
// at chunk granularity.
65 | override def onUpstreamFinish() = {
66 | buffer.readHeader match {
67 | case Some(header) => buffer.pullChunk(header) match {
68 | case Some(chunk) =>
69 | push(out, chunk)
70 | complete(out)
71 | case None =>
72 | complete(out)
73 | }
74 | case None =>
75 | complete(out)
76 | }
77 | }
78 | })
// Downstream demand is satisfied by pulling more raw bytes from upstream.
79 | setHandler(out, new OutHandler {
80 | override def onPull() = {
81 | pull(in)
82 | }
83 | })
84 | }
85 |
86 | }
87 |
--------------------------------------------------------------------------------
/src/main/scala/io/findify/s3mock/S3Mock.scala:
--------------------------------------------------------------------------------
1 | package io.findify.s3mock
2 |
3 | import akka.actor.ActorSystem
4 | import akka.http.scaladsl.Http
5 | import akka.http.scaladsl.model.{HttpResponse, StatusCodes}
6 | import akka.http.scaladsl.server.Directives._
7 | import akka.stream.ActorMaterializer
8 | import com.typesafe.scalalogging.LazyLogging
9 | import io.findify.s3mock.provider.{FileProvider, InMemoryProvider, Provider}
10 | import io.findify.s3mock.route._
11 |
12 | import scala.concurrent.{Await, Future}
13 | import scala.concurrent.duration.Duration
14 |
15 | /**
16 | * Create s3mock instance, the hard mode.
17 | * @param port port to bind to
18 | * @param provider backend to use. There are currently two of them implemented, FileProvider and InMemoryProvider
19 | * @param system actor system to use. By default, create an own one.
20 | */
21 | class S3Mock(port:Int, provider:Provider)(implicit system:ActorSystem = ActorSystem.create("s3mock")) extends LazyLogging {
22 | implicit val p = provider
// Populated by start(); stop/shutdown dereference it, so they must only be
// called after a successful start().
23 | private var bind:Http.ServerBinding = _
24 |
// Build the akka-http route tree and bind on 0.0.0.0:port.
// Blocks until the bind completes and returns the resulting ServerBinding.
25 | def start = {
26 | implicit val mat = ActorMaterializer()
27 | val http = Http(system)
28 | val route =
29 | pathPrefix(Segment) { bucket =>
// Bucket-level operations, matched both with ("/bucket/") and without
// ("/bucket") a trailing slash.
30 | pathSingleSlash {
31 | concat(
32 | ListBucket().route(bucket),
33 | CreateBucket().route(bucket),
34 | DeleteBucket().route(bucket),
35 | DeleteObjects().route(bucket)
36 | )
37 | } ~ pathEnd {
38 | concat(
39 | ListBucket().route(bucket),
40 | CreateBucket().route(bucket),
41 | DeleteBucket().route(bucket),
42 | DeleteObjects().route(bucket)
43 | )
// Object-level operations: everything after the bucket segment is the object key.
44 | } ~ parameterMap { params =>
45 | path(RemainingPath) { key =>
46 | concat(
47 | GetObject().route(bucket, key.toString(), params),
48 | CopyObjectMultipart().route(bucket, key.toString()),
49 | CopyObject().route(bucket, key.toString()),
50 | PutObjectMultipart().route(bucket, key.toString()),
51 | PutObjectMultipartStart().route(bucket, key.toString()),
52 | PutObjectMultipartComplete().route(bucket, key.toString()),
53 | PutObject().route(bucket, key.toString()),
54 | DeleteObject().route(bucket, key.toString())
55 | )
56 | }
57 | }
// Fallback: log any request no route matched and answer 501 Not Implemented.
58 | } ~ ListBuckets().route() ~ extractRequest { request =>
59 | complete {
60 | logger.error(s"method not implemented: ${request.method.value} ${request.uri.toString}")
61 | HttpResponse(status = StatusCodes.NotImplemented)
62 | }
63 | }
64 |
65 | bind = Await.result(http.bindAndHandle(route, "0.0.0.0", port), Duration.Inf)
66 | logger.info(s"bound to 0.0.0.0:$port")
67 | bind
68 | }
69 |
70 | /**
71 | * Stop s3mock instance. For file-based working mode, it will not clean the mounted folder.
72 | * This one is also not shutting down the underlying ActorSystem
73 | */
74 | def stop: Unit = Await.result(bind.unbind(), Duration.Inf)
75 | /**
76 | * Stop s3mock instance and shutdown the underlying ActorSystem.
77 | */
78 | def shutdown: Unit = {
79 | import system.dispatcher
// Unbind, close pooled client connections, then terminate the ActorSystem —
// in that order — and block until the whole sequence has finished.
80 | val stopped = for {
81 | _ <- bind.unbind()
82 | _ <- Http().shutdownAllConnectionPools()
83 | _ <- system.terminate()
84 | } yield {
85 | ()
86 | }
87 | Await.result(stopped, Duration.Inf)
88 | }
89 | }
90 |
91 | object S3Mock {
// Scala API: in-memory backend (data is lost on shutdown).
92 | def apply(port: Int): S3Mock = new S3Mock(port, new InMemoryProvider)
// Scala API: file-based backend rooted at `dir`.
93 | def apply(port:Int, dir:String) = new S3Mock(port, new FileProvider(dir))
94 |
95 | /**
96 | * Create an in-memory s3mock instance
97 | * @param port a port to bind to.
98 | * @return s3mock instance
99 | */
100 | def create(port:Int) = apply(port) // Java API
101 | /**
102 | * Create a file-based s3mock instance
103 | * @param port port to bind to
104 | * @param dir directory to mount as a collection of buckets. First-level directories will be treated as buckets, their contents - as keys.
105 | * @return
106 | */
107 | def create(port:Int, dir:String) = apply(port, dir) // Java API
108 | /**
109 | * Builder class for java api.
110 | */
111 | class Builder {
// Defaults: port 8001 with an in-memory backend; overridden by the with* methods below.
112 | private var defaultPort: Int = 8001
113 | private var defaultProvider: Provider = new InMemoryProvider()
114 |
115 | /**
116 | * Set port to bind to
117 | * @param port port number
118 | * @return this builder, for chaining
119 | */
120 | def withPort(port: Int): Builder = {
121 | defaultPort = port
122 | this
123 | }
124 |
125 | /**
126 | * Use in-memory backend.
127 | * @return this builder, for chaining
128 | */
129 | def withInMemoryBackend(): Builder = {
130 | defaultProvider = new InMemoryProvider()
131 | this
132 | }
133 |
134 | /**
135 | * Use file-based backend
136 | * @param path Directory to mount
137 | * @return this builder, for chaining
138 | */
139 | def withFileBackend(path: String): Builder = {
140 | defaultProvider = new FileProvider(path)
141 | this
142 | }
143 |
144 | /**
145 | * Build s3mock instance
146 | * @return a new, not-yet-started S3Mock with the configured port and backend
147 | */
148 | def build(): S3Mock = {
149 | new S3Mock(defaultPort, defaultProvider)
150 | }
151 | }
152 | }
153 |
154 |
--------------------------------------------------------------------------------
/src/main/scala/io/findify/s3mock/error/InternalErrorException.scala:
--------------------------------------------------------------------------------
1 | package io.findify.s3mock.error
2 |
3 |
4 | case class InternalErrorException(throwable: Throwable) extends Exception(s"Internal server error", throwable) {
// Render the S3-style error XML payload for an internal-error response.
// NOTE(review): the XML element tags of this literal appear to have been
// stripped by the text extraction that produced this listing — restore the
// literal from version control before relying on this code.
5 | def toXML =
6 |
7 | InternalError
8 | {throwable.getMessage}
9 |
10 | }
11 |
--------------------------------------------------------------------------------
/src/main/scala/io/findify/s3mock/error/NoSuchBucketException.scala:
--------------------------------------------------------------------------------
1 | package io.findify.s3mock.error
2 |
3 | /**
4 | * Created by shutty on 8/11/16.
5 | */
6 | case class NoSuchBucketException(bucket:String) extends Exception(s"bucket does not exist: s3://$bucket") {
// Render the S3-style NoSuchBucket error XML payload.
// NOTE(review): the XML element tags of this literal appear to have been
// stripped by the text extraction that produced this listing — restore the
// literal from version control before relying on this code.
7 | def toXML =
8 | NoSuchBucket
9 | The specified bucket does not exist
10 | {bucket}
11 |
12 | }
13 |
--------------------------------------------------------------------------------
/src/main/scala/io/findify/s3mock/error/NoSuchKeyException.scala:
--------------------------------------------------------------------------------
1 | package io.findify.s3mock.error
2 |
3 | /**
4 | * Created by shutty on 8/11/16.
5 | */
6 | case class NoSuchKeyException(bucket:String, key:String) extends Exception(s"key does not exist: s3://$bucket/$key") {
// Render the S3-style NoSuchKey error XML payload.
// NOTE(review): the XML element tags of this literal appear to have been
// stripped by the text extraction that produced this listing — restore the
// literal from version control before relying on this code.
7 | def toXML =
8 |
9 | NoSuchKey
10 | The resource you requested does not exist
11 | /{bucket}/{key}
12 |
13 | }
14 |
--------------------------------------------------------------------------------
/src/main/scala/io/findify/s3mock/provider/FileProvider.scala:
--------------------------------------------------------------------------------
1 | package io.findify.s3mock.provider
2 | import java.util.UUID
3 | import java.io.{FileInputStream, File => JFile}
4 |
5 | import akka.http.scaladsl.model.DateTime
6 | import better.files.File
7 | import better.files.File.OpenOptions
8 | import com.amazonaws.services.s3.model.ObjectMetadata
9 | import com.typesafe.scalalogging.LazyLogging
10 | import io.findify.s3mock.error.{NoSuchBucketException, NoSuchKeyException}
11 | import io.findify.s3mock.provider.metadata.{MapMetadataStore, MetadataStore}
12 | import io.findify.s3mock.request.{CompleteMultipartUpload, CreateBucketConfiguration}
13 | import io.findify.s3mock.response._
14 | import org.apache.commons.codec.digest.DigestUtils
15 |
16 | import scala.util.Random
17 |
18 | /**
19 | * Created by shutty on 8/9/16.
20 | */
class FileProvider(dir:String) extends Provider with LazyLogging {
  // Root working directory: one sub-directory per bucket; object keys map to
  // file paths below the bucket directory. Multipart uploads are staged under
  // "$dir/.mp" until completed.
  val workDir = File(dir)
  if (!workDir.exists) workDir.createDirectories()

  private val meta = new MapMetadataStore(dir)

  override def metadataStore: MetadataStore = meta

  /** Every direct child of the work dir is reported as a bucket.
    * NOTE(review): the ".mp" multipart staging dir would also be listed once a
    * multipart upload has happened — confirm whether callers care. */
  override def listBuckets: ListAllMyBuckets = {
    val buckets = workDir.list.map(f => Bucket(fromOs(f.name), DateTime(f.lastModifiedTime.toEpochMilli))).toList
    logger.debug(s"listing buckets: ${buckets.map(_.name)}")
    ListAllMyBuckets("root", UUID.randomUUID().toString, buckets)
  }

  /** List objects under a prefix, optionally grouping keys by a delimiter into
    * common prefixes, mimicking S3 ListObjects semantics. */
  override def listBucket(bucket: String, prefix: Option[String], delimiter: Option[String], maxkeys: Option[Int]) = {
    // Common prefix of `dir` after `p`, up to and including the first
    // occurrence of delimiter `d`; None when `d` does not occur after `p`.
    def commonPrefix(dir: String, p: String, d: String): Option[String] = {
      dir.indexOf(d, p.length) match {
        case -1 => None
        case pos => Some(p + dir.substring(p.length, pos) + d)
      }
    }
    val prefixNoLeadingSlash = prefix.getOrElse("").dropWhile(_ == '/')
    val bucketFile = File(s"$dir/$bucket/")
    if (!bucketFile.exists) throw NoSuchBucketException(bucket)
    val bucketFileString = fromOs(bucketFile.toString)
    val bucketFiles = bucketFile.listRecursively(File.VisitOptions.follow).filter(f => {
      // key relative to the bucket root, normalized to '/' separators
      val fString = fromOs(f.toString).drop(bucketFileString.length).dropWhile(_ == '/')
      fString.startsWith(prefixNoLeadingSlash) && !f.isDirectory
    })
    val files = bucketFiles.map(f => {
      // md5 is streamed from disk so large objects are not loaded into memory
      val stream = new FileInputStream(f.toJava)
      try {
        val md5 = DigestUtils.md5Hex(stream)
        Content(fromOs(f.toString).drop(bucketFileString.length+1).dropWhile(_ == '/'), DateTime(f.lastModifiedTime.toEpochMilli), md5, f.size, "STANDARD")
      } finally {
        stream.close()
      }
    }).toList
    logger.debug(s"listing bucket contents: ${files.map(_.key)}")
    val commonPrefixes = normalizeDelimiter(delimiter) match {
      case Some(del) => files.flatMap(f => commonPrefix(f.key, prefixNoLeadingSlash, del)).distinct.sorted
      case None => Nil
    }
    // keys folded into a common prefix are not listed individually
    val filteredFiles = files.filterNot(f => commonPrefixes.exists(p => f.key.startsWith(p)))
    val count = maxkeys.getOrElse(Int.MaxValue)
    val result = filteredFiles.sortBy(_.key)
    ListBucket(bucket, prefix, delimiter, commonPrefixes, result.take(count), isTruncated = result.size > count)
  }

  override def createBucket(name:String, bucketConfig:CreateBucketConfiguration) = {
    val bucket = File(s"$dir/$name")
    // idempotent: creating an existing bucket succeeds, like real S3
    if (!bucket.exists) bucket.createDirectory()
    logger.debug(s"creating bucket $name")
    CreateBucket(name)
  }

  /** Write object bytes to disk and record its metadata (last-modified stamped here). */
  override def putObject(bucket:String, key:String, data:Array[Byte], objectMetadata: ObjectMetadata): Unit = {
    val bucketFile = File(s"$dir/$bucket")
    val file = File(s"$dir/$bucket/$key")
    if (!bucketFile.exists) throw NoSuchBucketException(bucket)
    file.createIfNotExists(createParents = true)
    logger.debug(s"writing file for s3://$bucket/$key to $dir/$bucket/$key, bytes = ${data.length}")
    file.writeByteArray(data)(OpenOptions.default)
    objectMetadata.setLastModified(org.joda.time.DateTime.now().toDate)
    metadataStore.put(bucket, key, objectMetadata)
  }

  /** Read object bytes plus stored metadata; throws for missing bucket/key. */
  override def getObject(bucket:String, key:String): GetObjectData = {
    val bucketFile = File(s"$dir/$bucket")
    val file = File(s"$dir/$bucket/$key")
    logger.debug(s"reading object for s3://$bucket/$key")
    if (!bucketFile.exists) throw NoSuchBucketException(bucket)
    if (!file.exists) throw NoSuchKeyException(bucket, key)
    // a directory is an intermediate path component, not an object
    if (file.isDirectory) throw NoSuchKeyException(bucket, key)
    val meta = metadataStore.get(bucket, key)
    GetObjectData(file.byteArray, meta)
  }

  override def putObjectMultipartStart(bucket:String, key:String, metadata: ObjectMetadata):InitiateMultipartUploadResult = {
    val id = Math.abs(Random.nextLong()).toString
    val bucketFile = File(s"$dir/$bucket")
    if (!bucketFile.exists) throw NoSuchBucketException(bucket)
    // ".keep" materializes the upload's staging directory on disk
    File(s"$dir/.mp/$bucket/$key/$id/.keep").createIfNotExists(createParents = true)
    metadataStore.put(bucket, key, metadata)
    logger.debug(s"starting multipart upload for s3://$bucket/$key")
    InitiateMultipartUploadResult(bucket, key, id)
  }

  /** Stage one chunk of a multipart upload under the upload's temp directory. */
  override def putObjectMultipartPart(bucket:String, key:String, partNumber:Int, uploadId:String, data:Array[Byte]) = {
    val bucketFile = File(s"$dir/$bucket")
    if (!bucketFile.exists) throw NoSuchBucketException(bucket)
    val file = File(s"$dir/.mp/$bucket/$key/$uploadId/$partNumber")
    logger.debug(s"uploading multipart chunk $partNumber for s3://$bucket/$key")
    file.writeByteArray(data)(OpenOptions.default)
  }

  /** Concatenate the staged parts into the final object, clean up the staging
    * dir, and persist the refreshed metadata. */
  override def putObjectMultipartComplete(bucket:String, key:String, uploadId:String, request:CompleteMultipartUpload): CompleteMultipartUploadResult = {
    val bucketFile = File(s"$dir/$bucket")
    if (!bucketFile.exists) throw NoSuchBucketException(bucket)
    val partFiles = request.parts.map(part => File(s"$dir/.mp/$bucket/$key/$uploadId/${part.partNumber}"))
    // Array.concat is O(total size); the previous fold re-allocated the
    // accumulator on every part (O(n^2)).
    val data = Array.concat(partFiles.map(_.byteArray): _*)
    val file = File(s"$dir/$bucket/$key")
    file.createIfNotExists(createParents = true)
    file.writeByteArray(data)(OpenOptions.default)
    File(s"$dir/.mp/$bucket/$key").delete()
    val hash = file.md5
    metadataStore.get(bucket, key).foreach { m =>
      m.setContentMD5(hash)
      m.setLastModified(org.joda.time.DateTime.now().toDate)
      // Persist the update: MapMetadataStore deserializes a fresh copy on
      // every get, so in-place mutation alone was silently lost before.
      metadataStore.put(bucket, key, m)
    }
    logger.debug(s"completed multipart upload for s3://$bucket/$key")
    CompleteMultipartUploadResult(bucket, key, hash)
  }

  /** Copy an object (and its metadata, unless `newMeta` replaces it). */
  override def copyObject(sourceBucket: String, sourceKey: String, destBucket: String, destKey: String, newMeta: Option[ObjectMetadata] = None): CopyObjectResult = {
    val sourceBucketFile = File(s"$dir/$sourceBucket")
    val destBucketFile = File(s"$dir/$destBucket")
    if (!sourceBucketFile.exists) throw NoSuchBucketException(sourceBucket)
    if (!destBucketFile.exists) throw NoSuchBucketException(destBucket)
    val sourceFile = File(s"$dir/$sourceBucket/$sourceKey")
    val destFile = File(s"$dir/$destBucket/$destKey")
    destFile.createIfNotExists(createParents = true)
    sourceFile.copyTo(destFile, overwrite = true)
    logger.debug(s"Copied s3://$sourceBucket/$sourceKey to s3://$destBucket/$destKey")
    val sourceMeta = newMeta.orElse(metadataStore.get(sourceBucket, sourceKey))
    sourceMeta.foreach(meta => metadataStore.put(destBucket, destKey, meta))
    CopyObjectResult(DateTime(sourceFile.lastModifiedTime.toEpochMilli), destFile.md5)
  }

  /** UploadPartCopy: slice [fromByte, toByte] of the source object and stage
    * it as one part of an in-progress multipart upload. */
  override def copyObjectMultipart(sourceBucket: String, sourceKey: String, destBucket: String, destKey: String, part: Int, uploadId:String, fromByte: Int, toByte: Int, newMeta: Option[ObjectMetadata] = None): CopyObjectResult = {
    // slice end is exclusive, the S3 byte range is inclusive — hence +1
    val data = getObject(sourceBucket, sourceKey).bytes.slice(fromByte, toByte + 1)
    putObjectMultipartPart(destBucket, destKey, part, uploadId, data)
    new CopyObjectResult(DateTime.now, DigestUtils.md5Hex(data))
  }

  /** Delete an object and its metadata. Deleting a "directory" key is a no-op
    * (recursive delete by prefix is not an S3 operation). */
  override def deleteObject(bucket:String, key:String): Unit = {
    val file = File(s"$dir/$bucket/$key")
    logger.debug(s"deleting object s://$bucket/$key")
    if (!file.exists) throw NoSuchKeyException(bucket, key)
    if (!file.isDirectory) {
      file.delete()
      metadataStore.delete(bucket, key)
    }
  }

  /** Delete a bucket directory (recursively, per better-files semantics) and
    * drop its metadata. */
  override def deleteBucket(bucket:String): Unit = {
    val bucketFile = File(s"$dir/$bucket")
    logger.debug(s"deleting bucket s://$bucket")
    if (!bucketFile.exists) throw NoSuchBucketException(bucket)
    bucketFile.delete()
    metadataStore.remove(bucket)
  }

  /** Replace the os separator with a '/' */
  private def fromOs(path: String): String = {
    path.replace(JFile.separatorChar, '/')
  }

}
179 |
--------------------------------------------------------------------------------
/src/main/scala/io/findify/s3mock/provider/InMemoryProvider.scala:
--------------------------------------------------------------------------------
1 | package io.findify.s3mock.provider
2 |
3 | import java.time.Instant
4 | import java.util.{Date, UUID}
5 |
6 | import akka.http.scaladsl.model.DateTime
7 | import com.amazonaws.services.s3.model.ObjectMetadata
8 | import com.typesafe.scalalogging.LazyLogging
9 | import io.findify.s3mock.error.{NoSuchBucketException, NoSuchKeyException}
10 | import io.findify.s3mock.provider.metadata.{InMemoryMetadataStore, MetadataStore}
11 | import io.findify.s3mock.request.{CompleteMultipartUpload, CreateBucketConfiguration}
12 | import io.findify.s3mock.response._
13 | import org.apache.commons.codec.digest.DigestUtils
14 |
15 | import scala.collection.concurrent.TrieMap
16 | import scala.collection.mutable
17 | import scala.util.Random
18 |
class InMemoryProvider extends Provider with LazyLogging {
  private val mdStore = new InMemoryMetadataStore
  // bucket name -> creation time and key/value contents
  private val bucketDataStore = new TrieMap[String, BucketContents]
  // uploadId -> chunks received so far, kept ordered by part number
  private val multipartTempStore = new TrieMap[String, mutable.SortedSet[MultipartChunk]]

  private case class BucketContents(creationTime: DateTime, keysInBucket: mutable.Map[String, KeyContents])

  private case class KeyContents(lastModificationTime: DateTime, data: Array[Byte])

  private case class MultipartChunk(partNo: Int, data: Array[Byte]) extends Ordered[MultipartChunk] {
    override def compare(that: MultipartChunk): Int = partNo compareTo that.partNo
  }

  override def metadataStore: MetadataStore = mdStore

  override def listBuckets: ListAllMyBuckets = {
    val buckets = bucketDataStore map { case (name, data) => Bucket(name, data.creationTime) }
    logger.debug(s"listing buckets: ${buckets.map(_.name)}")
    ListAllMyBuckets("root", UUID.randomUUID().toString, buckets.toList)
  }

  /** List objects under a prefix, optionally grouped by delimiter into common
    * prefixes, capped at maxkeys — mimicking S3 ListObjects. */
  override def listBucket(bucket: String, prefix: Option[String], delimiter: Option[String], maxkeys: Option[Int]): ListBucket = {
    // common prefix of `dir` after `p`, up to and including the first
    // occurrence of delimiter `d`; None when `d` does not occur after `p`
    def commonPrefix(dir: String, p: String, d: String): Option[String] = {
      dir.indexOf(d, p.length) match {
        case -1 => None
        case pos => Some(p + dir.substring(p.length, pos) + d)
      }
    }

    val prefix2 = prefix.getOrElse("")
    bucketDataStore.get(bucket) match {
      case Some(bucketContent) =>
        // strict filter instead of the deprecated (and lazily-evaluated)
        // filterKeys view — snapshots the matching entries once
        val matchingKeys = bucketContent.keysInBucket.filter { case (key, _) => key.startsWith(prefix2) }
        val matchResults = matchingKeys map { case (name, content) =>
          Content(name, content.lastModificationTime, DigestUtils.md5Hex(content.data), content.data.length, "STANDARD")
        }
        logger.debug(s"listing bucket contents: ${matchResults.map(_.key)}")
        val commonPrefixes = normalizeDelimiter(delimiter) match {
          case Some(del) => matchResults.flatMap(f => commonPrefix(f.key, prefix2, del)).toList.sorted.distinct
          case None => Nil
        }
        // keys folded into a common prefix are not listed individually
        val filteredFiles: List[Content] = matchResults.filterNot(f => commonPrefixes.exists(p => f.key.startsWith(p))).toList
        val count = maxkeys.getOrElse(Int.MaxValue)
        val result = filteredFiles.sortBy(_.key)
        // previously called .take(count) twice — once is enough
        ListBucket(bucket, prefix, delimiter, commonPrefixes, result.take(count), isTruncated = result.size > count)
      case None => throw NoSuchBucketException(bucket)
    }
  }

  /** Idempotent: re-creating an existing bucket keeps its contents. */
  override def createBucket(name: String, bucketConfig: CreateBucketConfiguration): CreateBucket = {
    bucketDataStore.putIfAbsent(name, BucketContents(DateTime.now, new TrieMap))
    logger.debug(s"creating bucket $name")
    CreateBucket(name)
  }

  override def putObject(bucket: String, key: String, data: Array[Byte], objectMetadata: ObjectMetadata): Unit = {
    bucketDataStore.get(bucket) match {
      case Some(bucketContent) =>
        logger.debug(s"putting object for s3://$bucket/$key, bytes = ${data.length}")
        bucketContent.keysInBucket.put(key, KeyContents(DateTime.now, data))
        objectMetadata.setLastModified(org.joda.time.DateTime.now().toDate)
        metadataStore.put(bucket, key, objectMetadata)
      case None => throw NoSuchBucketException(bucket)
    }
  }

  /** UploadPartCopy: slice [fromByte, toByte] of the source object and stage
    * it as one part of an in-progress multipart upload. */
  override def copyObjectMultipart(sourceBucket: String, sourceKey: String, destBucket: String, destKey: String, part: Int, uploadId:String, fromByte: Int, toByte: Int, newMeta: Option[ObjectMetadata] = None): CopyObjectResult = {
    // slice end is exclusive, the S3 byte range is inclusive — hence +1
    val data = getObject(sourceBucket, sourceKey).bytes.slice(fromByte, toByte + 1)
    putObjectMultipartPart(destBucket, destKey, part, uploadId, data)
    new CopyObjectResult(DateTime.now, DigestUtils.md5Hex(data))
  }

  override def getObject(bucket: String, key: String): GetObjectData = {
    bucketDataStore.get(bucket) match {
      case Some(bucketContent) => bucketContent.keysInBucket.get(key) match {
        case Some(keyContent) =>
          logger.debug(s"reading object for s://$bucket/$key")
          val meta = metadataStore.get(bucket, key)
          GetObjectData(keyContent.data, meta)
        case None => throw NoSuchKeyException(bucket, key)
      }
      case None => throw NoSuchBucketException(bucket)
    }
  }

  override def putObjectMultipartStart(bucket: String, key: String, metadata: ObjectMetadata): InitiateMultipartUploadResult = {
    bucketDataStore.get(bucket) match {
      case Some(_) =>
        val id = Math.abs(Random.nextLong()).toString
        multipartTempStore.putIfAbsent(id, new mutable.TreeSet)
        metadataStore.put(bucket, key, metadata)
        logger.debug(s"starting multipart upload for s3://$bucket/$key")
        InitiateMultipartUploadResult(bucket, key, id)
      case None => throw NoSuchBucketException(bucket)
    }
  }

  override def putObjectMultipartPart(bucket: String, key: String, partNumber: Int, uploadId: String, data: Array[Byte]): Unit = {
    bucketDataStore.get(bucket) match {
      case Some(_) =>
        logger.debug(s"uploading multipart chunk $partNumber for s3://$bucket/$key")
        multipartTempStore.getOrElseUpdate(uploadId, new mutable.TreeSet).add(MultipartChunk(partNumber, data))
      case None => throw NoSuchBucketException(bucket)
    }
  }

  /** Concatenate the staged chunks (sorted by part number) into the final
    * object and refresh its metadata in place. */
  override def putObjectMultipartComplete(bucket: String, key: String, uploadId: String, request: CompleteMultipartUpload): CompleteMultipartUploadResult = {
    bucketDataStore.get(bucket) match {
      case Some(bucketContent) =>
        // Array.concat is O(total size); the previous fold re-allocated the
        // accumulator on every chunk (O(n^2))
        val completeBytes = Array.concat(multipartTempStore(uploadId).toSeq.map(_.data): _*)
        bucketContent.keysInBucket.put(key, KeyContents(DateTime.now, completeBytes))
        multipartTempStore.remove(uploadId)
        logger.debug(s"completed multipart upload for s3://$bucket/$key")
        val hash = DigestUtils.md5Hex(completeBytes)
        metadataStore.get(bucket, key).foreach { m =>
          // the store holds this same instance, so mutating it is persistent
          m.setContentMD5(hash)
          m.setLastModified(org.joda.time.DateTime.now().toDate)
        }
        CompleteMultipartUploadResult(bucket, key, hash)
      case None => throw NoSuchBucketException(bucket)
    }
  }

  /** Copy an object (and its metadata, unless `newMeta` replaces it). */
  override def copyObject(sourceBucket: String, sourceKey: String, destBucket: String, destKey: String, newMeta: Option[ObjectMetadata] = None): CopyObjectResult = {
    (bucketDataStore.get(sourceBucket), bucketDataStore.get(destBucket)) match {
      case (Some(srcBucketContent), Some(dstBucketContent)) =>
        srcBucketContent.keysInBucket.get(sourceKey) match {
          case Some(srcKeyContent) =>
            val destFileModTime = DateTime.now
            // clone so later mutation of the source bytes cannot leak through
            dstBucketContent.keysInBucket.put(destKey, KeyContents(destFileModTime, srcKeyContent.data.clone))
            logger.debug(s"Copied s3://$sourceBucket/$sourceKey to s3://$destBucket/$destKey")
            val sourceMeta = newMeta.orElse(metadataStore.get(sourceBucket, sourceKey))
            sourceMeta.foreach(meta => metadataStore.put(destBucket, destKey, meta))
            CopyObjectResult(destFileModTime, DigestUtils.md5Hex(srcKeyContent.data))
          case None => throw NoSuchKeyException(sourceBucket, sourceKey)
        }
      case (None, _) => throw NoSuchBucketException(sourceBucket)
      case _ => throw NoSuchBucketException(destBucket)
    }
  }

  /** Delete an object and its metadata. A key that only matches as a prefix of
    * other keys is treated as a directory and silently ignored. */
  override def deleteObject(bucket: String, key: String): Unit = {
    bucketDataStore.get(bucket) match {
      case Some(bucketContent) => bucketContent.keysInBucket.get(key) match {
        case Some(_) =>
          logger.debug(s"deleting object s://$bucket/$key")
          bucketContent.keysInBucket.remove(key)
          metadataStore.delete(bucket, key)
        case None => bucketContent.keysInBucket.keys.find(_.startsWith(key)) match {
          case Some(_) =>
            logger.debug(s"recursive delete by prefix is not supported by S3")
            ()
          case None =>
            logger.warn(s"key does not exist")
            throw NoSuchKeyException(bucket, key)
        }
      }
      case None => throw NoSuchBucketException(bucket)
    }
  }

  override def deleteBucket(bucket: String): Unit = {
    bucketDataStore.get(bucket) match {
      case Some(_) =>
        logger.debug(s"deleting bucket s://$bucket")
        bucketDataStore.remove(bucket)
        metadataStore.remove(bucket)
      case None => throw NoSuchBucketException(bucket)
    }
  }
}
190 |
--------------------------------------------------------------------------------
/src/main/scala/io/findify/s3mock/provider/Provider.scala:
--------------------------------------------------------------------------------
1 | package io.findify.s3mock.provider
2 |
3 | import com.amazonaws.services.s3.model.ObjectMetadata
4 | import io.findify.s3mock.provider.metadata.MetadataStore
5 | import io.findify.s3mock.request.{CompleteMultipartUpload, CreateBucketConfiguration}
6 | import io.findify.s3mock.response._
7 |
8 |
// Raw object payload plus its stored metadata (None when no metadata was recorded).
case class GetObjectData(bytes: Array[Byte], metadata: Option[ObjectMetadata])
10 |
11 | /**
12 | * Interface for provider implementations.
13 | */
trait Provider {
  /** Backing store for per-object metadata. */
  def metadataStore: MetadataStore
  /** List all buckets. */
  def listBuckets:ListAllMyBuckets
  /** List a bucket's objects filtered by prefix/delimiter, capped at maxkeys. */
  def listBucket(bucket:String, prefix:Option[String], delimiter: Option[String], maxkeys: Option[Int]):ListBucket
  /** Create a bucket (idempotent). */
  def createBucket(name:String, bucketConfig:CreateBucketConfiguration):CreateBucket
  /** Store object bytes and metadata; throws NoSuchBucketException when the bucket is missing. */
  def putObject(bucket:String, key:String, data:Array[Byte], metadata: ObjectMetadata):Unit
  /** Read object bytes and metadata; throws NoSuchBucket/NoSuchKey on miss. */
  def getObject(bucket:String, key:String): GetObjectData
  /** Begin a multipart upload, returning its generated uploadId. */
  def putObjectMultipartStart(bucket:String, key:String, metadata: ObjectMetadata):InitiateMultipartUploadResult
  /** Stage one chunk of an in-progress multipart upload. */
  def putObjectMultipartPart(bucket:String, key:String, partNumber:Int, uploadId:String, data:Array[Byte]):Unit
  /** Assemble staged chunks into the final object. */
  def putObjectMultipartComplete(bucket:String, key:String, uploadId:String, request:CompleteMultipartUpload):CompleteMultipartUploadResult
  /** Delete an object and its metadata. */
  def deleteObject(bucket:String, key:String):Unit
  /** Delete a bucket and all its metadata. */
  def deleteBucket(bucket:String):Unit
  /** Copy an object; `newMeta` replaces the source metadata when present. */
  def copyObject(sourceBucket: String, sourceKey: String, destBucket: String, destKey: String, newMeta: Option[ObjectMetadata] = None): CopyObjectResult
  /** UploadPartCopy: stage byte range [fromByte, toByte] of the source object as one part. */
  def copyObjectMultipart(sourceBucket: String, sourceKey: String, destBucket: String, destKey: String, partNumber:Int, uploadId:String, fromByte: Int, toByte:Int, meta: Option[ObjectMetadata] = None): CopyObjectResult

  /** Treat an empty delimiter the same as an absent one. */
  def normalizeDelimiter(delimiter: Option[String]): Option[String] = delimiter.filter(_.nonEmpty)
}
31 |
32 |
33 |
--------------------------------------------------------------------------------
/src/main/scala/io/findify/s3mock/provider/metadata/InMemoryMetadataStore.scala:
--------------------------------------------------------------------------------
1 | package io.findify.s3mock.provider.metadata
2 |
3 | import com.amazonaws.services.s3.model.ObjectMetadata
4 |
5 | import scala.collection.concurrent.TrieMap
6 | import scala.collection.mutable
7 |
class InMemoryMetadataStore extends MetadataStore {

  // bucket name -> (object key -> metadata); concurrent maps at both levels
  private val store = new TrieMap[String, mutable.Map[String, ObjectMetadata]]

  /** Insert or overwrite metadata for bucket/key, creating the bucket map on demand. */
  override def put(bucket: String, key: String, meta: ObjectMetadata): Unit =
    store.getOrElseUpdate(bucket, new TrieMap[String, ObjectMetadata]()).put(key, meta)

  /** Metadata for bucket/key, or None when either level is missing. */
  override def get(bucket: String, key: String): Option[ObjectMetadata] =
    for {
      perBucket <- store.get(bucket)
      meta      <- perBucket.get(key)
    } yield meta

  /** Drop metadata for a single key; no-op when bucket or key is absent. */
  override def delete(bucket: String, key: String): Unit =
    store.get(bucket).foreach(_.remove(key))

  /** Drop all metadata recorded for a bucket. */
  override def remove(bucket: String): Unit = {
    store.remove(bucket)
    ()
  }
}
28 |
--------------------------------------------------------------------------------
/src/main/scala/io/findify/s3mock/provider/metadata/MapMetadataStore.scala:
--------------------------------------------------------------------------------
1 | package io.findify.s3mock.provider.metadata
2 |
3 | import java.io.{ByteArrayInputStream, ByteArrayOutputStream, ObjectInputStream, ObjectOutputStream}
4 |
5 | import better.files.File
6 | import com.amazonaws.services.s3.model.ObjectMetadata
7 | import org.iq80.leveldb.Options
8 | import org.iq80.leveldb.impl.Iq80DBFactory._
9 | import org.iq80.leveldb._
10 | import org.iq80.leveldb.impl.Iq80DBFactory
11 |
12 | import scala.collection.mutable
13 |
14 | /**
15 | * Created by shutty on 3/13/17.
16 | */
class MapMetadataStore(path: String) extends MetadataStore {
  // One open LevelDB handle per bucket, stored at "$path/$bucket.metadata".
  // Access is guarded by `this` in `load` and `remove` so a DB is never
  // closed while it is being (re)opened.
  val bucketMetadata = mutable.Map[String,DB]()


  /** Store metadata for bucket/key (java-serialized into LevelDB). */
  override def put(bucket: String, key: String, meta: ObjectMetadata): Unit = {
    val map = load(path, bucket)
    map.put(bytes(key), meta2bytes(meta))
  }

  /** Fetch and deserialize metadata; None when the key is absent.
    * Note: every call returns a fresh deserialized copy — mutating the result
    * does NOT update the store. */
  override def get(bucket: String, key: String): Option[ObjectMetadata] = {
    val map = load(path, bucket)
    Option(map.get(bytes(key))).map(bytes2meta)
  }

  override def delete(bucket: String, key: String): Unit = {
    val map = load(path, bucket)
    map.delete(bytes(key))
  }

  /** Close and forget the bucket's DB, then delete its backing files.
    * Synchronized: previously this ran unsynchronized, racing `load`, which
    * could re-open the DB while its files were being removed. */
  override def remove(bucket: String): Unit = synchronized {
    bucketMetadata.remove(bucket).foreach(_.close())
    val file = File(s"$path/$bucket.metadata")
    if (file.exists) file.delete()
  }

  // open (or fetch the cached) LevelDB for a bucket, creating it if missing
  private def load(path: String, bucket: String): DB = synchronized {
    bucketMetadata.get(bucket) match {
      case Some(db) => db
      case None =>
        val options = new Options()
        options.createIfMissing(true)
        val db = Iq80DBFactory.factory.open(File(s"$path/$bucket.metadata").toJava, options)
        bucketMetadata.put(bucket, db)
        db
    }
  }

  // java-serialize ObjectMetadata (it implements Serializable in the AWS SDK)
  private def meta2bytes(meta: ObjectMetadata) = {
    val out = new ByteArrayOutputStream()
    val stream = new ObjectOutputStream(out)
    try stream.writeObject(meta) finally stream.close()
    out.toByteArray
  }

  private def bytes2meta(bytes: Array[Byte]): ObjectMetadata = {
    val stream = new ObjectInputStream(new ByteArrayInputStream(bytes))
    // close the stream once the object is read (previously left open)
    try stream.readObject().asInstanceOf[ObjectMetadata]
    finally stream.close()
  }
}
70 |
--------------------------------------------------------------------------------
/src/main/scala/io/findify/s3mock/provider/metadata/MetadataStore.scala:
--------------------------------------------------------------------------------
1 | package io.findify.s3mock.provider.metadata
2 |
3 | import com.amazonaws.services.s3.model.ObjectMetadata
4 |
5 | /**
6 | * Created by shutty on 3/13/17.
7 | */
8 | trait MetadataStore {
9 | def put(bucket: String, key: String, meta: ObjectMetadata): Unit
10 | def get(bucket: String, key: String): Option[ObjectMetadata]
11 | def delete(bucket: String, key: String): Unit
12 | def remove(bucket: String): Unit
13 | }
14 |
--------------------------------------------------------------------------------
/src/main/scala/io/findify/s3mock/request/CompleteMultipartUploadPart.scala:
--------------------------------------------------------------------------------
1 | package io.findify.s3mock.request
2 |
3 | /**
4 | * Created by shutty on 8/10/16.
5 | */
// One <Part> entry of a CompleteMultipartUpload request: part number plus the ETag returned when the part was uploaded.
case class CompleteMultipartUploadPart(partNumber:Int, etag:String)
// The full CompleteMultipartUpload request body: all parts to assemble.
case class CompleteMultipartUpload(parts:List[CompleteMultipartUploadPart])
8 |
object CompleteMultipartUploadPart {
  /** Parse a single <Part> element into a CompleteMultipartUploadPart. */
  def apply(node: scala.xml.Node) = {
    val number = (node \ "PartNumber").text
    val tag = (node \ "ETag").text
    new CompleteMultipartUploadPart(partNumber = number.toInt, etag = tag)
  }
}
15 |
object CompleteMultipartUpload {
  /** Parse the request body: every <Part> child becomes one part entry. */
  def apply(node: scala.xml.Node) = {
    val parsedParts = (node \ "Part").toList.map(partNode => CompleteMultipartUploadPart(partNode))
    new CompleteMultipartUpload(parts = parsedParts)
  }
}
--------------------------------------------------------------------------------
/src/main/scala/io/findify/s3mock/request/CreateBucketConfiguration.scala:
--------------------------------------------------------------------------------
1 | package io.findify.s3mock.request
2 |
3 | /**
4 | * Created by shutty on 8/10/16.
5 | */
// CreateBucket request body: the optional region constraint for the new bucket.
case class CreateBucketConfiguration(locationConstraint:Option[String])
7 |
object CreateBucketConfiguration {
  /** Parse a CreateBucketConfiguration request body.
    *
    * The previous implementation used `xml.find(_.label == "locationConstraint")`,
    * which is broken twice over: `Node.find` only inspects the node itself (a
    * Node's own sequence is just that node, not its children), and the S3
    * element is spelled "LocationConstraint" — so the region was never found.
    * `\\` searches descendant-or-self; the lowercase spelling is kept as a
    * fallback for backward compatibility.
    */
  def apply(xml:scala.xml.Node) = {
    val region = (xml \\ "LocationConstraint").headOption
      .orElse((xml \\ "locationConstraint").headOption)
      .map(_.text)
    new CreateBucketConfiguration(region)
  }
}
14 |
--------------------------------------------------------------------------------
/src/main/scala/io/findify/s3mock/request/DeleteObjectsRequest.scala:
--------------------------------------------------------------------------------
1 | package io.findify.s3mock.request
2 |
3 | /**
4 | * Created by shutty on 3/13/17.
5 | */
6 |
// DeleteObjects (multi-delete) request body: the keys to remove.
case class DeleteObjectsRequest(objects: Seq[String])
8 |
object DeleteObjectsRequest {
  /** Parse the request body: one <Key> per <Object> child. */
  def apply(node: scala.xml.Node) = {
    val keys = for (obj <- node \ "Object") yield (obj \ "Key").text
    new DeleteObjectsRequest(keys)
  }
}
15 |
--------------------------------------------------------------------------------
/src/main/scala/io/findify/s3mock/response/CompleteMultipartUploadResult.scala:
--------------------------------------------------------------------------------
1 | package io.findify.s3mock.response
2 |
3 | import java.net.URLDecoder
4 |
5 | /**
6 | * Created by shutty on 8/10/16.
7 | */
case class CompleteMultipartUploadResult(bucket:String, key:String, etag:String) {
  // Renders the CompleteMultipartUploadResult XML body: a location URL, the
  // bucket, the URL-decoded key, and the quoted ETag.
  // NOTE(review): the XML element tags are not visible in this extraction —
  // verify the literal against the original source before editing.
  def toXML =

      http://s3.amazonaws.com/{bucket}/{key}
      {bucket}
      {/* the key is the still URLencoded path */URLDecoder.decode(key, "UTF-8") }
      "{etag}"

}
17 |
--------------------------------------------------------------------------------
/src/main/scala/io/findify/s3mock/response/CopyObjectResult.scala:
--------------------------------------------------------------------------------
1 | package io.findify.s3mock.response
2 |
3 | import akka.http.scaladsl.model.DateTime
4 |
5 |
6 | /**
7 | * Created by shutty on 12/3/16.
8 | */
case class CopyObjectResult(lastModified: DateTime, etag: String) {
  // Renders the CopyObjectResult XML body: the last-modified timestamp
  // (with a trailing Z) and the quoted ETag.
  // NOTE(review): the XML element tags are not visible in this extraction —
  // verify the literal against the original source before editing.
  def toXML =

      {lastModified.toString}Z
      "{etag}"

}
16 |
--------------------------------------------------------------------------------
/src/main/scala/io/findify/s3mock/response/CreateBucket.scala:
--------------------------------------------------------------------------------
1 | package io.findify.s3mock.response
2 |
3 | /**
4 | * Created by shutty on 8/10/16.
5 | */
// Result of a successful CreateBucket operation; `name` is the created bucket's name.
case class CreateBucket(name:String)
7 |
--------------------------------------------------------------------------------
/src/main/scala/io/findify/s3mock/response/DeleteObjectsResponse.scala:
--------------------------------------------------------------------------------
1 | package io.findify.s3mock.response
2 |
3 | /**
4 | * Created by shutty on 3/13/17.
5 | */
case class DeleteObjectsResponse(deleted: Seq[String], error: Seq[String]) {
  // Renders the multi-delete response: one entry per deleted key, plus an
  // error section (InternalError / "Cannot delete") only when errors exist.
  // NOTE(review): the XML element tags are not visible in this extraction —
  // verify the literal against the original source before editing.
  def toXML = {

      { deleted.map(d => {d}) }
      { if (error.nonEmpty) {

        { error.map(e => {
          {e}
          InternalError
          Cannot delete
        })}

      }}

  }
}
22 |
--------------------------------------------------------------------------------
/src/main/scala/io/findify/s3mock/response/InitiateMultipartUploadResult.scala:
--------------------------------------------------------------------------------
1 | package io.findify.s3mock.response
2 |
3 | import java.net.URLDecoder
4 |
5 | /**
6 | * Created by shutty on 8/10/16.
7 | */
case class InitiateMultipartUploadResult(bucket:String, key:String, uploadId:String) {
  // Renders the InitiateMultipartUploadResult XML body: bucket, URL-decoded
  // key, and the generated upload id.
  // NOTE(review): the XML element tags are not visible in this extraction —
  // verify the literal against the original source before editing.
  def toXML =

      {bucket}
      {/* the key is the still URLencoded path */URLDecoder.decode(key, "UTF-8") }
      {uploadId}

}
16 |
--------------------------------------------------------------------------------
/src/main/scala/io/findify/s3mock/response/ListAllMyBuckets.scala:
--------------------------------------------------------------------------------
1 | package io.findify.s3mock.response
2 |
3 | import akka.http.scaladsl.model.DateTime
4 |
5 |
6 | /**
7 | * Created by shutty on 8/9/16.
8 | */
// One bucket entry of a ListAllMyBuckets response: name and creation time.
case class Bucket(name:String, creationDate:DateTime)
case class ListAllMyBuckets(ownerName:String, ownerUUID:String, buckets:List[Bucket]) {
  // Renders the ListAllMyBuckets XML body: an owner section (id + display
  // name) followed by one entry per bucket (name + creation date with a
  // trailing Z).
  // NOTE(review): the XML element tags are not visible in this extraction —
  // verify the literal against the original source before editing.
  def toXML =


        {ownerUUID}
        {ownerName}


      {
        buckets.map(bucket =>

            {bucket.name}
            {bucket.creationDate.toString}Z
        )
      }


}
28 |
--------------------------------------------------------------------------------
/src/main/scala/io/findify/s3mock/response/ListBucket.scala:
--------------------------------------------------------------------------------
1 | package io.findify.s3mock.response
2 |
3 | import akka.http.scaladsl.model.DateTime
4 |
5 |
6 | /**
7 | * Created by shutty on 8/9/16.
8 | */
// One object entry of a ListBucket response: key, mtime, md5 (etag), size, storage class.
case class Content(key:String, lastModified:DateTime, md5:String, size:Long, storageClass:String)
case class ListBucket(bucket:String, prefix: Option[String], delimiter: Option[String], commonPrefixes: List[String], contents:List[Content], isTruncated: Boolean) {
  // Renders the ListBucket (ListObjects) XML body: echoed prefix/delimiter,
  // common prefixes (only when non-empty), key count, truncation flag, and one
  // entry per object.
  // NOTE(review): the XML element tags are not visible in this extraction —
  // verify the literal against the original source before editing.
  def toXML =

      {bucket}
      { prefix.map(p => {p} ) }
      { delimiter.map(d => {d}) }
      { if (commonPrefixes.nonEmpty) {commonPrefixes.map(cp => {cp})} }
      {contents.length}
      1000
      {isTruncated}
      {contents.map(content =>

          {content.key}
          {content.lastModified.toString}Z
          {content.md5}
          {content.size}
          {content.storageClass}

      )}

}
31 |
--------------------------------------------------------------------------------
/src/main/scala/io/findify/s3mock/route/CopyObject.scala:
--------------------------------------------------------------------------------
1 | package io.findify.s3mock.route
2 |
3 | import java.net.URLDecoder
4 | import java.util
5 |
6 | import akka.http.scaladsl.model.{HttpRequest, HttpResponse, StatusCodes}
7 | import akka.http.scaladsl.server.Directives._
8 | import com.amazonaws.services.s3.model.ObjectMetadata
9 | import com.typesafe.scalalogging.LazyLogging
10 | import io.findify.s3mock.error.{InternalErrorException, NoSuchBucketException, NoSuchKeyException}
11 | import io.findify.s3mock.provider.Provider
12 |
13 | import scala.collection.JavaConverters._
14 | import scala.util.{Failure, Success, Try}
15 |
16 | /**
17 | * Created by shutty on 11/23/16.
18 | */
case class CopyObject()(implicit provider: Provider) extends LazyLogging {
  /** Split a "/bucket/path/to/key" copy-source path into (bucket, key). */
  def split(path: String):Option[(String,String)] = {
    val noFirstSlash = path.replaceAll("^/+", "")
    noFirstSlash.split("/").toList match {
      case bucket :: tail => Some(bucket -> tail.mkString("/"))
      case _ => None
    }
  }

  /** When x-amz-metadata-directive is REPLACE, build replacement metadata from
    * the request's x-amz-meta-* headers and content type; otherwise None, so
    * the source metadata is copied. */
  def extractMetadata(req: HttpRequest): Option[ObjectMetadata] = {
    req.headers.find(_.lowercaseName() == "x-amz-metadata-directive").map(_.value()) match {
      case Some("REPLACE") =>
        val user = new util.HashMap[String,String]()
        // HTTP header names are case-insensitive, so match on lowercaseName
        // (the directive check above already did); stripPrefix removes only
        // the leading marker, where replaceAll would have stripped it anywhere
        // in the name.
        req.headers
          .filter(_.lowercaseName().startsWith("x-amz-meta-"))
          .map(h => h.lowercaseName().stripPrefix("x-amz-meta-") -> h.value())
          .foreach { case (k,v) => user.put(k,v) }
        val meta = new ObjectMetadata()
        meta.setUserMetadata(user)
        meta.setContentType(req.entity.contentType.value)
        Some(meta)
      case _ =>
        // COPY, no directive, or an unrecognized value: keep source metadata.
        // The previous match was non-exhaustive and threw a MatchError (500)
        // on unexpected directive values.
        None
    }
  }

  /** PUT with x-amz-copy-source: copy an object into destBucket/destKey. */
  def route(destBucket:String, destKey:String) = put {
    headerValueByName("x-amz-copy-source") { source =>
      val decodedSource = URLDecoder.decode(source, "utf-8")
      extractRequest { req =>
        complete {
          val meta = extractMetadata(req)
          split(decodedSource) match {
            case Some((sourceBucket, sourceKey)) =>
              Try(provider.copyObject(sourceBucket, sourceKey, destBucket, destKey, meta)) match {
                case Success(result) =>
                  logger.info(s"copied object $sourceBucket/$sourceKey")
                  HttpResponse(status = StatusCodes.OK, entity = result.toXML.toString())
                case Failure(e: NoSuchKeyException) =>
                  logger.info(s"cannot copy object $sourceBucket/$sourceKey: no such key")
                  HttpResponse(
                    StatusCodes.NotFound,
                    entity = e.toXML.toString()
                  )
                case Failure(e: NoSuchBucketException) =>
                  logger.info(s"cannot copy object $sourceBucket/$sourceKey: no such bucket")
                  HttpResponse(
                    StatusCodes.NotFound,
                    entity = e.toXML.toString()
                  )
                case Failure(t) =>
                  logger.error(s"cannot copy object $sourceBucket/$sourceKey: $t", t)
                  HttpResponse(
                    StatusCodes.InternalServerError,
                    entity = InternalErrorException(t).toXML.toString()
                  )
              }
            case None =>
              logger.error(s"cannot copy object $source")
              HttpResponse(StatusCodes.NotFound)
          }
        }
      }
    }
  }
}
82 |
--------------------------------------------------------------------------------
/src/main/scala/io/findify/s3mock/route/CopyObjectMultipart.scala:
--------------------------------------------------------------------------------
1 | package io.findify.s3mock.route
2 |
3 | import java.util
4 |
5 | import akka.http.scaladsl.model.{HttpRequest, HttpResponse, StatusCodes}
6 | import akka.http.scaladsl.server.Directives._
7 | import com.amazonaws.services.s3.model.ObjectMetadata
8 | import com.typesafe.scalalogging.LazyLogging
9 | import io.findify.s3mock.error.{InternalErrorException, NoSuchBucketException, NoSuchKeyException}
10 | import io.findify.s3mock.provider.Provider
11 |
12 | import scala.util.{Failure, Success, Try}
13 |
/**
  * PUT part route for multipart "upload part copy": copies a byte range of an
  * existing object into one part of an in-progress multipart upload.
  */
case class CopyObjectMultipart()(implicit provider: Provider) extends LazyLogging {
  /** Splits a "/bucket/key/parts" path into (bucket, key); None for an empty path. */
  def split(path: String): Option[(String, String)] = {
    val noFirstSlash = path.replaceAll("^/+", "")
    noFirstSlash.split("/").toList match {
      case bucket :: tail => Some(bucket -> tail.mkString("/"))
      case _ => None
    }
  }

  /** Replacement metadata when x-amz-metadata-directive is REPLACE; None otherwise. */
  def extractMetadata(req: HttpRequest): Option[ObjectMetadata] = {
    req.headers.find(_.lowercaseName() == "x-amz-metadata-directive").map(_.value()) match {
      case Some("REPLACE") =>
        val user = new util.HashMap[String, String]()
        req.headers
          .filter(_.name().startsWith("x-amz-meta-"))
          .map(h => h.name().stripPrefix("x-amz-meta-") -> h.value())
          .foreach { case (k, v) => user.put(k, v) }
        val meta = new ObjectMetadata()
        meta.setUserMetadata(user)
        meta.setContentType(req.entity.contentType.value)
        Some(meta)
      // Unknown directive values fall back to COPY semantics instead of
      // throwing a MatchError (the previous match was non-exhaustive).
      case _ => None
    }
  }

  def route(destBucket: String, destKey: String) = parameter('partNumber, 'uploadId) {
    (partNumber: String, uploadId: String) =>
      put {
        headerValueByName("x-amz-copy-source") { source =>
          extractRequest { req =>
            complete {
              // "x-amz-copy-source-range: bytes=<from>-<to>" is mandatory for part copies.
              val byteSeq = req.getHeader("x-amz-copy-source-range").get().value()
              // Request tracing, not a failure: log at debug, not error.
              logger.debug(req.toString())
              logger.debug(byteSeq)
              val Array(fromByte, toByte) = byteSeq.substring("bytes=".length).split("-")
              val meta = extractMetadata(req)
              // Decode the copy source the same way CopyObject does, so keys
              // containing URL-encoded characters resolve correctly.
              val decodedSource = java.net.URLDecoder.decode(source, "utf-8")
              split(decodedSource) match {
                case Some((sourceBucket, sourceKey)) =>
                  Try(provider.copyObjectMultipart(sourceBucket, sourceKey, destBucket, destKey, partNumber.toInt, uploadId, fromByte.toInt, toByte.toInt, meta)) match {
                    case Success(result) =>
                      logger.info(s"copied object $sourceBucket/$sourceKey")
                      HttpResponse(status = StatusCodes.OK, entity = result.toXML.toString())
                    case Failure(e: NoSuchKeyException) =>
                      logger.info(s"cannot copy object $sourceBucket/$sourceKey: no such key")
                      HttpResponse(
                        StatusCodes.NotFound,
                        entity = e.toXML.toString()
                      )
                    case Failure(e: NoSuchBucketException) =>
                      logger.info(s"cannot copy object $sourceBucket/$sourceKey: no such bucket")
                      HttpResponse(
                        StatusCodes.NotFound,
                        entity = e.toXML.toString()
                      )
                    case Failure(t) =>
                      logger.error(s"cannot copy object $sourceBucket/$sourceKey: $t", t)
                      HttpResponse(
                        StatusCodes.InternalServerError,
                        entity = InternalErrorException(t).toXML.toString()
                      )
                  }
                case None =>
                  logger.error(s"cannot copy object $source")
                  HttpResponse(StatusCodes.NotFound)
              }
            }
          }
        }
      }
  }
}
--------------------------------------------------------------------------------
/src/main/scala/io/findify/s3mock/route/CreateBucket.scala:
--------------------------------------------------------------------------------
1 | package io.findify.s3mock.route
2 |
3 | import akka.http.scaladsl.model.headers.Location
4 | import akka.http.scaladsl.model.{HttpResponse, StatusCodes}
5 | import akka.http.scaladsl.server.Directives._
6 | import com.typesafe.scalalogging.LazyLogging
7 | import io.findify.s3mock.provider.Provider
8 | import io.findify.s3mock.request.CreateBucketConfiguration
9 |
/**
  * PUT bucket route.
  * Created by shutty on 8/19/16.
  */
case class CreateBucket()(implicit provider: Provider) extends LazyLogging {
  /** PUT /<bucket>: creates the bucket (optionally from a
    * CreateBucketConfiguration XML body) and answers 200 with a Location header.
    */
  def route(bucket: String) = put {
    entity(as[String]) { body =>
      complete {
        logger.info(s"PUT bucket $bucket")
        val conf =
          if (body.nonEmpty) CreateBucketConfiguration(scala.xml.XML.loadString(body).head)
          else new CreateBucketConfiguration(None)
        val created = provider.createBucket(bucket, conf)
        HttpResponse(StatusCodes.OK).withHeaders(Location(s"/${created.name}"))
      }
    } ~ {
      // Fallback branch when the entity could not be read as a string.
      complete {
        "ok"
      }
    }
  }
}
29 |
--------------------------------------------------------------------------------
/src/main/scala/io/findify/s3mock/route/DeleteBucket.scala:
--------------------------------------------------------------------------------
1 | package io.findify.s3mock.route
2 |
3 | import akka.http.scaladsl.model.{HttpResponse, StatusCodes}
4 | import akka.http.scaladsl.server.Directives._
5 | import com.typesafe.scalalogging.LazyLogging
6 | import io.findify.s3mock.error.{InternalErrorException, NoSuchBucketException}
7 | import io.findify.s3mock.provider.Provider
8 |
9 | import scala.util.{Failure, Success, Try}
10 |
/**
  * DELETE bucket route.
  * Created by shutty on 8/19/16.
  */
case class DeleteBucket()(implicit provider: Provider) extends LazyLogging {
  /** DELETE /<bucket>: 204 on success, 404 when missing, 500 otherwise. */
  def route(bucket: String) = delete {
    complete {
      Try(provider.deleteBucket(bucket)) match {
        case Success(_) =>
          logger.debug(s"DELETE bucket $bucket: ok")
          HttpResponse(StatusCodes.NoContent)
        case Failure(e: NoSuchBucketException) =>
          logger.error(s"DELETE bucket $bucket failed: no such bucket")
          HttpResponse(
            StatusCodes.NotFound,
            entity = e.toXML.toString()
          )
        case Failure(t) =>
          // Unexpected failures are logged like in the sibling routes instead
          // of being answered silently.
          logger.error(s"DELETE bucket $bucket failed", t)
          HttpResponse(
            StatusCodes.InternalServerError,
            entity = InternalErrorException(t).toXML.toString()
          )
      }
    }
  }
}
36 |
--------------------------------------------------------------------------------
/src/main/scala/io/findify/s3mock/route/DeleteObject.scala:
--------------------------------------------------------------------------------
1 | package io.findify.s3mock.route
2 |
3 | import akka.http.scaladsl.model.{HttpResponse, StatusCodes}
4 | import akka.http.scaladsl.server.Directives._
5 | import com.typesafe.scalalogging.LazyLogging
6 | import io.findify.s3mock.error.NoSuchKeyException
7 | import io.findify.s3mock.provider.Provider
8 |
9 | import scala.util.{Failure, Success, Try}
10 |
/**
  * DELETE object route.
  * Created by shutty on 8/20/16.
  */
case class DeleteObject()(implicit provider: Provider) extends LazyLogging {
  /** DELETE /<bucket>/<key>: 204 on success, 404 when the key is missing,
    * 500 for unexpected provider failures.
    */
  def route(bucket: String, path: String) = delete {
    complete {
      Try(provider.deleteObject(bucket, path)) match {
        case Success(_) =>
          logger.info(s"deleted object $bucket/$path")
          HttpResponse(StatusCodes.NoContent)
        case Failure(NoSuchKeyException(_, _)) =>
          logger.info(s"cannot delete object $bucket/$path: no such key")
          HttpResponse(StatusCodes.NotFound)
        case Failure(ex) =>
          logger.error(s"cannot delete object $bucket/$path", ex)
          // An unexpected failure is a server-side error, not a missing key;
          // answering 404 here masked real problems from clients.
          HttpResponse(StatusCodes.InternalServerError)
      }
    }
  }
}
32 |
--------------------------------------------------------------------------------
/src/main/scala/io/findify/s3mock/route/DeleteObjects.scala:
--------------------------------------------------------------------------------
1 | package io.findify.s3mock.route
2 |
3 | import akka.http.scaladsl.model.{HttpResponse, StatusCodes, Uri}
4 | import akka.http.scaladsl.server.Directives.{path, _}
5 | import com.typesafe.scalalogging.LazyLogging
6 | import io.findify.s3mock.error.NoSuchKeyException
7 | import io.findify.s3mock.provider.Provider
8 | import io.findify.s3mock.request.DeleteObjectsRequest
9 | import io.findify.s3mock.response.DeleteObjectsResponse
10 |
11 | import scala.util.{Failure, Success, Try}
12 |
/**
  * POST /<bucket>?delete bulk-delete route.
  * Created by shutty on 3/13/17.
  */
case class DeleteObjects()(implicit provider: Provider) extends LazyLogging {
  /** Deletes every key named in the request XML, collecting per-key outcomes
    * into a DeleteObjectsResponse document. */
  def route(bucket: String) = post {
    parameter('delete) { _ =>
      entity(as[String]) { xml =>
        complete {
          val request = DeleteObjectsRequest(scala.xml.XML.loadString(xml).head)
          // Right = deleted, Left = failed; keys are processed in request order.
          val outcomes = request.objects.map { rawPath =>
            val path = Uri.Path(rawPath).toString // URL-encoded
            Try(provider.deleteObject(bucket, path)) match {
              case Success(_) =>
                logger.info(s"deleted object $bucket/$path")
                Right(path)
              case Failure(NoSuchKeyException(_, _)) =>
                logger.info(s"cannot delete object $bucket/$path: no such key")
                Left(path)
              case Failure(ex) =>
                logger.error(s"cannot delete object $bucket/$path", ex)
                Left(path)
            }
          }
          // Response lists are accumulated in reverse processing order,
          // preserving the established response ordering.
          val response = DeleteObjectsResponse(
            deleted = outcomes.collect { case Right(p) => p }.reverse,
            error = outcomes.collect { case Left(p) => p }.reverse
          )
          HttpResponse(StatusCodes.OK, entity = response.toXML.toString())
        }
      }
    }
  }
}
43 |
--------------------------------------------------------------------------------
/src/main/scala/io/findify/s3mock/route/GetObject.scala:
--------------------------------------------------------------------------------
1 | package io.findify.s3mock.route
2 |
3 | import java.io.StringWriter
4 | import java.net.URLDecoder
5 | import java.util.Date
6 |
7 | import akka.http.scaladsl.model.HttpEntity.Strict
8 | import akka.http.scaladsl.model._
9 | import akka.http.scaladsl.model.headers.{RawHeader, `Last-Modified`}
10 | import akka.http.scaladsl.server.Directives._
11 | import com.amazonaws.services.s3.Headers
12 | import com.amazonaws.services.s3.model.ObjectMetadata
13 | import com.amazonaws.util.DateUtils
14 | import com.typesafe.scalalogging.LazyLogging
15 | import io.findify.s3mock.error.{InternalErrorException, NoSuchBucketException, NoSuchKeyException}
16 | import io.findify.s3mock.provider.{GetObjectData, Provider}
17 |
18 | import scala.collection.JavaConverters._
19 | import scala.util.{Failure, Success, Try}
20 |
21 | /**
22 | * Created by shutty on 8/19/16.
23 | */
24 | case class GetObject()(implicit provider: Provider) extends LazyLogging {
25 | def route(bucket: String, path: String, params: Map[String, String]) = get {
26 |
27 | withRangeSupport {
28 | respondWithDefaultHeader(`Last-Modified`(DateTime(1970, 1, 1))) {
29 | complete {
30 | logger.debug(s"get object: bucket=$bucket, path=$path")
31 |
32 | Try(provider.getObject(bucket, path)) match {
33 | case Success(GetObjectData(data, metaOption)) =>
34 | metaOption match {
35 | case Some(meta) =>
36 | val entity: Strict = ContentType.parse(meta.getContentType) match {
37 | case Right(value) => HttpEntity(value, data)
38 | case Left(error) => HttpEntity(data)
39 | }
40 |
41 | if (params.contains("tagging")) {
42 | handleTaggingRequest(meta)
43 | } else {
44 | HttpResponse(
45 | status = StatusCodes.OK,
46 | entity = entity,
47 | headers = metadataToHeaderList(meta)
48 | )
49 | }
50 |
51 | case None =>
52 | HttpResponse(
53 | status = StatusCodes.OK,
54 | entity = HttpEntity(data),
55 | headers = List()
56 | )
57 | }
58 | case Failure(e: NoSuchKeyException) =>
59 | HttpResponse(
60 | StatusCodes.NotFound,
61 | entity = e.toXML.toString()
62 | )
63 | case Failure(e: NoSuchBucketException) =>
64 | HttpResponse(
65 | StatusCodes.NotFound,
66 | entity = e.toXML.toString()
67 | )
68 | case Failure(t) =>
69 | logger.error("Oops: ", t)
70 | HttpResponse(
71 | StatusCodes.InternalServerError,
72 | entity = InternalErrorException(t).toXML.toString()
73 | )
74 | }
75 | }
76 | }
77 | }
78 | }
79 |
80 |
81 |
  // NOTE(review): the Scala XML literals this method assigned to `root`,
  // `tagset` and `doc` (the <Tagging><TagSet>...</TagSet></Tagging> response
  // skeleton) were lost when this source was extracted — several assignments
  // below have no right-hand side and the method does not compile as shown.
  // Restore the original from version control before editing. Intended
  // behavior, as far as the remaining code shows: build a GetObjectTagging
  // XML document from the URL-encoded "x-amz-tagging" raw-metadata entry
  // ("key=value" pairs joined by '&'), serialize it into `w`, and answer 200
  // with that XML plus the object's metadata headers.
  protected def handleTaggingRequest(meta: ObjectMetadata): HttpResponse = {
    var root =
    var tagset =

    var w = new StringWriter()

    if (meta.getRawMetadata.containsKey("x-amz-tagging")){
      var doc =


        {
          // Each "key=value" pair is URL-decoded before being emitted.
          meta.getRawMetadata.get("x-amz-tagging").asInstanceOf[String].split("&").map(
            (rawTag: String) => {
              rawTag.split("=", 2).map(
                (part: String) => URLDecoder.decode(part, "UTF-8")
              )
            }).map(
            (kv: Array[String]) =>

              {kv(0)}
              {kv(1)}
            )
        }




      xml.XML.write(w, doc, "UTF-8", true, null)
    } else {
      var doc =
      xml.XML.write(w, doc, "UTF-8", true, null)
    }

    meta.setContentType("application/xml; charset=utf-8")
    HttpResponse(
      status = StatusCodes.OK,
      entity = w.toString,
      headers = `Last-Modified`(DateTime(1970, 1, 1)) :: metadataToHeaderList(meta)
    )
  }
122 |
123 | val headerBlacklist = Set("content-type", "connection")
124 | protected def metadataToHeaderList(metadata: ObjectMetadata): List[HttpHeader] = {
125 | val headers = Option(metadata.getRawMetadata)
126 | .map(_.asScala.toMap)
127 | .map(_.map {
128 | case (_, date: Date) =>
129 | `Last-Modified`(DateTime(new org.joda.time.DateTime(date).getMillis))
130 | case (key, value) =>
131 | RawHeader(key, value.toString)
132 | }.toList)
133 | .toList.flatten
134 | .filterNot(header => headerBlacklist.contains(header.lowercaseName))
135 |
136 | val httpExpires = Option(metadata.getHttpExpiresDate).map(date => RawHeader(Headers.EXPIRES, DateUtils.formatRFC822Date(date)))
137 |
138 | val userHeaders = Option(metadata.getUserMetadata)
139 | .map(_.asScala.toMap)
140 | .map(_.map { case (key, value) => {
141 | val name = Option(key).map(_.trim).getOrElse("")
142 | val hvalue = Option(value).map(_.trim).getOrElse("")
143 | RawHeader(Headers.S3_USER_METADATA_PREFIX + name, hvalue)
144 | }}.toList)
145 | .toList
146 | .flatten
147 |
148 | headers ++ httpExpires.toList ++ userHeaders ++ Option(metadata.getContentMD5).map(md5 => RawHeader(Headers.ETAG, md5))
149 | }
150 | }
151 |
--------------------------------------------------------------------------------
/src/main/scala/io/findify/s3mock/route/ListBucket.scala:
--------------------------------------------------------------------------------
1 | package io.findify.s3mock.route
2 |
3 | import akka.http.scaladsl.model._
4 | import akka.http.scaladsl.server.Directives._
5 | import com.typesafe.scalalogging.LazyLogging
6 | import io.findify.s3mock.error.{InternalErrorException, NoSuchBucketException}
7 | import io.findify.s3mock.provider.Provider
8 |
9 | import scala.util.{Failure, Success, Try}
10 | import scala.language.postfixOps
11 |
/**
  * GET bucket route (key listing).
  * Created by shutty on 8/19/16.
  */
case class ListBucket()(implicit provider: Provider) extends LazyLogging {
  /** GET /<bucket>: lists keys, honouring the prefix, delimiter and max-keys
    * query parameters; all responses are XML.
    */
  def route(bucket: String) = get {
    parameter('prefix.?, 'delimiter.?, Symbol("max-keys").?) { (prefix, delimiter, maxkeys) =>
      complete {
        logger.info(s"listing bucket $bucket with prefix=$prefix, delimiter=$delimiter")
        // All three outcomes share the same XML response shape.
        def xmlResponse(status: StatusCode, body: String) =
          HttpResponse(status, entity = HttpEntity(ContentType(MediaTypes.`application/xml`, HttpCharsets.`UTF-8`), body))
        Try(provider.listBucket(bucket, prefix, delimiter, maxkeys.map(_.toInt))) match {
          case Success(listing) =>
            xmlResponse(StatusCodes.OK, listing.toXML.toString)
          case Failure(e: NoSuchBucketException) =>
            xmlResponse(StatusCodes.NotFound, e.toXML.toString)
          case Failure(t) =>
            xmlResponse(StatusCodes.InternalServerError, InternalErrorException(t).toXML.toString)
        }
      }
    }
  }
}
40 |
--------------------------------------------------------------------------------
/src/main/scala/io/findify/s3mock/route/ListBuckets.scala:
--------------------------------------------------------------------------------
1 | package io.findify.s3mock.route
2 |
3 | import akka.http.scaladsl.model._
4 | import akka.http.scaladsl.server.Directives._
5 | import com.typesafe.scalalogging.LazyLogging
6 | import io.findify.s3mock.provider.Provider
7 |
/**
  * GET / route (bucket listing).
  * Created by shutty on 8/19/16.
  */
case class ListBuckets()(implicit provider: Provider) extends LazyLogging {
  /** Answers with a ListAllMyBuckets XML document describing every bucket. */
  def route() = get {
    complete {
      logger.debug("listing all buckets")
      val body = provider.listBuckets.toXML.toString
      HttpResponse(
        StatusCodes.OK,
        entity = HttpEntity(ContentType(MediaTypes.`application/xml`, HttpCharsets.`UTF-8`), body)
      )
    }
  }
}
22 |
--------------------------------------------------------------------------------
/src/main/scala/io/findify/s3mock/route/MetadataUtil.scala:
--------------------------------------------------------------------------------
1 | package io.findify.s3mock.route
2 |
3 | import java.lang.Iterable
4 | import java.util
5 |
6 | import akka.http.javadsl.model.HttpHeader
7 | import akka.http.scaladsl.model.HttpRequest
8 | import com.amazonaws.AmazonClientException
9 | import com.amazonaws.services.s3.Headers
10 | import com.amazonaws.services.s3.internal.ServiceUtils
11 | import com.amazonaws.services.s3.model.ObjectMetadata
12 | import com.amazonaws.util.{DateUtils, StringUtils}
13 | import com.typesafe.scalalogging.LazyLogging
14 |
15 | import scala.collection.JavaConverters._
16 |
/** Translates the headers of an incoming HTTP request into an AWS
  * `ObjectMetadata`, mirroring what the AWS SDK does for S3 responses. */
object MetadataUtil extends LazyLogging {

  /** Builds `ObjectMetadata` from the request's headers: x-amz-meta-* headers
    * become user metadata, well-known headers (Last-Modified, Content-Length,
    * ETag, Expires, parts count) are parsed into their typed fields, and all
    * other headers are stored verbatim. Falls back to the entity's content
    * type when no Content-Type header is present.
    *
    * Throws AmazonClientException when Content-Length or the parts-count
    * header carries a non-numeric value; date parse failures are only logged.
    */
  def populateObjectMetadata(request: HttpRequest): ObjectMetadata = {
    val metadata = new ObjectMetadata()
    // NOTE(review): this ignore-set is built but never consulted — the check
    // below is commented out, so Date/Server/etc. headers currently end up in
    // the metadata like any other header.
    val ignoredHeaders: util.HashSet[String] = new util.HashSet[String]()
    ignoredHeaders.add(Headers.DATE)
    ignoredHeaders.add(Headers.SERVER)
    ignoredHeaders.add(Headers.REQUEST_ID)
    ignoredHeaders.add(Headers.EXTENDED_REQUEST_ID)
    ignoredHeaders.add(Headers.CLOUD_FRONT_ID)
    ignoredHeaders.add(Headers.CONNECTION)

    val headers: Iterable[HttpHeader] = request.getHeaders()
    for (header <- headers.asScala) {
      var key: String = header.name()
      if (StringUtils.beginsWithIgnoreCase(key, Headers.S3_USER_METADATA_PREFIX)) {
        // x-amz-meta-* headers become user metadata with the prefix stripped.
        key = key.substring(Headers.S3_USER_METADATA_PREFIX.length)
        metadata.addUserMetadata(key, header.value())
      }
      // else if (ignoredHeaders.contains(key)) {
      // ignore...
      // }
      else if (key.equalsIgnoreCase(Headers.LAST_MODIFIED)) try
        metadata.setHeader(key, ServiceUtils.parseRfc822Date(header.value()))

      catch {
        case pe: Exception => logger.warn("Unable to parse last modified date: " + header.value(), pe)
      }
      else if (key.equalsIgnoreCase(Headers.CONTENT_LENGTH)) try
        metadata.setHeader(key, java.lang.Long.parseLong(header.value()))

      catch {
        case nfe: NumberFormatException => throw new AmazonClientException("Unable to parse content length. Header 'Content-Length' has corrupted data" + nfe.getMessage, nfe)
      }
      else if (key.equalsIgnoreCase(Headers.ETAG)) metadata.setHeader(key, ServiceUtils.removeQuotes(header.value()))
      else if (key.equalsIgnoreCase(Headers.EXPIRES)) try
        metadata.setHttpExpiresDate(DateUtils.parseRFC822Date(header.value()))

      catch {
        case pe: Exception => logger.warn("Unable to parse http expiration date: " + header.value(), pe)
      }
      // else if (key.equalsIgnoreCase(Headers.EXPIRATION)) new ObjectExpirationHeaderHandler[ObjectMetadata]().handle(metadata, response)
      // else if (key.equalsIgnoreCase(Headers.RESTORE)) new ObjectRestoreHeaderHandler[ObjectRestoreResult]().handle(metadata, response)
      // else if (key.equalsIgnoreCase(Headers.REQUESTER_CHARGED_HEADER)) new S3RequesterChargedHeaderHandler[S3RequesterChargedResult]().handle(metadata, response)
      else if (key.equalsIgnoreCase(Headers.S3_PARTS_COUNT)) try
        metadata.setHeader(key, header.value().toInt)

      catch {
        case nfe: NumberFormatException => throw new AmazonClientException("Unable to parse part count. Header x-amz-mp-parts-count has corrupted data" + nfe.getMessage, nfe)
      }
      else metadata.setHeader(key, header.value())
    }

    // No Content-Type header seen: fall back to the entity's content type.
    if(metadata.getContentType == null){
      metadata.setContentType(request.entity.getContentType.toString)
    }
    metadata
  }
}
76 |
--------------------------------------------------------------------------------
/src/main/scala/io/findify/s3mock/route/PutObject.scala:
--------------------------------------------------------------------------------
1 | package io.findify.s3mock.route
2 |
3 | import akka.http.scaladsl.model.{HttpRequest, HttpResponse, StatusCodes}
4 | import akka.http.scaladsl.server.Directives._
5 | import akka.stream.Materializer
6 | import akka.stream.scaladsl.Sink
7 | import akka.util.ByteString
8 | import com.amazonaws.services.s3.model.ObjectMetadata
9 | import com.typesafe.scalalogging.LazyLogging
10 | import io.findify.s3mock.S3ChunkedProtocolStage
11 | import io.findify.s3mock.error.{InternalErrorException, NoSuchBucketException}
12 | import io.findify.s3mock.provider.Provider
13 | import org.apache.commons.codec.digest.DigestUtils
14 |
15 | import scala.util.{Failure, Success, Try}
16 |
/**
  * PUT/POST object route.
  * Created by shutty on 8/20/16.
  */
case class PutObject()(implicit provider: Provider, mat: Materializer) extends LazyLogging {
  /** PUT or POST /<bucket>/<key>: stores the request body as the object.
    * Requests carrying x-amz-decoded-content-length use the AWS chunked
    * signing protocol and are de-chunked first.
    */
  def route(bucket: String, path: String) = put {
    extractRequest { request =>
      headerValueByName("x-amz-decoded-content-length") { _ =>
        completeSigned(bucket, path)
      } ~ completePlain(bucket, path)
    }
  } ~ post {
    completePlain(bucket, path)
  }

  /** Upload whose body uses the AWS S3 chunked-signature protocol. */
  def completeSigned(bucket: String, path: String) = completeUpload(bucket, path, signed = true)

  /** Upload with a plain (unsigned) body. */
  def completePlain(bucket: String, path: String) = completeUpload(bucket, path, signed = false)

  // Shared implementation for both flavours; the only difference is whether the
  // incoming bytes must first be de-chunked via S3ChunkedProtocolStage. The two
  // public entry points above keep their original signatures.
  private def completeUpload(bucket: String, path: String, signed: Boolean) = extractRequest { request =>
    complete {
      logger.info(s"put object $bucket/$path (${if (signed) "signed" else "unsigned"})")
      val raw = request.entity.dataBytes
      val body = if (signed) raw.via(new S3ChunkedProtocolStage) else raw
      body
        .fold(ByteString(""))(_ ++ _)
        .map { data =>
          val bytes = data.toArray
          val metadata = populateObjectMetadata(request, bytes)
          Try(provider.putObject(bucket, path, bytes, metadata)) match {
            case Success(()) => HttpResponse(StatusCodes.OK)
            case Failure(e: NoSuchBucketException) =>
              HttpResponse(
                StatusCodes.NotFound,
                entity = e.toXML.toString()
              )
            case Failure(t) =>
              HttpResponse(
                StatusCodes.InternalServerError,
                entity = InternalErrorException(t).toXML.toString()
              )
          }
        }
        .runWith(Sink.head[HttpResponse])
    }
  }

  // Request-header metadata plus the MD5 of the stored bytes.
  private def populateObjectMetadata(request: HttpRequest, bytes: Array[Byte]): ObjectMetadata = {
    val metadata = MetadataUtil.populateObjectMetadata(request)
    metadata.setContentMD5(DigestUtils.md5Hex(bytes))
    metadata
  }

}
95 |
--------------------------------------------------------------------------------
/src/main/scala/io/findify/s3mock/route/PutObjectMultipart.scala:
--------------------------------------------------------------------------------
1 | package io.findify.s3mock.route
2 |
3 | import akka.NotUsed
4 | import akka.http.scaladsl.model._
5 | import akka.http.scaladsl.model.headers.ETag
6 | import akka.http.scaladsl.server.Directives._
7 | import akka.stream.scaladsl.{Flow, Sink}
8 | import akka.stream.{FlowShape, Graph, Materializer}
9 | import akka.util.ByteString
10 | import com.typesafe.scalalogging.LazyLogging
11 | import io.findify.s3mock.S3ChunkedProtocolStage
12 | import io.findify.s3mock.error.{InternalErrorException, NoSuchBucketException}
13 | import io.findify.s3mock.provider.Provider
14 | import org.apache.commons.codec.digest.DigestUtils
15 |
16 | import scala.util.{Failure, Success, Try}
17 |
/**
  * PUT part route for multipart uploads.
  * Created by shutty on 8/19/16.
  */
case class PutObjectMultipart()(implicit provider: Provider, mat: Materializer) extends LazyLogging {

  type EntityDecoder = Graph[FlowShape[ByteString, ByteString], NotUsed]

  // Pass-through decoder used when the body is not AWS-chunk-encoded.
  private val defaultEntityEncoder = Flow[ByteString].map(identity)

  def route(bucket: String, path: String) = parameter('partNumber, 'uploadId) { (partNumber: String, uploadId: String) =>
    put {
      logger.debug(s"put multipart object bucket=$bucket path=$path")
      // x-amz-decoded-content-length marks an AWS chunk-signed body.
      headerValueByName("x-amz-decoded-content-length") { _ =>
        completeRequest(bucket, path, partNumber.toInt, uploadId, new S3ChunkedProtocolStage)
      } ~ completeRequest(bucket, path, partNumber.toInt, uploadId)
    } ~ post {
      logger.debug(s"post multipart object bucket=$bucket path=$path")
      completeRequest(bucket, path, partNumber.toInt, uploadId)
    }
  }

  /** Stores one uploaded part, answering 200 with an ETag header (MD5 of the
    * part), 404 for a missing bucket, 500 otherwise. */
  def completeRequest(bucket: String,
                      path: String,
                      partNumber: Int,
                      uploadId: String,
                      entityDecoder: EntityDecoder = defaultEntityEncoder) =
    extractRequest { request =>
      complete {
        request.entity.dataBytes
          .via(entityDecoder)
          .fold(ByteString(""))(_ ++ _)
          .map { data =>
            // Materialize the buffer once; it is used for both storage and the ETag
            // (previously two separate copies were allocated).
            val bytes = data.toArray
            Try(provider.putObjectMultipartPart(bucket, path, partNumber, uploadId, bytes)) match {
              case Success(()) =>
                HttpResponse(
                  StatusCodes.OK,
                  entity = HttpEntity( ContentType(MediaTypes.`application/xml`, HttpCharsets.`UTF-8`), "")
                ).withHeaders(ETag(DigestUtils.md5Hex(bytes)))
              case Failure(e: NoSuchBucketException) =>
                HttpResponse(
                  StatusCodes.NotFound,
                  entity = e.toXML.toString()
                )
              case Failure(t) =>
                HttpResponse(
                  StatusCodes.InternalServerError,
                  entity = InternalErrorException(t).toXML.toString()
                )
            }
          }
          .runWith(Sink.head[HttpResponse])
      }
    }

}
73 |
--------------------------------------------------------------------------------
/src/main/scala/io/findify/s3mock/route/PutObjectMultipartComplete.scala:
--------------------------------------------------------------------------------
1 | package io.findify.s3mock.route
2 |
3 | import akka.http.scaladsl.model._
4 | import akka.http.scaladsl.server.Directives._
5 | import com.typesafe.scalalogging.LazyLogging
6 | import io.findify.s3mock.error.{InternalErrorException, NoSuchBucketException}
7 | import io.findify.s3mock.provider.Provider
8 | import io.findify.s3mock.request.CompleteMultipartUpload
9 |
10 | import scala.util.{Failure, Success, Try}
11 |
/**
  * POST ?uploadId route completing a multipart upload.
  * Created by shutty on 8/20/16.
  */
case class PutObjectMultipartComplete()(implicit provider: Provider) extends LazyLogging {
  /** Parses the CompleteMultipartUpload XML body and asks the provider to
    * assemble the final object, answering with the result document. */
  def route(bucket: String, path: String) = post {
    parameter('uploadId) { uploadId =>
      entity(as[String]) { body =>
        complete {
          logger.info(s"multipart upload completed for $bucket/$path, id = $uploadId")
          val request = CompleteMultipartUpload(scala.xml.XML.loadString(body).head)
          Try(provider.putObjectMultipartComplete(bucket, path, uploadId, request)) match {
            case Success(response) =>
              val payload = HttpEntity(
                ContentType(MediaTypes.`application/xml`, HttpCharsets.`UTF-8`),
                response.toXML.toString()
              )
              HttpResponse(StatusCodes.OK, entity = payload)
            case Failure(e: NoSuchBucketException) =>
              HttpResponse(StatusCodes.NotFound, entity = e.toXML.toString())
            case Failure(t) =>
              HttpResponse(StatusCodes.InternalServerError, entity = InternalErrorException(t).toXML.toString())
          }
        }
      }
    }
  }
}
47 |
--------------------------------------------------------------------------------
/src/main/scala/io/findify/s3mock/route/PutObjectMultipartStart.scala:
--------------------------------------------------------------------------------
1 | package io.findify.s3mock.route
2 |
3 | import java.nio.charset.StandardCharsets
4 |
5 | import akka.http.scaladsl.model._
6 | import akka.http.scaladsl.server.Directives._
7 | import com.typesafe.scalalogging.LazyLogging
8 | import io.findify.s3mock.error.{InternalErrorException, NoSuchBucketException}
9 | import io.findify.s3mock.provider.Provider
10 |
11 | import scala.util.{Failure, Success, Try}
12 |
/**
  * POST ?uploads route starting a multipart upload.
  * Created by shutty on 8/20/16.
  */
case class PutObjectMultipartStart()(implicit provider: Provider) extends LazyLogging {
  /** Registers a new multipart upload (capturing request-header metadata) and
    * returns its InitiateMultipartUploadResult XML. */
  def route(bucket: String, path: String) = post {
    extractRequest { request =>
      parameter('uploads) { _ =>
        complete {
          val metadata = MetadataUtil.populateObjectMetadata(request)
          logger.info(s"multipart upload start to $bucket/$path")
          Try(provider.putObjectMultipartStart(bucket, path, metadata)) match {
            case Success(result) =>
              val payload = result.toXML.toString().getBytes(StandardCharsets.UTF_8)
              HttpResponse(
                StatusCodes.OK,
                entity = HttpEntity(ContentTypes.`application/octet-stream`, payload)
              )
            case Failure(e: NoSuchBucketException) =>
              HttpResponse(StatusCodes.NotFound, entity = e.toXML.toString())
            case Failure(t) =>
              HttpResponse(StatusCodes.InternalServerError, entity = InternalErrorException(t).toXML.toString())
          }
        }
      }
    }
  }
}
47 |
--------------------------------------------------------------------------------
/src/test/java/io/findify/s3mock/example/JavaBuilderExample.java:
--------------------------------------------------------------------------------
1 | package io.findify.s3mock.example;
2 |
3 | import com.amazonaws.auth.AWSStaticCredentialsProvider;
4 | import com.amazonaws.auth.AnonymousAWSCredentials;
5 | import com.amazonaws.client.builder.AwsClientBuilder;
6 | import com.amazonaws.services.s3.AmazonS3;
7 | import com.amazonaws.services.s3.AmazonS3Builder;
8 | import com.amazonaws.services.s3.AmazonS3Client;
9 | import com.amazonaws.services.s3.AmazonS3ClientBuilder;
10 | import com.amazonaws.services.s3.model.DeleteObjectsRequest;
11 | import io.findify.s3mock.S3Mock;
12 |
13 | /**
14 | * Created by shutty on 5/23/17.
15 | */
16 | public class JavaBuilderExample {
17 | public static void main(String[] args) {
18 | S3Mock api = new S3Mock.Builder().withPort(8001).withInMemoryBackend().build();
19 | api.start();
20 | AmazonS3 client = AmazonS3ClientBuilder
21 | .standard()
22 | .withPathStyleAccessEnabled(true)
23 | .withEndpointConfiguration(new AwsClientBuilder.EndpointConfiguration("http://localhost:8001", "us-east-1"))
24 | .withCredentials(new AWSStaticCredentialsProvider(new AnonymousAWSCredentials()))
25 | .build();
26 | client.createBucket("testbucket");
27 | client.putObject("testbucket", "file^name", "contents");
28 | client.deleteObjects(new DeleteObjectsRequest("testbucket").withKeys("file^name"));
29 | }
30 |
31 | }
32 |
--------------------------------------------------------------------------------
/src/test/java/io/findify/s3mock/example/JavaExample.java:
--------------------------------------------------------------------------------
1 | package io.findify.s3mock.example;
2 |
3 | import com.amazonaws.auth.AWSStaticCredentialsProvider;
4 | import com.amazonaws.auth.AnonymousAWSCredentials;
5 | import com.amazonaws.client.builder.AwsClientBuilder;
6 | import com.amazonaws.services.s3.AmazonS3;
7 | import com.amazonaws.services.s3.AmazonS3Client;
8 | import com.amazonaws.services.s3.AmazonS3ClientBuilder;
9 | import io.findify.s3mock.S3Mock;
10 |
11 | /**
12 | * Created by shutty on 8/12/16.
13 | */
14 | public class JavaExample {
15 | public static void main(String[] args) {
16 | S3Mock api = S3Mock.create(8001, "/tmp/s3");
17 | api.start();
18 |
19 | AmazonS3 client = AmazonS3ClientBuilder
20 | .standard()
21 | .withPathStyleAccessEnabled(true)
22 | .withEndpointConfiguration(new AwsClientBuilder.EndpointConfiguration("http://localhost:8001", "us-east-1"))
23 | .withCredentials(new AWSStaticCredentialsProvider(new AnonymousAWSCredentials()))
24 | .build();
25 | client.createBucket("testbucket");
26 | client.putObject("testbucket", "file/name", "contents");
27 | }
28 | }
29 |
--------------------------------------------------------------------------------
/src/test/resources/logback-test.xml:
--------------------------------------------------------------------------------
<configuration>
    <appender name="STDOUT" class="ch.qos.logback.core.ConsoleAppender">
        <encoder>
            <pattern>%d{HH:mm:ss.SSS} %-5level %logger{36} - %msg%n</pattern>
        </encoder>
    </appender>
    <root level="INFO">
        <appender-ref ref="STDOUT"/>
    </root>
</configuration>
--------------------------------------------------------------------------------
/src/test/resources/reference.conf:
--------------------------------------------------------------------------------
1 | foo {
2 | default: 10
3 | }
--------------------------------------------------------------------------------
/src/test/resources/test.conf:
--------------------------------------------------------------------------------
1 | foo {
2 | testConfig: "test"
3 | }
--------------------------------------------------------------------------------
/src/test/scala-2.11/scala/collection/parallel/CollectionConverters.scala:
--------------------------------------------------------------------------------
1 | package scala.collection.parallel
2 |
// Dummy object for Scala 2.11 cross compilation.
// NOTE(review): shared test sources do `import scala.collection.parallel.CollectionConverters._`
// (see GetPutObjectTest); that object only ships with the separate parallel-collections
// module on 2.13. This empty stand-in makes the import resolve (to nothing) on 2.11,
// where `.par` is presumably still provided by the standard library — TODO confirm
// against build.sbt.
object CollectionConverters {
}
6 |
--------------------------------------------------------------------------------
/src/test/scala-2.12/scala/collection/parallel/CollectionConverters.scala:
--------------------------------------------------------------------------------
1 | package scala.collection.parallel
2 |
// Dummy object for Scala 2.12 cross compilation.
// NOTE(review): shared test sources do `import scala.collection.parallel.CollectionConverters._`
// (see GetPutObjectTest); that object only ships with the separate parallel-collections
// module on 2.13. This empty stand-in makes the import resolve (to nothing) on 2.12,
// where `.par` is presumably still provided by the standard library — TODO confirm
// against build.sbt.
object CollectionConverters {
}
6 |
--------------------------------------------------------------------------------
/src/test/scala/io/findify/s3mock/ChunkBufferTest.scala:
--------------------------------------------------------------------------------
1 | package io.findify.s3mock
2 |
3 | import akka.util.ByteString
4 | import org.scalatest.{FlatSpec, Matchers}
5 |
/**
  * Tests for ChunkBuffer, the incremental parser of the AWS "chunked upload"
  * wire format used by these fixtures: each chunk is
  * "&lt;size-hex&gt;;chunk-signature=&lt;64 hex chars&gt;\r\n" followed by &lt;size&gt; payload
  * bytes and a trailing "\r\n".
  *
  * Created by shutty on 8/11/16.
  */
class ChunkBufferTest extends FlatSpec with Matchers {
  "chunk buffer" should "detect header" in {
    val cb = new ChunkBuffer()
    // "3;chunk-signature=" (18 chars) + 64-char signature + "\r\n" = 84 bytes,
    // which matches the second field of the expected Header; the first field
    // is the declared payload size (3).
    cb.addChunk(ByteString("3;chunk-signature=1234567890123456789012345678901234567890123456789012345678901234\r\nfoo\r\n"))
    cb.readHeader shouldBe Some(Header(3, 84, "1234567890123456789012345678901234567890123456789012345678901234"))
  }
  it should "fail on non-complete header" in {
    val cb = new ChunkBuffer()
    // Signature is truncated and the terminating \r\n has not arrived yet.
    cb.addChunk(ByteString("3;chunk-signature=123456789012345678901234567890123456789012345678901234567890"))
    cb.readHeader shouldBe None
  }
  it should "pull complete chunks" in {
    val cb = new ChunkBuffer()
    cb.addChunk(ByteString("3;chunk-signature=1234567890123456789012345678901234567890123456789012345678901234\r\nfoo\r\n"))
    val header = cb.readHeader.get
    val chunk = cb.pullChunk(header)
    // Full header plus full 3-byte payload: the payload can be extracted.
    chunk shouldBe Some(ByteString("foo"))
  }
  it should "ignore incomplete chunks" in {
    val cb = new ChunkBuffer()
    // Header declares 3 payload bytes but only "fo" has arrived: nothing to pull yet.
    cb.addChunk(ByteString("3;chunk-signature=1234567890123456789012345678901234567890123456789012345678901234\r\nfo"))
    val header = cb.readHeader.get
    val chunk = cb.pullChunk(header)
    chunk shouldBe None
  }
}
35 |
--------------------------------------------------------------------------------
/src/test/scala/io/findify/s3mock/CopyObjectTest.scala:
--------------------------------------------------------------------------------
1 | package io.findify.s3mock
2 |
3 | import java.io.ByteArrayInputStream
4 | import java.nio.charset.StandardCharsets
5 | import java.util
6 |
7 | import com.amazonaws.services.s3.model.{CopyObjectRequest, ObjectMetadata, PutObjectRequest}
8 |
/**
  * Behavioural tests for S3 object copying: plain copies, copies that carry
  * source metadata, copies with replacement metadata, and keys containing '='.
  *
  * Created by shutty on 3/13/17.
  */
class CopyObjectTest extends S3MockTest {
  override def behaviour(fixture: => Fixture) = {
    val s3 = fixture.client

    it should "copy an object even if destdir does not exist" in {
      s3.createBucket("bucket-1")
      s3.createBucket("bucket-2")
      s3.putObject("bucket-1", "test.txt", "contents")
      s3.copyObject("bucket-1", "test.txt", "bucket-2", "folder/test.txt")
      getContent(s3.getObject("bucket-2", "folder/test.txt")) shouldBe "contents"
    }

    it should "copy an object with metadata" in {
      s3.createBucket("bucket-3")
      val userMeta = new util.HashMap[String, String]()
      userMeta.put("a", "b")
      val sourceMeta = new ObjectMetadata()
      sourceMeta.setUserMetadata(userMeta)
      val payload = new ByteArrayInputStream(Array(61.toByte, 62.toByte, 63.toByte))
      s3.putObject(new PutObjectRequest("bucket-3", "test.txt", payload, sourceMeta))
      s3.copyObject("bucket-3", "test.txt", "bucket-3", "test2.txt")
      // User metadata must travel with the copy.
      s3.getObject("bucket-3", "test2.txt").getObjectMetadata.getUserMetadata.get("a") shouldBe "b"
    }

    it should "copy an object with new metadata" in {
      s3.createBucket("test-bucket")

      val originalMeta = new ObjectMetadata
      originalMeta.addUserMetadata("key1", "value1")
      originalMeta.addUserMetadata("key2", "value2")
      s3.putObject(new PutObjectRequest("test-bucket", "test.txt", new ByteArrayInputStream("test".getBytes(StandardCharsets.UTF_8)), originalMeta))

      val replacementMeta = new ObjectMetadata
      replacementMeta.addUserMetadata("new-key1", "new-value1")
      replacementMeta.addUserMetadata("new-key2", "new-value2")
      s3.copyObject(new CopyObjectRequest("test-bucket", "test.txt", "test-bucket", "test2.txt").withNewObjectMetadata(replacementMeta))

      // Replacement semantics: only the new metadata survives on the destination.
      val copied = s3.getObject("test-bucket", "test2.txt")
      copied.getObjectMetadata.getUserMetadata.size shouldBe 2
      copied.getObjectMetadata.getUserMetadata.get("new-key1") shouldBe "new-value1"
      copied.getObjectMetadata.getUserMetadata.get("new-key2") shouldBe "new-value2"
    }

    it should "copy an object with = in key" in {
      s3.createBucket("test-bucket")
      s3.putObject("test-bucket", "path/with=123/test.txt", "contents")
      s3.copyObject(new CopyObjectRequest("test-bucket", "path/with=123/test.txt", "test-bucket", "path/with=345/test2.txt"))
      getContent(s3.getObject("test-bucket", "path/with=345/test2.txt")) shouldBe "contents"
    }
  }
}
67 |
--------------------------------------------------------------------------------
/src/test/scala/io/findify/s3mock/CorrectShutdownTest.scala:
--------------------------------------------------------------------------------
1 | package io.findify.s3mock
2 |
/** Smoke-check: an S3Mock instance can be started and then shut down again. */
object CorrectShutdownTest {
  def main(args: Array[String]): Unit = {
    val api = S3Mock.create(8080)
    api.start
    api.shutdown
  }
}
10 |
--------------------------------------------------------------------------------
/src/test/scala/io/findify/s3mock/DeleteTest.scala:
--------------------------------------------------------------------------------
1 | package io.findify.s3mock
2 |
3 | import akka.http.scaladsl.Http
4 | import akka.http.scaladsl.model._
5 | import com.amazonaws.services.s3.model.{AmazonS3Exception, DeleteObjectsRequest}
6 |
7 | import scala.collection.JavaConverters._
8 | import scala.concurrent.Await
9 | import scala.util.Try
10 | import scala.concurrent.duration._
11 |
/**
  * Tests for bucket and object deletion, including SDK multi-object delete and
  * a raw HTTP POST ?delete request.
  *
  * Created by shutty on 8/11/16.
  */
class DeleteTest extends S3MockTest {
  override def behaviour(fixture: => Fixture) = {
    val s3 = fixture.client
    it should "delete a bucket" in {
      s3.createBucket("del")
      s3.listBuckets().asScala.exists(_.getName == "del") shouldBe true
      s3.deleteBucket("del")
      s3.listBuckets().asScala.exists(_.getName == "del") shouldBe false
    }

    it should "return 404 for non existent buckets when deleting" in {
      Try(s3.deleteBucket("nodel")).isFailure shouldBe true
    }

    it should "delete an object" in {
      s3.createBucket("delobj")
      s3.putObject("delobj", "somefile", "foo")
      s3.listObjects("delobj", "somefile").getObjectSummaries.asScala.exists(_.getKey == "somefile") shouldBe true
      s3.deleteObject("delobj", "somefile")
      s3.listObjects("delobj", "somefile").getObjectSummaries.asScala.exists(_.getKey == "somefile") shouldBe false
    }

    it should "return 404 for non-existent keys when deleting" in {
      Try(s3.deleteObject("nodel", "xxx")).isFailure shouldBe true
    }

    it should "produce NoSuchBucket if bucket does not exist when deleting" in {
      val exc = intercept[AmazonS3Exception] {
        s3.deleteBucket("aws-404")
      }
      exc.getStatusCode shouldBe 404
      exc.getErrorCode shouldBe "NoSuchBucket"
    }

    it should "delete multiple objects at once" in {
      s3.createBucket("delobj2")
      s3.putObject("delobj2", "somefile1", "foo1")
      s3.putObject("delobj2", "somefile2", "foo2")
      s3.listObjects("delobj2", "somefile").getObjectSummaries.size() shouldBe 2
      val del = s3.deleteObjects(new DeleteObjectsRequest("delobj2").withKeys("somefile1", "somefile2"))
      del.getDeletedObjects.size() shouldBe 2
      s3.listObjects("delobj2", "somefile").getObjectSummaries.size() shouldBe 0
    }

    it should "do nothing in case for deleting a subpath" in {
      s3.createBucket("delobj3")
      s3.putObject("delobj3", "some/path/foo1", "foo1")
      s3.putObject("delobj3", "some/path/foo2", "foo2")
      // Deleting the "directory" itself must not remove the keys below it.
      // (deleteObject returns Unit; the previous `val del = ...` binding was dead.)
      s3.deleteObject("delobj3", "some/path")
      s3.listObjects("delobj3", "some/path/").getObjectSummaries.size() shouldBe 2
    }

    it should "work with aws sdk 2.0 style multi-object delete" in {
      implicit val mat = fixture.mat
      s3.createBucket("owntracks")
      s3.putObject("owntracks", "data/2017-07-31/10:34.json", "foo")
      s3.putObject("owntracks", "data/2017-07-31/16:23.json", "bar")
      // FIX: the request body was an empty string, so the POST asked to delete
      // nothing and the final assertion could not hold. POST ?delete expects a
      // <Delete> XML document listing the keys to remove — both keys uploaded
      // above must be listed for the bucket to end up empty.
      val requestData =
        """<?xml version="1.0" encoding="UTF-8"?>
          |<Delete>
          |  <Object><Key>data/2017-07-31/10:34.json</Key></Object>
          |  <Object><Key>data/2017-07-31/16:23.json</Key></Object>
          |</Delete>""".stripMargin
      val response = Await.result(Http(fixture.system).singleRequest(HttpRequest(
        method = HttpMethods.POST,
        uri = s"http://localhost:${fixture.port}/owntracks?delete",
        entity = HttpEntity(ContentType(MediaTypes.`application/xml`, HttpCharsets.`UTF-8`), requestData)
      )), 10.seconds)
      s3.listObjects("owntracks").getObjectSummaries.isEmpty shouldBe true
    }
  }
}
82 |
--------------------------------------------------------------------------------
/src/test/scala/io/findify/s3mock/GetPutObjectTest.scala:
--------------------------------------------------------------------------------
1 | package io.findify.s3mock
2 |
3 | import java.io.ByteArrayInputStream
4 | import java.util
5 |
6 | import akka.actor.ActorSystem
7 | import akka.http.scaladsl.Http
8 | import akka.http.scaladsl.model.{HttpMethods, HttpRequest}
9 | import akka.stream.ActorMaterializer
10 | import com.amazonaws.services.s3.model._
11 | import com.amazonaws.util.IOUtils
12 |
13 | import scala.collection.parallel.CollectionConverters._
14 | import scala.jdk.CollectionConverters._
15 | import scala.concurrent.Await
16 | import scala.concurrent.duration._
17 | import scala.util.{Random, Try}
18 |
/**
  * Core GET/PUT behaviour: round-trips, binary blobs, tagging, ranged gets,
  * error codes, key encoding and concurrent access.
  *
  * Created by shutty on 8/10/16.
  */

class GetPutObjectTest extends S3MockTest {
  override def behaviour(fixture: => Fixture) = {
    val s3 = fixture.client
    val port = fixture.port
    it should "put object" in {
      s3.createBucket("getput").getName shouldBe "getput"
      s3.listBuckets().asScala.exists(_.getName == "getput") shouldBe true
      s3.putObject("getput", "foo", "bar")
      val result = getContent(s3.getObject("getput", "foo"))
      result shouldBe "bar"
    }
    it should "be able to post data" in {
      implicit val system = ActorSystem.create("test")
      implicit val mat = ActorMaterializer()
      val http = Http(system)
      if (!s3.listBuckets().asScala.exists(_.getName == "getput")) s3.createBucket("getput")
      // Raw HTTP POST (not the SDK) must also create the object.
      val response = Await.result(http.singleRequest(HttpRequest(method = HttpMethods.POST, uri = s"http://127.0.0.1:$port/getput/foo2", entity = "bar")), 10.seconds)
      getContent(s3.getObject("getput", "foo2")) shouldBe "bar"
    }
    it should "put objects in subdirs" in {
      s3.putObject("getput", "foo1/foo2/foo3", "bar")
      val result = getContent(s3.getObject("getput", "foo1/foo2/foo3"))
      result shouldBe "bar"
    }
    it should "not drop \\r\\n symbols" in {
      s3.putObject("getput", "foorn", "bar\r\nbaz")
      val result = getContent(s3.getObject("getput", "foorn"))
      result shouldBe "bar\r\nbaz"
    }
    it should "put & get large binary blobs" in {
      val blob = Random.nextString(1024000).getBytes("UTF-8")
      s3.putObject("getput", "foolarge", new ByteArrayInputStream(blob), new ObjectMetadata())
      val result = getContent(s3.getObject("getput", "foolarge")).getBytes("UTF-8")
      result shouldBe blob
    }

    it should "store tags and spit them back on get tagging requests" in {
      s3.createBucket("tbucket")
      s3.putObject(
        new PutObjectRequest("tbucket", "taggedobj", new ByteArrayInputStream("content".getBytes("UTF-8")), new ObjectMetadata)
          .withTagging(new ObjectTagging(List(new Tag("key1", "val1"), new Tag("key=&interesting", "value=something&stragne")).asJava))
      )
      // FIX: these were `var` but are never reassigned.
      val tagging = s3.getObjectTagging(new GetObjectTaggingRequest("tbucket", "taggedobj")).getTagSet.asScala
      val tagMap = new util.HashMap[String, String]()
      for (tag <- tagging) {
        tagMap.put(tag.getKey, tag.getValue)
      }
      tagMap.size() shouldBe 2
      tagMap.get("key1") shouldBe "val1"
      // '=' and '&' in tag keys/values must survive URL encoding.
      tagMap.get("key=&interesting") shouldBe "value=something&stragne"
    }
    it should "be OK with retrieving tags for un-tagged objects" in {
      s3.putObject("tbucket", "taggedobj", "some-content")
      val tagging = s3.getObjectTagging(new GetObjectTaggingRequest("tbucket", "taggedobj")).getTagSet
      tagging.size() shouldBe 0
    }

    it should "produce NoSuchBucket if bucket does not exist when GETting" in {
      val exc = intercept[AmazonS3Exception] {
        s3.getObject("aws-404", "foo")
      }
      exc.getStatusCode shouldBe 404
      exc.getErrorCode shouldBe "NoSuchBucket"
    }

    it should "produce NoSuchBucket if bucket does not exist when PUTting" in {
      val exc = intercept[AmazonS3Exception] {
        s3.putObject("aws-404", "foo", "content")
      }
      exc.getStatusCode shouldBe 404
      exc.getErrorCode shouldBe "NoSuchBucket"
    }

    it should "work with large files" in {
      val huge = Random.nextString(10 * 1024 * 1024)
      s3.putObject("getput", "foobig", huge)
      val result = getContent(s3.getObject("getput", "foobig"))
      result shouldBe huge
    }

    it should "work with dot-files" in {
      s3.createBucket("dot")
      s3.listBuckets().asScala.exists(_.getName == "dot") shouldBe true
      s3.putObject("dot", "foo", "bar")
      s3.putObject("dot", ".foo", "bar")
      val result = s3.listObjects("dot").getObjectSummaries.asScala.toList.map(_.getKey)
      result shouldBe List(".foo", "foo")
    }

    it should "work with = in path" in {
      s3.createBucket("urlencoded")
      // FIX: `.exists` / `.toList` were called directly on java.util.List,
      // which scala.jdk.CollectionConverters does not support implicitly;
      // added the `.asScala` decorator used everywhere else in this suite.
      s3.listBuckets().asScala.exists(_.getName == "urlencoded") shouldBe true
      s3.putObject("urlencoded", "path/with=123/foo", "bar=")
      s3.putObject("urlencoded", "path/withoutequals/foo", "bar")
      val result = s3.listObjects("urlencoded").getObjectSummaries.asScala.toList.map(_.getKey)
      result shouldBe List("path/with=123/foo", "path/withoutequals/foo")
      getContent(s3.getObject("urlencoded", "path/with=123/foo")) shouldBe "bar="
      getContent(s3.getObject("urlencoded", "path/withoutequals/foo")) shouldBe "bar"
    }

    it should "support ranged get requests" in {

      val data = new Array[Byte](1000)
      Random.nextBytes(data)

      val bucket = "rangedbuck"
      val key = "data"

      s3.createBucket(bucket)
      s3.putObject(bucket, key, new ByteArrayInputStream(data), new ObjectMetadata())

      // HTTP ranges are inclusive on both ends, hence the +1 on the slice.
      val (startByte, endByte) = (5L, 55L)
      val getObjectRequest = new GetObjectRequest(bucket, key)
      getObjectRequest.setRange(startByte, endByte)

      val sliceOfData = data.slice(startByte.toInt, endByte.toInt + 1)
      val retrievedData = IOUtils.toByteArray(s3.getObject(getObjectRequest).getObjectContent)

      retrievedData shouldEqual sliceOfData
    }

    it should "return 404 on subpath request" in {
      s3.createBucket("subpath")
      s3.putObject("subpath", "some/path/example", "bar")
      // A "directory" prefix is not an object: both forms must 404.
      val noSlash = Try(s3.getObject("subpath", "some/path"))
      noSlash.failed.get.asInstanceOf[AmazonS3Exception].getStatusCode shouldBe 404
      val withSlash = Try(s3.getObject("subpath", "some/path/"))
      withSlash.failed.get.asInstanceOf[AmazonS3Exception].getStatusCode shouldBe 404
    }

    // this trick is not possible on POSIX-compliant file systems:
    // So the test will always fail in file-based provider
    it should "be possible to store /key and /key/bar objects at the same time" ignore {
      s3.createBucket("prefix")
      s3.putObject("prefix", "some/path", "bar")
      s3.putObject("prefix", "some", "bar")
      val noSlash = Try(s3.getObject("prefix", "some/path"))
      val withSlash = Try(s3.getObject("prefix", "some"))
      val br=1
    }

    it should "have etag in metadata" in {
      s3.createBucket("etag")
      s3.putObject("etag", "file/name", "contents")
      val data = s3.getObjectMetadata("etag", "file/name")
      // MD5 of "contents"
      data.getETag shouldBe "98bf7d8c15784f0a3d63204441e1e2aa"
    }

    it should "not fail concurrent requests" in {
      s3.createBucket("concurrent")
      s3.putObject("concurrent", "file/name", "contents")
      // `.par` fans the 99 reads out over parallel collections.
      val results = Range(1, 100).par.map(_ => IOUtils.toString(s3.getObject("concurrent", "file/name").getObjectContent)).toList
      results.forall(_ == "contents") shouldBe true
    }
  }

}
180 |
181 |
--------------------------------------------------------------------------------
/src/test/scala/io/findify/s3mock/GetPutObjectWithMetadataTest.scala:
--------------------------------------------------------------------------------
1 | package io.findify.s3mock
2 |
3 | import java.io.ByteArrayInputStream
4 |
5 | import com.amazonaws.services.s3.model.{ObjectMetadata, S3Object}
6 |
7 | import scala.jdk.CollectionConverters._
8 |
/**
  * Metadata round-trip tests: a valid content type is preserved, while a
  * malformed one falls back to the default binary type.
  *
  * Created by shutty on 8/10/16.
  */
class GetPutObjectWithMetadataTest extends S3MockTest {
  override def behaviour(fixture: => Fixture): Unit = {
    val s3 = fixture.client

    it should "put object with metadata" in {
      s3.createBucket("getput").getName shouldBe "getput"
      s3.listBuckets().asScala.exists(_.getName == "getput") shouldBe true

      val meta: ObjectMetadata = new ObjectMetadata()
      meta.setContentType("application/json")
      meta.setUserMetadata(Map("metamaic" -> "maic").asJava)
      val payload = new ByteArrayInputStream("bar".getBytes("UTF-8"))

      s3.putObject("getput", "foo", payload, meta)

      val stored: S3Object = s3.getObject("getput", "foo")
      // A well-formed content type comes back unchanged.
      stored.getObjectMetadata.getContentType shouldBe "application/json"

      getContent(stored) shouldBe "bar"
    }

    it should "put object with metadata, but skip unvalid content-type" in {
      s3.createBucket("getput").getName shouldBe "getput"
      s3.listBuckets().asScala.exists(_.getName == "getput") shouldBe true

      val meta: ObjectMetadata = new ObjectMetadata()
      meta.setContentType("application")
      meta.setUserMetadata(Map("metamaic" -> "maic").asJava)
      val payload = new ByteArrayInputStream("bar".getBytes("UTF-8"))

      s3.putObject("getput", "foo", payload, meta)

      val stored: S3Object = s3.getObject("getput", "foo")
      // "application" is not a valid media type: fall back to octet-stream.
      stored.getObjectMetadata.getContentType shouldBe "application/octet-stream"

      getContent(stored) shouldBe "bar"
    }

    it should "put object in subdirs with metadata, but skip unvalid content-type" in {
      s3.createBucket("getput").getName shouldBe "getput"
      s3.listBuckets().asScala.exists(_.getName == "getput") shouldBe true

      val meta: ObjectMetadata = new ObjectMetadata()
      meta.setContentType("application")
      meta.setUserMetadata(Map("metamaic" -> "maic").asJava)
      val payload = new ByteArrayInputStream("bar".getBytes("UTF-8"))

      s3.putObject("getput", "foo1/bar", payload, meta)

      val stored: S3Object = s3.getObject("getput", "foo1/bar")
      // Same fallback behaviour for keys with a "directory" component.
      stored.getObjectMetadata.getContentType shouldBe "application/octet-stream"

      getContent(stored) shouldBe "bar"
    }
  }
}
69 |
--------------------------------------------------------------------------------
/src/test/scala/io/findify/s3mock/JavaExampleTest.scala:
--------------------------------------------------------------------------------
1 | package io.findify.s3mock
2 |
3 | import com.amazonaws.auth.BasicAWSCredentials
4 | import com.amazonaws.services.s3.AmazonS3Client
5 | import scala.jdk.CollectionConverters._
6 | import scala.io.Source
7 |
/**
  * Mirrors the Java examples: uploads with anonymous and with basic
  * credentials against the shared test fixture.
  *
  * Created by shutty on 8/19/16.
  */
class JavaExampleTest extends S3MockTest {
  override def behaviour(fixture: => Fixture) = {
    val s3 = fixture.client
    val port = fixture.port

    it should "upload files with anonymous credentials" in {
      s3.createBucket("getput").getName shouldBe "getput"
      s3.listBuckets().asScala.exists(_.getName == "getput") shouldBe true
      s3.putObject("getput", "foo", "bar")
      val content = Source.fromInputStream(s3.getObject("getput", "foo").getObjectContent, "UTF-8").mkString
      content shouldBe "bar"
    }

    it should "upload files with basic credentials" in {
      // Reuses the "getput" bucket created by the previous test. An arbitrary
      // basic keypair works here — presumably the mock does not validate
      // credentials; confirm against the route implementations.
      val basicClient = new AmazonS3Client(new BasicAWSCredentials("foo", "bar"))
      basicClient.setEndpoint(s"http://127.0.0.1:$port")
      basicClient.putObject("getput", "foo2", "bar2")
      val content = Source.fromInputStream(basicClient.getObject("getput", "foo2").getObjectContent, "UTF-8").mkString
      content shouldBe "bar2"
    }
  }
}
33 |
34 |
--------------------------------------------------------------------------------
/src/test/scala/io/findify/s3mock/ListBucketEmptyWorkdirTest.scala:
--------------------------------------------------------------------------------
1 | package io.findify.s3mock
2 |
3 | import scala.collection.JavaConverters._
4 |
/**
  * Listing a bucket without any prefix must return every key in it.
  *
  * Created by shutty on 8/30/16.
  */
class ListBucketEmptyWorkdirTest extends S3MockTest {
  override def behaviour(fixture: => Fixture) = {
    val s3 = fixture.client
    it should "list bucket with empty prefix" in {
      s3.createBucket("list")
      s3.putObject("list", "foo1", "xxx")
      s3.putObject("list", "foo2", "xxx")
      val keys = s3.listObjects("list").getObjectSummaries.asScala.toList.map(_.getKey)
      keys.forall(_.startsWith("foo")) shouldBe true
    }
  }
}
20 |
--------------------------------------------------------------------------------
/src/test/scala/io/findify/s3mock/ListBucketTest.scala:
--------------------------------------------------------------------------------
1 | package io.findify.s3mock
2 |
3 | import java.util
4 |
5 | import com.amazonaws.services.s3.AmazonS3ClientBuilder
6 | import com.amazonaws.services.s3.model.{AmazonS3Exception, ListObjectsRequest, ListObjectsV2Request, S3ObjectSummary}
7 | import org.joda.time.DateTime
8 |
9 | import scala.jdk.CollectionConverters._
10 |
/**
  * Tests for bucket listing: prefixes, delimiters and common prefixes,
  * lexicographic ordering, max-keys truncation, and per-entry metadata
  * (etag, last-modified, size, storage class).
  *
  * Created by shutty on 8/9/16.
  */
class ListBucketTest extends S3MockTest {
  override def behaviour(fixture: => Fixture) = {
    val s3 = fixture.client
    it should "list bucket" in {
      s3.createBucket("foo")
      s3.listObjects("foo").getObjectSummaries.isEmpty shouldBe true
    }
    it should "list bucket with prefix" in {
      s3.createBucket("list")
      s3.putObject("list", "foo1", "xxx")
      s3.putObject("list", "foo2", "xxx")
      s3.putObject("list", "xfoo3", "xxx")
      // "xfoo3" must be filtered out by the "foo" prefix.
      val list = s3.listObjects("list", "foo").getObjectSummaries.asScala.toList
      list.map(_.getKey).forall(_.startsWith("foo")) shouldBe true
    }
    it should "list objects in subfolders with prefix" in {
      s3.createBucket("list2")
      s3.putObject("list2", "one/foo1/1", "xxx")
      s3.putObject("list2", "one/foo2/2", "xxx")
      s3.putObject("list2", "one/foo2/3", "xxx")
      s3.putObject("list2", "one/foo2/4", "xxx")
      s3.putObject("list2", "one/xfoo3", "xxx")
      // The prefix may end mid-segment ("one/f"), not only on a '/'.
      val ol = s3.listObjects("list2", "one/f").getObjectSummaries.asScala.toList
      ol.size shouldBe 4
      ol.map(_.getKey).forall(_.startsWith("one/foo")) shouldBe true
    }
    it should "return empty list if prefix is incorrect" in {
      s3.createBucket("list3")
      s3.putObject("list3", "one/foo1", "xxx")
      s3.putObject("list3", "one/foo2", "xxx")
      s3.putObject("list3", "one/xfoo3", "xxx")
      s3.listObjects("list3", "qaz/qax").getObjectSummaries.asScala.isEmpty shouldBe true

    }
    it should "return keys with valid keys (when no prefix given)" in {
      s3.createBucket("list4")
      s3.putObject("list4", "one", "xxx")
      val summaries: util.List[S3ObjectSummary] = s3.listObjects("list4").getObjectSummaries
      summaries.size() shouldBe 1
      val summary = summaries.get(0)
      summary.getBucketName shouldBe "list4"
      summary.getKey shouldBe "one"
      // "xxx" is 3 bytes long.
      summary.getSize shouldBe 3
      summary.getStorageClass shouldBe "STANDARD"

      // The listed key must round-trip through getObject unmodified.
      val returnedKey = summaries.asScala.last.getKey
      s3.getObject("list4", returnedKey).getKey shouldBe "one"
    }

    it should "produce NoSuchBucket if bucket does not exist" in {
      val exc = intercept[AmazonS3Exception] {
        s3.listObjects("aws-404", "qaz/qax")
      }
      exc.getStatusCode shouldBe 404
      exc.getErrorCode shouldBe "NoSuchBucket"
    }

    it should "obey delimiters && prefixes v1" in {
      s3.createBucket("list5")
      s3.putObject("list5", "sample.jpg", "xxx")
      s3.putObject("list5", "photos/2006/January/sample.jpg", "yyy")
      s3.putObject("list5", "photos/2006/February/sample2.jpg", "zzz")
      s3.putObject("list5", "photos/2006/February/sample3.jpg", "zzz")
      s3.putObject("list5", "photos/2006/February/sample4.jpg", "zzz")
      val req1 = new ListObjectsRequest()
      req1.setBucketName("list5")
      req1.setDelimiter("/")
      val list1 = s3.listObjects(req1)
      val summaries1 = list1.getObjectSummaries.asScala.map(_.getKey).toList
      // The '/' delimiter collapses everything below "photos/" into one common prefix.
      list1.getCommonPrefixes.asScala.toList shouldBe List("photos/")
      summaries1 shouldBe List("sample.jpg")
    }
    it should "obey delimiters && prefixes v2" in {
      s3.createBucket("list5")
      s3.putObject("list5", "sample.jpg", "xxx")
      s3.putObject("list5", "photos/2006/January/sample.jpg", "yyy")
      s3.putObject("list5", "photos/2006/February/sample2.jpg", "zzz")
      s3.putObject("list5", "photos/2006/February/sample3.jpg", "zzz")
      s3.putObject("list5", "photos/2006/February/sample4.jpg", "zzz")
      val req2 = new ListObjectsRequest()
      req2.setBucketName("list5")
      req2.setDelimiter("/")
      req2.setPrefix("photos/2006/")
      val list2 = s3.listObjects(req2)
      val summaries2 = list2.getObjectSummaries.asScala.map(_.getKey).toList
      // All keys sit below the common prefixes, so no summaries are returned.
      list2.getCommonPrefixes.asScala.toList shouldBe List("photos/2006/February/", "photos/2006/January/")
      summaries2 shouldBe Nil
    }

    // NOTE(review): this ignored test talks to real S3 via defaultClient() and
    // documents the behaviour the mock is expected to match.
    it should "obey delimiters && prefixes v2 (matching real s3)" ignore {
      val s3 = AmazonS3ClientBuilder.defaultClient()
      s3.createBucket("findify-merlin")
      s3.putObject("findify-merlin", "sample.jpg", "xxx")
      s3.putObject("findify-merlin", "photos/2006/January/sample.jpg", "yyy")
      s3.putObject("findify-merlin", "photos/2006/February/sample2.jpg", "zzz")
      s3.putObject("findify-merlin", "photos/2006/February/sample3.jpg", "zzz")
      s3.putObject("findify-merlin", "photos/2006/February/sample4.jpg", "zzz")
      val req2 = new ListObjectsRequest()
      req2.setBucketName("findify-merlin")
      req2.setDelimiter("/")
      req2.setPrefix("photos/")
      val list2 = s3.listObjects(req2)
      val summaries2 = list2.getObjectSummaries.asScala.map(_.getKey).toList
      list2.getCommonPrefixes.asScala.toList shouldBe List("photos/2006/")
      summaries2 shouldBe Nil
    }


    it should "obey delimiters && prefixes v3" in {
      s3.createBucket("list5")
      s3.putObject("list5", "dev/someEvent/2017/03/13/00/_SUCCESS", "xxx")
      s3.putObject("list5", "dev/someEvent/2017/03/13/01/_SUCCESS", "yyy")
      s3.putObject("list5", "dev/someEvent/2016/12/31/23/_SUCCESS", "zzz")
      val req2 = new ListObjectsRequest()
      req2.setBucketName("list5")
      req2.setDelimiter("/")
      req2.setPrefix("dev/")
      val list2 = s3.listObjects(req2)
      val summaries2 = list2.getObjectSummaries.asScala.map(_.getKey).toList
      list2.getCommonPrefixes.asScala.toList shouldBe List("dev/someEvent/")
      summaries2 shouldBe Nil
    }

    it should "list objects in lexicographical order" in {
      s3.createBucket("list6")
      s3.putObject("list6", "b", "xx")
      s3.putObject("list6", "a", "xx")
      s3.putObject("list6", "0", "xx")
      // Insertion order is b, a, 0 — listing must still come back sorted.
      val list = s3.listObjects("list6")
      list.getObjectSummaries.asScala.map(_.getKey).toList shouldBe List("0", "a", "b")
    }

    it should "getCommonPrefixes should return return objects sorted lexicographically" in {
      s3.createBucket("list7")
      s3.putObject("list7", "dev/10/2017/03/13/00/_SUCCESS", "xxx")
      s3.putObject("list7", "dev/10/2017/03/13/01/_SUCCESS", "xxx")
      s3.putObject("list7", "dev/20/2017/03/13/00/_SUCCESS", "xxx")
      s3.putObject("list7", "dev/20/2017/03/13/01/_SUCCESS", "xxx")
      s3.putObject("list7", "dev/30/2017/03/13/00/_SUCCESS", "xxx")
      s3.putObject("list7", "dev/30/2017/03/13/01/_SUCCESS", "xxx")
      s3.putObject("list7", "dev/40/2017/03/13/00/_SUCCESS", "xxx")
      s3.putObject("list7", "dev/40/2017/03/13/01/_SUCCESS", "xxx")
      s3.putObject("list7", "dev/50/2017/03/13/00/_SUCCESS", "xxx")
      s3.putObject("list7", "dev/50/2017/03/13/01/_SUCCESS", "xxx")
      val req2 = new ListObjectsRequest()
      req2.setBucketName("list7")
      req2.setDelimiter("/")
      req2.setPrefix("dev/")
      val list2 = s3.listObjects(req2)
      val summaries2 = list2.getObjectSummaries.asScala.map(_.getKey).toList
      list2.getCommonPrefixes.asScala.toList shouldBe List("dev/10/", "dev/20/", "dev/30/", "dev/40/", "dev/50/")
      summaries2 shouldBe Nil
    }

    it should "obey delimiters && prefixes when prefix equals to files name" in {
      s3.createBucket("list8")
      s3.putObject("list8", "dev/someEvent/2017/03/13/00/_SUCCESS", "xxx")
      val req2 = new ListObjectsRequest()
      req2.setBucketName("list8")
      req2.setDelimiter("/")
      req2.setPrefix("dev/someEvent/2017/03/13/00/_SUCCESS")
      val list2 = s3.listObjects(req2)
      // A prefix matching a full key returns the object itself, not a common prefix.
      list2.getObjectSummaries.size shouldEqual 1
      list2.getObjectSummaries.asScala.head.getKey shouldEqual "dev/someEvent/2017/03/13/00/_SUCCESS"
    }

    it should "obey withMaxKeys" in {
      s3.createBucket("list7k")
      s3.putObject("list7k", "b", "xx")
      s3.putObject("list7k", "a", "xx")
      s3.putObject("list7k", "c", "xx")
      // maxKeys=2 out of 3 keys: first two in sorted order, truncated flag set.
      val request = new ListObjectsV2Request().withBucketName("list7k").withMaxKeys(2)
      val list = s3.listObjectsV2(request)
      list.getObjectSummaries.asScala.map(_.getKey).toList shouldBe List("a", "b")
      list.isTruncated shouldBe true
    }

    it should "have correct etags" in {
      s3.createBucket("list9")
      s3.putObject("list9", "foo1", "xxx")
      s3.putObject("list9", "foo2", "yyy")
      val list = s3.listObjects("list9", "foo").getObjectSummaries.asScala.toList
      // Expected value is the MD5 of "xxx".
      list.find(_.getKey == "foo1").map(_.getETag) shouldBe Some("f561aaf6ef0bf14d4208bb46a4ccb3ad")
    }

    it should "set correct last-modified header" in {
      s3.createBucket("list10")
      s3.putObject("list10", "foo", "xxx")
      val list = s3.listObjects("list10").getObjectSummaries.asScala.toList
      // The freshly-written object must be stamped within the last minute.
      list.find(_.getKey == "foo").map(_.getLastModified.after(DateTime.now().minusMinutes(1).toDate)) shouldBe Some(true)
    }

    it should "work with empty string delimiters as if no delimiter was provided" in {
      s3.createBucket("list11")
      s3.putObject("list11", "sample.jpg", "xxx")
      s3.putObject("list11", "photos/2006/January/sample.jpg", "yyy")

      val req = new ListObjectsRequest()
      req.setBucketName("list11")
      req.setDelimiter("")
      req.setPrefix("")
      val list = s3.listObjects(req)
      list.getObjectSummaries.asScala.map(_.getKey).toList should contain only ("sample.jpg", "photos/2006/January/sample.jpg")
    }
  }
}
220 |
--------------------------------------------------------------------------------
/src/test/scala/io/findify/s3mock/ListBucketsTest.scala:
--------------------------------------------------------------------------------
1 | package io.findify.s3mock
2 | /**
3 | * Created by shutty on 8/9/16.
4 | */
5 | import akka.http.scaladsl.Http
6 | import akka.http.scaladsl.model._
7 |
8 | import scala.collection.JavaConverters._
9 | import scala.concurrent.Await
10 | import scala.concurrent.duration._
class ListBucketsTest extends S3MockTest {
  override def behaviour(fixture: => Fixture) = {
    val s3 = fixture.client

    it should "list empty buckets" in {
      // A fresh mock starts with no buckets at all.
      s3.listBuckets().isEmpty shouldBe true
    }

    it should "have correct xml content-type for bucket list" in {
      implicit val sys = fixture.system
      implicit val mat = fixture.mat
      // Raw HTTP request so the Content-Type header can be inspected directly,
      // bypassing the AWS SDK.
      val request = HttpRequest(method = HttpMethods.GET, uri = Uri(s"http://localhost:${fixture.port}/"))
      val response = Await.result(Http().singleRequest(request), 5.seconds)
      response.entity.contentType shouldBe ContentType(MediaTypes.`application/xml`, HttpCharsets.`UTF-8`)
    }
  }
}
29 |
--------------------------------------------------------------------------------
/src/test/scala/io/findify/s3mock/MapMetadataStoreTest.scala:
--------------------------------------------------------------------------------
1 | package io.findify.s3mock
2 |
3 | import java.util
4 |
5 | import com.amazonaws.services.s3.model.ObjectMetadata
6 | import io.findify.s3mock.provider.metadata.{InMemoryMetadataStore, MapMetadataStore, MetadataStore}
7 | import org.scalatest.{BeforeAndAfterAll, FlatSpec, Matchers}
8 | /**
9 | * Created by shutty on 3/13/17.
10 | */
class MapMetadataStoreTest extends FlatSpec with Matchers with BeforeAndAfterAll {

  // Run the shared behaviour against both metadata store implementations.
  // NOTE(review): the file-backed store writes under /tmp/s3 — confirm this
  // path is acceptable on all CI environments.
  for ((store, label) <- List(
         (new MapMetadataStore("/tmp/s3"), "MapMetadataStore"),
         (new InMemoryMetadataStore, "InMemoryMetadataStore"))) {
    label should behave like mdStoreBehaviour(store)
  }

  // Shared behaviour: round-trip a single user-metadata entry through the store.
  def mdStoreBehaviour(mm: => MetadataStore) = {
    it should "save md to a fresh store" in {
      val user = new util.HashMap[String, String]()
      user.put("foo", "bar")
      val meta = new ObjectMetadata()
      meta.setUserMetadata(user)
      mm.put("foo", "bar", meta)
      val restored = mm.get("foo", "bar").get
      restored.getUserMetadata shouldBe meta.getUserMetadata
    }
  }
}
30 |
--------------------------------------------------------------------------------
/src/test/scala/io/findify/s3mock/MultipartCopyTest.scala:
--------------------------------------------------------------------------------
1 | package io.findify.s3mock
2 |
3 | import java.io.ByteArrayInputStream
4 |
5 | import akka.actor.ActorSystem
6 | import akka.http.scaladsl.Http
7 | import akka.http.scaladsl.model.{HttpMethods, HttpRequest}
8 | import akka.stream.ActorMaterializer
9 | import akka.stream.scaladsl.Sink
10 | import akka.util.ByteString
11 | import com.amazonaws.services.s3.model._
12 | import org.apache.commons.codec.digest.DigestUtils
13 |
14 | import scala.collection.JavaConverters._
15 | import scala.concurrent.Await
16 | import scala.concurrent.duration._
17 | import scala.util.Random
18 |
class MultipartCopyTest extends S3MockTest {
  override def behaviour(fixture: => Fixture) = {
    implicit val system = ActorSystem.create("test")
    implicit val mat = ActorMaterializer()
    val http = Http(system)
    val s3 = fixture.client
    val port = fixture.port

    it should "upload copy multipart files" in {
      s3.createBucket("source").getName shouldBe "source"
      s3.listBuckets().asScala.exists(_.getName == "source") shouldBe true
      val objectSize = 10000000
      val blob = Random.alphanumeric.take(objectSize).mkString

      s3.putObject("source", "foo", blob)
      getContent(s3.getObject("source", "foo")) shouldBe blob

      s3.createBucket("dest").getName shouldBe "dest"

      val init = s3.initiateMultipartUpload(new InitiateMultipartUploadRequest("dest", "bar"))
      val partSize = 2500000
      // Copy "source/foo" into "dest/bar" in four byte ranges.
      // Fix: previously each iteration also filled an unused 2.5MB random array
      // (dead work) that was paired with the etag and then discarded.
      // NOTE(review): part numbers start at 0 here; real S3 requires 1-10000 —
      // the mock accepts 0, so this is preserved as-is.
      val etags = for (i <- 0 to 3) yield {
        val bytePosition = i * partSize
        val lastByte = math.min(bytePosition + partSize - 1, objectSize - 1)
        val part = s3.copyPart(new CopyPartRequest()
          .withSourceBucketName("source").withSourceKey("foo")
          .withDestinationBucketName("dest").withDestinationKey("bar")
          .withUploadId(init.getUploadId)
          .withFirstByte(bytePosition.toLong).withLastByte(lastByte.toLong)
          .withPartNumber(i))
        part.getPartETag
      }
      val result = s3.completeMultipartUpload(new CompleteMultipartUploadRequest("dest", "bar", init.getUploadId, etags.asJava))
      result.getKey shouldBe "bar"
      // The copied object must be byte-identical to the source.
      val source = getContent(s3.getObject("source", "foo"))
      val dest = getContent(s3.getObject("dest", "bar"))
      dest.length() shouldBe source.length()
      DigestUtils.md5Hex(dest) shouldBe DigestUtils.md5Hex(source)
    }

    it should "produce NoSuchBucket if bucket does not exist" in {
      // initiateMultipartUpload against a missing bucket must fail immediately;
      // the copy-part loop below is never reached.
      // Fix: a 10M-character random blob was previously generated here and
      // never used — removed.
      val exc = intercept[AmazonS3Exception] {
        val init = s3.initiateMultipartUpload(new InitiateMultipartUploadRequest("aws-404", "foo4"))
        val objectSize = 10000000
        val partSize = 2500000
        for (i <- 0 to 3) {
          val bytePosition = i * partSize
          val lastByte = math.min(bytePosition + partSize - 1, objectSize - 1)
          s3.copyPart(new CopyPartRequest()
            .withSourceBucketName("source").withSourceKey("foo")
            .withDestinationBucketName("dest").withDestinationKey("bar")
            .withUploadId(init.getUploadId)
            .withFirstByte(bytePosition.toLong).withLastByte(lastByte.toLong)
            .withPartNumber(i))
        }
      }
      exc.getStatusCode shouldBe 404
      exc.getErrorCode shouldBe "NoSuchBucket"
    }
  }
}
85 |
--------------------------------------------------------------------------------
/src/test/scala/io/findify/s3mock/MultipartUploadTest.scala:
--------------------------------------------------------------------------------
1 | package io.findify.s3mock
2 |
3 | import java.io.ByteArrayInputStream
4 | import java.nio.charset.Charset
5 |
6 | import akka.actor.ActorSystem
7 | import akka.http.scaladsl.Http
8 | import akka.http.scaladsl.model.{HttpMethods, HttpRequest}
9 | import akka.stream.ActorMaterializer
10 | import akka.stream.scaladsl.Sink
11 | import akka.util.ByteString
12 | import com.amazonaws.services.s3.model._
13 | import org.apache.commons.codec.digest.DigestUtils
14 |
15 | import scala.collection.JavaConverters._
16 | import scala.concurrent.duration._
17 | import scala.concurrent.Await
18 | import scala.util.Random
19 |
20 | /**
21 | * Created by shutty on 8/10/16.
22 | */
class MultipartUploadTest extends S3MockTest {
  override def behaviour(fixture: => Fixture) = {
    // Dedicated actor system/materializer for the raw-HTTP tests below.
    implicit val system = ActorSystem.create("test")
    implicit val mat = ActorMaterializer()
    val http = Http(system)
    val s3 = fixture.client
    val port = fixture.port

    // Drives the S3 multipart protocol by hand over raw HTTP:
    // initiate (?uploads), upload two parts, complete with an XML manifest.
    it should "upload multipart files" in {
      s3.createBucket("getput")
      val response1 = Await.result(http.singleRequest(HttpRequest(method = HttpMethods.POST, uri = s"http://127.0.0.1:$port/getput/foo2?uploads")), 10.minutes)
      val data = Await.result(response1.entity.dataBytes.fold(ByteString(""))(_ ++ _).runWith(Sink.head), 10.seconds)
      // Pull the UploadId out of the InitiateMultipartUploadResult XML body.
      val uploadId = (scala.xml.XML.loadString(data.utf8String) \ "UploadId").text
      val response2 = Await.result(http.singleRequest(HttpRequest(method = HttpMethods.POST, uri = s"http://127.0.0.1:$port/getput/foo2?partNumber=1&uploadId=$uploadId", entity = "foo")), 10.minutes)
      response2.status.intValue() shouldBe 200
      val response3 = Await.result(http.singleRequest(HttpRequest(method = HttpMethods.POST, uri = s"http://127.0.0.1:$port/getput/foo2?partNumber=2&uploadId=$uploadId", entity = "boo")), 10.minutes)
      response3.status.intValue() shouldBe 200
      // CompleteMultipartUpload request body.
      // NOTE(review): this payload looks garbled — the XML element tags
      // (e.g. <Part>, <PartNumber>) appear to have been stripped from the
      // literal; verify against the original source before editing.
      val commit = """
        |
        | 1
        | ETag
        |
        |
        | 2
        | ETag
        |
        |""".stripMargin
      val response4 = Await.result(http.singleRequest(HttpRequest(method = HttpMethods.POST, uri = s"http://127.0.0.1:$port/getput/foo2?uploadId=$uploadId", entity = commit)), 10.minutes)
      response4.status.intValue() shouldBe 200

      // The completed object must be the concatenation of both parts in order.
      getContent(s3.getObject("getput", "foo2")) shouldBe "fooboo"
    }

    // Same flow through the AWS Java SDK's multipart API.
    it should "work with java sdk" in {
      s3.createBucket("getput")
      val init = s3.initiateMultipartUpload(new InitiateMultipartUploadRequest("getput", "foo4"))
      val p1 = s3.uploadPart(new UploadPartRequest().withBucketName("getput").withPartSize(10).withKey("foo4").withPartNumber(1).withUploadId(init.getUploadId).withInputStream(new ByteArrayInputStream("hellohello".getBytes())))
      val p2 = s3.uploadPart(new UploadPartRequest().withBucketName("getput").withPartSize(10).withKey("foo4").withPartNumber(2).withUploadId(init.getUploadId).withInputStream(new ByteArrayInputStream("worldworld".getBytes())))
      val result = s3.completeMultipartUpload(new CompleteMultipartUploadRequest("getput", "foo4", init.getUploadId, List(p1.getPartETag, p2.getPartETag).asJava))
      result.getKey shouldBe "foo4"
      getContent(s3.getObject("getput", "foo4")) shouldBe "hellohelloworldworld"
    }

    // 201 parts of 10 KB random data; the reassembled object must hash to the
    // same MD5 as the concatenation of the inputs.
    it should "work with large blobs" in {
      val init = s3.initiateMultipartUpload(new InitiateMultipartUploadRequest("getput", "fooLarge"))
      // NOTE(review): part numbers start at 0 here; real S3 requires 1-10000 —
      // the mock evidently accepts 0. Confirm before tightening.
      val blobs = for (i <- 0 to 200) yield {
        val blob1 = new Array[Byte](10000)
        Random.nextBytes(blob1)
        val p1 = s3.uploadPart(new UploadPartRequest().withBucketName("getput").withPartSize(blob1.length).withKey("fooLarge").withPartNumber(i).withUploadId(init.getUploadId).withInputStream(new ByteArrayInputStream(blob1)))
        blob1 -> p1.getPartETag
      }
      val result = s3.completeMultipartUpload(new CompleteMultipartUploadRequest("getput", "fooLarge", init.getUploadId, blobs.map(_._2).asJava))
      result.getKey shouldBe "fooLarge"
      DigestUtils.md5Hex(s3.getObject("getput", "fooLarge").getObjectContent) shouldBe DigestUtils.md5Hex(blobs.map(_._1).fold(Array[Byte]())(_ ++ _))
    }


    it should "produce NoSuchBucket if bucket does not exist" in {
      // initiateMultipartUpload on a missing bucket throws; uploadPart is never reached.
      val exc = intercept[AmazonS3Exception] {
        val init = s3.initiateMultipartUpload(new InitiateMultipartUploadRequest("aws-404", "foo4"))
        val p1 = s3.uploadPart(new UploadPartRequest().withBucketName("aws-404").withPartSize(10).withKey("foo4").withPartNumber(1).withUploadId(init.getUploadId).withInputStream(new ByteArrayInputStream("hellohello".getBytes())))
      }
      exc.getStatusCode shouldBe 404
      exc.getErrorCode shouldBe "NoSuchBucket"
    }

    // Content-type and user metadata supplied at initiation must survive the
    // multipart round-trip.
    it should "upload multipart with metadata" in {
      s3.createBucket("getput")
      val metadata: ObjectMetadata = new ObjectMetadata()
      metadata.setContentType("application/json")
      metadata.addUserMetadata("metamaic", "maic")
      val init = s3.initiateMultipartUpload(new InitiateMultipartUploadRequest("getput", "foo4", metadata))
      val p1 = s3.uploadPart(new UploadPartRequest().withBucketName("getput").withPartSize(10).withKey("foo4").withPartNumber(1).withUploadId(init.getUploadId).withInputStream(new ByteArrayInputStream("hellohello".getBytes())))
      val p2 = s3.uploadPart(new UploadPartRequest().withBucketName("getput").withPartSize(10).withKey("foo4").withPartNumber(2).withUploadId(init.getUploadId).withInputStream(new ByteArrayInputStream("worldworld".getBytes())))
      val result = s3.completeMultipartUpload(new CompleteMultipartUploadRequest("getput", "foo4", init.getUploadId, List(p1.getPartETag, p2.getPartETag).asJava))
      result.getKey shouldBe "foo4"
      val s3Object = s3.getObject("getput", "foo4")
      getContent(s3Object) shouldBe "hellohelloworldworld"

      val actualMetadata: ObjectMetadata = s3Object.getObjectMetadata
      actualMetadata.getContentType shouldBe "application/json"
      actualMetadata.getUserMetadata.get("metamaic") shouldBe "maic"
    }
  }
}
107 |
--------------------------------------------------------------------------------
/src/test/scala/io/findify/s3mock/PutBucketTest.scala:
--------------------------------------------------------------------------------
1 | package io.findify.s3mock
2 | import scala.jdk.CollectionConverters._
3 | /**
4 | * Created by shutty on 8/10/16.
5 | */
class PutBucketTest extends S3MockTest {
  override def behaviour(fixture: => Fixture) = {
    val s3 = fixture.client

    it should "create buckets" in {
      // Bucket list starts empty; after creation the new bucket must be visible.
      s3.listBuckets().isEmpty shouldBe true
      s3.createBucket("hello").getName shouldBe "hello"
      val names = s3.listBuckets().asScala.map(_.getName)
      names should contain("hello")
    }

    it should "create buckets with region" in {
      s3.createBucket("hello2", "us-west-1")
      val names = s3.listBuckets().asScala.map(_.getName)
      names should contain("hello2")
    }
  }
}
20 |
--------------------------------------------------------------------------------
/src/test/scala/io/findify/s3mock/S3ChunkedProtocolTest.scala:
--------------------------------------------------------------------------------
1 | package io.findify.s3mock
2 |
3 | import akka.actor.ActorSystem
4 | import akka.stream.ActorMaterializer
5 | import akka.stream.scaladsl.{Sink, Source}
6 | import akka.util.ByteString
7 | import org.scalatest.{FlatSpec, Matchers}
8 | import scala.concurrent.duration._
9 | import scala.concurrent.Await
10 |
11 | /**
12 | * Created by shutty on 8/11/16.
13 | */
class S3ChunkedProtocolTest extends FlatSpec with Matchers {
  implicit val system = ActorSystem.create("test")
  implicit val mat = ActorMaterializer()

  // Splits a raw chunk-signed payload into 10-byte frames (so frame boundaries
  // never align with protocol boundaries), runs it through the stage and
  // returns the decoded payload as a single string.
  private def decode(raw: String): String = {
    val frames = raw.grouped(10).map(ByteString(_)).toList
    val decoded = Source(frames).via(new S3ChunkedProtocolStage).map(_.utf8String).runWith(Sink.seq)
    Await.result(decoded, 10.seconds).mkString
  }

  "s3 chunk protocol" should "work with simple ins" in {
    decode("3;chunk-signature=1234567890123456789012345678901234567890123456789012345678901234\r\nfoo\r\n3;chunk-signature=1234567890123456789012345678901234567890123456789012345678901234\r\nbar\r\n") shouldBe "foobar"
  }

  it should "not drop \\r\\n chars" in {
    // CRLF inside the chunk payload itself must survive decoding.
    decode("5;chunk-signature=1234567890123456789012345678901234567890123456789012345678901234\r\nfoo\r\n\r\n3;chunk-signature=1234567890123456789012345678901234567890123456789012345678901234\r\nbar\r\n") shouldBe "foo\r\nbar"
  }
}
29 |
--------------------------------------------------------------------------------
/src/test/scala/io/findify/s3mock/S3MockTest.scala:
--------------------------------------------------------------------------------
1 | package io.findify.s3mock
2 |
3 | import akka.actor.ActorSystem
4 | import akka.stream.alpakka.s3.S3Settings
5 | import akka.stream.alpakka.s3.scaladsl.S3
6 | import akka.stream.{ActorMaterializer, Materializer}
7 | import better.files.File
8 | import com.amazonaws.auth.{AWSStaticCredentialsProvider, AnonymousAWSCredentials, BasicAWSCredentials, DefaultAWSCredentialsProviderChain}
9 | import com.amazonaws.client.builder.AwsClientBuilder.EndpointConfiguration
10 | import com.amazonaws.services.s3.{AmazonS3, AmazonS3Client, AmazonS3ClientBuilder}
11 | import com.amazonaws.services.s3.model.S3Object
12 | import com.amazonaws.services.s3.transfer.{TransferManager, TransferManagerBuilder}
13 | import com.typesafe.config.{Config, ConfigFactory}
14 | import io.findify.s3mock.provider.{FileProvider, InMemoryProvider}
15 |
16 | import scala.collection.JavaConverters._
17 | import org.scalatest.{BeforeAndAfterAll, FlatSpec, Matchers}
18 |
19 | import scala.concurrent.Await
20 | import scala.concurrent.duration.Duration
21 | import scala.io.Source
22 |
23 | /**
24 | * Created by shutty on 8/9/16.
25 | */
/**
  * Shared fixture trait: boots two S3Mock servers (file-backed and in-memory)
  * and runs each subclass's `behaviour` suite against both of them.
  */
trait S3MockTest extends FlatSpec with Matchers with BeforeAndAfterAll {
  // Scratch directory for the file-based provider; created in beforeAll,
  // deleted in afterAll.
  private val workDir = File.newTemporaryDirectory().pathAsString

  // File-backed fixture: server, SDK client, alpakka client and actor system.
  private val fileBasedPort = 8001
  private val fileSystemConfig = configFor("localhost", fileBasedPort)
  private val fileSystem = ActorSystem.create("testfile", fileSystemConfig)
  private val fileMat = ActorMaterializer()(fileSystem)
  private val fileBasedS3 = clientFor("localhost", fileBasedPort)
  private val fileBasedServer = new S3Mock(fileBasedPort, new FileProvider(workDir))
  private val fileBasedTransferManager: TransferManager = TransferManagerBuilder.standard().withS3Client(fileBasedS3).build()
  private val fileBasedAlpakkaClient = S3

  // In-memory fixture: same shape, backed by InMemoryProvider.
  private val inMemoryPort = 8002
  private val inMemoryConfig = configFor("localhost", inMemoryPort)
  private val inMemorySystem = ActorSystem.create("testram", inMemoryConfig)
  private val inMemoryMat = ActorMaterializer()(inMemorySystem)
  private val inMemoryS3 = clientFor("localhost", inMemoryPort)
  private val inMemoryServer = new S3Mock(inMemoryPort, new InMemoryProvider)
  private val inMemoryTransferManager: TransferManager = TransferManagerBuilder.standard().withS3Client(inMemoryS3).build()
  private val inMemoryBasedAlpakkaClient = S3

  /** Everything a behaviour needs to talk to one running S3Mock instance. */
  case class Fixture(server: S3Mock, client: AmazonS3, tm: TransferManager, name: String, port: Int, alpakka: S3.type , system: ActorSystem, mat: Materializer)

  val fixtures = List(
    Fixture(fileBasedServer, fileBasedS3, fileBasedTransferManager, "file based S3Mock", fileBasedPort, fileBasedAlpakkaClient, fileSystem, fileMat),
    Fixture(inMemoryServer, inMemoryS3, inMemoryTransferManager, "in-memory S3Mock", inMemoryPort, inMemoryBasedAlpakkaClient, inMemorySystem, inMemoryMat)
  )

  /** Implemented by subclasses: the suite of tests to run against each fixture. */
  def behaviour(fixture: => Fixture) : Unit

  // Register the shared behaviour once per fixture flavour.
  for (fixture <- fixtures) {
    fixture.name should behave like behaviour(fixture)
  }

  override def beforeAll = {
    if (!File(workDir).exists) File(workDir).createDirectory()
    fileBasedServer.start
    inMemoryServer.start
    super.beforeAll
  }

  override def afterAll = {
    inMemoryServer.stop
    fileBasedServer.stop
    inMemoryTransferManager.shutdownNow()
    // Fix: the file-based TransferManager was previously never shut down,
    // leaking its thread pool (and wrapped S3 client) across suites.
    fileBasedTransferManager.shutdownNow()
    Await.result(fileSystem.terminate(), Duration.Inf)
    Await.result(inMemorySystem.terminate(), Duration.Inf)
    File(workDir).delete()
    // Fix: super.afterAll now runs last (was first), per ScalaTest's
    // BeforeAndAfterAll convention, so stacked traits tear down in reverse order.
    super.afterAll
  }

  /** Drains an S3Object's content stream into a UTF-8 string. */
  def getContent(s3Object: S3Object): String = Source.fromInputStream(s3Object.getObjectContent, "UTF-8").mkString

  /** AWS SDK client pointed at a local mock endpoint (path-style access, anonymous credentials). */
  def clientFor(host: String, port: Int): AmazonS3 = {
    val endpoint = new EndpointConfiguration(s"http://$host:$port", "us-east-1")
    AmazonS3ClientBuilder.standard()
      .withPathStyleAccessEnabled(true)
      .withCredentials(new AWSStaticCredentialsProvider(new AnonymousAWSCredentials()))
      .withEndpointConfiguration(endpoint)
      .build()
  }

  /** Typesafe config routing alpakka S3 traffic through the local mock proxy. */
  def configFor(host: String, port: Int): Config = {
    ConfigFactory.parseMap(Map(
      "alpakka.s3.proxy.host" -> host,
      "alpakka.s3.proxy.port" -> port,
      "alpakka.s3.proxy.secure" -> false,
      "alpakka.s3.path-style-access" -> true,
      "alpakka.s3.aws.credentials.provider" -> "static",
      "alpakka.s3.aws.credentials.access-key-id" -> "foo",
      "alpakka.s3.aws.credentials.secret-access-key" -> "bar",
      "alpakka.s3.aws.region.provider" -> "static",
      "alpakka.s3.aws.region.default-region" -> "us-east-1",
      "alpakka.s3.buffer" -> "memory",
      "alpakka.s3.disk-buffer-path" -> ""
    ).asJava)
  }

}
102 |
103 |
--------------------------------------------------------------------------------
/src/test/scala/io/findify/s3mock/TypesafeConfigTest.scala:
--------------------------------------------------------------------------------
1 | package io.findify.s3mock
2 |
3 | import com.typesafe.config.ConfigFactory
4 |
5 | /*
6 | a repro for
7 | https://github.com/findify/s3mock/issues/56
8 | Not yet fixed :(
9 | */
10 |
class TypesafeConfigTest extends S3MockTest {
  override def behaviour(fixture: => Fixture) = {

    // Ignored on purpose: repro for findify/s3mock#56 (see file header comment).
    // NOTE(review): ConfigFactory.parseResources expects a resource name
    // without a leading slash; "/test.conf" may never resolve — verify when
    // re-enabling this test.
    it should "load typesafe config files" ignore {
      val conf = ConfigFactory.parseResources("/test.conf")
      conf.getString("foo.testConfig") shouldBe "test"
    }
  }
}
20 |
--------------------------------------------------------------------------------
/src/test/scala/io/findify/s3mock/alpakka/AlpakkaExample.scala:
--------------------------------------------------------------------------------
1 | package io.findify.s3mock.alpakka
2 |
3 | import akka.actor.ActorSystem
4 | import akka.stream.ActorMaterializer
5 | import akka.stream.alpakka.s3.scaladsl.S3
6 | import akka.stream.scaladsl.Sink
7 | import com.typesafe.config.ConfigFactory
8 |
9 | import scala.concurrent.Future
10 | import scala.jdk.CollectionConverters._
11 |
object AlpakkaExample {
  def main(args: Array[String]): Unit = {
    // Point alpakka's S3 connector at a locally running s3mock via its proxy settings.
    val settings = Map(
      "alpakka.s3.proxy.host" -> "localhost",
      "alpakka.s3.proxy.port" -> 8001,
      "alpakka.s3.proxy.secure" -> false,
      "alpakka.s3.path-style-access" -> true,
      "alpakka.s3.aws.credentials.provider" -> "static",
      "alpakka.s3.aws.credentials.access-key-id" -> "foo",
      "alpakka.s3.aws.credentials.secret-access-key" -> "bar",
      "alpakka.s3.aws.region.provider" -> "static",
      "alpakka.s3.aws.region.default-region" -> "us-east-1"
    )
    val config = ConfigFactory.parseMap(settings.asJava)
    implicit val system = ActorSystem.create("test", config)
    implicit val mat = ActorMaterializer()
    import system.dispatcher
    // Download "bucket/key"; an absent object yields the empty string.
    val downloadResult = S3.download("bucket", "key").runWith(Sink.head)
    val contents = downloadResult.flatMap { maybeObject =>
      maybeObject
        .map { case (dataSource, _) => dataSource.runWith(Sink.head).map(_.utf8String) }
        .getOrElse(Future.successful(""))
    }
  }
}
32 |
--------------------------------------------------------------------------------
/src/test/scala/io/findify/s3mock/alpakka/GetObjectTest.scala:
--------------------------------------------------------------------------------
1 | package io.findify.s3mock.alpakka
2 |
3 | import akka.actor.ActorSystem
4 | import akka.http.scaladsl.model.headers.ByteRange
5 | import akka.stream.ActorMaterializer
6 | import akka.stream.scaladsl.Sink
7 | import akka.util.ByteString
8 | import com.typesafe.config.ConfigFactory
9 | import io.findify.s3mock.S3MockTest
10 |
11 | import scala.concurrent.duration._
12 | import scala.jdk.CollectionConverters._
13 | import scala.concurrent.Await
14 |
15 | /**
16 | * Created by shutty on 5/19/17.
17 | */
class GetObjectTest extends S3MockTest {

  override def behaviour(fixture: => Fixture) = {
    val s3 = fixture.client
    implicit val sys = fixture.system
    implicit val mat = fixture.mat

    it should "get objects via alpakka" in {
      s3.createBucket("alpakka1")
      s3.putObject("alpakka1", "test1", "foobar")
      // download yields an optional (source, metadata) pair; collapse the
      // source into a single UTF-8 string.
      val (source, _) = Await.result(fixture.alpakka.download("alpakka1", "test1").runWith(Sink.head), 5.seconds).get
      val body = Await.result(source.runFold(ByteString(""))(_ ++ _), 5.seconds).utf8String
      body shouldBe "foobar"
    }

    it should "get by range" in {
      s3.createBucket("alpakka2")
      s3.putObject("alpakka2", "test2", "foobar")
      // Bytes 1..4 (inclusive) of "foobar" are "ooba".
      val (source, _) = Await.result(fixture.alpakka.download("alpakka2", "test2", Some(ByteRange(1, 4))).runWith(Sink.head), 5.seconds).get
      val body = Await.result(source.runFold(ByteString(""))(_ ++ _), 5.seconds).utf8String
      body shouldBe "ooba"
    }
  }
}
43 |
--------------------------------------------------------------------------------
/src/test/scala/io/findify/s3mock/alpakka/ListBucketTest.scala:
--------------------------------------------------------------------------------
1 | package io.findify.s3mock.alpakka
2 |
3 | import akka.stream.scaladsl.Sink
4 | import akka.util.ByteString
5 | import io.findify.s3mock.S3MockTest
6 | import scala.concurrent.duration._
7 | import scala.concurrent.Await
8 |
class ListBucketTest extends S3MockTest {
  override def behaviour(fixture: => Fixture) = {
    val s3 = fixture.client
    implicit val sys = fixture.system
    implicit val mat = fixture.mat

    it should "list objects via alpakka" in {
      s3.createBucket("alpakkalist")
      for (key <- Seq("test1", "test2", "test3")) s3.putObject("alpakkalist", key, "foobar")
      val listed = Await.result(fixture.alpakka.listBucket("alpakkalist", None).runWith(Sink.seq), 5.second)
      listed should have size 3
      listed.map(_.key) shouldBe Seq("test1", "test2", "test3")
    }

    it should "list objects with prefix" in {
      s3.createBucket("alpakkalist2")
      s3.putObject("alpakkalist2", "test1", "foobar")
      s3.putObject("alpakkalist2", "test2", "foobar")
      s3.putObject("alpakkalist2", "xtest3", "foobar")
      // Only keys starting with "test" are expected back.
      val listed = Await.result(fixture.alpakka.listBucket("alpakkalist2", Some("test")).runWith(Sink.seq), 5.second)
      listed should have size 2
      listed.map(_.key) shouldBe Seq("test1", "test2")
    }
  }
}
37 |
--------------------------------------------------------------------------------
/src/test/scala/io/findify/s3mock/alpakka/MultipartUploadTest.scala:
--------------------------------------------------------------------------------
1 | package io.findify.s3mock.alpakka
2 |
3 | import akka.stream.scaladsl.Source
4 | import akka.util.ByteString
5 | import io.findify.s3mock.S3MockTest
6 |
7 | import scala.concurrent.Await
8 | import scala.concurrent.duration._
9 |
10 | /**
11 | * Created by shutty on 5/19/17.
12 | */
class MultipartUploadTest extends S3MockTest {

  override def behaviour(fixture: => Fixture) = {
    val s3 = fixture.client
    implicit val sys = fixture.system
    implicit val mat = fixture.mat

    it should "upload multipart files" in {
      s3.createBucket("alpakka1")

      // Stream a single-chunk body through alpakka's multipart upload sink.
      val upload = Source.single(ByteString("testcontent1"))
        .runWith(fixture.alpakka.multipartUpload("alpakka1", "test1"))
      val result = Await.result(upload, 5.seconds)

      result.bucket shouldBe "alpakka1"
      result.key shouldBe "test1"

      // Verify via the plain AWS SDK client that the object round-trips.
      getContent(s3.getObject("alpakka1", "test1")) shouldBe "testcontent1"
    }
  }
}
36 |
--------------------------------------------------------------------------------
/src/test/scala/io/findify/s3mock/awscli/AWSCliTest.scala:
--------------------------------------------------------------------------------
1 | package io.findify.s3mock.awscli
2 |
3 | import akka.actor.ActorSystem
4 | import akka.http.scaladsl.Http
5 | import akka.stream.ActorMaterializer
6 | import io.findify.s3mock.S3MockTest
7 |
8 | /**
9 | * Created by shutty on 8/28/16.
10 | */
// Shared base for tests that exercise s3mock through hand-crafted HTTP
// requests (mimicking what the AWS CLI sends): provides an actor system,
// a materializer and an akka-http client to subclasses.
trait AWSCliTest extends S3MockTest {
  implicit val system = ActorSystem.create("awscli")
  implicit val mat = ActorMaterializer()
  // HTTP client used by subclasses to issue raw requests against the mock.
  val http = Http(system)
}
16 |
--------------------------------------------------------------------------------
/src/test/scala/io/findify/s3mock/awscli/GetObjectTest.scala:
--------------------------------------------------------------------------------
1 | package io.findify.s3mock.awscli
2 |
3 | import akka.http.scaladsl.model.{HttpMethods, HttpRequest}
4 | import akka.stream.scaladsl.Sink
5 | import akka.util.ByteString
6 | import com.amazonaws.services.s3.model.AmazonS3Exception
7 | import java.time.format.DateTimeFormatter
8 | import java.time.temporal.TemporalAccessor
9 |
10 | import scala.concurrent.Await
11 | import scala.concurrent.duration._
12 |
13 | /**
14 | * Created by shutty on 8/28/16.
15 | */
class GetObjectTest extends AWSCliTest {
  override def behaviour(fixture: => Fixture) = {
    val s3 = fixture.client
    val port = fixture.port

    it should "receive LastModified header with AWS CLI" in {
      s3.createBucket("awscli-lm")
      s3.putObject("awscli-lm", "foo", "bar")
      val response = Await.result(http.singleRequest(HttpRequest(method = HttpMethods.GET, uri = s"http://127.0.0.1:$port/awscli-lm/foo")), 10.seconds)
      // The timestamp changes on every run, so only assert the header is
      // present and parses as an RFC 1123 date.
      val lastModified = response.headers.collectFirst {
        case header if header.is("last-modified") => DateTimeFormatter.RFC_1123_DATE_TIME.parse(header.value())
      }
      lastModified.get shouldBe a[TemporalAccessor]
      response.entity.contentLengthOption shouldBe Some(3)
    }

    it should "deal with HEAD requests with AWS CLI" in {
      s3.createBucket("awscli-head")
      s3.putObject("awscli-head", "foo2", "bar")
      val response = Await.result(http.singleRequest(HttpRequest(method = HttpMethods.HEAD, uri = s"http://127.0.0.1:$port/awscli-head/foo2")), 10.seconds)
      val lastModified = response.headers.collectFirst {
        case header if header.is("last-modified") => DateTimeFormatter.RFC_1123_DATE_TIME.parse(header.value())
      }
      lastModified.get shouldBe a[TemporalAccessor]
      // HEAD advertises the content length but must carry an empty body.
      response.entity.contentLengthOption shouldBe Some(3)
      val body = Await.result(response.entity.dataBytes.fold(ByteString(""))(_ ++ _).runWith(Sink.head), 10.seconds)
      body.utf8String shouldBe ""
    }

    it should "deal with metadata requests with AWS CLI" in {
      s3.createBucket("awscli-head2")
      s3.putObject("awscli-head2", "foo", "bar")
      s3.getObjectMetadata("awscli-head2", "foo").getContentLength shouldBe 3
    }

    it should "respond with status 404 if key does not exist with AWS CLI" in {
      s3.createBucket("awscli")
      val exc = intercept[AmazonS3Exception] {
        s3.getObject("awscli", "doesnotexist")
      }
      exc.getStatusCode shouldBe 404
      exc.getErrorCode shouldBe "NoSuchKey"
    }

    it should "respond with status 404 if bucket does not exist with AWS CLI" in {
      // No bucket is created here: the error code must distinguish a missing
      // bucket from a missing key.
      val exc = intercept[AmazonS3Exception] {
        s3.getObject("awscli-404", "doesnotexist")
      }
      exc.getStatusCode shouldBe 404
      exc.getErrorCode shouldBe "NoSuchBucket"
    }
  }
}
72 |
--------------------------------------------------------------------------------
/src/test/scala/io/findify/s3mock/awscli/PutBucketTest.scala:
--------------------------------------------------------------------------------
1 | package io.findify.s3mock.awscli
2 |
3 | import akka.http.scaladsl.model.{HttpMethods, HttpRequest}
4 | import scala.concurrent.Await
5 | import scala.concurrent.duration._
6 | import scala.jdk.CollectionConverters._
7 |
8 | /**
9 | * Created by shutty on 8/28/16.
10 | */
class PutBucketTest extends AWSCliTest {
  override def behaviour(fixture: => Fixture) = {
    val s3 = fixture.client
    val port = fixture.port
    it should "create bucket with AWS CLI" in {
      // A raw PUT on the bucket root is how the CLI creates a bucket.
      val response = Await.result(http.singleRequest(HttpRequest(method = HttpMethods.PUT, uri = s"http://127.0.0.1:$port/awscli")), 10.seconds)
      // Fix: `response` was previously computed but never checked (unused local);
      // assert the PUT itself succeeded before inspecting the bucket list.
      response.status.intValue() shouldBe 200
      s3.listBuckets().asScala.exists(_.getName == "awscli") shouldBe true
    }
  }
}
21 |
--------------------------------------------------------------------------------
/src/test/scala/io/findify/s3mock/transfermanager/PutGetTest.scala:
--------------------------------------------------------------------------------
1 | package io.findify.s3mock.transfermanager
2 |
3 | import java.io.{ByteArrayInputStream, File, FileInputStream}
4 |
5 | import com.amazonaws.services.s3.model.ObjectMetadata
6 | import io.findify.s3mock.S3MockTest
7 |
8 | import scala.io.Source
9 |
10 | /**
11 | * Created by shutty on 11/23/16.
12 | */
class PutGetTest extends S3MockTest {
  override def behaviour(fixture: => Fixture) = {
    val s3 = fixture.client
    val tm = fixture.tm

    it should "put files with TransferManager" in {
      s3.createBucket("tm1")
      val upload = tm.upload("tm1", "hello1", new ByteArrayInputStream("hello".getBytes), new ObjectMetadata())
      val result = upload.waitForUploadResult()
      result.getKey shouldBe "hello1"
    }

    it should "download files with TransferManager" in {
      // NOTE(review): relies on the upload test above having stored tm1/hello1 first.
      val file = File.createTempFile("hello1", ".s3mock")
      try {
        val download = tm.download("tm1", "hello1", file)
        download.waitForCompletion()
        val source = Source.fromInputStream(new FileInputStream(file), "UTF-8")
        // Fix: close the underlying stream even when the assertion throws
        // (was leaked before), and remove the temp file afterwards.
        val result = try source.mkString finally source.close()
        result shouldBe "hello"
      } finally {
        file.delete()
      }
    }

    it should "copy file with TransferManager" in {
      val copy = tm.copy("tm1", "hello1", "tm1", "hello2")
      val result = copy.waitForCopyResult()
      result.getDestinationKey shouldBe "hello2"
      val hello2 = s3.getObject("tm1", "hello2")
      getContent(hello2) shouldBe "hello"
    }
  }
}
42 |
--------------------------------------------------------------------------------