├── .github └── workflows │ └── maven.yml ├── .gitignore ├── LICENSE.txt ├── README.md ├── maybe-release.sh ├── pom.xml ├── pubring.gpg ├── redis-conf.json ├── redis ├── analyze-pathsize.lua ├── redis-rest-storage-pathsize.rb └── redis-rest-storage-performance.rb ├── secring.gpg ├── src ├── main │ ├── java │ │ └── org │ │ │ └── swisspush │ │ │ └── reststorage │ │ │ ├── CollectionResource.java │ │ │ ├── DocumentResource.java │ │ │ ├── EventBusAdapter.java │ │ │ ├── FilePutter.java │ │ │ ├── FileSystemDirLister.java │ │ │ ├── FileSystemRestStorageRunner.java │ │ │ ├── FileSystemStorage.java │ │ │ ├── LoggingFileReadStream.java │ │ │ ├── MimeTypeResolver.java │ │ │ ├── ModuleConfigurationAuthentication.java │ │ │ ├── RedisRestStorageRunner.java │ │ │ ├── Resource.java │ │ │ ├── RestStorageHandler.java │ │ │ ├── RestStorageMod.java │ │ │ ├── Storage.java │ │ │ ├── UrlParser.java │ │ │ ├── exception │ │ │ ├── RestStorageExceptionFactory.java │ │ │ ├── RestStorageNoStackReplyException.java │ │ │ ├── RestStorageNoStacktraceException.java │ │ │ ├── RestStorageThriftyExceptionFactory.java │ │ │ └── RestStorageWastefulExceptionFactory.java │ │ │ ├── lock │ │ │ ├── Lock.java │ │ │ ├── impl │ │ │ │ └── RedisBasedLock.java │ │ │ └── lua │ │ │ │ ├── LockLuaScripts.java │ │ │ │ ├── LuaScript.java │ │ │ │ ├── LuaScriptState.java │ │ │ │ ├── RedisCommand.java │ │ │ │ ├── RedisCommandDoNothing.java │ │ │ │ └── ReleaseLockRedisCommand.java │ │ │ ├── redis │ │ │ ├── DefaultRedisProvider.java │ │ │ ├── DefaultRedisReadyProvider.java │ │ │ ├── EventBusRedisMetricsPublisher.java │ │ │ ├── RedisMetricsPublisher.java │ │ │ ├── RedisMonitor.java │ │ │ ├── RedisProvider.java │ │ │ ├── RedisReadyProvider.java │ │ │ ├── RedisStorage.java │ │ │ └── RedisUtils.java │ │ │ ├── s3 │ │ │ ├── FileReadStream.java │ │ │ ├── FileWriteStream.java │ │ │ ├── S3FileSystemDirLister.java │ │ │ └── S3FileSystemStorage.java │ │ │ └── util │ │ │ ├── FailedAsyncResult.java │ │ │ ├── GZIPUtil.java │ │ │ ├── HttpRequestHeader.java │ │ │ ├── HttpRequestParam.java │ │ │ ├── LockMode.java │ │ │ ├── ModuleConfiguration.java │ │ │ ├── ResourceNameUtil.java │ │ │ ├── ResourcesUtils.java │ │ │ ├── Result.java │ │ │ └── StatusCode.java │ └── resources │ │ ├── cleanup.lua │ │ ├── del.lua │ │ ├── get.lua │ │ ├── lock_release.lua │ │ ├── mime-types.properties │ │ ├── mod.json │ │ ├── put.lua │ │ └── storageExpand.lua └── test │ ├── java │ └── org │ │ └── swisspush │ │ └── reststorage │ │ ├── CleanupIntegrationTest.java │ │ ├── ConfigurableTestCase.java │ │ ├── CrudIntegrationTest.java │ │ ├── EtagIntegrationTest.java │ │ ├── ExpirationIntegrationTest.java │ │ ├── FilesystemStorageIntegrationTest.java │ │ ├── FilesystemStorageTest.java │ │ ├── FilesystemStorageTestCase.java │ │ ├── JedisFactory.java │ │ ├── LockIntegrationTest.java │ │ ├── MimeTypeResolverTest.java │ │ ├── OffsetIntegrationTest.java │ │ ├── PathLevelIntegrationTest.java │ │ ├── RedirectIntegrationTest.java │ │ ├── RestStorageHandlerTest.java │ │ ├── Return200onDeleteNonExistingTest.java │ │ ├── Return404onDeleteNonExistingTest.java │ │ ├── StorageExpandIntegrationTest.java │ │ ├── lock │ │ └── impl │ │ │ └── RedisBasedLockTest.java │ │ ├── lua │ │ ├── AbstractLuaScriptTest.java │ │ ├── RedisCleanupLuaScriptTests.java │ │ ├── RedisDelLuaScriptTests.java │ │ ├── RedisGetLuaScriptTests.java │ │ ├── RedisPutLuaScriptTests.java │ │ ├── RedisStorageExpandLuaScriptTests.java │ │ └── ReleaseLockLuaScriptTests.java │ │ ├── mocks │ │ ├── DelegatingVertxFileSystem.java │ │ ├── 
FailFastRestStorage.java │ │ ├── FailFastVertx.java │ │ ├── FailFastVertxAsyncFile.java │ │ ├── FailFastVertxFileSystem.java │ │ ├── FailFastVertxHttpServerRequest.java │ │ ├── FailFastVertxHttpServerResponse.java │ │ ├── FailFastVertxWebRoutingContext.java │ │ └── FailFastVertxWriteStream.java │ │ ├── redis │ │ ├── DefaultRedisReadyProviderTest.java │ │ ├── RedisMonitorTest.java │ │ ├── RedisStorageIntegrationTestCase.java │ │ ├── RedisStorageTest.java │ │ └── ResourceCompressionIntegrationTest.java │ │ └── util │ │ ├── GZIPUtilTest.java │ │ ├── HttpRequestHeaderTest.java │ │ ├── HttpRequestParamTest.java │ │ ├── ModuleConfigurationTest.java │ │ └── ResourceNameUtilTest.java │ └── resources │ ├── logging.properties │ ├── redis_info_output │ ├── redis_info_persistance_loading_0 │ ├── redis_info_persistance_loading_1 │ └── testResource.gz └── staging.groovy /.github/workflows/maven.yml: -------------------------------------------------------------------------------- 1 | # This workflow will build a Java project with Maven, and cache/restore any dependencies to improve the workflow execution time 2 | # For more information see: https://help.github.com/actions/language-and-framework-guides/building-and-testing-java-with-maven 3 | 4 | name: Java CI with Maven 5 | 6 | on: 7 | workflow_dispatch: 8 | push: 9 | pull_request: 10 | branches: [ "master", "develop" ] 11 | 12 | jobs: 13 | build_maven: 14 | runs-on: ubuntu-latest 15 | 16 | steps: 17 | - uses: actions/checkout@v3 18 | - name: Set up JDK 11 19 | uses: actions/setup-java@v3 20 | with: 21 | distribution: 'temurin' 22 | java-version: '11' 23 | server-id: sonatype-nexus-staging # Value of the distributionManagement/repository/id field of the pom.xml 24 | server-username: CI_DEPLOY_USERNAME # env variable for username in deploy 25 | server-password: CI_DEPLOY_PASSWORD # env variable for token in deploy 26 | gpg-private-key: ${{ secrets.CI_GPG_PRIVATE_KEY }} # Value of the GPG private key to import 27 | gpg-passphrase: CI_GPG_PASSPHRASE # env variable for GPG private key passphrase 28 | 29 | - name: Start Redis 30 | uses: supercharge/redis-github-action@1.4.0 31 | with: 32 | redis-version: 4 33 | 34 | - name: Install, unit test, integration test 35 | run: mvn install -Dmaven.javadoc.skip=true -B -V 36 | 37 | - name: Upload coverage reports to Codecov 38 | uses: codecov/codecov-action@v3 39 | 40 | - name: Release to maven central 41 | if: github.ref_name == 'master' && github.event_name != 'pull_request' && github.repository == 'swisspost/vertx-rest-storage' 42 | run: | 43 | curl -s https://get.sdkman.io | bash 44 | source "$HOME/.sdkman/bin/sdkman-init.sh" 45 | sdk install groovy 3.0.8 46 | 47 | chmod +x ./maybe-release.sh 48 | ./maybe-release.sh 49 | env: 50 | CI_DEPLOY_USERNAME: ${{ secrets.CI_DEPLOY_USERNAME }} 51 | CI_DEPLOY_PASSWORD: ${{ secrets.CI_DEPLOY_PASSWORD }} 52 | CI_GPG_PASSPHRASE: ${{ secrets.CI_GPG_PASSPHRASE }} 53 | 54 | - name: After release 55 | run: bash <(curl -s https://codecov.io/bash) 56 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | target/ 2 | classes/ 3 | build/ 4 | mods/ 5 | .project 6 | .classpath 7 | /.gradle 8 | /dogs 9 | .settings 10 | /bin 11 | /vertx_classpath.txt 12 | 13 | # Intellij 14 | .idea/ 15 | *.iml 16 | *.iws 17 | .vertx/ 18 | src/main/.gradle/ 19 | src/test/.gradle/ 20 | -------------------------------------------------------------------------------- /LICENSE.txt: 
-------------------------------------------------------------------------------- 1 | This work is licensed under the Apache License, Version 2.0 (the "License"); 2 | you may not use this file except in compliance with the License. 3 | 4 | You may obtain a copy of the License at 5 | 6 | http://www.apache.org/licenses/LICENSE-2.0 7 | -------------------------------------------------------------------------------- /maybe-release.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -ev 3 | git fetch 4 | git reset --hard 5 | groovy staging.groovy drop 6 | rc=$? 7 | if [ $rc -ne 0 ] 8 | then 9 | echo 'problem when trying to drop, ignored' 10 | fi 11 | echo 'starting a new nexus repository ...' 12 | OUTPUT=$(groovy staging.groovy start) 13 | echo "repository Id: $OUTPUT" 14 | mvn -B -Prelease jgitflow:release-start jgitflow:release-finish -DrepositoryId=${OUTPUT} 15 | rc=$? 16 | if [ $rc -eq 0 ] 17 | then 18 | groovy staging.groovy close ${OUTPUT} 19 | groovy staging.groovy promote ${OUTPUT} 20 | rc=$? 21 | if [ $rc -ne 0 ] 22 | then 23 | echo 'Release failed, cannot promote stage' 24 | exit $rc 25 | fi 26 | echo 'Release done, will push' 27 | git tag 28 | git push --tags 29 | git checkout develop 30 | git push origin develop 31 | exit 0 32 | fi 33 | echo 'Release failed' 34 | exit $rc 35 | -------------------------------------------------------------------------------- /pubring.gpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/swisspost/vertx-rest-storage/dc7248148201fc33a033be7a304af546de88f3d1/pubring.gpg -------------------------------------------------------------------------------- /redis-conf.json: -------------------------------------------------------------------------------- 1 | { 2 | "storage": "redis", 3 | "redisConfig" : { 4 | "address": "redis-client" 5 | } 6 | } 7 | -------------------------------------------------------------------------------- /redis/analyze-pathsize.lua: -------------------------------------------------------------------------------- 1 | local sep = ":"; 2 | local toAnalyze = KEYS[1] 3 | local resourcesPrefix = ARGV[1] 4 | local collectionsPrefix = ARGV[2] 5 | 6 | local resourceCount = 0 7 | local resourcesSize = 0 8 | 9 | local function analyzeChildrenAndItself(path) 10 | if redis.call('exists',resourcesPrefix..path) == 1 then 11 | local res_size = redis.call('hget', resourcesPrefix..path, 'resource') 12 | resourceCount = resourceCount + 1 13 | resourcesSize = resourcesSize + string.len(res_size) 14 | elseif redis.call('exists',collectionsPrefix..path) == 1 then 15 | local members = redis.call('zrangebyscore',collectionsPrefix..path,'-inf','+inf') 16 | for key,value in pairs(members) do 17 | local pathToAnalyze = path..":"..value 18 | analyzeChildrenAndItself(pathToAnalyze) 19 | end 20 | else 21 | redis.log(redis.LOG_WARNING, "can't analyze resource from type: "..path) 22 | end 23 | end 24 | 25 | local function round(num, dp) 26 | return string.format("%."..(dp or 0).."f", num) 27 | end 28 | 29 | local function toHumanReadable(size) 30 | if size >= 1048576 then 31 | local mbs = round(size / 1048576, 2) 32 | return mbs.." MB" 33 | elseif size >= 1024 then 34 | local kbs = round(size / 1024, 2) 35 | return kbs.." KB" 36 | else 37 | return size.." Bytes" 38 | end 39 | end 40 | 41 | if "/" == toAnalyze then 42 | toAnalyze = "" 43 | end 44 | 45 | analyzeChildrenAndItself(toAnalyze) 46 | 47 | return "Found "..resourceCount.." 
resources with total size of "..toHumanReadable(resourcesSize) -------------------------------------------------------------------------------- /redis/redis-rest-storage-pathsize.rb: -------------------------------------------------------------------------------- 1 | #!/usr/bin/ruby 2 | 3 | ############################################################################################ 4 | # Evalutes the memory usage of the subtree provided as path 5 | # 6 | # Attention: Be aware that LUA scripts to block all other redis operations. Do not use this 7 | # script against a running (production) database! 8 | # 9 | # usage: ruby redis-rest-storage-pathsize.rb -t /rest/resources 10 | # t is the path to evaluate 11 | ############################################################################################ 12 | require 'optparse' 13 | 14 | options = {:target => nil, :host => 'localhost', :port => 6379, :resources => 'rest-storage:resources', :collections => 'rest-storage:collections'} 15 | 16 | parser = OptionParser.new do|opts| 17 | opts.banner = "Usage: analyze_redis.rb [options]" 18 | opts.separator "" 19 | opts.on('-t', '--target TARGET_PATH', 'The target path to analzye') do |path| 20 | options[:target] = path; 21 | end 22 | 23 | opts.on('-s', '--redis-host REDIS-HOST', 'The redis server host. Default is localhost') do |host| 24 | options[:host] = host; 25 | end 26 | 27 | opts.on('-p', '--redis-port REDIS-PORT', 'The redis server port. Default is 6379') do |port| 28 | options[:port] = port; 29 | end 30 | 31 | opts.on('-r', '--resources-prefix RESSOURCES-PREFIX', 'The key prefix for resources. Default is rest-storage:resources') do |resources| 32 | options[:resources] = resources; 33 | end 34 | 35 | opts.on('-c', '--collections-prefix COLLECTIONS-PREFIX', 'The key prefix for collections. Default is rest-storage:collections') do |collections| 36 | options[:collections] = collections; 37 | end 38 | 39 | opts.on('-h', '--help', 'Displays Help') do 40 | puts opts 41 | exit 42 | end 43 | end 44 | 45 | parser.parse! 46 | 47 | if options[:target] == nil 48 | print 'Enter target path: ' 49 | options[:target] = gets.chomp 50 | end 51 | 52 | def buildTargetKey(path) 53 | 54 | if "/".eql? path 55 | return path 56 | end 57 | 58 | if !path.start_with?("/") 59 | path = "/".concat(path) 60 | end 61 | 62 | if path.end_with?("/") 63 | path = path[0...-1] 64 | end 65 | 66 | return path.gsub("/", ":") 67 | end 68 | 69 | targetKey = buildTargetKey(options[:target]) 70 | start = Time.now 71 | 72 | puts '###################################################################' 73 | puts '' 74 | puts 'Starting analysis:' 75 | puts '------------------' 76 | puts 'target path: ' + options[:target] 77 | puts 'redis host: ' + options[:host] 78 | puts 'redis port: ' + String(Integer(options[:port])) 79 | puts 'redis resource key: ' + options[:resources] + targetKey 80 | puts '' 81 | puts 'be patient this could take a while...' 
82 | puts '' 83 | puts `redis-cli -h #{options[:host]} -p #{options[:port]} EVAL "$(cat analyze-pathsize.lua)" 1 #{targetKey} #{options[:resources]} #{options[:collections]}` 84 | puts '' 85 | puts 'script execution time: ' + String(Float(Time.now - start)) + ' seconds' 86 | puts '' 87 | puts '###################################################################' -------------------------------------------------------------------------------- /redis/redis-rest-storage-performance.rb: -------------------------------------------------------------------------------- 1 | #!/usr/bin/ruby 2 | 3 | ############################################################################################ 4 | # Evalutes the time spent in redis for a rest-storage call 5 | # The execution time is collected over the redis slowlog command 6 | # http://redis.io/commands/slowlog 7 | # You have to consider, that the slowlog only has 128 entries max 8 | # 9 | # usage: ruby redis-rest-storage-performance.rb -u http://host:port/rest/resource -s 500 10 | # u is the url to call / evaluate 11 | # s is the number of calls / samples to execute 12 | ############################################################################################ 13 | require 'optparse' 14 | 15 | ############################################################################################ 16 | # Parse the redis slowlog and create an object of every entry 17 | # Because we have no separator, we take the unique progressive identifier as identifier 18 | # for a new entry 19 | # 20 | # slowlog: the slowlog of the actual run 21 | # redis_calls: the redis_calls the executed runs 22 | ############################################################################################ 23 | def parse_slowlog(slowlog, redis_calls) 24 | first_id = slowlog.length-5 25 | start_id = slowlog[first_id].to_i 26 | 27 | i = slowlog.length-1 28 | next_id = start_id 29 | redis_call_str = Array.new 30 | redis_calls_str = Array.new 31 | while i >=0 do 32 | next_line = slowlog[i].strip 33 | redis_call_str << next_line 34 | if next_line == next_id.to_s 35 | next_id +=1 36 | redis_calls_str << redis_call_str 37 | redis_call_str = Array.new 38 | end 39 | i -= 1 40 | end 41 | 42 | redis_calls_str.each do |redis_call_str| 43 | 44 | time = redis_call_str[redis_call_str.length-3].to_i 45 | command = redis_call_str[redis_call_str.length-4] 46 | 47 | key = "" 48 | if(command == "evalsha") 49 | key = redis_call_str[redis_call_str.length-7] 50 | else 51 | key = redis_call_str[redis_call_str.length-5] 52 | end 53 | 54 | if(!key.start_with?("rest-storage") && !key.start_with?(":nemo")) 55 | next 56 | end 57 | 58 | command_key = command+"_"+key 59 | 60 | if(redis_calls.has_key?(command_key)) 61 | redis_calls[command_key][:time] << time 62 | else 63 | redis_call = {:key => key, :time => [time], :command => command} 64 | redis_calls[command_key] = redis_call 65 | end 66 | 67 | end 68 | 69 | return redis_calls 70 | end 71 | 72 | ############################################################################################ 73 | # Sets the limit to enter the slowlog for a command to 1 microsecond. 
74 | # Resets the slowlog 75 | # Executes the rest-storage call 76 | # Gets the slowlog 77 | ############################################################################################ 78 | def execute_rest_storage_and_read_slowlog(url) 79 | `redis-cli config set slowlog-log-slower-than 1` 80 | `redis-cli slowlog reset` 81 | `curl -s #{url} > /dev/null` 82 | slowlog = `redis-cli slowlog get 128` 83 | lines = slowlog.split(/\n+/) 84 | return lines 85 | end 86 | 87 | ############################################################################################ 88 | # Parse the options 89 | ############################################################################################ 90 | options = {} 91 | optparse = OptionParser.new do |opts| 92 | opts.banner = "test the performance of a rest-storage call" 93 | opts.on("-u", "--url URL", "the url to call") do |url| 94 | options[:url] = url 95 | end 96 | opts.on("-s", "--samples SAMPLES", "the number of samples to take") do |samples| 97 | options[:samples] = samples 98 | end 99 | end 100 | optparse.parse! 101 | 102 | samples = options[:samples].to_i 103 | url = options[:url] 104 | 105 | ############################################################################################ 106 | # Execute the calls 107 | ############################################################################################ 108 | redis_calls = Hash.new 109 | samples.times do |i| 110 | lines = execute_rest_storage_and_read_slowlog(url) 111 | redis_calls = parse_slowlog(lines, redis_calls) 112 | end 113 | 114 | ############################################################################################ 115 | # Collect the data of every call and calculate the average 116 | ############################################################################################ 117 | result = Hash.new 118 | redis_calls.each do |key,redis_call| 119 | avg_time = redis_call[:time].inject{|sum,x| sum + x } / samples 120 | result[key]=avg_time 121 | end 122 | result.sort_by {|key,time| time}.each do |key,value| 123 | puts "time: " + value.to_s + " key: " + key 124 | end 125 | 126 | ############################################################################################ 127 | # Calculate the time spent in redis calls and the time spent in lua 128 | ############################################################################################ 129 | evalshaPlusRedis = result.clone.delete_if{|key,value| !key.match(/^evalsha.*/)}.values.inject{|sum,a| sum + a} 130 | onlyRedis = result.clone.delete_if{|key,value| key.match(/^evalsha.*/)}.values.inject{|sum,a| sum + a} 131 | puts "time in redis calls: " + onlyRedis.to_s 132 | puts "time in lua: " + (evalshaPlusRedis.to_i-onlyRedis.to_i).to_s 133 | -------------------------------------------------------------------------------- /secring.gpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/swisspost/vertx-rest-storage/dc7248148201fc33a033be7a304af546de88f3d1/secring.gpg -------------------------------------------------------------------------------- /src/main/java/org/swisspush/reststorage/CollectionResource.java: -------------------------------------------------------------------------------- 1 | package org.swisspush.reststorage; 2 | import java.util.List; 3 | 4 | 5 | 6 | public class CollectionResource extends Resource { 7 | public List items; 8 | } 9 | -------------------------------------------------------------------------------- 
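The resource model above is deliberately small: CollectionResource only adds a list of child items, while DocumentResource (next file) adds length, etag and the read/write streams. The following is an illustrative sketch only, not a file from this repository; the wrapper class ResourceHandlingExample and its method are hypothetical, while the Resource, CollectionResource and DocumentResource types and the fields used here (exists, items, readStream, length) come from the classes in this dump. It shows how a consumer of Storage.get(...) might branch on the resource subtype it receives.

import io.vertx.core.Handler;
import org.swisspush.reststorage.CollectionResource;
import org.swisspush.reststorage.DocumentResource;
import org.swisspush.reststorage.Resource;

class ResourceHandlingExample {
    // Builds a handler like the one passed to Storage.get(path, etag, offset, count, handler).
    Handler<Resource> resourceHandler() {
        return resource -> {
            if (!resource.exists) {
                // Not found; the storage may additionally set the error/invalid flags and messages.
            } else if (resource instanceof CollectionResource) {
                CollectionResource collection = (CollectionResource) resource;
                // A collection (directory): its children are listed in collection.items.
            } else if (resource instanceof DocumentResource) {
                DocumentResource document = (DocumentResource) resource;
                // A document: stream document.readStream (document.length bytes) to the client.
            }
        };
    }
}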
/src/main/java/org/swisspush/reststorage/DocumentResource.java: -------------------------------------------------------------------------------- 1 | package org.swisspush.reststorage; 2 | 3 | import io.vertx.core.Handler; 4 | import io.vertx.core.buffer.Buffer; 5 | import io.vertx.core.streams.ReadStream; 6 | import io.vertx.core.streams.WriteStream; 7 | 8 | public class DocumentResource extends Resource { 9 | public long length; 10 | public String etag; 11 | public ReadStream readStream; 12 | public WriteStream writeStream; 13 | public Handler closeHandler; // Called by client to close the storage 14 | public Handler endHandler; // Called by storage to notify 15 | } 16 | -------------------------------------------------------------------------------- /src/main/java/org/swisspush/reststorage/FileSystemDirLister.java: -------------------------------------------------------------------------------- 1 | package org.swisspush.reststorage; 2 | 3 | import io.vertx.core.Handler; 4 | import io.vertx.core.Promise; 5 | import io.vertx.core.Vertx; 6 | import org.slf4j.Logger; 7 | import org.slf4j.LoggerFactory; 8 | 9 | import java.io.File; 10 | import java.io.IOException; 11 | import java.nio.file.Files; 12 | import java.nio.file.Path; 13 | import java.nio.file.Paths; 14 | import java.util.ArrayList; 15 | import java.util.Collections; 16 | import java.util.stream.Stream; 17 | 18 | 19 | /** 20 | * This type handles listing of directories in filesystem. 21 | * 22 | *
Internally it makes use of worker-threads to keep eventloop-thread 23 | * responsive.
24 | */ 25 | public class FileSystemDirLister { 26 | 27 | private static final Logger log = LoggerFactory.getLogger(FileSystemDirLister.class); 28 | private final Vertx vertx; 29 | private final String root; 30 | 31 | public FileSystemDirLister(Vertx vertx, String root) { 32 | this.vertx = vertx; 33 | this.root = root; 34 | } 35 | 36 | public void handleListingRequest(String path, final int offset, final int count, final Handler handler) { 37 | // Delegate work to worker thread from thread pool. 38 | log.trace("Delegate to worker pool"); 39 | final long startTimeMillis = System.currentTimeMillis(); 40 | vertx.executeBlocking(promise -> { 41 | log.trace("Welcome on worker-thread."); 42 | listDirBlocking(path, offset, count, (Promise) (Promise) promise); 43 | log.trace("worker-thread says bye."); 44 | }, event -> { 45 | log.trace("Welcome back on eventloop-thread."); 46 | if (log.isDebugEnabled()) { 47 | final long durationMillis = System.currentTimeMillis() - startTimeMillis; 48 | log.debug("List directory contents of '{}' took {}ms", path, durationMillis); 49 | } 50 | if (event.failed()) { 51 | log.error("Directory listing failed.", event.cause()); 52 | final Resource erroneousResource = new Resource() {{ 53 | // Set fields according to documentation in Resource class. 54 | name = Paths.get(path).getFileName().toString(); 55 | exists = false; 56 | error = rejected = invalid = true; 57 | errorMessage = invalidMessage = event.cause().getMessage(); 58 | }}; 59 | handler.handle(erroneousResource); 60 | } else { 61 | handler.handle((Resource) event.result()); 62 | } 63 | }); 64 | log.trace("Work delegated."); 65 | } 66 | 67 | private void listDirBlocking(String path, int offset, int count, Promise promise) { 68 | // 69 | // HINT: This method gets executed on a worker thread! 70 | // 71 | // Convert String to Path 72 | final Path searchPath = Paths.get(canonicalizeVirtualPath(path)); 73 | // Prepare our result. 74 | final CollectionResource collection = new CollectionResource() {{ 75 | items = new ArrayList<>(128); 76 | }}; 77 | final String fullPath = canonicalizeVirtualPath(path); 78 | try (Stream source = Files.list(searchPath)) { 79 | source.forEach(entry -> { 80 | final String entryName = entry.getFileName().toString(); 81 | log.trace("Processing entry '{}'", entryName); 82 | if (".tmp".equals(entryName) && fullPath.length() == root.length()) { 83 | // Ignore hidden '/.tmp/' directory. 84 | return; 85 | } 86 | // Create resource representing currently processed directory entry. 87 | final Resource resource; 88 | if (Files.isDirectory(entry)) { 89 | resource = new CollectionResource(); 90 | } else if (Files.isRegularFile(entry)) { 91 | resource = new DocumentResource(); 92 | } else { 93 | resource = new Resource(); 94 | resource.exists = false; 95 | } 96 | resource.name = entryName; 97 | collection.items.add(resource); 98 | }); 99 | } catch (IOException e) { 100 | promise.fail(e); 101 | return; 102 | } 103 | Collections.sort(collection.items); 104 | // Don't know exactly what we do here now. Seems we check 'limit' for a range request. 105 | int n = count; 106 | if (n == -1) { 107 | n = collection.items.size(); 108 | } 109 | // Don't know exactly what we do here. But it seems we evaluate 'start' of a range request. 
110 | if (offset > -1) { 111 | if (offset >= collection.items.size() || (offset + n) >= collection.items.size() || (offset == 0 && n == -1)) { 112 | promise.complete(collection); 113 | } else { 114 | collection.items = collection.items.subList(offset, offset + n); 115 | promise.complete(collection); 116 | } 117 | } else { 118 | // TODO: Resolve future 119 | // Previous implementation did nothing here. Why? Should we do something here? 120 | // See: "https://github.com/hiddenalpha/vertx-rest-storage/blob/v2.5.2/src/main/java/org/swisspush/reststorage/FileSystemStorage.java#L77" 121 | log.warn("May we should do something here. I've no idea why old implementation did nothing."); 122 | } 123 | } 124 | 125 | private String canonicalizeVirtualPath(String path) { 126 | return canonicalizeRealPath(root + path); 127 | } 128 | 129 | private static String canonicalizeRealPath(String path) { 130 | try { 131 | return new File(path).getCanonicalPath(); 132 | } catch (IOException e) { 133 | throw new RuntimeException(e); 134 | } 135 | } 136 | 137 | } 138 | -------------------------------------------------------------------------------- /src/main/java/org/swisspush/reststorage/FileSystemRestStorageRunner.java: -------------------------------------------------------------------------------- 1 | package org.swisspush.reststorage; 2 | 3 | import io.vertx.core.AsyncResult; 4 | import io.vertx.core.Vertx; 5 | import org.slf4j.Logger; 6 | 7 | import static org.slf4j.LoggerFactory.getLogger; 8 | 9 | /** 10 | * Created by florian kammermann on 23.05.2016. 11 | * 12 | * Deploys the rest-storage to vert.x. 13 | * Used in the standalone scenario. 14 | */ 15 | public class FileSystemRestStorageRunner { 16 | 17 | public static void main(String[] args) { 18 | Vertx.vertx().deployVerticle(new RestStorageMod(), FileSystemRestStorageRunner::onDeployDone); 19 | } 20 | 21 | private static void onDeployDone(AsyncResult ev) { 22 | Logger log = getLogger(FileSystemRestStorageRunner.class); 23 | if( ev.failed() ){ 24 | log.error("deployVerticle(new RestStorageMod())", ev.cause()); 25 | return; 26 | } 27 | log.info("rest-storage started"); 28 | } 29 | 30 | } 31 | -------------------------------------------------------------------------------- /src/main/java/org/swisspush/reststorage/LoggingFileReadStream.java: -------------------------------------------------------------------------------- 1 | package org.swisspush.reststorage; 2 | 3 | import io.vertx.core.Handler; 4 | import io.vertx.core.buffer.Buffer; 5 | import io.vertx.core.file.AsyncFile; 6 | import io.vertx.core.file.impl.AsyncFileImpl; 7 | import io.vertx.core.streams.ReadStream; 8 | import org.slf4j.Logger; 9 | 10 | import static org.slf4j.LoggerFactory.getLogger; 11 | 12 | 13 | /** 14 | * Decorator with the purpose to log what's going on in a reading stream of an 15 | * {@link AsyncFile}. 16 | */ 17 | public class LoggingFileReadStream implements ReadStream { 18 | 19 | private static final Logger log = getLogger(LoggingFileReadStream.class); 20 | private final long expectedSize; 21 | private final String path; 22 | private final AsyncFile delegate; 23 | private long writtenBytes = 0; 24 | 25 | /** 26 | * @param expectedSize Actual file size which is expected to be streamed through that stream 27 | * in bytes. 28 | * @param path Token printed alongside the logs so when reading logs, we can see which 29 | * log belongs to which file. 
A possible candidate is to use the file path 30 | * but it theoretically can be anything which helps you to find logs 31 | * related to your observed file. 32 | * @param delegate The file (or stream) we wanna observe. 33 | */ 34 | LoggingFileReadStream(long expectedSize, String path, AsyncFile delegate) { 35 | this.expectedSize = expectedSize; 36 | this.path = path; 37 | this.delegate = delegate; 38 | } 39 | 40 | @Override 41 | public ReadStream exceptionHandler(Handler handler) { 42 | log.trace("exceptionHandler registered for reading '{}'", path); 43 | delegate.exceptionHandler( ex -> { 44 | log.debug("Got an exception at offset {} ({} bytes remaining) for '{}'", 45 | writtenBytes, expectedSize - writtenBytes, path, ex); 46 | handler.handle(ex); 47 | }); 48 | return this; 49 | } 50 | 51 | @Override 52 | public ReadStream handler(Handler handler) { 53 | log.trace("handler registered"); 54 | delegate.handler(buf -> { 55 | if (weShouldLogThatChunk(buf)) { 56 | log.debug("Read {} bytes at offset {} of total {} from '{}'", 57 | buf.length(), writtenBytes, expectedSize, path); 58 | } 59 | writtenBytes += buf.length(); 60 | handler.handle(buf); 61 | }); 62 | return this; 63 | } 64 | 65 | @Override 66 | public ReadStream pause() { 67 | log.debug("Pause reading at offset {} for '{}'", writtenBytes, path); 68 | delegate.pause(); 69 | return this; 70 | } 71 | 72 | @Override 73 | public ReadStream resume() { 74 | log.debug("Resume reading at offset {} for '{}'", writtenBytes, path); 75 | delegate.resume(); 76 | return this; 77 | } 78 | 79 | @Override 80 | public ReadStream fetch(long amount) { 81 | log.debug("fetch amount {}", amount); 82 | return delegate.fetch(amount); 83 | } 84 | 85 | @Override 86 | public ReadStream endHandler(Handler endHandler) { 87 | log.trace("endHandler registered."); 88 | delegate.endHandler(aVoid -> { 89 | log.debug("End handler called ({} bytes remaining) for '{}'", expectedSize - writtenBytes, path); 90 | endHandler.handle(aVoid); 91 | }); 92 | return this; 93 | } 94 | 95 | /** 96 | * Determines if it is worth writing some details to the logs for that chunk. 97 | */ 98 | private boolean weShouldLogThatChunk(Buffer buf) { 99 | 100 | if (log.isTraceEnabled()) { 101 | // Simply log everything. 102 | return true; 103 | } 104 | 105 | // Because trace is disabled, we only log near begin or end of stream to not 106 | // flood the log too much. Especially for large files which would produce 107 | // hundreds of lines of output. 108 | 109 | if (writtenBytes <= AsyncFileImpl.DEFAULT_READ_BUFFER_SIZE) { 110 | // We'll log near the beginning. 111 | return true; 112 | } 113 | 114 | if ((expectedSize - writtenBytes - buf.length()) < AsyncFileImpl.DEFAULT_READ_BUFFER_SIZE) { 115 | // We'll log near the end. 116 | return true; 117 | } 118 | 119 | // Neither verbosity is configured, nor are we at a interesting position in the stream. 
120 | return false; 121 | } 122 | } 123 | -------------------------------------------------------------------------------- /src/main/java/org/swisspush/reststorage/MimeTypeResolver.java: -------------------------------------------------------------------------------- 1 | package org.swisspush.reststorage; 2 | 3 | import org.slf4j.Logger; 4 | 5 | import java.io.IOException; 6 | import java.io.InputStream; 7 | import java.util.HashMap; 8 | import java.util.Map; 9 | import java.util.Properties; 10 | 11 | import static org.slf4j.LoggerFactory.getLogger; 12 | 13 | public class MimeTypeResolver { 14 | 15 | private static final Logger log = getLogger(MimeTypeResolver.class); 16 | private final Map mimeTypes = new HashMap<>(); 17 | 18 | private final String defaultMimeType; 19 | 20 | public MimeTypeResolver(String defaultMimeType) { 21 | this.defaultMimeType = defaultMimeType; 22 | Properties props = new Properties(); 23 | InputStream in = this.getClass().getClassLoader().getResourceAsStream("mime-types.properties"); 24 | try { 25 | props.load(in); 26 | } catch (IOException e) { 27 | throw new RuntimeException(e); 28 | } finally { 29 | try { 30 | in.close(); 31 | } catch (IOException ex) { 32 | log.debug("close() failed", ex); 33 | } 34 | } 35 | 36 | for( Map.Entry entry : props.entrySet()) { 37 | mimeTypes.put(((String)entry.getKey()).toLowerCase(), (String)entry.getValue()); 38 | } 39 | } 40 | 41 | public String resolveMimeType(String path) { 42 | int lastSlash = path.lastIndexOf("/"); 43 | String part = path; 44 | if(lastSlash >= 0 && !path.endsWith("/")) { 45 | part = part.substring(lastSlash+1); 46 | } 47 | int dot = part.lastIndexOf("."); 48 | if(dot == -1 || part.endsWith(".")) { 49 | return defaultMimeType; 50 | } else { 51 | String extension = part.substring(dot+1); 52 | String type = mimeTypes.get(extension.toLowerCase()); 53 | if(type==null) { 54 | type = "text/plain"; 55 | } 56 | return type; 57 | } 58 | } 59 | } 60 | -------------------------------------------------------------------------------- /src/main/java/org/swisspush/reststorage/ModuleConfigurationAuthentication.java: -------------------------------------------------------------------------------- 1 | package org.swisspush.reststorage; 2 | 3 | import io.netty.util.internal.StringUtil; 4 | import io.vertx.core.AsyncResult; 5 | import io.vertx.core.Future; 6 | import io.vertx.core.Handler; 7 | import io.vertx.core.json.JsonObject; 8 | import io.vertx.ext.auth.authentication.AuthenticationProvider; 9 | import io.vertx.ext.auth.authentication.Credentials; 10 | import io.vertx.ext.auth.authentication.UsernamePasswordCredentials; 11 | import org.slf4j.Logger; 12 | import org.swisspush.reststorage.util.ModuleConfiguration; 13 | 14 | import java.util.Objects; 15 | 16 | import static org.slf4j.LoggerFactory.getLogger; 17 | 18 | /** 19 | * Custom implementation of a {@link AuthenticationProvider} using credentials from {@link ModuleConfiguration} 20 | * 21 | * @author https://github.com/mcweba [Marc-Andre Weber] 22 | */ 23 | public class ModuleConfigurationAuthentication implements AuthenticationProvider { 24 | 25 | private final static Logger logger = getLogger(ModuleConfigurationAuthentication.class); 26 | 27 | private static final String INVALID_CREDENTIALS = "invalid credentials"; 28 | 29 | private static class User { 30 | final String name; 31 | final String password; 32 | 33 | private User(String name, String password) { 34 | this.name = Objects.requireNonNull(name); 35 | this.password = Objects.requireNonNull(password); 36 | } 37 
| } 38 | 39 | private final User user; 40 | 41 | public ModuleConfigurationAuthentication(ModuleConfiguration configuration) { 42 | Objects.requireNonNull(configuration); 43 | 44 | String username = configuration.getHttpRequestHandlerUsername(); 45 | String password = configuration.getHttpRequestHandlerPassword(); 46 | 47 | if (StringUtil.isNullOrEmpty(username) || StringUtil.isNullOrEmpty(password)) { 48 | logger.warn("Credentials are missing/empty"); 49 | this.user = null; 50 | } else { 51 | this.user = new User(username, password); 52 | } 53 | } 54 | 55 | @Override 56 | public void authenticate(JsonObject authInfo, Handler> resultHandler) { 57 | authenticate(new UsernamePasswordCredentials(authInfo), resultHandler); 58 | } 59 | 60 | @Override 61 | public void authenticate(Credentials credentials, Handler> resultHandler) { 62 | try { 63 | UsernamePasswordCredentials authInfo = (UsernamePasswordCredentials) credentials; 64 | authInfo.checkValid(null); 65 | 66 | if(user == null) { 67 | resultHandler.handle(Future.failedFuture(INVALID_CREDENTIALS)); 68 | } else { 69 | if (Objects.equals(user.name, authInfo.getUsername()) 70 | && Objects.equals(user.password, authInfo.getPassword())) { 71 | resultHandler.handle(Future.succeededFuture(io.vertx.ext.auth.User.fromName(user.name))); 72 | } else { 73 | resultHandler.handle(Future.failedFuture(INVALID_CREDENTIALS)); 74 | } 75 | } 76 | } catch (RuntimeException e) { 77 | resultHandler.handle(Future.failedFuture(e)); 78 | } 79 | } 80 | } 81 | -------------------------------------------------------------------------------- /src/main/java/org/swisspush/reststorage/RedisRestStorageRunner.java: -------------------------------------------------------------------------------- 1 | package org.swisspush.reststorage; 2 | 3 | import io.vertx.core.AsyncResult; 4 | import io.vertx.core.DeploymentOptions; 5 | import io.vertx.core.Vertx; 6 | import org.slf4j.Logger; 7 | import org.swisspush.reststorage.util.ModuleConfiguration; 8 | 9 | import static org.slf4j.LoggerFactory.getLogger; 10 | 11 | /** 12 | * Runner class to start rest-storage in standalone mode with a redis based storage 13 | * 14 | * @author https://github.com/mcweba [Marc-Andre Weber] 15 | */ 16 | public class RedisRestStorageRunner { 17 | 18 | public static void main(String[] args) { 19 | ModuleConfiguration modConfig = new ModuleConfiguration() 20 | .storageType(ModuleConfiguration.StorageType.redis) 21 | .redisReconnectAttempts(-1) 22 | .redisPoolRecycleTimeoutMs(-1) 23 | .redisReadyCheckIntervalMs(10000) 24 | .resourceCleanupIntervalSec(10); 25 | 26 | Vertx.vertx().deployVerticle(new RestStorageMod(), 27 | new DeploymentOptions().setConfig(modConfig.asJsonObject()), 28 | RedisRestStorageRunner::onDeployComplete); 29 | } 30 | 31 | private static void onDeployComplete(AsyncResult ev) { 32 | Logger log = getLogger(RedisRestStorageRunner.class); 33 | if( ev.failed() ){ 34 | log.error("Failed to deploy RestStorageMod", ev.cause()); 35 | return; 36 | } 37 | log.info("rest-storage started"); 38 | } 39 | 40 | } 41 | -------------------------------------------------------------------------------- /src/main/java/org/swisspush/reststorage/Resource.java: -------------------------------------------------------------------------------- 1 | package org.swisspush.reststorage; 2 | 3 | import io.vertx.core.Handler; 4 | import org.slf4j.Logger; 5 | import org.slf4j.LoggerFactory; 6 | 7 | import java.util.ArrayList; 8 | import java.util.List; 9 | 10 | 11 | public class Resource implements Comparable{ 12 | public 
String name; 13 | public boolean exists = true; 14 | public boolean modified = true; 15 | public boolean invalid = false; 16 | public boolean rejected = false; 17 | public boolean error = false; 18 | public String invalidMessage; 19 | public String errorMessage; 20 | 21 | public Handler errorHandler; 22 | 23 | @Override 24 | public int compareTo(Resource o) { 25 | return this.name.compareTo(o.name); 26 | } 27 | 28 | @Override 29 | public int hashCode() { 30 | final int prime = 31; 31 | int result = 1; 32 | result = prime * result + ((name == null) ? 0 : name.hashCode()); 33 | return result; 34 | } 35 | 36 | @Override 37 | public boolean equals(Object obj) { 38 | if (this == obj) 39 | return true; 40 | if (obj == null) 41 | return false; 42 | if (getClass() != obj.getClass()) 43 | return false; 44 | Resource other = (Resource) obj; 45 | if (name == null) { 46 | if (other.name != null) 47 | return false; 48 | } else if (!name.equals(other.name)) 49 | return false; 50 | return true; 51 | } 52 | 53 | public void addErrorHandler(Handler handler) { 54 | if (errorHandler instanceof EventEmitter) { 55 | ((EventEmitter) errorHandler).addHandler(handler); 56 | } else { 57 | errorHandler = new EventEmitter<>() {{ 58 | if (errorHandler != null) addHandler(errorHandler); 59 | addHandler(handler); 60 | }}; 61 | } 62 | } 63 | 64 | } 65 | 66 | 67 | /** 68 | *
Simple event dispatcher.
69 | * 70 | * @param 71 | * Event type. 72 | */ 73 | class EventEmitter implements Handler { 74 | 75 | private static final Logger log = LoggerFactory.getLogger(EventEmitter.class); 76 | 77 | /** 78 | *
Delegates the propagated event to each registered handler. Keep in mind: every handler 79 | * will receive the same event instance!
80 | */ 81 | @Override 82 | public void handle(T event) { 83 | for (Handler handler : handlers) { 84 | try { 85 | handler.handle(event); 86 | } catch (Exception e) { 87 | log.error("Exception thrown in event handler.", e); 88 | } 89 | } 90 | } 91 | 92 | /** 93 | * @param handler 94 | * Handler to receive events with. 95 | */ 96 | public void addHandler(Handler handler) { 97 | //if( handler == null ){ throw new IllegalArgumentException("Arg 'handler' MUST NOT be null."); } 98 | handlers.add(handler); 99 | } 100 | 101 | private final List> handlers = new ArrayList<>(); 102 | 103 | } 104 | -------------------------------------------------------------------------------- /src/main/java/org/swisspush/reststorage/RestStorageMod.java: -------------------------------------------------------------------------------- 1 | package org.swisspush.reststorage; 2 | ; 3 | import io.vertx.core.AbstractVerticle; 4 | import io.vertx.core.Future; 5 | import io.vertx.core.Handler; 6 | import io.vertx.core.Promise; 7 | import io.vertx.core.Vertx; 8 | import io.vertx.core.http.HttpServerOptions; 9 | import io.vertx.core.http.HttpServerRequest; 10 | import org.slf4j.Logger; 11 | import org.slf4j.LoggerFactory; 12 | import org.swisspush.reststorage.exception.RestStorageExceptionFactory; 13 | import org.swisspush.reststorage.redis.DefaultRedisProvider; 14 | import org.swisspush.reststorage.redis.RedisProvider; 15 | import org.swisspush.reststorage.redis.RedisStorage; 16 | import org.swisspush.reststorage.s3.S3FileSystemStorage; 17 | import org.swisspush.reststorage.util.ModuleConfiguration; 18 | 19 | import static org.swisspush.reststorage.exception.RestStorageExceptionFactory.newRestStorageThriftyExceptionFactory; 20 | 21 | public class RestStorageMod extends AbstractVerticle { 22 | 23 | private final Logger log = LoggerFactory.getLogger(RestStorageMod.class); 24 | 25 | private RedisProvider redisProvider; 26 | private final RestStorageExceptionFactory exceptionFactory; 27 | 28 | public RestStorageMod() { 29 | this.exceptionFactory = newRestStorageThriftyExceptionFactory(); 30 | } 31 | 32 | public RestStorageMod( 33 | RedisProvider redisProvider, 34 | RestStorageExceptionFactory exceptionFactory 35 | ) { 36 | assert exceptionFactory != null; 37 | this.redisProvider = redisProvider; 38 | this.exceptionFactory = exceptionFactory; 39 | } 40 | 41 | @Override 42 | public void start(Promise promise) { 43 | ModuleConfiguration modConfig = ModuleConfiguration.fromJsonObject(config()); 44 | log.info("Starting RestStorageMod with configuration: {}", modConfig); 45 | 46 | createStorage(modConfig).onComplete(event -> { 47 | if (event.failed()) { 48 | promise.fail(event.cause()); 49 | } else { 50 | Handler handler = new RestStorageHandler( 51 | vertx, log, event.result(), exceptionFactory, modConfig); 52 | 53 | if(modConfig.isHttpRequestHandlerEnabled()) { 54 | // in Vert.x 2x 100-continues was activated per default, in vert.x 3x it is off per default. 
55 | HttpServerOptions options = new HttpServerOptions().setHandle100ContinueAutomatically(true); 56 | 57 | vertx.createHttpServer(options).requestHandler(handler).listen(modConfig.getPort(), result -> { 58 | if (result.succeeded()) { 59 | new EventBusAdapter(exceptionFactory).init(vertx, modConfig.getStorageAddress(), handler); 60 | promise.complete(); 61 | } else { 62 | promise.fail(exceptionFactory.newException( 63 | "vertx.HttpServer.listen(" + modConfig.getPort() + ") failed", result.cause())); 64 | } 65 | }); 66 | } else { 67 | new EventBusAdapter(exceptionFactory).init(vertx, modConfig.getStorageAddress(), handler); 68 | promise.complete(); 69 | } 70 | } 71 | }); 72 | } 73 | 74 | private Future createStorage(ModuleConfiguration moduleConfiguration) { 75 | Promise promise = Promise.promise(); 76 | 77 | switch (moduleConfiguration.getStorageType()) { 78 | case filesystem: 79 | promise.complete(new FileSystemStorage(vertx, exceptionFactory, moduleConfiguration.getRoot())); 80 | break; 81 | case s3: 82 | promise.complete(new S3FileSystemStorage(vertx, exceptionFactory, moduleConfiguration.getRoot(), 83 | moduleConfiguration.getAwsS3Region(), moduleConfiguration.getS3BucketName(), 84 | moduleConfiguration.getS3AccessKeyId(), moduleConfiguration.getS3SecretAccessKey(), 85 | moduleConfiguration.getS3UseTlsConnection(), moduleConfiguration.isLocalS3(), 86 | moduleConfiguration.getLocalS3Endpoint(), moduleConfiguration.getLocalS3Port(), 87 | moduleConfiguration.getCreateBucketIfNotPresentYet())); 88 | break; 89 | case redis: 90 | createRedisStorage(vertx, moduleConfiguration).onComplete(event -> { 91 | if(event.succeeded()){ 92 | promise.complete(event.result()); 93 | } else { 94 | promise.fail(exceptionFactory.newException("createRedisStorage() failed", event.cause())); 95 | } 96 | }); 97 | break; 98 | default: 99 | promise.fail(exceptionFactory.newException("Storage not supported: " + moduleConfiguration.getStorageType())); 100 | } 101 | 102 | return promise.future(); 103 | } 104 | 105 | private Future createRedisStorage(Vertx vertx, ModuleConfiguration moduleConfiguration) { 106 | Promise initPromise = Promise.promise(); 107 | 108 | if(redisProvider == null) { 109 | redisProvider = new DefaultRedisProvider(vertx, moduleConfiguration, exceptionFactory); 110 | } 111 | 112 | redisProvider.redis().onComplete(event -> { 113 | if(event.succeeded()) { 114 | initPromise.complete(new RedisStorage(vertx, moduleConfiguration, redisProvider, exceptionFactory)); 115 | } else { 116 | initPromise.fail(exceptionFactory.newException("redisProvider.redis() failed", event.cause())); 117 | } 118 | }); 119 | 120 | return initPromise.future(); 121 | } 122 | } 123 | -------------------------------------------------------------------------------- /src/main/java/org/swisspush/reststorage/Storage.java: -------------------------------------------------------------------------------- 1 | package org.swisspush.reststorage; 2 | 3 | import io.vertx.core.Handler; 4 | import org.swisspush.reststorage.util.LockMode; 5 | 6 | import java.util.List; 7 | import java.util.Optional; 8 | 9 | public interface Storage { 10 | 11 | /** 12 | * Gets the current percentage of the actual memory usage. 
Possible values are in range 0.0 to 100.0 13 | * 14 | * @return the current percentage of the actual memory usage 15 | */ 16 | Optional getCurrentMemoryUsage(); 17 | 18 | void get(String path, String etag, int offset, int count, Handler handler); 19 | 20 | void storageExpand(String path, String etag, List subResources, Handler handler); 21 | 22 | void put(String path, String etag, boolean merge, long expire, Handler handler); 23 | 24 | void put(String path, String etag, boolean merge, long expire, String lockOwner, LockMode lockMode, long lockExpire, Handler handler); 25 | 26 | void put(String path, String etag, boolean merge, long expire, String lockOwner, LockMode lockMode, long lockExpire, boolean storeCompressed, Handler handler); 27 | 28 | void delete(String path, String lockOwner, LockMode lockMode, long lockExpire, boolean confirmCollectionDelete, boolean deleteRecursive, Handler handler); 29 | 30 | void cleanup(Handler handler, String cleanupResourcesAmount); 31 | 32 | } 33 | -------------------------------------------------------------------------------- /src/main/java/org/swisspush/reststorage/UrlParser.java: -------------------------------------------------------------------------------- 1 | package org.swisspush.reststorage; 2 | 3 | import org.slf4j.Logger; 4 | import org.slf4j.LoggerFactory; 5 | 6 | 7 | public class UrlParser { 8 | 9 | private static final Logger log = LoggerFactory.getLogger(UrlParser.class); 10 | 11 | public static RestStorageHandler.OffsetLimit offsetLimit(String offsetFromUrl, String limitFromUrl) { 12 | RestStorageHandler.OffsetLimit offsetValues = new RestStorageHandler.OffsetLimit(0, -1); 13 | if ((offsetFromUrl == null) && (limitFromUrl == null)) { 14 | return offsetValues; 15 | } 16 | int limit = -1; 17 | int offset = 0; 18 | if(offsetFromUrl != null) { 19 | try { 20 | offset = Integer.parseInt(offsetFromUrl); 21 | offset = Math.max(offset, 0); 22 | } catch (Exception e) { 23 | // do nothing here 24 | } 25 | } 26 | 27 | if(limitFromUrl != null) { 28 | try { 29 | limit = Integer.parseInt(limitFromUrl); 30 | limit = Math.max(limit, -1); 31 | } catch (Exception ex) { 32 | log.debug("TODO error handling", ex); 33 | } 34 | } 35 | 36 | offsetValues.offset = offset; 37 | offsetValues.limit = limit; 38 | return offsetValues; 39 | } 40 | 41 | static String path(String uri) { 42 | int i = uri.indexOf("://"); 43 | if (i == -1) { 44 | i = 0; 45 | } else { 46 | i = uri.indexOf('/', i + 3); 47 | if (i == -1) { 48 | // contains no / 49 | return "/"; 50 | } 51 | } 52 | 53 | int queryStart = uri.indexOf('?', i); 54 | if (queryStart == -1) { 55 | queryStart = uri.length(); 56 | } 57 | return uri.substring(i, queryStart); 58 | } 59 | 60 | static String query(String uri) { 61 | int i = uri.indexOf('?'); 62 | if (i == -1) { 63 | return null; 64 | } else { 65 | return uri.substring(i + 1 , uri.length()); 66 | } 67 | } 68 | 69 | } 70 | -------------------------------------------------------------------------------- /src/main/java/org/swisspush/reststorage/exception/RestStorageExceptionFactory.java: -------------------------------------------------------------------------------- 1 | package org.swisspush.reststorage.exception; 2 | 3 | import io.vertx.core.eventbus.ReplyException; 4 | import io.vertx.core.eventbus.ReplyFailure; 5 | 6 | /** 7 | * Applies dependency inversion for exception instantiation. 8 | * 9 | * This class did arise because we had different use cases in different 10 | * applications. One of them has the need to perform fine-grained error 11 | * reporting. 
Whereas in the other application this led to performance issues. 12 | * So now through this abstraction, both applications can choose the behavior 13 | * they need. 14 | * 15 | * If dependency-injection gets applied properly, an app can even provide its 16 | * custom implementation to fine-tune the exact behavior even further. 17 | */ 18 | public interface RestStorageExceptionFactory { 19 | 20 | /** Convenience overload for {@link #newException(String, Throwable)}. */ 21 | public default Exception newException(String msg){ return newException(msg, null); } 22 | 23 | /** Convenience overload for {@link #newException(String, Throwable)}. */ 24 | public default Exception newException(Throwable cause){ return newException(null, cause); } 25 | 26 | public Exception newException(String message, Throwable cause); 27 | 28 | /** Convenience overload for {@link #newRuntimeException(String, Throwable)}. */ 29 | public default RuntimeException newRuntimeException(String msg){ return newRuntimeException(msg, null); } 30 | 31 | /** Convenience overload for {@link #newRuntimeException(String, Throwable)}. */ 32 | public default RuntimeException newRuntimeException(Throwable cause){ return newRuntimeException(null, cause); } 33 | 34 | public RuntimeException newRuntimeException(String message, Throwable cause); 35 | 36 | public ReplyException newReplyException(ReplyFailure failureType, int failureCode, String message); 37 | 38 | 39 | /** 40 | * See {@link RestStorageThriftyExceptionFactory}. 41 | */ 42 | public static RestStorageExceptionFactory newRestStorageThriftyExceptionFactory() { 43 | return new RestStorageThriftyExceptionFactory(); 44 | } 45 | 46 | /** 47 | * See {@link RestStorageWastefulExceptionFactory}. 48 | */ 49 | public static RestStorageExceptionFactory newRestStorageWastefulExceptionFactory() { 50 | return new RestStorageWastefulExceptionFactory(); 51 | } 52 | 53 | } 54 | -------------------------------------------------------------------------------- /src/main/java/org/swisspush/reststorage/exception/RestStorageNoStackReplyException.java: -------------------------------------------------------------------------------- 1 | package org.swisspush.reststorage.exception; 2 | 3 | import io.vertx.core.eventbus.ReplyFailure; 4 | 5 | /** 6 | * There was once a fix in vertx for this (https://github.com/eclipse-vertx/vert.x/issues/4840) 7 | * but for whatever reason in our case we still see stack-trace recordings. Passing 8 | * this subclass to {@link io.vertx.core.eventbus.Message#reply(Object)} seems to 9 | * do the trick. 
10 | */ 11 | public class RestStorageNoStackReplyException extends io.vertx.core.eventbus.ReplyException { 12 | 13 | public RestStorageNoStackReplyException(ReplyFailure failureType, int failureCode, String message) { 14 | super(failureType, failureCode, message); 15 | } 16 | 17 | public RestStorageNoStackReplyException(ReplyFailure failureType, String message) { 18 | this(failureType, -1, message); 19 | } 20 | 21 | public RestStorageNoStackReplyException(ReplyFailure failureType) { 22 | this(failureType, -1, null); 23 | } 24 | 25 | @Override 26 | public Throwable fillInStackTrace() { 27 | return this; 28 | } 29 | 30 | } 31 | -------------------------------------------------------------------------------- /src/main/java/org/swisspush/reststorage/exception/RestStorageNoStacktraceException.java: -------------------------------------------------------------------------------- 1 | package org.swisspush.reststorage.exception; 2 | 3 | /** 4 | * Basically same as in vertx, But adding the forgotten contructors. 5 | */ 6 | public class RestStorageNoStacktraceException extends RuntimeException { 7 | 8 | public RestStorageNoStacktraceException() { 9 | } 10 | 11 | public RestStorageNoStacktraceException(String message) { 12 | super(message); 13 | } 14 | 15 | public RestStorageNoStacktraceException(String message, Throwable cause) { 16 | super(message, cause); 17 | } 18 | 19 | public RestStorageNoStacktraceException(Throwable cause) { 20 | super(cause); 21 | } 22 | 23 | @Override 24 | public Throwable fillInStackTrace() { 25 | return this; 26 | } 27 | 28 | } 29 | -------------------------------------------------------------------------------- /src/main/java/org/swisspush/reststorage/exception/RestStorageThriftyExceptionFactory.java: -------------------------------------------------------------------------------- 1 | package org.swisspush.reststorage.exception; 2 | 3 | import io.vertx.core.eventbus.ReplyException; 4 | import io.vertx.core.eventbus.ReplyFailure; 5 | 6 | /** 7 | * Trades maintainability for speed. For example prefers lightweight 8 | * exceptions without stacktrace recording. It may even decide to drop 'cause' 9 | * and 'suppressed' exceptions. Or to make other optimizations towards 10 | * performance. If an app needs more error details it should use 11 | * {@link RestStorageWastefulExceptionFactory}. If none of those fits the apps 12 | * needs, it can provide its own implementation. 13 | */ 14 | class RestStorageThriftyExceptionFactory implements RestStorageExceptionFactory { 15 | 16 | RestStorageThriftyExceptionFactory() { 17 | } 18 | 19 | @Override 20 | public Exception newException(String message, Throwable cause) { 21 | // This implementation cares about performance. So if 'cause' is already 22 | // of the correct type, we just re-use it directly. There's no need to 23 | // produce yet another exception instance. 24 | if (cause instanceof Exception) return (Exception) cause; 25 | return new RestStorageNoStacktraceException(message, cause); 26 | } 27 | 28 | @Override 29 | public RuntimeException newRuntimeException(String msg, Throwable cause) { 30 | // This implementation cares about performance. So if 'cause' is already 31 | // of the correct type, we just re-use it directly. There's no need to 32 | // produce yet another exception instance. 
33 | if (cause instanceof RuntimeException) return (RuntimeException) cause; 34 | return new RestStorageNoStacktraceException(msg, cause); 35 | } 36 | 37 | @Override 38 | public ReplyException newReplyException(ReplyFailure failureType, int failureCode, String message) { 39 | return new RestStorageNoStackReplyException(failureType, failureCode, message); 40 | } 41 | 42 | } 43 | -------------------------------------------------------------------------------- /src/main/java/org/swisspush/reststorage/exception/RestStorageWastefulExceptionFactory.java: -------------------------------------------------------------------------------- 1 | package org.swisspush.reststorage.exception; 2 | 3 | import io.vertx.core.eventbus.ReplyException; 4 | import io.vertx.core.eventbus.ReplyFailure; 5 | 6 | /** 7 | * Trades speed for maintainability. For example invests more resources like 8 | * recording stack traces (which likely provocates more logs) to get easier 9 | * to debug error messages and better hints of what is happening. It also 10 | * keeps details like 'causes' and 'suppressed' exceptions. If an app needs 11 | * more error details it should use {@link RestStorageThriftyExceptionFactory} 12 | * instead. If none of those impls fit the apps needs, it can provide its own 13 | * implementation. 14 | */ 15 | class RestStorageWastefulExceptionFactory implements RestStorageExceptionFactory { 16 | 17 | RestStorageWastefulExceptionFactory() { 18 | } 19 | 20 | @Override 21 | public Exception newException(String message, Throwable cause) { 22 | return new Exception(message, cause); 23 | } 24 | 25 | @Override 26 | public RuntimeException newRuntimeException(String msg, Throwable cause) { 27 | return new RuntimeException(msg, cause); 28 | } 29 | 30 | @Override 31 | public ReplyException newReplyException(ReplyFailure failureType, int failureCode, String message) { 32 | return new ReplyException(failureType, failureCode, message); 33 | } 34 | 35 | } 36 | -------------------------------------------------------------------------------- /src/main/java/org/swisspush/reststorage/lock/Lock.java: -------------------------------------------------------------------------------- 1 | package org.swisspush.reststorage.lock; 2 | 3 | import io.vertx.core.Future; 4 | 5 | /** 6 | * Cluster wide locks allow you to obtain exclusive locks across the cluster. 7 | * This is useful when you want to do something or access a resource on only one node of a cluster at any one time. 8 | * 9 | * @author https://github.com/mcweba [Marc-Andre Weber] 10 | */ 11 | public interface Lock { 12 | /** 13 | * Try to acquire a lock. 14 | * The token parameter value must be unique across all clients and all lock requests. The lockExpiryMs 15 | * parameter defines the expiry of the lock. 16 | * When not manually released, the lock will be released automatically when expired. 17 | * 18 | * @param lock The name of the lock to acquire 19 | * @param token A unique token to define the owner of the lock 20 | * @param lockExpiryMs The lock expiry in milliseconds 21 | * @return Returns a Future holding a Boolean value whether the lock could be successfully acquired or not 22 | */ 23 | Future acquireLock(String lock, String token, long lockExpiryMs); 24 | 25 | /** 26 | * Try to release a lock. 27 | * The token parameter value is used to verify that only the owner of the lock can release it. 28 | * The token parameter value also prevents the original owner of an already expired lock to release a lock 29 | * which has been acquired by another client. 
30 | * 31 | * @param lock The name of the lock to release 32 | * @param token A unique token to verify if the owner of the lock tries to release the lock 33 | * @return Returns a Promise holding a Boolean value whether the lock could be successfully released or not 34 | */ 35 | Future releaseLock(String lock, String token); 36 | } 37 | -------------------------------------------------------------------------------- /src/main/java/org/swisspush/reststorage/lock/impl/RedisBasedLock.java: -------------------------------------------------------------------------------- 1 | package org.swisspush.reststorage.lock.impl; 2 | 3 | import io.vertx.core.AsyncResult; 4 | import io.vertx.core.Future; 5 | import io.vertx.core.Handler; 6 | import io.vertx.core.Promise; 7 | import io.vertx.core.json.JsonArray; 8 | import io.vertx.redis.client.Command; 9 | import io.vertx.redis.client.Response; 10 | import org.slf4j.Logger; 11 | import org.slf4j.LoggerFactory; 12 | import org.swisspush.reststorage.exception.RestStorageExceptionFactory; 13 | import org.swisspush.reststorage.redis.RedisProvider; 14 | import org.swisspush.reststorage.lock.Lock; 15 | import org.swisspush.reststorage.lock.lua.LockLuaScripts; 16 | import org.swisspush.reststorage.lock.lua.LuaScriptState; 17 | import org.swisspush.reststorage.lock.lua.ReleaseLockRedisCommand; 18 | import org.swisspush.reststorage.redis.RedisUtils; 19 | import org.swisspush.reststorage.util.FailedAsyncResult; 20 | 21 | import java.util.Collections; 22 | import java.util.List; 23 | 24 | /** 25 | * Implementation of the {@link Lock} interface based on a redis database. 26 | * 27 | * @author https://github.com/mcweba [Marc-Andre Weber] 28 | */ 29 | public class RedisBasedLock implements Lock { 30 | 31 | private final Logger log = LoggerFactory.getLogger(RedisBasedLock.class); 32 | 33 | public static final String STORAGE_PREFIX = "rest-storage-lock:"; 34 | private static final String[] EMPTY_STRING_ARRAY = new String[0]; 35 | 36 | private final LuaScriptState releaseLockLuaScriptState; 37 | private final RedisProvider redisProvider; 38 | private final RestStorageExceptionFactory exceptionFactory; 39 | 40 | public RedisBasedLock( 41 | RedisProvider redisProvider, 42 | RestStorageExceptionFactory exceptionFactory 43 | ) { 44 | this.redisProvider = redisProvider; 45 | this.exceptionFactory = exceptionFactory; 46 | this.releaseLockLuaScriptState = new LuaScriptState( 47 | LockLuaScripts.LOCK_RELEASE, redisProvider, exceptionFactory, false); 48 | } 49 | 50 | private void redisSetWithOptions(String key, String value, boolean nx, long px, Handler> handler) { 51 | JsonArray options = new JsonArray(); 52 | options.add("PX").add(px); 53 | if (nx) { 54 | options.add("NX"); 55 | } 56 | redisProvider.redis().onComplete( redisEv -> { 57 | if( redisEv.failed() ){ 58 | Throwable ex = exceptionFactory.newException("redisProvider.redis() failed", redisEv.cause()); 59 | handler.handle(new FailedAsyncResult<>(ex)); 60 | return; 61 | } 62 | var redisAPI = redisEv.result(); 63 | String[] payload = RedisUtils.toPayload(key, value, options).toArray(EMPTY_STRING_ARRAY); 64 | redisAPI.send(Command.SET, payload).onComplete( ev -> { 65 | if( ev.failed() ){ 66 | Throwable ex = exceptionFactory.newException("redisAPI.send(SET, ...) 
failed", ev.cause()); 67 | handler.handle(new FailedAsyncResult<>(ex)); 68 | }else{ 69 | handler.handle(ev); 70 | } 71 | }); 72 | }); 73 | } 74 | 75 | @Override 76 | public Future acquireLock(String lock, String token, long lockExpiryMs) { 77 | Promise promise = Promise.promise(); 78 | redisSetWithOptions(buildLockKey(lock), token, true, lockExpiryMs, event -> { 79 | if (event.succeeded()) { 80 | if (event.result() != null) { 81 | promise.complete("OK".equalsIgnoreCase(event.result().toString())); 82 | } else { 83 | promise.complete(false); 84 | } 85 | } else { 86 | promise.fail(exceptionFactory.newException("redisSetWithOptions() failed", event.cause())); 87 | } 88 | }); 89 | return promise.future(); 90 | } 91 | 92 | @Override 93 | public Future releaseLock(String lock, String token) { 94 | Promise promise = Promise.promise(); 95 | List keys = Collections.singletonList(buildLockKey(lock)); 96 | List arguments = Collections.singletonList(token); 97 | ReleaseLockRedisCommand cmd = new ReleaseLockRedisCommand(releaseLockLuaScriptState, 98 | keys, arguments, redisProvider, exceptionFactory, log, promise); 99 | cmd.exec(0); 100 | return promise.future(); 101 | } 102 | 103 | private String buildLockKey(String lock) { 104 | return STORAGE_PREFIX + lock; 105 | } 106 | } 107 | -------------------------------------------------------------------------------- /src/main/java/org/swisspush/reststorage/lock/lua/LockLuaScripts.java: -------------------------------------------------------------------------------- 1 | package org.swisspush.reststorage.lock.lua; 2 | 3 | 4 | /** 5 | * @author https://github.com/mcweba [Marc-Andre Weber] 6 | */ 7 | public enum LockLuaScripts implements LuaScript { 8 | 9 | LOCK_RELEASE("lock_release.lua"); 10 | 11 | private final String file; 12 | 13 | LockLuaScripts(String file) { 14 | this.file = file; 15 | } 16 | 17 | @Override 18 | public String getFilename() { 19 | return file; 20 | } 21 | } 22 | -------------------------------------------------------------------------------- /src/main/java/org/swisspush/reststorage/lock/lua/LuaScript.java: -------------------------------------------------------------------------------- 1 | package org.swisspush.reststorage.lock.lua; 2 | 3 | /** 4 | * @author https://github.com/mcweba [Marc-Andre Weber] 5 | */ 6 | public interface LuaScript { 7 | String getFilename(); 8 | } 9 | -------------------------------------------------------------------------------- /src/main/java/org/swisspush/reststorage/lock/lua/LuaScriptState.java: -------------------------------------------------------------------------------- 1 | package org.swisspush.reststorage.lock.lua; 2 | 3 | import org.apache.commons.codec.digest.DigestUtils; 4 | import org.slf4j.Logger; 5 | import org.slf4j.LoggerFactory; 6 | import org.swisspush.reststorage.exception.RestStorageExceptionFactory; 7 | import org.swisspush.reststorage.redis.RedisProvider; 8 | 9 | import java.io.BufferedReader; 10 | import java.io.IOException; 11 | import java.io.InputStreamReader; 12 | import java.util.Arrays; 13 | 14 | /** 15 | * Created by webermarca on 01.07.2016. 
16 | */ 17 | public class LuaScriptState { 18 | private final LuaScript luaScriptType; 19 | /** 20 | * the script itself 21 | */ 22 | private String script; 23 | /** 24 | * if the script logs to the redis log 25 | */ 26 | private boolean logoutput = false; 27 | /** 28 | * the sha, over which the script can be accessed in redis 29 | */ 30 | private String sha; 31 | 32 | private final RedisProvider redisProvider; 33 | private final RestStorageExceptionFactory exceptionFactory; 34 | 35 | private final Logger log = LoggerFactory.getLogger(LuaScriptState.class); 36 | 37 | public LuaScriptState( 38 | LuaScript luaScriptType, 39 | RedisProvider redisProvider, 40 | RestStorageExceptionFactory exceptionFactory, 41 | boolean logoutput 42 | ) { 43 | this.luaScriptType = luaScriptType; 44 | this.redisProvider = redisProvider; 45 | this.exceptionFactory = exceptionFactory; 46 | this.logoutput = logoutput; 47 | this.composeLuaScript(luaScriptType); 48 | this.loadLuaScript(new RedisCommandDoNothing(), 0); 49 | } 50 | 51 | /** 52 | * Reads the script from the classpath and removes logging output if logoutput is false. 53 | * The script is stored in the class member script. 54 | * 55 | * @param luaScriptType 56 | */ 57 | private void composeLuaScript(LuaScript luaScriptType) { 58 | log.info("read the lua script for script type: {} with logoutput: {}", luaScriptType, logoutput); 59 | this.script = readLuaScriptFromClasspath(luaScriptType); 60 | this.sha = DigestUtils.sha1Hex(this.script); 61 | } 62 | 63 | private String readLuaScriptFromClasspath(LuaScript luaScriptType) { 64 | BufferedReader in = new BufferedReader(new InputStreamReader(this.getClass().getClassLoader().getResourceAsStream(luaScriptType.getFilename()))); 65 | StringBuilder sb; 66 | try { 67 | sb = new StringBuilder(); 68 | String line; 69 | while ((line = in.readLine()) != null) { 70 | if (!logoutput && line.contains("redis.log(redis.LOG_NOTICE,")) { 71 | continue; 72 | } 73 | sb.append(line).append("\n"); 74 | } 75 | 76 | } catch (IOException e) { 77 | throw new RuntimeException(e); 78 | } finally { 79 | try { 80 | in.close(); 81 | } catch (IOException e) { 82 | log.warn("stacktrace", e); 83 | } 84 | } 85 | return sb.toString(); 86 | } 87 | 88 | /** 89 | * Load the get script into redis and store the sha in the class member sha. 90 | * 91 | * @param redisCommand the redis command that should be executed, after the script is loaded. 
92 | * @param executionCounter a counter to control recursion depth 93 | */ 94 | public void loadLuaScript(final RedisCommand redisCommand, int executionCounter) { 95 | final int executionCounterIncr = ++executionCounter; 96 | // check first if the lua script already exists in the store 97 | 98 | redisProvider.redis().onComplete(ev -> { 99 | if (ev.failed()) { 100 | log.error("stacktrace", exceptionFactory.newException("redisProvider.redis() failed", ev.cause())); 101 | return; 102 | } 103 | var redisAPI = ev.result(); 104 | redisAPI.script(Arrays.asList("exists", sha), existsEv -> { 105 | if (existsEv.failed()) { 106 | log.error("stacktrace", exceptionFactory.newException("Error checking whether lua script exists", existsEv.cause())); 107 | return; 108 | } 109 | Long exists = existsEv.result().get(0).toLong(); 110 | if (Long.valueOf(1).equals(exists)) { 111 | log.debug("RedisStorage script already exists in redis cache: {}", luaScriptType); 112 | redisCommand.exec(executionCounterIncr); 113 | } else { 114 | log.info("load lua script for script type: {} logutput: {}", luaScriptType, logoutput); 115 | redisAPI.script(Arrays.asList("load", script), loadEv -> { 116 | if (loadEv.failed()) { 117 | log.error("stacktrace", exceptionFactory.newException( 118 | "redisAPI.script(['load', script) failed", loadEv.cause())); 119 | return; 120 | } 121 | String newSha = loadEv.result().toString(); 122 | log.info("got sha from redis for lua script: {}: {}", luaScriptType, newSha); 123 | if (!newSha.equals(sha)) { 124 | log.warn("the sha calculated by myself: {} doesn't match with the sha from redis: {}. " + 125 | "We use the sha from redis", sha, newSha); 126 | } 127 | sha = newSha; 128 | log.info("execute redis command for script type: {} with new sha: {}", luaScriptType, sha); 129 | redisCommand.exec(executionCounterIncr); 130 | }); 131 | } 132 | }); 133 | }); 134 | } 135 | 136 | public String getScript() { 137 | return script; 138 | } 139 | 140 | public void setScript(String script) { 141 | this.script = script; 142 | } 143 | 144 | public boolean getLogoutput() { 145 | return logoutput; 146 | } 147 | 148 | public void setLogoutput(boolean logoutput) { 149 | this.logoutput = logoutput; 150 | } 151 | 152 | public String getSha() { 153 | return sha; 154 | } 155 | 156 | public void setSha(String sha) { 157 | this.sha = sha; 158 | } 159 | 160 | } 161 | -------------------------------------------------------------------------------- /src/main/java/org/swisspush/reststorage/lock/lua/RedisCommand.java: -------------------------------------------------------------------------------- 1 | package org.swisspush.reststorage.lock.lua; 2 | 3 | /** 4 | * @author https://github.com/mcweba [Marc-Andre Weber] 5 | */ 6 | public interface RedisCommand { 7 | void exec(int executionCounter); 8 | } 9 | -------------------------------------------------------------------------------- /src/main/java/org/swisspush/reststorage/lock/lua/RedisCommandDoNothing.java: -------------------------------------------------------------------------------- 1 | package org.swisspush.reststorage.lock.lua; 2 | 3 | /** 4 | * @author https://github.com/mcweba [Marc-Andre Weber] 5 | */ 6 | public class RedisCommandDoNothing implements RedisCommand{ 7 | 8 | @Override 9 | public void exec(int executionCounter) { 10 | // do nothing here 11 | } 12 | } 13 | -------------------------------------------------------------------------------- /src/main/java/org/swisspush/reststorage/lock/lua/ReleaseLockRedisCommand.java: 
-------------------------------------------------------------------------------- 1 | package org.swisspush.reststorage.lock.lua; 2 | 3 | import io.vertx.core.Promise; 4 | import org.slf4j.Logger; 5 | import org.swisspush.reststorage.exception.RestStorageExceptionFactory; 6 | import org.swisspush.reststorage.redis.RedisProvider; 7 | 8 | import java.util.ArrayList; 9 | import java.util.List; 10 | 11 | /** 12 | * @author https://github.com/mcweba [Marc-Andre Weber] 13 | */ 14 | public class ReleaseLockRedisCommand implements RedisCommand { 15 | 16 | private final LuaScriptState luaScriptState; 17 | private final List keys; 18 | private final List arguments; 19 | private final Promise promise; 20 | private final RedisProvider redisProvider; 21 | private final RestStorageExceptionFactory exceptionFactory; 22 | private final Logger log; 23 | 24 | public ReleaseLockRedisCommand( 25 | LuaScriptState luaScriptState, 26 | List keys, 27 | List arguments, 28 | RedisProvider redisProvider, 29 | RestStorageExceptionFactory exceptionFactory, 30 | Logger log, 31 | final Promise promise 32 | ) { 33 | this.luaScriptState = luaScriptState; 34 | this.keys = keys; 35 | this.arguments = arguments; 36 | this.redisProvider = redisProvider; 37 | this.exceptionFactory = exceptionFactory; 38 | this.log = log; 39 | this.promise = promise; 40 | } 41 | 42 | @Override 43 | public void exec(int executionCounter) { 44 | List args = new ArrayList<>(); 45 | args.add(luaScriptState.getSha()); 46 | args.add(String.valueOf(keys.size())); 47 | args.addAll(keys); 48 | args.addAll(arguments); 49 | 50 | redisProvider.redis().onComplete( redisEv -> { 51 | if( redisEv.failed() ){ 52 | promise.fail(exceptionFactory.newException("redisProvider.redis() failed", redisEv.cause())); 53 | return; 54 | } 55 | var redisAPI = redisEv.result(); 56 | redisAPI.evalsha(args, shaEv -> { 57 | if( shaEv.failed() ){ 58 | Throwable ex = shaEv.cause(); 59 | String message = ex.getMessage(); 60 | if (message != null && message.startsWith("NOSCRIPT")) { 61 | log.warn("ReleaseLockRedisCommand script couldn't be found, reload it", ex); 62 | log.warn("amount the script got loaded: " + executionCounter); 63 | if (executionCounter > 10) { 64 | promise.fail("amount the script got loaded is higher than 10, we abort"); 65 | } else { 66 | luaScriptState.loadLuaScript(new ReleaseLockRedisCommand(luaScriptState, keys, 67 | arguments, redisProvider, exceptionFactory, log, promise), executionCounter); 68 | } 69 | } else { 70 | promise.fail(exceptionFactory.newException("redisAPI.evalsha() failed", ex)); 71 | } 72 | return; 73 | } 74 | Long unlocked = shaEv.result().toLong(); 75 | promise.complete(unlocked > 0); 76 | }); 77 | }); 78 | } 79 | } 80 | -------------------------------------------------------------------------------- /src/main/java/org/swisspush/reststorage/redis/DefaultRedisReadyProvider.java: -------------------------------------------------------------------------------- 1 | package org.swisspush.reststorage.redis; 2 | 3 | import io.vertx.core.Future; 4 | import io.vertx.core.Vertx; 5 | import io.vertx.redis.client.RedisAPI; 6 | import org.slf4j.Logger; 7 | import org.slf4j.LoggerFactory; 8 | 9 | import java.util.List; 10 | import java.util.Optional; 11 | import java.util.concurrent.atomic.AtomicBoolean; 12 | 13 | /** 14 | * Default implementation of the {@link RedisReadyProvider} based on the INFO command in Redis 15 | * 16 | * @author https://github.com/mcweba [Marc-Andre Weber] 17 | */ 18 | public class DefaultRedisReadyProvider implements 
RedisReadyProvider { 19 | 20 | private static final Logger log = LoggerFactory.getLogger(DefaultRedisReadyProvider.class); 21 | private static final String DELIMITER = ":"; 22 | private static final String LOADING = "loading"; 23 | final AtomicBoolean redisReady = new AtomicBoolean(true); 24 | final AtomicBoolean updateRedisReady = new AtomicBoolean(true); 25 | 26 | /** 27 | * Constructor defining the "ready-state" update interval 28 | * @param vertx 29 | * @param updateIntervalMs interval in ms how often to update the "ready-state" 30 | */ 31 | public DefaultRedisReadyProvider(Vertx vertx, int updateIntervalMs) { 32 | vertx.setPeriodic(updateIntervalMs, l -> { 33 | updateRedisReady.set(true); 34 | }); 35 | } 36 | 37 | @Override 38 | public Future ready(RedisAPI redisAPI) { 39 | if(updateRedisReady.compareAndSet(true, false)){ 40 | return updateRedisReadyState(redisAPI); 41 | } 42 | return Future.succeededFuture(redisReady.get()); 43 | } 44 | 45 | /** 46 | * Call the INFO command in Redis with a constraint to persistence related information 47 | * 48 | * @param redisAPI 49 | * @return async boolean true when Redis is ready, otherwise false 50 | */ 51 | public Future updateRedisReadyState(RedisAPI redisAPI) { 52 | return redisAPI.info(List.of("Persistence")).compose(response -> { 53 | boolean ready = getReadyStateFromResponse(response.toString()); 54 | redisReady.set(ready); 55 | return Future.succeededFuture(ready); 56 | }, throwable -> { 57 | log.error("Error reading redis info", throwable); 58 | redisReady.set(false); 59 | return Future.succeededFuture(false); 60 | }); 61 | } 62 | 63 | /** 64 | * Check the response having a loading:0 entry. If so, Redis is ready. When the response contains a 65 | * loading:1 entry or not related entry at all, we consider Redis to be not ready 66 | * 67 | * @param persistenceInfo the response from Redis _INFO_ command 68 | * @return boolean true when Redis is ready, otherwise false 69 | */ 70 | private boolean getReadyStateFromResponse(String persistenceInfo) { 71 | byte loadingValue; 72 | try { 73 | Optional loadingOpt = persistenceInfo 74 | .lines() 75 | .filter(source -> source.startsWith(LOADING + DELIMITER)) 76 | .findAny(); 77 | if (loadingOpt.isEmpty()) { 78 | log.warn("No 'loading' section received from redis. Unable to calculate ready state"); 79 | return false; 80 | } 81 | loadingValue = Byte.parseByte(loadingOpt.get().split(DELIMITER)[1]); 82 | if (loadingValue == 0) { 83 | return true; 84 | } 85 | 86 | } catch (NumberFormatException ex) { 87 | log.warn("Invalid 'loading' section received from redis. 
Unable to calculate ready state"); 88 | return false; 89 | } 90 | 91 | return false; 92 | } 93 | } 94 | -------------------------------------------------------------------------------- /src/main/java/org/swisspush/reststorage/redis/EventBusRedisMetricsPublisher.java: -------------------------------------------------------------------------------- 1 | package org.swisspush.reststorage.redis; 2 | 3 | import io.vertx.core.Vertx; 4 | import io.vertx.core.json.JsonObject; 5 | 6 | public class EventBusRedisMetricsPublisher implements RedisMetricsPublisher { 7 | 8 | private final Vertx vertx; 9 | private final String monitoringAddress; 10 | private final String prefix; 11 | 12 | public EventBusRedisMetricsPublisher(Vertx vertx, String monitoringAddress, String prefix) { 13 | this.vertx = vertx; 14 | this.monitoringAddress = monitoringAddress; 15 | this.prefix = prefix; 16 | } 17 | 18 | @Override 19 | public void publishMetric(String name, long value) { 20 | vertx.eventBus().publish(monitoringAddress, 21 | new JsonObject().put("name", prefix + name).put("action", "set").put("n", value)); 22 | } 23 | } 24 | -------------------------------------------------------------------------------- /src/main/java/org/swisspush/reststorage/redis/RedisMetricsPublisher.java: -------------------------------------------------------------------------------- 1 | package org.swisspush.reststorage.redis; 2 | 3 | public interface RedisMetricsPublisher { 4 | 5 | void publishMetric(String name, long value); 6 | } 7 | -------------------------------------------------------------------------------- /src/main/java/org/swisspush/reststorage/redis/RedisMonitor.java: -------------------------------------------------------------------------------- 1 | package org.swisspush.reststorage.redis; 2 | 3 | import com.google.common.base.Splitter; 4 | import io.vertx.core.Vertx; 5 | import io.vertx.core.buffer.Buffer; 6 | import org.slf4j.Logger; 7 | import org.slf4j.LoggerFactory; 8 | 9 | import java.util.*; 10 | 11 | import static java.util.Collections.emptyList; 12 | 13 | public class RedisMonitor { 14 | private final Vertx vertx; 15 | private final RedisProvider redisProvider; 16 | private final int periodMs; 17 | private final String expirableKey; 18 | private long timer; 19 | private final Logger log = LoggerFactory.getLogger(RedisMonitor.class); 20 | 21 | private static final String DELIMITER = ":"; 22 | 23 | private final RedisMetricsPublisher publisher; 24 | 25 | /** 26 | * @param vertx vertx 27 | * @param redisProvider RedisProvider 28 | * @param monitoringAddress The EventBus address to send metrics to 29 | * @param name name used in the metrics EventBus message 30 | * @param expirableKey name of the expirable resources entry 31 | * @param periodSec period in seconds to gather redis metrics 32 | */ 33 | public RedisMonitor(Vertx vertx, RedisProvider redisProvider, String monitoringAddress, String name, String expirableKey, int periodSec) { 34 | this(vertx, redisProvider, expirableKey, periodSec, 35 | new EventBusRedisMetricsPublisher(vertx, monitoringAddress, "redis." 
+ name + ".") 36 | ); 37 | } 38 | 39 | public RedisMonitor(Vertx vertx, RedisProvider redisProvider, String expirableKey, int periodSec, RedisMetricsPublisher publisher) { 40 | this.vertx = vertx; 41 | this.redisProvider = redisProvider; 42 | this.expirableKey = expirableKey; 43 | this.periodMs = periodSec * 1000; 44 | this.publisher = publisher; 45 | } 46 | 47 | public void start() { 48 | timer = vertx.setPeriodic(periodMs, timer -> redisProvider.redis().onSuccess(redisAPI -> { 49 | redisAPI.info(emptyList()).onComplete(event -> { 50 | if (event.succeeded()) { 51 | collectMetrics(event.result().toBuffer()); 52 | } else { 53 | log.warn("Cannot collect INFO from redis", event.cause()); 54 | } 55 | }); 56 | 57 | redisAPI.zcard(expirableKey, reply -> { 58 | if (reply.succeeded()) { 59 | long value = reply.result().toLong(); 60 | publisher.publishMetric("expirable", value); 61 | } else { 62 | log.warn("Cannot collect zcard from redis for key {}", expirableKey, reply.cause()); 63 | } 64 | }); 65 | 66 | }).onFailure(throwable -> log.warn("Cannot collect INFO from redis", throwable))); 67 | } 68 | 69 | public void stop() { 70 | if (timer != 0) { 71 | vertx.cancelTimer(timer); 72 | timer = 0; 73 | } 74 | } 75 | 76 | private void collectMetrics(Buffer buffer) { 77 | Map map = new HashMap<>(); 78 | 79 | Splitter.on(System.lineSeparator()).omitEmptyStrings() 80 | .trimResults().splitToList(buffer.toString()).stream() 81 | .filter(input -> input != null && input.contains(DELIMITER) 82 | && !input.contains("executable") 83 | && !input.contains("config_file")).forEach(entry -> { 84 | List keyValue = Splitter.on(DELIMITER).omitEmptyStrings().trimResults().splitToList(entry); 85 | if (keyValue.size() == 2) { 86 | map.put(keyValue.get(0), keyValue.get(1)); 87 | } 88 | }); 89 | 90 | log.debug("got redis metrics {}", map); 91 | 92 | map.forEach((key, valueStr) -> { 93 | long value; 94 | try { 95 | if (key.startsWith("db")) { 96 | String[] pairs = valueStr.split(","); 97 | for (String pair : pairs) { 98 | String[] tokens = pair.split("="); 99 | if (tokens.length == 2) { 100 | value = Long.parseLong(tokens[1]); 101 | publisher.publishMetric("keyspace." + key + "." + tokens[0], value); 102 | } else { 103 | log.warn("Invalid keyspace property. 
Will be ignored"); 104 | } 105 | } 106 | } else if (key.contains("_cpu_")) { 107 | value = (long) (Double.parseDouble(valueStr) * 1000.0); 108 | publisher.publishMetric(key, value); 109 | } else if (key.contains("fragmentation_ratio")) { 110 | value = (long) (Double.parseDouble(valueStr)); 111 | publisher.publishMetric(key, value); 112 | } else { 113 | value = Long.parseLong(valueStr); 114 | publisher.publishMetric(key, value); 115 | } 116 | } catch (NumberFormatException e) { 117 | log.trace("ignore field '{}' because '{}' doesnt look number-ish enough", key, valueStr); 118 | } 119 | }); 120 | } 121 | } 122 | -------------------------------------------------------------------------------- /src/main/java/org/swisspush/reststorage/redis/RedisProvider.java: -------------------------------------------------------------------------------- 1 | package org.swisspush.reststorage.redis; 2 | 3 | import io.vertx.core.Future; 4 | import io.vertx.redis.client.RedisAPI; 5 | 6 | /** 7 | * Provider for {@link RedisAPI} 8 | * 9 | * @author https://github.com/mcweba [Marc-Andre Weber] 10 | */ 11 | public interface RedisProvider { 12 | 13 | Future redis(); 14 | } 15 | -------------------------------------------------------------------------------- /src/main/java/org/swisspush/reststorage/redis/RedisReadyProvider.java: -------------------------------------------------------------------------------- 1 | package org.swisspush.reststorage.redis; 2 | 3 | import io.vertx.core.Future; 4 | import io.vertx.redis.client.RedisAPI; 5 | 6 | /** 7 | * Provides the "ready state" of the Redis database. The connection to Redis may be already established, but Redis is not 8 | * yet ready to be used 9 | * 10 | * @author https://github.com/mcweba [Marc-Andre Weber] 11 | */ 12 | public interface RedisReadyProvider { 13 | 14 | /** 15 | * Get the "ready state" of the Redis database. 16 | * 17 | * @param redisAPI API to access redis database 18 | * @return An async boolean true when Redis can be used. Returns async boolean false otherwise or in case of an error 19 | */ 20 | Future ready(RedisAPI redisAPI); 21 | } 22 | -------------------------------------------------------------------------------- /src/main/java/org/swisspush/reststorage/redis/RedisUtils.java: -------------------------------------------------------------------------------- 1 | package org.swisspush.reststorage.redis; 2 | 3 | import io.vertx.core.json.JsonArray; 4 | import io.vertx.core.json.JsonObject; 5 | 6 | import java.util.*; 7 | import java.util.stream.Stream; 8 | 9 | /** 10 | * Useful utilities for Redis 11 | * 12 | * @author https://github.com/mcweba [Marc-Andre Weber] 13 | */ 14 | public final class RedisUtils { 15 | 16 | private RedisUtils() {} 17 | 18 | /** 19 | * from https://github.com/vert-x3/vertx-redis-client/blob/3.9/src/main/java/io/vertx/redis/impl/RedisClientImpl.java#L94 20 | * 21 | * @param parameters 22 | * @return 23 | */ 24 | public static List toPayload(Object... 
parameters) { 25 | List result = new ArrayList<>(parameters.length); 26 | 27 | for (Object param : parameters) { 28 | // unwrap 29 | if (param instanceof JsonArray) { 30 | param = ((JsonArray) param).getList(); 31 | } 32 | // unwrap 33 | if (param instanceof JsonObject) { 34 | param = ((JsonObject) param).getMap(); 35 | } 36 | 37 | if (param instanceof Collection) { 38 | ((Collection) param).stream().filter(Objects::nonNull).forEach(o -> result.add(o.toString())); 39 | } else if (param instanceof Map) { 40 | for (Map.Entry pair : ((Map) param).entrySet()) { 41 | result.add(pair.getKey().toString()); 42 | result.add(pair.getValue().toString()); 43 | } 44 | } else if (param instanceof Stream) { 45 | ((Stream) param).forEach(e -> { 46 | if (e instanceof Object[]) { 47 | Collections.addAll(result, (String[]) e); 48 | } else { 49 | result.add(e.toString()); 50 | } 51 | }); 52 | } else if (param != null) { 53 | result.add(param.toString()); 54 | } 55 | } 56 | return result; 57 | } 58 | 59 | } 60 | -------------------------------------------------------------------------------- /src/main/java/org/swisspush/reststorage/s3/FileWriteStream.java: -------------------------------------------------------------------------------- 1 | package org.swisspush.reststorage.s3; 2 | 3 | import io.vertx.core.AsyncResult; 4 | import io.vertx.core.Future; 5 | import io.vertx.core.Handler; 6 | import io.vertx.core.Promise; 7 | import io.vertx.core.buffer.Buffer; 8 | import io.vertx.core.streams.WriteStream; 9 | import org.slf4j.Logger; 10 | 11 | import java.io.OutputStream; 12 | import java.io.IOException; 13 | 14 | import static org.slf4j.LoggerFactory.getLogger; 15 | 16 | public class FileWriteStream implements WriteStream { 17 | private static final Logger log = getLogger(FileWriteStream.class); 18 | private final OutputStream outputStream; 19 | private Handler exceptionHandler; 20 | private Handler drainHandler; 21 | private boolean writeQueueFull = false; 22 | private int maxQueueSize = 1024; // Default max queue size, can be modified 23 | 24 | public FileWriteStream(OutputStream outputStream) { 25 | this.outputStream = outputStream; 26 | } 27 | 28 | @Override 29 | public WriteStream exceptionHandler(Handler handler) { 30 | this.exceptionHandler = handler; 31 | return this; 32 | } 33 | 34 | @Override 35 | public void write(Buffer buffer, Handler> handler) { 36 | try { 37 | outputStream.write(buffer.getBytes()); 38 | outputStream.flush(); 39 | handler.handle(Future.succeededFuture()); 40 | } catch (IOException e) { 41 | if (exceptionHandler != null) { 42 | exceptionHandler.handle(e); 43 | } 44 | log.error("Error writing to stream", e); 45 | handler.handle(Future.failedFuture(e)); 46 | } 47 | } 48 | 49 | @Override 50 | public Future write(Buffer buffer) { 51 | Promise promise = Promise.promise(); 52 | try { 53 | outputStream.write(buffer.getBytes()); 54 | outputStream.flush(); 55 | promise.complete(); 56 | } catch (IOException e) { 57 | if (exceptionHandler != null) { 58 | exceptionHandler.handle(e); 59 | } 60 | log.error("Error writing to stream", e); 61 | promise.fail(e); 62 | } 63 | return promise.future(); 64 | } 65 | 66 | @Override 67 | public void end(Handler> handler) { 68 | try { 69 | outputStream.flush(); 70 | outputStream.close(); 71 | handler.handle(Future.succeededFuture()); 72 | } catch (IOException e) { 73 | if (exceptionHandler != null) { 74 | exceptionHandler.handle(e); 75 | } 76 | log.error("Error when try to end stream", e); 77 | handler.handle(Future.failedFuture(e)); 78 | } 79 | } 80 | 81 | 
@Override 82 | public Future end() { 83 | Promise promise = Promise.promise(); 84 | try { 85 | outputStream.flush(); 86 | outputStream.close(); 87 | promise.complete(); 88 | } catch (IOException e) { 89 | if (exceptionHandler != null) { 90 | exceptionHandler.handle(e); 91 | } 92 | log.error("Error when try to end stream", e); 93 | promise.fail(e); 94 | } 95 | return promise.future(); 96 | } 97 | 98 | @Override 99 | public WriteStream setWriteQueueMaxSize(int maxSize) { 100 | this.maxQueueSize = maxSize; 101 | return this; 102 | } 103 | 104 | @Override 105 | public boolean writeQueueFull() { 106 | // Implement custom logic to manage queue size if needed 107 | return writeQueueFull; 108 | } 109 | 110 | @Override 111 | public WriteStream drainHandler(Handler handler) { 112 | this.drainHandler = handler; 113 | return this; 114 | } 115 | } 116 | -------------------------------------------------------------------------------- /src/main/java/org/swisspush/reststorage/s3/S3FileSystemDirLister.java: -------------------------------------------------------------------------------- 1 | package org.swisspush.reststorage.s3; 2 | 3 | import io.vertx.core.Handler; 4 | import io.vertx.core.Promise; 5 | import io.vertx.core.Vertx; 6 | import org.slf4j.Logger; 7 | import org.slf4j.LoggerFactory; 8 | import org.swisspush.reststorage.CollectionResource; 9 | import org.swisspush.reststorage.DocumentResource; 10 | import org.swisspush.reststorage.Resource; 11 | 12 | import java.io.IOException; 13 | import java.nio.file.Files; 14 | import java.nio.file.Path; 15 | import java.util.ArrayList; 16 | import java.util.Collections; 17 | import java.util.stream.Stream; 18 | 19 | import static org.swisspush.reststorage.s3.S3FileSystemStorage.S3_PATH_SEPARATOR; 20 | 21 | 22 | /** 23 | * This type handles listing of directories in S3. 24 | * 25 | *

 Internally it makes use of worker threads to keep the event-loop thread 26 | responsive.
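 *
 * A possible invocation, for illustration only (the root, path and range values are assumptions):
 *
 *     S3FileSystemDirLister lister = new S3FileSystemDirLister(vertx, "/");
 *     lister.handleListingRequest(java.nio.file.Paths.get("/some/collection/"), 0, -1, resource -> {
 *         // on success 'resource' is a CollectionResource; on failure it is a Resource with error=true
 *     });
 *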

27 | */ 28 | public class S3FileSystemDirLister { 29 | 30 | private static final Logger log = LoggerFactory.getLogger(S3FileSystemDirLister.class); 31 | private final Vertx vertx; 32 | private final String root; 33 | 34 | 35 | public S3FileSystemDirLister(Vertx vertx, String root) { 36 | this.vertx = vertx; 37 | this.root = root; 38 | } 39 | 40 | public void handleListingRequest(Path path, final int offset, final int count, final Handler handler) { 41 | // Delegate work to worker thread from thread pool. 42 | log.trace("Delegate to worker pool"); 43 | final long startTimeMillis = System.currentTimeMillis(); 44 | vertx.executeBlocking(promise -> { 45 | log.trace("Welcome on worker-thread."); 46 | listDirBlocking(path, offset, count, (Promise) (Promise) promise); 47 | log.trace("worker-thread says bye."); 48 | }, event -> { 49 | log.trace("Welcome back on eventloop-thread."); 50 | if (log.isDebugEnabled()) { 51 | final long durationMillis = System.currentTimeMillis() - startTimeMillis; 52 | log.debug("List directory contents of '{}' took {}ms", path, durationMillis); 53 | } 54 | if (event.failed()) { 55 | log.error("Directory listing failed.", event.cause()); 56 | final Resource erroneousResource = new Resource() {{ 57 | // Set fields according to documentation in Resource class. 58 | name = path.getFileName().toString(); 59 | exists = false; 60 | error = rejected = invalid = true; 61 | errorMessage = invalidMessage = event.cause().getMessage(); 62 | }}; 63 | handler.handle(erroneousResource); 64 | } else { 65 | handler.handle((Resource) event.result()); 66 | } 67 | }); 68 | log.trace("Work delegated."); 69 | } 70 | 71 | private void listDirBlocking(Path path, int offset, int count, Promise promise) { 72 | // Prepare our result. 73 | final CollectionResource collection = new CollectionResource() {{ 74 | items = new ArrayList<>(128); 75 | }}; 76 | try (Stream source = Files.list(path)) { 77 | source.forEach(entry -> { 78 | String entryName = entry.getFileName().toString(); 79 | log.trace("Processing entry '{}'", entryName); 80 | // Create resource representing currently processed directory entry. 81 | final Resource resource; 82 | if (entryName.endsWith(S3_PATH_SEPARATOR)) { 83 | resource = new CollectionResource(); 84 | entryName = entryName.replace(S3_PATH_SEPARATOR, ""); 85 | } else { 86 | resource = new DocumentResource(); 87 | } 88 | resource.name = entryName; 89 | collection.items.add(resource); 90 | }); 91 | } catch (IOException e) { 92 | promise.fail(e); 93 | return; 94 | } 95 | Collections.sort(collection.items); 96 | // Don't know exactly what we do here now. Seems we check 'limit' for a range request. 97 | int n = count; 98 | if (n == -1) { 99 | n = collection.items.size(); 100 | } 101 | // Don't know exactly what we do here. But it seems we evaluate 'start' of a range request. 102 | if (offset > -1) { 103 | if (offset >= collection.items.size() || (offset + n) >= collection.items.size() || (offset == 0 && n == -1)) { 104 | promise.complete(collection); 105 | } else { 106 | collection.items = collection.items.subList(offset, offset + n); 107 | promise.complete(collection); 108 | } 109 | } else { 110 | // TODO: Resolve future 111 | // Previous implementation did nothing here. Why? Should we do something here? 112 | // See: "https://github.com/hiddenalpha/vertx-rest-storage/blob/v2.5.2/src/main/java/org/swisspush/reststorage/FileSystemStorage.java#L77" 113 | log.warn("May we should do something here. 
I've no idea why old implementation did nothing."); 114 | } 115 | } 116 | } 117 | -------------------------------------------------------------------------------- /src/main/java/org/swisspush/reststorage/util/FailedAsyncResult.java: -------------------------------------------------------------------------------- 1 | package org.swisspush.reststorage.util; 2 | 3 | import io.vertx.core.AsyncResult; 4 | 5 | public class FailedAsyncResult implements AsyncResult { 6 | private final Throwable cause; 7 | 8 | public FailedAsyncResult(Throwable cause) { 9 | this.cause = cause; 10 | } 11 | 12 | @Override 13 | public T result() { 14 | return null; 15 | } 16 | 17 | @Override 18 | public Throwable cause() { 19 | return cause; 20 | } 21 | 22 | @Override 23 | public boolean succeeded() { 24 | return false; 25 | } 26 | 27 | @Override 28 | public boolean failed() { 29 | return true; 30 | } 31 | } 32 | -------------------------------------------------------------------------------- /src/main/java/org/swisspush/reststorage/util/GZIPUtil.java: -------------------------------------------------------------------------------- 1 | package org.swisspush.reststorage.util; 2 | 3 | import io.vertx.core.AsyncResult; 4 | import io.vertx.core.Handler; 5 | import io.vertx.core.Vertx; 6 | import org.slf4j.Logger; 7 | 8 | import java.io.ByteArrayInputStream; 9 | import java.io.ByteArrayOutputStream; 10 | import java.io.IOException; 11 | import java.util.zip.GZIPInputStream; 12 | import java.util.zip.GZIPOutputStream; 13 | 14 | /** 15 | * Util class to compress and decompress resources using the gzip algorithm 16 | * 17 | * @author https://github.com/mcweba [Marc-Andre Weber] 18 | */ 19 | public class GZIPUtil { 20 | 21 | /** 22 | * Compress the uncompressed data with the gzip algorithm. When the compression is done, the resultHandler is called 23 | * with the compressed data as result. 24 | * 25 | * @param vertx vertx 26 | * @param log the logger 27 | * @param uncompressedData the data to compress 28 | * @param resultHandler the resultHandler is called when the compression is done 29 | */ 30 | public static void compressResource(Vertx vertx, Logger log, byte[] uncompressedData, Handler> resultHandler) { 31 | vertx.executeBlocking(future -> { 32 | ByteArrayOutputStream baos = new ByteArrayOutputStream(); 33 | 34 | try (GZIPOutputStream os = new GZIPOutputStream(baos)) { 35 | os.write(uncompressedData); 36 | } catch (IOException ioe) { 37 | log.debug("Unable to compress resource: {}", ioe.getMessage()); 38 | future.fail(ioe); 39 | // Error, exit 40 | return; 41 | } 42 | // Success 43 | future.complete(baos.toByteArray()); 44 | }, resultHandler); 45 | } 46 | 47 | /** 48 | * Decompress the compressed (gzip) data. When the decompression is done, the resultHandler is called 49 | * with the decompressed data as result. 
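 *
 * A compress/decompress round trip could look like this (illustrative sketch; the payload is an assumption):
 *
 *     byte[] original = "hello, world".getBytes(java.nio.charset.StandardCharsets.UTF_8);
 *     GZIPUtil.compressResource(vertx, log, original, compressed -> {
 *         if (compressed.succeeded()) {
 *             GZIPUtil.decompressResource(vertx, log, compressed.result(), decompressed -> {
 *                 // on success, decompressed.result() holds the original bytes again
 *             });
 *         }
 *     });
 *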
50 | * @param vertx vertx 51 | * @param log the logger 52 | * @param compressedData the data to decompress 53 | * @param resultHandler the resultHandler is called when the compression is done 54 | */ 55 | public static void decompressResource(Vertx vertx, Logger log, byte[] compressedData, Handler> resultHandler) { 56 | vertx.executeBlocking(future -> { 57 | byte[] buffer = new byte[1024]; 58 | ByteArrayOutputStream baos = new ByteArrayOutputStream(); 59 | try (ByteArrayInputStream bis = new ByteArrayInputStream(compressedData); 60 | GZIPInputStream gzipInputStream = new GZIPInputStream(bis)) { 61 | 62 | int bytes_read; 63 | 64 | while ((bytes_read = gzipInputStream.read(buffer)) > 0) { 65 | baos.write(buffer, 0, bytes_read); 66 | } 67 | 68 | gzipInputStream.close(); 69 | baos.close(); 70 | 71 | } catch (IOException ioe) { 72 | log.debug("Unable to decompress resource: {}", ioe.getMessage()); 73 | future.fail(ioe); 74 | // Error, exit 75 | return; 76 | } 77 | // Success 78 | future.complete(baos.toByteArray()); 79 | }, resultHandler); 80 | } 81 | } 82 | -------------------------------------------------------------------------------- /src/main/java/org/swisspush/reststorage/util/HttpRequestHeader.java: -------------------------------------------------------------------------------- 1 | package org.swisspush.reststorage.util; 2 | 3 | import io.vertx.core.MultiMap; 4 | 5 | /** 6 | * Enum for HTTP request headers used in vertx-rest-storage 7 | * 8 | * @author https://github.com/mcweba [Marc-Andre Weber] 9 | */ 10 | public enum HttpRequestHeader { 11 | 12 | ETAG_HEADER("Etag"), 13 | IF_NONE_MATCH_HEADER("if-none-match"), 14 | LOCK_HEADER("x-lock"), 15 | LOCK_MODE_HEADER("x-lock-mode"), 16 | LOCK_EXPIRE_AFTER_HEADER("x-lock-expire-after"), 17 | EXPIRE_AFTER_HEADER("x-expire-after"), 18 | IMPORTANCE_LEVEL_HEADER("x-importance-level"), 19 | MAX_EXPAND_RESOURCES_HEADER("x-max-expand-resources"), 20 | COMPRESS_HEADER("x-stored-compressed"), 21 | CONTENT_TYPE("Content-Type"), 22 | CONTENT_LENGTH("Content-Length"); 23 | 24 | private final String name; 25 | 26 | HttpRequestHeader(String name) { 27 | this.name = name; 28 | } 29 | 30 | public String getName() { 31 | return name; 32 | } 33 | 34 | public static boolean containsHeader(MultiMap headers, HttpRequestHeader httpRequestHeader){ 35 | if(headers == null){ 36 | return false; 37 | } 38 | return headers.contains(httpRequestHeader.getName()); 39 | } 40 | 41 | /** 42 | * Get the value of the provided {@link HttpRequestHeader} as Integer. 43 | *

 Returns null in the following cases:
 44 | * 45 | *
 46 | *   • headers are null
 47 | *   • headers does not contain httpRequestHeader
 48 | *   • httpRequestHeader is not a parsable Integer, i.e. an empty string, non-digit characters, or a number bigger than Integer allows
 49 | *
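 *
 * For example (illustrative; assumes an incoming Vert.x request):
 *
 *     Integer expireAfter = HttpRequestHeader.getInteger(request.headers(), HttpRequestHeader.EXPIRE_AFTER_HEADER);
 *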
50 | * 51 | * @param headers the http request headers 52 | * @param httpRequestHeader the http request header to get the value from 53 | * @return a Integer representing the value of the httpRequestHeader or null 54 | */ 55 | public static Integer getInteger(MultiMap headers, HttpRequestHeader httpRequestHeader) { 56 | return getInteger(headers, httpRequestHeader, null); 57 | } 58 | 59 | /** 60 | * Get the value of the provided {@link HttpRequestHeader} or a default value as Integer. 61 | *

 Returns the default value in the following cases:
 62 | * 63 | *
 64 | *   • headers are null
 65 | *   • headers does not contain httpRequestHeader
 66 | *   • httpRequestHeader is not a parsable Integer, i.e. an empty string, non-digit characters, or a number bigger than Integer allows
 67 | *
68 | * 69 | * @param headers the http request headers 70 | * @param httpRequestHeader the http request header to get the value from 71 | * @param defaultValue the default value to return when no value from httpRequestHeader is extractable 72 | * @return a Integer representing the value of the httpRequestHeader or the default value 73 | */ 74 | public static Integer getInteger(MultiMap headers, HttpRequestHeader httpRequestHeader, Integer defaultValue) { 75 | String headerValue = null; 76 | if(headers != null) { 77 | headerValue = headers.get(httpRequestHeader.getName()); 78 | } 79 | 80 | try { 81 | return Integer.parseInt(headerValue); 82 | } catch (Exception e) { 83 | return defaultValue; 84 | } 85 | } 86 | 87 | /** 88 | * Get the value of the provided {@link HttpRequestHeader} as Long. 89 | *

 Returns null in the following cases:
 90 | * 91 | *
 92 | *   • headers are null
 93 | *   • headers does not contain httpRequestHeader
 94 | *   • httpRequestHeader is not a parsable Long, i.e. an empty string, non-digit characters, or a number bigger than Long allows
 95 | *
96 | * 97 | * @param headers the http request headers 98 | * @param httpRequestHeader the http request header to get the value from 99 | * @return a Long representing the value of the httpRequestHeader or null 100 | */ 101 | public static Long getLong(MultiMap headers, HttpRequestHeader httpRequestHeader) { 102 | return getLong(headers, httpRequestHeader, null); 103 | } 104 | 105 | /** 106 | * Get the value of the provided {@link HttpRequestHeader} or a default value as Long. 107 | *

 Returns the default value in the following cases:
 108 | * 109 | *
 110 | *   • headers are null
 111 | *   • headers does not contain httpRequestHeader
 112 | *   • httpRequestHeader is not a parsable Long, i.e. an empty string, non-digit characters, or a number bigger than Long allows
 113 | *
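 *
 * For example (illustrative; the 300 seconds fallback is an assumption):
 *
 *     Long lockExpire = HttpRequestHeader.getLong(request.headers(), HttpRequestHeader.LOCK_EXPIRE_AFTER_HEADER, 300L);
 *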
114 | * 115 | * @param headers the http request headers 116 | * @param httpRequestHeader the http request header to get the value from 117 | * @param defaultValue the default value to return when no value from httpRequestHeader is extractable 118 | * @return a Long representing the value of the httpRequestHeader or the default value 119 | */ 120 | public static Long getLong(MultiMap headers, HttpRequestHeader httpRequestHeader, Long defaultValue) { 121 | String headerValue = null; 122 | if(headers != null) { 123 | headerValue = headers.get(httpRequestHeader.getName()); 124 | } 125 | 126 | try { 127 | return Long.parseLong(headerValue); 128 | } catch (Exception e) { 129 | return defaultValue; 130 | } 131 | } 132 | 133 | /** 134 | * Get the value of the provided {@link HttpRequestHeader} as String. 135 | *

 Returns null in the following cases:
 136 | * 137 | *
 138 | *   • headers are null
 139 | *   • headers does not contain httpRequestHeader
 140 | *
141 | * 142 | * @param headers the http request headers 143 | * @param httpRequestHeader the http request header to get the value from 144 | * @return a String representing the value of the httpRequestHeader or null 145 | */ 146 | public static String getString(MultiMap headers, HttpRequestHeader httpRequestHeader) { 147 | if(headers == null) { 148 | return null; 149 | } 150 | return headers.get(httpRequestHeader.getName()); 151 | } 152 | } 153 | -------------------------------------------------------------------------------- /src/main/java/org/swisspush/reststorage/util/HttpRequestParam.java: -------------------------------------------------------------------------------- 1 | package org.swisspush.reststorage.util; 2 | 3 | import io.vertx.core.MultiMap; 4 | 5 | /** 6 | * Enum for HTTP request params used in vertx-rest-storage 7 | * 8 | * @author https://github.com/mcweba [Marc-Andre Weber] 9 | */ 10 | public enum HttpRequestParam { 11 | 12 | RECURSIVE_PARAMETER("recursive"), 13 | STORAGE_EXPAND_PARAMETER("storageExpand"), 14 | LIMIT_PARAMETER("limit"), 15 | OFFSET_PARAMETER("offset"); 16 | 17 | private final String name; 18 | 19 | HttpRequestParam(String name) { 20 | this.name = name; 21 | } 22 | 23 | public String getName() { 24 | return name; 25 | } 26 | 27 | public static boolean containsParam(MultiMap params, HttpRequestParam httpRequestParam){ 28 | if(params == null){ 29 | return false; 30 | } 31 | return params.contains(httpRequestParam.getName()); 32 | } 33 | 34 | /** 35 | * Get the value of the provided {@link HttpRequestParam} as String. 36 | *

 Returns null in the following cases:
 37 | * 38 | *
 39 | *   • params are null
 40 | *   • params does not contain httpRequestParam
 41 | *
42 | * 43 | * @param params the http request params 44 | * @param httpRequestParam the http request param to get the value from 45 | * @return a String representing the value of the httpRequestParam or null 46 | */ 47 | public static String getString(MultiMap params, HttpRequestParam httpRequestParam) { 48 | if(params == null) { 49 | return null; 50 | } 51 | return params.get(httpRequestParam.getName()); 52 | } 53 | 54 | /** 55 | * Get the value of the provided {@link HttpRequestParam} as boolean. 56 | *

 Returns false in the following cases:
 57 | * 58 | *
 59 | *   • params are null
 60 | *   • params does not contain httpRequestParam
 61 | *   • value of httpRequestParam does not equal "true" ignoring the case
 62 | *
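 *
 * For example (illustrative; assumes an incoming Vert.x request):
 *
 *     boolean recursive = HttpRequestParam.getBoolean(request.params(), HttpRequestParam.RECURSIVE_PARAMETER);
 *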
63 | * 64 | * @param params the http request params 65 | * @param httpRequestParam the http request param to get the value from 66 | * @return a boolean representing the value of the httpRequestParam 67 | */ 68 | public static boolean getBoolean(MultiMap params, HttpRequestParam httpRequestParam) { 69 | if(params == null) { 70 | return false; 71 | } 72 | String paramValue = params.get(httpRequestParam.getName()); 73 | return "true".equalsIgnoreCase(paramValue); 74 | } 75 | } 76 | -------------------------------------------------------------------------------- /src/main/java/org/swisspush/reststorage/util/LockMode.java: -------------------------------------------------------------------------------- 1 | package org.swisspush.reststorage.util; 2 | 3 | /** 4 | * Enum for all implemented 5 | * Lock Modes. 6 | * 7 | * @author https://github.com/ljucam [Mario Ljuca] 8 | * 9 | */ 10 | public enum LockMode { 11 | SILENT("silent"), 12 | REJECT("reject"); 13 | 14 | private final String lockMode; 15 | 16 | LockMode(String lockMode) { 17 | this.lockMode = lockMode; 18 | } 19 | 20 | public String text() { 21 | return lockMode; 22 | } 23 | } 24 | -------------------------------------------------------------------------------- /src/main/java/org/swisspush/reststorage/util/ResourceNameUtil.java: -------------------------------------------------------------------------------- 1 | package org.swisspush.reststorage.util; 2 | 3 | import java.util.List; 4 | 5 | /** 6 | *

7 | * Utility class providing handy methods to handle resource names. 8 | *

9 | * 10 | * @author https://github.com/mcweba [Marc-Andre Weber] 11 | */ 12 | public final class ResourceNameUtil { 13 | 14 | public static final String COLON_REPLACEMENT = "§"; 15 | public static final String SEMICOLON_REPLACEMENT = "°"; 16 | 17 | private ResourceNameUtil() { 18 | // prevent instantiation 19 | } 20 | 21 | /** 22 | *

 23 | * Replaces all colons in the provided resourceName with {@link ResourceNameUtil#COLON_REPLACEMENT} and all semicolons with {@link ResourceNameUtil#SEMICOLON_REPLACEMENT}. 24 | *

25 | * 26 | *
27 |      * ResourceNameUtil.replaceColonsAndSemiColons(null)          = null
28 |      * ResourceNameUtil.replaceColonsAndSemiColons("")            = ""
29 |      * ResourceNameUtil.replaceColonsAndSemiColons("bob")         = "bob"
30 |      * ResourceNameUtil.replaceColonsAndSemiColons("bob_:_;_alice") = "bob_§_°_alice"
31 |      * 
32 | * 33 | * @param resourceName the String to replace the colons and semicolons, may be null 34 | * @return a string with the replaced values 35 | */ 36 | public static String replaceColonsAndSemiColons(String resourceName){ 37 | if(resourceName == null){ 38 | return null; 39 | } 40 | return resourceName.replaceAll(":", COLON_REPLACEMENT).replaceAll(";", SEMICOLON_REPLACEMENT); 41 | } 42 | 43 | /** 44 | *

45 | * Replaces all colons and semicolons in all strings of the provided resourceNames with {@link ResourceNameUtil#COLON_REPLACEMENT} and {@link ResourceNameUtil#SEMICOLON_REPLACEMENT}. 46 | *
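 *
 * For example (illustrative), the list is modified in place:
 *
 *     List<String> names = new ArrayList<>(List.of("bob:alice", "x;y"));
 *     ResourceNameUtil.replaceColonsAndSemiColonsInList(names);
 *     // names now contains ["bob§alice", "x°y"]
 *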

47 | * 48 | * @param resourceNames the list of strings to replace the colons and semicolons, may be null 49 | */ 50 | public static void replaceColonsAndSemiColonsInList(List resourceNames){ 51 | if(resourceNames != null){ 52 | resourceNames.replaceAll(ResourceNameUtil::replaceColonsAndSemiColons); 53 | } 54 | } 55 | 56 | /** 57 | *

58 | * Resets the replaced colons and semicolons in the provided resourceName with a colon or semicolon. 59 | *

60 | * 61 | *
62 |      * ResourceNameUtil.resetReplacedColonsAndSemiColons(null)          = null
63 |      * ResourceNameUtil.resetReplacedColonsAndSemiColons("")            = ""
64 |      * ResourceNameUtil.resetReplacedColonsAndSemiColons("bob")         = "bob"
65 |      * ResourceNameUtil.resetReplacedColonsAndSemiColons("bob_§_°_alice") = "bob_:_;_alice"
66 |      * 
 67 | * 68 | * @param resourceName the String in which to reset the replaced colons and semicolons, may be null 69 | * @return a string with the reset values 70 | */ 71 | public static String resetReplacedColonsAndSemiColons(String resourceName){ 72 | if(resourceName == null){ 73 | return null; 74 | } 75 | return resourceName.replaceAll(COLON_REPLACEMENT, ":").replaceAll(SEMICOLON_REPLACEMENT, ";"); 76 | } 77 | 78 | /** 79 | *

80 | * Resets the replaced colons and semicolons in all strings of the provided resourceNames with a colon or semicolon. 81 | *

82 | * 83 | * @param resourceNames the list of strings to reset the colons and semicolons, may be null 84 | */ 85 | public static void resetReplacedColonsAndSemiColonsInList(List resourceNames){ 86 | if(resourceNames != null){ 87 | resourceNames.replaceAll(ResourceNameUtil::resetReplacedColonsAndSemiColons); 88 | } 89 | } 90 | } 91 | -------------------------------------------------------------------------------- /src/main/java/org/swisspush/reststorage/util/ResourcesUtils.java: -------------------------------------------------------------------------------- 1 | package org.swisspush.reststorage.util; 2 | 3 | import com.google.common.base.Charsets; 4 | import com.google.common.io.Resources; 5 | import org.slf4j.Logger; 6 | import org.slf4j.LoggerFactory; 7 | 8 | import java.net.URL; 9 | 10 | /** 11 | *

12 | * Utility class providing handy methods to deal with Resources. 13 | *

14 | * 15 | * @author https://github.com/mcweba [Marc-Andre Weber] 16 | */ 17 | public class ResourcesUtils { 18 | 19 | private static final Logger log = LoggerFactory.getLogger(ResourcesUtils.class); 20 | 21 | private ResourcesUtils() { 22 | // prevent instantiation 23 | } 24 | 25 | /** 26 | *

 27 | * Loads the resource with the provided name from the classpath. When param {@code exceptionWhenNotFound} 28 | * is set to true, a {@link RuntimeException} is thrown when the resource cannot be loaded. 29 | *
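 *
 * For example (illustrative; fails fast when the script is missing from the classpath):
 *
 *     String releaseLockScript = ResourcesUtils.loadResource("lock_release.lua", true);
 *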

30 | * 31 | * @param resourceName the name of the resource to load 32 | * @param exceptionWhenNotFound throw a {@link RuntimeException} when the resource could not be loaded 33 | * @throws RuntimeException when {@code exceptionWhenNotFound} is set to true and resource cannot be loaded 34 | * @return The content of the resource or null 35 | */ 36 | public static String loadResource(String resourceName, boolean exceptionWhenNotFound) { 37 | try { 38 | URL url = Resources.getResource(resourceName); 39 | return Resources.toString(url, Charsets.UTF_8); 40 | } catch (Exception e) { 41 | log.error("Error loading resource '{}'", resourceName, e); 42 | if(exceptionWhenNotFound){ 43 | throw new RuntimeException("Error loading required resource '"+resourceName+"'"); 44 | } 45 | return null; 46 | } 47 | } 48 | } 49 | -------------------------------------------------------------------------------- /src/main/java/org/swisspush/reststorage/util/Result.java: -------------------------------------------------------------------------------- 1 | package org.swisspush.reststorage.util; 2 | 3 | public class Result { 4 | 5 | private final Type type; 6 | private final TOk okValue; 7 | private final TErr errValue; 8 | 9 | public static Result ok(TOk value) { 10 | return new Result<>(Type.OK, value, null); 11 | } 12 | 13 | public static Result err(TErr value) { 14 | return new Result<>(Type.ERROR, null, value); 15 | } 16 | 17 | private Result(Type type, TOk okValue, TErr errValue) { 18 | if (okValue != null && errValue != null) { 19 | throw new IllegalStateException("A result cannot be ok and error at the same time"); 20 | } 21 | this.type = type; 22 | this.okValue = okValue; 23 | this.errValue = errValue; 24 | } 25 | 26 | /** 27 | * See https://doc.rust-lang.org/stable/std/result/enum.Result.html#method.unwrap 28 | * Returns the ok result or throws if this result is in error state. 29 | */ 30 | public TOk unwrap() throws RuntimeException { 31 | if (isOk()) { 32 | return getOk(); 33 | } else { 34 | throw new RuntimeException("Got an error result. Error value is '" + getErr() + "'"); 35 | } 36 | } 37 | 38 | public boolean isOk() { 39 | return this.type == Type.OK; 40 | } 41 | 42 | public TOk getOk() throws IllegalStateException { 43 | if (!isOk()) { 44 | throw new IllegalStateException("Cannot call this method for results in error state"); 45 | } 46 | return okValue; 47 | } 48 | 49 | public boolean isErr() { 50 | return !isOk(); 51 | } 52 | 53 | public TErr getErr() throws IllegalStateException { 54 | if (isOk()) { 55 | throw new IllegalStateException("Cannot call this method for results in ok state"); 56 | } 57 | return errValue; 58 | } 59 | 60 | @Override 61 | public boolean equals(Object o) { 62 | if (this == o) return true; 63 | if (o == null || getClass() != o.getClass()) return false; 64 | 65 | Result result = (Result) o; 66 | 67 | if (type != result.type) return false; 68 | if (okValue != null ? !okValue.equals(result.okValue) : result.okValue != null) return false; 69 | return errValue != null ? errValue.equals(result.errValue) : result.errValue == null; 70 | } 71 | 72 | @Override 73 | public int hashCode() { 74 | int result = type.hashCode(); 75 | result = 31 * result + (okValue != null ? okValue.hashCode() : 0); 76 | result = 31 * result + (errValue != null ? 
errValue.hashCode() : 0); 77 | return result; 78 | } 79 | 80 | @Override 81 | public String toString() { 82 | if (isOk()) { 83 | return "ResultOk{" + okValue + '}'; 84 | } else { 85 | return "ResultErr{" + errValue + '}'; 86 | } 87 | } 88 | 89 | private enum Type { 90 | OK, 91 | ERROR 92 | } 93 | } 94 | -------------------------------------------------------------------------------- /src/main/java/org/swisspush/reststorage/util/StatusCode.java: -------------------------------------------------------------------------------- 1 | package org.swisspush.reststorage.util; 2 | 3 | /** 4 | * Enum for HTTP status codes 5 | * 6 | * @author https://github.com/mcweba [Marc-Andre Weber] 7 | */ 8 | public enum StatusCode { 9 | OK(200, "OK"), 10 | FOUND(302, "Found"), 11 | NOT_MODIFIED(304, "Not Modified"), 12 | BAD_REQUEST(400, "Bad Request"), 13 | UNAUTHORIZED(401, "Unauthorized"), 14 | NOT_FOUND(404, "Not Found"), 15 | METHOD_NOT_ALLOWED(405, "Method Not Allowed"), 16 | PAYLOAD_TOO_LARGE(413, "Payload Too Large"), 17 | INTERNAL_SERVER_ERROR(500, "Internal Server Error"), 18 | INSUFFICIENT_STORAGE(507, "Insufficient Storage"), 19 | CONFLICT(409, "Conflict"); 20 | 21 | private final int statusCode; 22 | private final String statusMessage; 23 | 24 | StatusCode(int statusCode, String statusMessage) { 25 | this.statusCode = statusCode; 26 | this.statusMessage = statusMessage; 27 | } 28 | 29 | public int getStatusCode() { 30 | return statusCode; 31 | } 32 | 33 | public String getStatusMessage() { 34 | return statusMessage; 35 | } 36 | 37 | @Override 38 | public String toString() { 39 | return statusCode + " " + statusMessage; 40 | } 41 | } -------------------------------------------------------------------------------- /src/main/resources/cleanup.lua: -------------------------------------------------------------------------------- 1 | local resourcesPrefix = ARGV[1] 2 | local collectionsPrefix = ARGV[2] 3 | local deltaResourcesPrefix = ARGV[3] 4 | local deltaEtagsPrefix = ARGV[4] 5 | local expirableSet = ARGV[5] 6 | local minscore = tonumber(ARGV[6]) 7 | local maxscore = tonumber(ARGV[7]) 8 | local confirmCollectionDelete = ARGV[8] 9 | local deleteRecursive = ARGV[9] 10 | local now = tonumber(ARGV[10]) 11 | local bulksize = tonumber(ARGV[11]) 12 | 13 | -- Important: The ARGV-Array is used again in the included del.lua script 14 | -- (see this funny comment with the percent sign below and Java-Method 15 | -- org.swisspush.reststorage.redis.RedisStorage.LuaScriptState.composeLuaScript) 16 | -- we need to initialize all parameters for del.lua here - otherwise we can have side effects 17 | -- See open issue https://github.com/swisspush/vertx-rest-storage/issues/83 18 | ARGV[10] = '' 19 | ARGV[11] = '' 20 | ARGV[12] = '' 21 | ARGV[13] = '' 22 | 23 | local resourcePrefixLength = string.len(resourcesPrefix) 24 | local counter = 0 25 | local KEYS = {} 26 | local resourcesToClean = redis.call('zrangebyscore',expirableSet,minscore,now,'limit',0,bulksize) 27 | for key,value in pairs(resourcesToClean) do 28 | redis.log(redis.LOG_NOTICE, "cleanup resource: "..value) 29 | KEYS[1] = string.sub(value, resourcePrefixLength+1, string.len(value)) 30 | 31 | --%(delscript) 32 | 33 | counter = counter + 1 34 | end 35 | return counter -------------------------------------------------------------------------------- /src/main/resources/del.lua: -------------------------------------------------------------------------------- 1 | local sep = ":"; 2 | local toDelete = KEYS[1] 3 | 4 | 5 | -- Important: This Script here is includes in 
cleanup.lua. The ARGV-Array is used again in this including script 6 | -- Remember to harmonize the cleanup.lua ARGV parameters with ordering, format and purpose here in THIS script 7 | -- Take care when using "return" in this script, see https://github.com/swisspush/vertx-rest-storage/issues/83 8 | local resourcesPrefix = ARGV[1] 9 | local collectionsPrefix = ARGV[2] 10 | local deltaResourcesPrefix = ARGV[3] 11 | local deltaEtagsPrefix = ARGV[4] 12 | local expirableSet = ARGV[5] 13 | local minscore = tonumber(ARGV[6]) 14 | local maxscore = tonumber(ARGV[7]) 15 | local confirmCollectionDelete = ARGV[8] 16 | local deleteRecursive = ARGV[9] 17 | local lockPrefix = ARGV[10] 18 | local lockOwner = ARGV[11] 19 | local lockMode = ARGV[12] 20 | local lockExpire = ARGV[13] 21 | 22 | local function deleteChildrenAndItself(path) 23 | if redis.call('exists',resourcesPrefix..path) == 1 then 24 | redis.log(redis.LOG_NOTICE, "del: "..resourcesPrefix..path) 25 | redis.call('zrem', expirableSet, resourcesPrefix..path) 26 | redis.call('del', resourcesPrefix..path) 27 | redis.call('del', deltaResourcesPrefix..path) 28 | redis.call('del', deltaEtagsPrefix..path) 29 | redis.call('del', lockPrefix..path) 30 | elseif redis.call('exists',collectionsPrefix..path) == 1 then 31 | local members = redis.call('zrangebyscore',collectionsPrefix..path,minscore,maxscore) 32 | for key,value in pairs(members) do 33 | local pathToDelete = path..":"..value 34 | deleteChildrenAndItself(pathToDelete) 35 | end 36 | redis.call('del', collectionsPrefix..path) 37 | else 38 | redis.log(redis.LOG_WARNING, "can't delete resource: "..path) 39 | end 40 | end 41 | 42 | local setLockIfClaimed = function() 43 | if lockOwner ~= nil and lockOwner ~= '' then 44 | redis.call('hmset', lockPrefix..KEYS[1], 'owner', lockOwner, 'mode', lockMode) 45 | redis.call('pexpireat',lockPrefix..KEYS[1], lockExpire) 46 | end 47 | end 48 | 49 | local scriptState = "notFound" 50 | 51 | local isResource = redis.call('exists',resourcesPrefix..toDelete) 52 | local isCollection = redis.call('exists',collectionsPrefix..toDelete) 53 | 54 | if confirmCollectionDelete == "true" and deleteRecursive == "false" and isCollection == 1 then 55 | redis.log(redis.LOG_NOTICE, "delete on collection requires recursive=true parameter") 56 | return "notEmpty" 57 | end 58 | 59 | if isResource == 1 or isCollection == 1 then 60 | 61 | if isResource and redis.call('exists',lockPrefix..toDelete) == 1 then 62 | local result = redis.call('hmget',lockPrefix..KEYS[1],'owner','mode') 63 | if result[1] ~= lockOwner then 64 | return result[2] 65 | end 66 | end 67 | 68 | local score = tonumber(redis.call('zscore',expirableSet,resourcesPrefix..toDelete)) 69 | local expired = 0 70 | if score ~= nil and minscore > score then 71 | redis.log(redis.LOG_NOTICE, "expired: "..resourcesPrefix..toDelete) 72 | expired = 1 73 | end 74 | 75 | if expired == 0 then 76 | 77 | -- REMOVE THE CHILDREN 78 | deleteChildrenAndItself(toDelete) 79 | 80 | if redis.call('zcount', collectionsPrefix..toDelete,minscore,maxscore) == 0 then 81 | 82 | -- REMOVE THE ORPHAN PARENTS 83 | local path = toDelete..sep 84 | local nodes = {path:match((path:gsub("[^"..sep.."]*"..sep, "([^"..sep.."]*)"..sep)))} 85 | local pathDepth=0 86 | local pathState 87 | local nodetable = {} 88 | local pathtable = {} 89 | for key,value in pairs(nodes) do 90 | if pathState == nil then 91 | pathState = value 92 | else 93 | pathState = pathState..sep..value 94 | end 95 | redis.log(redis.LOG_NOTICE, "add path: "..pathDepth.." 
"..pathState) 96 | pathtable[pathDepth] = pathState 97 | nodetable[pathDepth] = value 98 | pathDepth = pathDepth + 1 99 | end 100 | 101 | table.remove(pathtable,pathDepth) 102 | 103 | local orphanParents = 1 104 | local parentCount = redis.call('zcount', collectionsPrefix..pathtable[pathDepth-2],minscore,maxscore) 105 | redis.log(redis.LOG_NOTICE, "parentCount: "..parentCount) 106 | redis.log(redis.LOG_NOTICE, "pathDepth: "..pathDepth) 107 | if pathDepth > 1 and parentCount > 1 then 108 | orphanParents = 0 109 | end 110 | 111 | redis.log(redis.LOG_NOTICE, "orphanParents: "..orphanParents) 112 | 113 | local directParent = 1 114 | local stopDel = 0 115 | for pathDepthState = pathDepth, 2, -1 do 116 | redis.log(redis.LOG_NOTICE, "pathState: "..pathtable[pathDepthState-2].." "..pathDepthState) 117 | if orphanParents == 1 and stopDel == 0 then 118 | if redis.call('zcount', collectionsPrefix..pathtable[pathDepthState-2],0,maxscore) > 1 then 119 | stopDel = 1 120 | end 121 | redis.log(redis.LOG_NOTICE, "zrem: "..collectionsPrefix..pathtable[pathDepthState-2].." "..nodetable[pathDepthState-1]) 122 | redis.call('zrem', collectionsPrefix..pathtable[pathDepthState-2], nodetable[pathDepthState-1]) 123 | end 124 | if directParent == 1 then 125 | redis.log(redis.LOG_NOTICE, "remove direct parent") 126 | redis.log(redis.LOG_NOTICE, "zrem: "..collectionsPrefix..pathtable[pathDepth-2].." "..nodetable[pathDepthState-1]) 127 | redis.call('zrem', collectionsPrefix..pathtable[pathDepthState-2], nodetable[pathDepthState-1]) 128 | directParent = 0 129 | end 130 | end 131 | end 132 | 133 | if isResource then 134 | setLockIfClaimed() 135 | end 136 | 137 | scriptState = "deleted" 138 | end 139 | else 140 | redis.log(redis.LOG_WARNING, "resource "..toDelete.." not present, will remove possible entry in expirableSet anyway") 141 | -- remove orphan entry in the expirableSet anyway (if there is actually one) 142 | redis.call('zrem', expirableSet, resourcesPrefix..toDelete) 143 | end 144 | 145 | return scriptState -------------------------------------------------------------------------------- /src/main/resources/get.lua: -------------------------------------------------------------------------------- 1 | local sep = ":" 2 | local path = KEYS[1] 3 | local parentPathElement 4 | local resourcesPrefix = ARGV[1] 5 | local collectionsPrefix = ARGV[2] 6 | local expirableSet = ARGV[3] 7 | local timestamp = tonumber(ARGV[4]) 8 | local maxtime = tonumber(ARGV[5]) 9 | local offset = tonumber(ARGV[6]) 10 | local count = tonumber(ARGV[7]) 11 | local etag = ARGV[8] 12 | 13 | local function not_empty(x) 14 | return (type(x) == "table") and (not x.err) and (#x ~= 0) 15 | end 16 | 17 | local function string_not_empty(s) 18 | return s ~= nil and s ~= '' 19 | end 20 | 21 | if redis.call('exists',resourcesPrefix..path) == 1 then 22 | local score = tonumber(redis.call('zscore',expirableSet,resourcesPrefix..path)) 23 | if score ~= nil and score < timestamp then 24 | return "notFound" 25 | else 26 | local result = redis.call('hmget',resourcesPrefix..path,'resource','etag','compressed') 27 | if not_empty(result) then 28 | if string_not_empty(etag) then 29 | local etagStorage = result[2] 30 | if etagStorage == etag then 31 | return "notModified" 32 | end 33 | end 34 | table.insert(result, 1, "TYPE_RESOURCE") 35 | return result 36 | else 37 | return "notFound" 38 | end 39 | end 40 | elseif redis.call('exists',collectionsPrefix..path) == 1 then 41 | local members = {} 42 | if offset ~= nil and count ~= nil and offset > -1 then 43 | members = 
redis.call('zrangebyscore',collectionsPrefix..path, timestamp, maxtime,'limit',offset, count) 44 | else 45 | members = redis.call('zrangebyscore',collectionsPrefix..path, timestamp, maxtime) 46 | end 47 | local children = {} 48 | table.insert(children, 1, "TYPE_COLLECTION") 49 | for key,value in pairs(members) do 50 | local childPath = collectionsPrefix..path..sep..value 51 | if redis.call('type', childPath)["ok"] == "zset" then 52 | table.insert(children, value..sep) 53 | else 54 | table.insert(children, value) 55 | end 56 | end 57 | return children 58 | else 59 | return "notFound" 60 | end -------------------------------------------------------------------------------- /src/main/resources/lock_release.lua: -------------------------------------------------------------------------------- 1 | local lockKey = KEYS[1] 2 | local token = ARGV[1] 3 | 4 | if redis.call("get", lockKey) == token then 5 | return redis.call("del", lockKey) 6 | else 7 | return 0 8 | end -------------------------------------------------------------------------------- /src/main/resources/mod.json: -------------------------------------------------------------------------------- 1 | { 2 | "main": "org.swisspush.reststorage.RestStorageMod" 3 | } 4 | -------------------------------------------------------------------------------- /src/main/resources/put.lua: -------------------------------------------------------------------------------- 1 | local sep = ":" 2 | local path = KEYS[1]..sep 3 | local resourcesPrefix = ARGV[1] 4 | local collectionsPrefix = ARGV[2] 5 | local expirableSet = ARGV[3] 6 | local merge = ARGV[4] 7 | local expiration = tonumber(ARGV[5]) 8 | local maxexpiration = tonumber(ARGV[6]) 9 | local resourceValue = ARGV[7] 10 | local resourceHash = ARGV[8] 11 | local lockPrefix = ARGV[9] 12 | local lockOwner = ARGV[10] 13 | local lockMode = ARGV[11] 14 | local lockExpire = ARGV[12] 15 | local compress = tonumber(ARGV[13]) 16 | 17 | if redis.call('exists',collectionsPrefix..KEYS[1]) == 1 then 18 | return "existingCollection" 19 | end 20 | 21 | if redis.call('exists',lockPrefix..KEYS[1]) == 1 then 22 | local result = redis.call('hmget',lockPrefix..KEYS[1],'owner','mode') 23 | if result[1] ~= lockOwner then 24 | return result[2] 25 | end 26 | end 27 | 28 | local setLockIfClaimed = function() 29 | if lockOwner ~= nil and lockOwner ~= '' then 30 | redis.call('hmset', lockPrefix..KEYS[1], 'owner', lockOwner, 'mode', lockMode) 31 | redis.call('pexpireat',lockPrefix..KEYS[1], lockExpire) 32 | end 33 | end 34 | 35 | local compressionModeNotChanged = function(doCompress) 36 | local isCompressed = redis.call('hexists',resourcesPrefix..KEYS[1],'compressed') 37 | return doCompress == isCompressed 38 | end 39 | 40 | if redis.call('exists',resourcesPrefix..KEYS[1]) == 1 then 41 | local etag = redis.call('hget',resourcesPrefix..KEYS[1],'etag') 42 | if etag == resourceHash and expiration == maxexpiration and compressionModeNotChanged(compress) then 43 | setLockIfClaimed() 44 | return "notModified"; 45 | end 46 | end 47 | 48 | local not_empty = function(x) 49 | return (type(x) == "table") and (not x.err) and (#x ~= 0) 50 | end 51 | 52 | local pathState 53 | local collections = {} 54 | local nodes = {path:match((path:gsub("[^"..sep.."]*"..sep, "([^"..sep.."]*)"..sep)))} 55 | 56 | for key,value in pairs(nodes) do 57 | if pathState == nil then 58 | pathState = value 59 | else 60 | collections[pathState] = value 61 | pathState = pathState..sep..value 62 | end 63 | redis.log(redis.LOG_NOTICE, "pathState: "..resourcesPrefix..pathState) 
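-- Note: pathState grows by one segment per iteration, so the exists() check just below runs
-- once for every ancestor path of KEYS[1]; if an ancestor is already stored as a resource
-- (and is not the target itself) the script aborts with "existingResource", so nothing can
-- be created underneath an existing resource.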
64 | redis.log(redis.LOG_NOTICE, "path: "..resourcesPrefix..KEYS[1]) 65 | if redis.call('exists',resourcesPrefix..pathState) == 1 and resourcesPrefix..pathState ~= resourcesPrefix..KEYS[1] then 66 | return "existingResource".." "..resourcesPrefix..pathState 67 | end 68 | end 69 | for key,value in pairs(collections) do 70 | local collectionKey = collectionsPrefix..key 71 | local actualExpiration = expiration 72 | local contentMax = redis.call('zrange',collectionKey..sep..value,-1,-1, "withscores")[2] 73 | if contentMax ~= nil and contentMax ~= '' then 74 | contentMax = tonumber(contentMax) 75 | if contentMax > actualExpiration then 76 | actualExpiration = contentMax 77 | end 78 | end 79 | redis.log(redis.LOG_NOTICE, "zadd: "..collectionKey.." "..actualExpiration.." "..value) 80 | redis.call('zadd',collectionKey,actualExpiration,value) 81 | end 82 | redis.log(redis.LOG_NOTICE, "merge: "..merge) 83 | 84 | if merge == "true" then 85 | local s = redis.call('hget',resourcesPrefix..KEYS[1],'resource') 86 | redis.log(redis.LOG_NOTICE, "merge: "..tostring(s).." "..resourceValue) 87 | if s then 88 | s = cjson.decode(s) 89 | for k,v in pairs(cjson.decode(resourceValue)) do 90 | if v == cjson.null then s[k] = nil else s[k] = v end 91 | end 92 | resourceValue = cjson.encode(s) 93 | end 94 | end 95 | 96 | redis.log(redis.LOG_NOTICE, "update: "..resourcesPrefix..KEYS[1]) 97 | if compress == 1 then 98 | redis.call('hmset',resourcesPrefix..KEYS[1],'resource',resourceValue,'etag',resourceHash,'compressed',1) 99 | else 100 | redis.call('hmset',resourcesPrefix..KEYS[1],'resource',resourceValue,'etag',resourceHash) 101 | redis.call('hdel',resourcesPrefix..KEYS[1],'compressed') 102 | end 103 | 104 | if expiration ~= maxexpiration then 105 | redis.log(redis.LOG_NOTICE, "zadd: "..expirableSet.." "..expiration.." "..resourcesPrefix..KEYS[1]) 106 | redis.call('zadd',expirableSet,expiration,resourcesPrefix..KEYS[1]) 107 | elseif expiration == maxexpiration then 108 | redis.log(redis.LOG_NOTICE, "zrem: "..expirableSet.." 
"..resourcesPrefix..KEYS[1]) 109 | redis.call('zrem', expirableSet, resourcesPrefix..KEYS[1]) 110 | end 111 | 112 | setLockIfClaimed() 113 | 114 | return "OK"; -------------------------------------------------------------------------------- /src/main/resources/storageExpand.lua: -------------------------------------------------------------------------------- 1 | -- -------------------------------------------------------------------------------------------- 2 | -- Copyright 2014 by Swiss Post, Information Technology Services 3 | -- -------------------------------------------------------------------------------------------- 4 | -- $Id$ 5 | -- -------------------------------------------------------------------------------------------- 6 | 7 | local sep = ":" 8 | local path = KEYS[1] 9 | local parentPathElement 10 | local resourcesPrefix = ARGV[1] 11 | local collectionsPrefix = ARGV[2] 12 | local expirableSet = ARGV[3] 13 | local timestamp = tonumber(ARGV[4]) 14 | local maxtime = tonumber(ARGV[5]) 15 | local subResources = ARGV[6] 16 | local subResourcesCount = tonumber(ARGV[7]) 17 | 18 | local function splitToTable(divider,str) 19 | if (divider=='') then return false end 20 | local pos,arr = 0,{} 21 | for st,sp in function() return string.find(str,divider,pos,true) end do 22 | table.insert(arr,string.sub(str,pos,st-1)) 23 | pos = sp + 1 24 | end 25 | table.insert(arr,string.sub(str,pos)) 26 | return arr 27 | end 28 | 29 | local function isCollection(resName) 30 | if(string.find(resName, "/", -1) ~= nil) then 31 | return true 32 | end 33 | return false 34 | end 35 | 36 | local function isCompressed(resourcePath) 37 | if redis.call('hexists',resourcePath,'compressed') == 1 then 38 | return true 39 | else 40 | return false 41 | end 42 | end 43 | 44 | local result = {} 45 | local subResourcesTable = splitToTable(";", subResources); 46 | 47 | for i=1,subResourcesCount do 48 | local subResName = subResourcesTable[i] 49 | if(isCollection(subResName)) then 50 | subResName = string.sub(subResName, 1, string.len(subResName)-1) 51 | local colPath = collectionsPrefix..path..sep..subResName 52 | if redis.call('exists',colPath) == 1 then 53 | local colMembers = redis.call('zrangebyscore',colPath, timestamp, maxtime) 54 | for k, v in ipairs(colMembers) do 55 | if redis.call('exists',colPath..sep..v) == 1 then 56 | colMembers[k] = v.."/" 57 | end 58 | end 59 | table.insert(result, {subResName, cjson.encode(colMembers)}) 60 | end 61 | else 62 | local resPath = resourcesPrefix..path..sep..subResName 63 | if redis.call('exists',resPath) == 1 then 64 | if isCompressed(resPath) then 65 | return "compressionNotSupported" 66 | end 67 | local score = tonumber(redis.call('zscore',expirableSet,resPath)) 68 | if score == nil or score > timestamp then 69 | local res = (redis.call('hget',resPath,'resource')) 70 | if(res) then 71 | table.insert(result, {subResName, res}) 72 | end 73 | end 74 | end 75 | end 76 | end 77 | 78 | local resEncoded = cjson.encode(result) 79 | 80 | if (resEncoded=='{}') then 81 | return "notFound" 82 | end 83 | 84 | return resEncoded -------------------------------------------------------------------------------- /src/test/java/org/swisspush/reststorage/CleanupIntegrationTest.java: -------------------------------------------------------------------------------- 1 | package org.swisspush.reststorage; 2 | 3 | import io.restassured.http.ContentType; 4 | import io.vertx.ext.unit.Async; 5 | import io.vertx.ext.unit.TestContext; 6 | import io.vertx.ext.unit.junit.VertxUnitRunner; 7 | import 
org.junit.Test; 8 | import org.junit.runner.RunWith; 9 | import org.swisspush.reststorage.redis.RedisStorageIntegrationTestCase; 10 | 11 | import java.util.concurrent.TimeUnit; 12 | 13 | import static io.restassured.RestAssured.get; 14 | import static io.restassured.RestAssured.given; 15 | import static org.awaitility.Awaitility.await; 16 | import static org.hamcrest.Matchers.*; 17 | import static org.hamcrest.Matchers.hasKey; 18 | import static org.hamcrest.core.IsEqual.equalTo; 19 | 20 | @RunWith(VertxUnitRunner.class) 21 | public class CleanupIntegrationTest extends RedisStorageIntegrationTestCase { 22 | 23 | @Test 24 | public void testNothingToCleanup(TestContext testContext) { 25 | Async async = testContext.async(); 26 | validateCleanupResults(0,0); 27 | async.complete(); 28 | } 29 | 30 | @Test 31 | public void testCleanupAmountBelowBulkSize(TestContext testContext) { 32 | Async async = testContext.async(); 33 | generateResourcesAndWaitUntilExpired(100); 34 | validateCleanupResults(100,0); 35 | async.complete(); 36 | } 37 | 38 | @Test 39 | public void testCleanupAmountHigherThanBulkSize(TestContext testContext) { 40 | Async async = testContext.async(); 41 | generateResourcesAndWaitUntilExpired(300); 42 | validateCleanupResults(300,0); 43 | async.complete(); 44 | } 45 | 46 | private void generateResourcesAndWaitUntilExpired(int amountOfResources){ 47 | for (int i = 1; i <= amountOfResources; i++) { 48 | given(). 49 | header("x-expire-after", "1"). 50 | body("{ \"foo\": \"bar1\" }"). 51 | when(). 52 | put("resource_"+i). 53 | then(). 54 | assertThat().statusCode(200); 55 | } 56 | await().atMost(3, TimeUnit.SECONDS).until(() -> get("resource_"+ amountOfResources).statusCode(), equalTo(404)); 57 | } 58 | 59 | private void validateCleanupResults(int cleanedResources, int expiredResourcesLeft){ 60 | given() 61 | .post("/server/_cleanup") 62 | .then() 63 | .assertThat() 64 | .statusCode(200) 65 | .contentType(ContentType.JSON) 66 | .body("", allOf(hasKey("cleanedResources"), hasKey("expiredResourcesLeft"))) 67 | .body("cleanedResources", equalTo(cleanedResources)) 68 | .body("expiredResourcesLeft", equalTo(expiredResourcesLeft)); 69 | } 70 | } 71 | -------------------------------------------------------------------------------- /src/test/java/org/swisspush/reststorage/ConfigurableTestCase.java: -------------------------------------------------------------------------------- 1 | package org.swisspush.reststorage; 2 | 3 | import io.restassured.builder.RequestSpecBuilder; 4 | import io.restassured.specification.RequestSpecification; 5 | import io.vertx.core.Vertx; 6 | import io.vertx.ext.unit.TestContext; 7 | import org.junit.After; 8 | 9 | import static org.swisspush.reststorage.util.HttpRequestHeader.CONTENT_TYPE; 10 | 11 | public abstract class ConfigurableTestCase { 12 | protected Vertx vertx; 13 | 14 | // restAssured Configuration 15 | protected static final int REST_STORAGE_PORT = 8989; 16 | protected static RequestSpecification REQUEST_SPECIFICATION = new RequestSpecBuilder() 17 | .addHeader(CONTENT_TYPE.getName(), "application/json") 18 | .setPort(8989) 19 | .setBasePath("/") 20 | .build(); 21 | 22 | @After 23 | public void tearDown(TestContext context) { 24 | vertx.close(context.asyncAssertSuccess()); 25 | } 26 | } 27 | -------------------------------------------------------------------------------- /src/test/java/org/swisspush/reststorage/EtagIntegrationTest.java: -------------------------------------------------------------------------------- 1 | package org.swisspush.reststorage; 2 
| 3 | import io.vertx.ext.unit.Async; 4 | import io.vertx.ext.unit.TestContext; 5 | import io.vertx.ext.unit.junit.VertxUnitRunner; 6 | import org.junit.Test; 7 | import org.junit.runner.RunWith; 8 | import org.swisspush.reststorage.redis.RedisStorageIntegrationTestCase; 9 | 10 | import static io.restassured.RestAssured.*; 11 | import static org.hamcrest.CoreMatchers.equalTo; 12 | import static org.hamcrest.CoreMatchers.nullValue; 13 | import static org.hamcrest.core.IsNot.not; 14 | 15 | @RunWith(VertxUnitRunner.class) 16 | public class EtagIntegrationTest extends RedisStorageIntegrationTestCase { 17 | private final String ETAG_HEADER = "Etag"; 18 | private final String IF_NONE_MATCH_HEADER = "if-none-match"; 19 | private final String EXPIRE_AFTER_HEADER = "x-expire-after"; 20 | private final String MAX_EXPIRE_IN_MILLIS = "99999999999999"; 21 | 22 | @Test 23 | public void testEtag(TestContext context) { 24 | Async async = context.async(); 25 | with().body("{ \"foo\": \"bar\" }").put("resources/res1"); 26 | 27 | String etag = get("resources/res1").getHeader(ETAG_HEADER); 28 | context.assertNotNull(etag, "Etag header should be available in response headers"); 29 | context.assertFalse(etag.isEmpty(), "Etag header should not be empty"); 30 | 31 | // get requests with no if-none-match header should result in statuscode 200 32 | when().get("resources/res1").then().assertThat() 33 | .header(ETAG_HEADER, equalTo(etag)) 34 | .statusCode(200); 35 | when().get("resources/res1").then().assertThat() 36 | .header(ETAG_HEADER, equalTo(etag)) 37 | .statusCode(200); 38 | 39 | // not modified resource should result in statuscode 304 40 | given().header(IF_NONE_MATCH_HEADER, etag).when().get("resources/res1").then().assertThat() 41 | .header(ETAG_HEADER, equalTo(etag)) 42 | .statusCode(304) 43 | .header("Transfer-Encoding", nullValue()) 44 | .header("Content-length", equalTo("0")); 45 | 46 | // non matching etags should result in statuscode 200 47 | given().header(IF_NONE_MATCH_HEADER, "NonMatchingEtag").when().get("resources/res1").then().assertThat() 48 | .header(ETAG_HEADER, equalTo(etag)) 49 | .statusCode(200); 50 | 51 | //update the resource 52 | with().body("{ \"foo\": \"bar2\" }").put("resources/res1"); 53 | 54 | // etag should have changed 55 | when().get("resources/res1").then().assertThat() 56 | .header(ETAG_HEADER, not(equalTo(etag))) 57 | .statusCode(200); 58 | async.complete(); 59 | } 60 | 61 | @Test 62 | public void testEtagPUTWithoutHeader(TestContext context){ 63 | Async async = context.async(); 64 | String content = "{ \"foo\": \"bar\" }"; 65 | with().body(content).put("resources/res1"); 66 | String etag = get("resources/res1").getHeader(ETAG_HEADER); 67 | String content2 = "{ \"foo2\": \"bar2\" }"; 68 | given().body(content2).when().put("resources/res1").then().assertThat().statusCode(200); 69 | when().get("resources/res1").then().assertThat() 70 | .header(ETAG_HEADER, not(equalTo(etag))) 71 | .header(ETAG_HEADER, not(equalTo(""))) 72 | .body(equalTo(content2)) 73 | .statusCode(200); 74 | async.complete(); 75 | } 76 | 77 | @Test 78 | public void testEtagPUTWithHeader(TestContext context){ 79 | Async async = context.async(); 80 | String content = "{ \"foo\": \"bar\" }"; 81 | with().body(content).put("resources/res1"); 82 | String etag = get("resources/res1").getHeader(ETAG_HEADER); 83 | String content2 = "{ \"foo2\": \"bar2\" }"; 84 | given().header(IF_NONE_MATCH_HEADER, etag).body(content2).when().put("resources/res1").then().assertThat().statusCode(304); 85 | 
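// A PUT carrying the stored etag in "if-none-match" is answered with 304 and must not
// overwrite the resource; the following GET verifies that both the etag and the original
// body are unchanged.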
when().get("resources/res1").then().assertThat() 86 | .header(ETAG_HEADER, equalTo(etag)) 87 | .header(ETAG_HEADER, not(equalTo(""))) 88 | .body(equalTo(content)) 89 | .statusCode(200); 90 | async.complete(); 91 | } 92 | 93 | @Test 94 | public void testEtagAndExpiryPUTWithHeader(TestContext context){ 95 | Async async = context.async(); 96 | String content = "{ \"foo\": \"bar\" }"; 97 | with().body(content).put("resources/res1"); 98 | String etag = get("resources/res1").getHeader(ETAG_HEADER); 99 | String content2 = "{ \"foo2\": \"bar2\" }"; 100 | 101 | // Test with an x-expire-after header. Expecting a code 200 and update of the data 102 | given().header(IF_NONE_MATCH_HEADER, etag).header(EXPIRE_AFTER_HEADER, "1000").body(content2).when().put("resources/res1").then().assertThat().statusCode(200); 103 | when().get("resources/res1").then().assertThat() 104 | .header(ETAG_HEADER, equalTo(etag)) 105 | .header(ETAG_HEADER, not(equalTo(""))) 106 | .body(equalTo(content2)) 107 | .statusCode(200); 108 | 109 | // Test with no x-expire-after header. Expecting a code 304 (not modified) and no update of the data 110 | etag = get("resources/res1").getHeader(ETAG_HEADER); 111 | String content3 = "{ \"foo3\": \"bar3\" }"; 112 | given().header(IF_NONE_MATCH_HEADER, etag).body(content3).when().put("resources/res1").then().assertThat().statusCode(304); 113 | when().get("resources/res1").then().assertThat() 114 | .header(ETAG_HEADER, equalTo(etag)) 115 | .header(ETAG_HEADER, not(equalTo(""))) 116 | .body(equalTo(content2)) 117 | .statusCode(200); 118 | 119 | // Test with large x-expire-after header => equals maximum expiry. Expecting a code 304 (not modified) and no update of the data 120 | etag = get("resources/res1").getHeader(ETAG_HEADER); 121 | String content4 = "{ \"foo4\": \"bar4\" }"; 122 | given().header(IF_NONE_MATCH_HEADER, etag).header(EXPIRE_AFTER_HEADER, MAX_EXPIRE_IN_MILLIS).body(content4).when().put("resources/res1").then().assertThat().statusCode(304); 123 | when().get("resources/res1").then().assertThat() 124 | .header(ETAG_HEADER, equalTo(etag)) 125 | .header(ETAG_HEADER, not(equalTo(""))) 126 | .body(equalTo(content2)) 127 | .statusCode(200); 128 | async.complete(); 129 | } 130 | 131 | @Test 132 | public void testInitialEtagPUT(TestContext context){ 133 | Async async = context.async(); 134 | String content = "{ \"foo\": \"bar\" }"; 135 | with().header(IF_NONE_MATCH_HEADER, "myFancyEtagValue").body(content).put("resources/res1"); 136 | String etag = get("resources/res1").getHeader(ETAG_HEADER); 137 | context.assertEquals("myFancyEtagValue", etag); 138 | String content2 = "{ \"foo2\": \"bar2\" }"; 139 | given().header(IF_NONE_MATCH_HEADER, etag).body(content2).when().put("resources/res1").then().assertThat().statusCode(304); 140 | when().get("resources/res1").then().assertThat() 141 | .header(ETAG_HEADER, equalTo(etag)) 142 | .header(ETAG_HEADER, not(equalTo(""))) 143 | .body(equalTo(content)) 144 | .statusCode(200); 145 | async.complete(); 146 | } 147 | } 148 | -------------------------------------------------------------------------------- /src/test/java/org/swisspush/reststorage/FilesystemStorageIntegrationTest.java: -------------------------------------------------------------------------------- 1 | package org.swisspush.reststorage; 2 | 3 | import io.restassured.http.ContentType; 4 | import io.vertx.ext.unit.Async; 5 | import io.vertx.ext.unit.TestContext; 6 | import io.vertx.ext.unit.junit.VertxUnitRunner; 7 | import org.junit.Test; 8 | import org.junit.runner.RunWith; 9 | 10 | import 
static io.restassured.RestAssured.*; 11 | import static org.hamcrest.Matchers.equalTo; 12 | import static org.hamcrest.Matchers.hasItems; 13 | 14 | @RunWith(VertxUnitRunner.class) 15 | public class FilesystemStorageIntegrationTest extends FilesystemStorageTestCase { 16 | 17 | @Test 18 | public void testGetCollection(TestContext testContext) { 19 | Async async = testContext.async(); 20 | String path = TEST_FILES_PATH + "/collection/sub/resources/"; 21 | with().body("
nemo.html").put(path + "nemo.html"); 22 | with().body("index.html
").put(path + "index.html"); 23 | get(path).then().assertThat().statusCode(200) 24 | .contentType("application/json") 25 | .body("resources", hasItems("index.html", "nemo.html")); 26 | async.complete(); 27 | } 28 | 29 | @Test 30 | public void testDeleteCollectionWithRecursiveParameter(TestContext testContext) { 31 | Async async = testContext.async(); 32 | String path = TEST_FILES_PATH + "/collection/sub/resources/"; 33 | with().body("
nemo.html
").put(path + "nemo.html"); 34 | get(path +"nemo.html").then().assertThat(). 35 | statusCode(200). 36 | assertThat(). 37 | contentType(ContentType.HTML); 38 | 39 | //delete non-empty collection 40 | with().param("recursive", true).delete(path).then().assertThat().statusCode(200); 41 | 42 | get(path +"nemo.html").then().assertThat(). 43 | statusCode(404); 44 | 45 | async.complete(); 46 | } 47 | 48 | @Test 49 | public void testDeleteCollectionWithoutRecursiveParameter(TestContext testContext) { 50 | Async async = testContext.async(); 51 | String path = TEST_FILES_PATH + "/collection/sub/resources/"; 52 | with().body("
nemo.html
").put(path + "nemo.html"); 53 | get(path +"nemo.html").then().assertThat(). 54 | statusCode(200). 55 | assertThat(). 56 | contentType(ContentType.HTML); 57 | 58 | //delete non-empty collection 59 | delete(path).then().assertThat().statusCode(400).body(equalTo("Bad Request: directory not empty. Use recursive=true parameter to delete")); 60 | 61 | get(path +"nemo.html").then().assertThat(). 62 | statusCode(200); 63 | 64 | async.complete(); 65 | } 66 | } 67 | -------------------------------------------------------------------------------- /src/test/java/org/swisspush/reststorage/FilesystemStorageTestCase.java: -------------------------------------------------------------------------------- 1 | package org.swisspush.reststorage; 2 | 3 | import io.restassured.RestAssured; 4 | import io.restassured.parsing.Parser; 5 | import io.vertx.core.DeploymentOptions; 6 | import io.vertx.core.Vertx; 7 | import io.vertx.core.file.FileSystem; 8 | import io.vertx.ext.unit.TestContext; 9 | import io.vertx.ext.unit.junit.VertxUnitRunner; 10 | import org.junit.After; 11 | import org.junit.Before; 12 | import org.junit.runner.RunWith; 13 | import org.swisspush.reststorage.util.ModuleConfiguration; 14 | 15 | @RunWith(VertxUnitRunner.class) 16 | public abstract class FilesystemStorageTestCase extends ConfigurableTestCase { 17 | 18 | protected static final String TEST_FILES_PATH = "filesystemStorageTestFiles"; 19 | 20 | @Before 21 | public void setUp(TestContext context) { 22 | vertx = Vertx.vertx(); 23 | 24 | // RestAssured Configuration 25 | RestAssured.port = REST_STORAGE_PORT; 26 | RestAssured.requestSpecification = REQUEST_SPECIFICATION; 27 | RestAssured.registerParser("application/json; charset=utf-8", Parser.JSON); 28 | RestAssured.defaultParser = Parser.JSON; 29 | 30 | ModuleConfiguration modConfig = new ModuleConfiguration() 31 | .root( "./target/fileStorage" ) 32 | .storageType(ModuleConfiguration.StorageType.filesystem) 33 | .confirmCollectionDelete(true) 34 | .storageAddress("rest-storage"); 35 | 36 | RestStorageMod restStorageMod = new RestStorageMod(); 37 | vertx.deployVerticle(restStorageMod, new DeploymentOptions().setConfig(modConfig.asJsonObject()), context.asyncAssertSuccess(stringAsyncResult1 -> { 38 | // standard code: will called @Before every test 39 | RestAssured.basePath = ""; 40 | })); 41 | } 42 | 43 | @After 44 | public void deleteTestFiles(TestContext context){ 45 | final FileSystem fileSystem = vertx.fileSystem(); 46 | final String relPath = "./target/fileStorage/"+TEST_FILES_PATH; 47 | if( fileSystem.existsBlocking(relPath) ){ 48 | fileSystem.deleteRecursiveBlocking(relPath, true); 49 | } 50 | } 51 | } 52 | -------------------------------------------------------------------------------- /src/test/java/org/swisspush/reststorage/JedisFactory.java: -------------------------------------------------------------------------------- 1 | /* 2 | * ------------------------------------------------------------------------------------------------ 3 | * Copyright 2014 by Swiss Post, Information Technology Services 4 | * ------------------------------------------------------------------------------------------------ 5 | * $Id$ 6 | * ------------------------------------------------------------------------------------------------ 7 | */ 8 | 9 | package org.swisspush.reststorage; 10 | 11 | import redis.clients.jedis.Jedis; 12 | 13 | /** 14 | * Created by kammermannf on 07.06.2015. 
15 | */ 16 | public class JedisFactory { 17 | 18 | public static Jedis createJedis() { 19 | return new Jedis("localhost", 6379, 5000); 20 | } 21 | } 22 | -------------------------------------------------------------------------------- /src/test/java/org/swisspush/reststorage/MimeTypeResolverTest.java: -------------------------------------------------------------------------------- 1 | package org.swisspush.reststorage; 2 | 3 | import org.junit.Assert; 4 | import org.junit.Test; 5 | 6 | public class MimeTypeResolverTest { 7 | 8 | private final MimeTypeResolver mimeTypeResolver = new MimeTypeResolver("gugus/gaga"); 9 | 10 | @Test 11 | public void testDefault() { 12 | String s = mimeTypeResolver.resolveMimeType("/aaa/bbb/ccc");// no file extension --> default 13 | Assert.assertEquals("Default-MimeType", "gugus/gaga", s); 14 | } 15 | 16 | @Test 17 | public void testUnknownFileExtension() { 18 | String s = mimeTypeResolver.resolveMimeType("/aaa/bbb/ccc.i_am_an_unknown_file_extension");// unknown extension --> text/plain 19 | Assert.assertEquals("Default-MimeType", "text/plain", s); 20 | } 21 | 22 | @Test 23 | public void testJson() { 24 | String s = mimeTypeResolver.resolveMimeType("/aaa/bbb/ccc.json"); 25 | Assert.assertEquals("Default-MimeType", "application/json", s); 26 | } 27 | 28 | @Test 29 | public void testTarGz() { 30 | String s = mimeTypeResolver.resolveMimeType("/aaa/bbb/ccc.tar.gz"); 31 | Assert.assertEquals("Default-MimeType", "application/gzip", s); 32 | } 33 | } 34 | -------------------------------------------------------------------------------- /src/test/java/org/swisspush/reststorage/OffsetIntegrationTest.java: -------------------------------------------------------------------------------- 1 | package org.swisspush.reststorage; 2 | 3 | import io.vertx.ext.unit.Async; 4 | import io.vertx.ext.unit.TestContext; 5 | import io.vertx.ext.unit.junit.VertxUnitRunner; 6 | import org.junit.Test; 7 | import org.junit.runner.RunWith; 8 | import org.swisspush.reststorage.redis.RedisStorageIntegrationTestCase; 9 | 10 | import static io.restassured.RestAssured.given; 11 | import static io.restassured.RestAssured.with; 12 | import static org.hamcrest.core.IsCollectionContaining.hasItem; 13 | import static org.hamcrest.core.IsCollectionContaining.hasItems; 14 | import static org.hamcrest.core.IsNot.not; 15 | 16 | @RunWith(VertxUnitRunner.class) 17 | public class OffsetIntegrationTest extends RedisStorageIntegrationTestCase { 18 | 19 | @Test 20 | public void testInvalidOffsets(TestContext context) { 21 | Async async = context.async(); 22 | for(int i=1; i<=10; i++) { 23 | with().body("{ \"foo\": \"bar"+i+"\" }") 24 | .put("resources/res"+i); 25 | } 26 | 27 | // get with invalid offsets 28 | given().param("delta", 0).when().get("resources/?limit=bla") 29 | .then().assertThat().body("resources", hasItem("res10")); 30 | 31 | given().param("delta", 0).when().get("resources/?offset=bla") 32 | .then().assertThat().body("resources", hasItem("res10")); 33 | 34 | given().param("delta", 0).when().get("resources/?offset=bla&limit=blo") 35 | .then().assertThat().body("resources", hasItem("res10")); 36 | 37 | given().param("delta", 0).when().get("resources/?offset=-99&limit-1") 38 | .then().assertThat().body("resources", hasItem("res10")); 39 | 40 | given().param("delta", 0).when().get("resources/?offset=-1&limit=-1") 41 | .then().assertThat().body("resources", hasItem("res10")); 42 | async.complete(); 43 | } 44 | 45 | @Test 46 | public void testValidLimits(TestContext context) { 47 | Async async = 
context.async(); 48 | for(int i=1; i<=10; i++) { 49 | with().body("{ \"foo\": \"bar"+i+"\" }") 50 | .put("resources/res"+i); 51 | } 52 | 53 | // get with valid offsets 54 | given().param("delta", 0).when().get("resources/?limit=10") 55 | .then().assertThat() 56 | .body("resources", hasItems("res1","res2","res3","res4","res5","res6","res7","res8","res9","res10")); 57 | 58 | given().param("delta", 0).when().get("resources?limit=99") 59 | .then().assertThat() 60 | .body("resources", hasItems("res1","res2","res3","res4","res5","res6","res7","res8","res9","res10")); 61 | 62 | given().param("delta", 0).when().get("resources?limit=5") 63 | .then().assertThat() 64 | .body("resources", hasItems("res10","res1","res2","res3","res4")) 65 | .body("resources", not(hasItems("res5","res6","res7","res8","res9"))); 66 | 67 | given().param("delta", 0).when().get("resources?limit=8") 68 | .then().assertThat() 69 | .body("resources", hasItems("res1","res10","res2","res3","res4","res5","res7")) 70 | .body("resources", not(hasItems("res8","res9"))); 71 | async.complete(); 72 | } 73 | 74 | @Test 75 | public void testValidOffsets(TestContext context) { 76 | Async async = context.async(); 77 | for(int i=1; i<=10; i++) { 78 | with().body("{ \"foo\": \"bar"+i+"\" }") 79 | .put("resources/res"+i); 80 | } 81 | 82 | // get with valid offsets 83 | given().param("delta", 0).when().get("resources/?offset=2") 84 | .then().assertThat() 85 | .body("resources", not(hasItems("res10","res1"))) 86 | .body("resources", hasItems("res2","res3","res4","res5","res6","res7","res8","res9")); 87 | 88 | given().param("delta", 0).when().get("resources?offset=0") 89 | .then().assertThat() 90 | .body("resources", hasItems("res1","res2","res3","res4","res5","res6","res7","res8","res9","res10")); 91 | 92 | given().param("delta", 0).when().get("resources?offset=5") 93 | .then().assertThat() 94 | .body("resources", not(hasItems("res10","res1","res2","res3","res4"))) 95 | .body("resources", hasItems("res5","res6","res7","res8","res9")); 96 | 97 | given().param("delta", 0).when().get("resources?offset=11") 98 | .then().assertThat() 99 | .body("resources", not(hasItems("res1","res2","res3","res4","res5","res6","res7","res8","res9","res10")) ); 100 | async.complete(); 101 | } 102 | 103 | @Test 104 | public void testInvalidLimitsOffsets(TestContext context) { 105 | Async async = context.async(); 106 | for(int i=1; i<=10; i++) { 107 | with().body("{ \"foo\": \"bar"+i+"\" }") 108 | .put("resources/res"+i); 109 | } 110 | 111 | // get with valid offsets 112 | given().param("delta", 0).when().get("resources/?offset=2&limit=bla") 113 | .then().assertThat() 114 | .body("resources", hasItems("res2","res3","res4","res5","res6","res7","res8","res9")); 115 | 116 | given().param("delta", 0).when().get("resources?offset=bla&limit=3") 117 | .then().assertThat() 118 | .body("resources", hasItems("res1","res10","res2")); 119 | 120 | given().param("delta", 0).when().get("resources?offset=1-5&limit=5") 121 | .then().assertThat() 122 | .body("resources", hasItems("res1","res10","res2","res3","res4")); 123 | 124 | given().param("delta", 0).when().get("resources?offset=99&limit=4") 125 | .then().assertThat() 126 | .body("resources", not(hasItems("res1","res2","res3","res4","res5","res6","res7","res8","res9","res10")) ); 127 | async.complete(); 128 | } 129 | 130 | @Test 131 | public void testValidLimitsOffsets(TestContext context) { 132 | Async async = context.async(); 133 | for(int i=1; i<=10; i++) { 134 | with().body("{ \"foo\": \"bar"+i+"\" }") 135 | 
.put("resources/res"+i); 136 | } 137 | 138 | // get with valid offsets 139 | given().param("delta", 0).when().get("resources/?offset=2&limit=-1") 140 | .then().assertThat() 141 | .body("resources", hasItems("res2","res3","res4","res5","res6","res7","res8","res9")); 142 | 143 | given().param("delta", 0).when().get("resources?offset=0&limit=3") 144 | .then().assertThat() 145 | .body("resources", hasItems("res1","res10","res2")); 146 | 147 | given().param("delta", 0).when().get("resources?offset=2&limit=2") 148 | .then().assertThat() 149 | .body("resources", hasItems("res2","res3")); 150 | 151 | given().param("delta", 0).when().get("resources?offset=1&limit=10") 152 | .then().assertThat() 153 | .body("resources", hasItems("res10","res2","res3","res4","res5","res6","res7","res8","res9")); 154 | async.complete(); 155 | } 156 | } 157 | -------------------------------------------------------------------------------- /src/test/java/org/swisspush/reststorage/PathLevelIntegrationTest.java: -------------------------------------------------------------------------------- 1 | package org.swisspush.reststorage; 2 | 3 | import io.restassured.RestAssured; 4 | import io.vertx.ext.unit.Async; 5 | import io.vertx.ext.unit.TestContext; 6 | import io.vertx.ext.unit.junit.VertxUnitRunner; 7 | import org.junit.Test; 8 | import org.junit.runner.RunWith; 9 | import org.swisspush.reststorage.redis.RedisStorageIntegrationTestCase; 10 | 11 | import static io.restassured.RestAssured.*; 12 | 13 | @RunWith(VertxUnitRunner.class) 14 | public class PathLevelIntegrationTest extends RedisStorageIntegrationTestCase { 15 | 16 | @Test 17 | public void testTryToPutResourceOverCollection(TestContext context) { 18 | Async async = context.async(); 19 | RestAssured.basePath = ""; 20 | with().put("/tests/crush/test1/test2/test3"); 21 | // here we assume, that on the path server is already a collection 22 | given(). 23 | body("{ \"foo\": \"bar\" }"). 24 | when(). 25 | put("/tests"). 26 | then(). 27 | assertThat().statusCode(405); 28 | async.complete(); 29 | } 30 | 31 | @Test 32 | public void testPut4levels(TestContext context) { 33 | Async async = context.async(); 34 | 35 | given(). 36 | body("{ \"foo\": \"bar\" }"). 37 | when(). 38 | put("/tests/crush/test1/test2/test3/test4"). 39 | then(). 40 | assertThat().statusCode(200); 41 | 42 | // test level 1 with and without trailing slash 43 | context.assertEquals("[test2/]", get("/tests/crush/test1").body().jsonPath().get("test1").toString()); 44 | context.assertEquals("[test2/]", get("/tests/crush/test1/").body().jsonPath().get("test1").toString()); 45 | 46 | // test level 2 with and without trailing slash 47 | context.assertEquals("[test3/]", get("/tests/crush/test1/test2").body().jsonPath().get("test2").toString()); 48 | context.assertEquals("[test3/]", get("/tests/crush/test1/test2/").body().jsonPath().get("test2").toString()); 49 | 50 | // test level 3 with and without trailing slash 51 | context.assertEquals("[test4]", get("/tests/crush/test1/test2/test3").body().jsonPath().get("test3").toString()); 52 | context.assertEquals("[test4]", get("/tests/crush/test1/test2/test3/").body().jsonPath().get("test3").toString()); 53 | 54 | // test4 level 55 | context.assertEquals("{ \"foo\": \"bar\" }", get("/tests/crush/test1/test2/test3/test4").body().asString()); 56 | 57 | async.complete(); 58 | } 59 | 60 | @Test 61 | public void testPutResourceOverCollection(TestContext context) { 62 | Async async = context.async(); 63 | 64 | given(). 65 | body("{ \"foo\": \"bar\" }"). 66 | when(). 
67 | put("/tests/crush/test1/test2/test3/test4"). 68 | then(). 69 | assertThat().statusCode(200); 70 | 71 | given(). 72 | body("{ \"foo\": \"bar\" }"). 73 | when(). 74 | put("/tests/crush/test1/test2"). 75 | then(). 76 | assertThat().statusCode(405); 77 | 78 | async.complete(); 79 | } 80 | 81 | @Test 82 | public void testPutCollectionOverResource(TestContext context) { 83 | Async async = context.async(); 84 | 85 | given(). 86 | body("{ \"foo\": \"bar\" }"). 87 | when(). 88 | put("/tests/crush/test1/test2"). 89 | then(). 90 | assertThat().statusCode(200); 91 | 92 | given(). 93 | body("{ \"foo\": \"bar\" }"). 94 | when(). 95 | put("/tests/crush/test1/test2/test3/test4"). 96 | then(). 97 | assertThat().statusCode(405); 98 | 99 | async.complete(); 100 | } 101 | } 102 | -------------------------------------------------------------------------------- /src/test/java/org/swisspush/reststorage/RedirectIntegrationTest.java: -------------------------------------------------------------------------------- 1 | package org.swisspush.reststorage; 2 | 3 | import io.restassured.RestAssured; 4 | import io.restassured.http.ContentType; 5 | import io.vertx.ext.unit.Async; 6 | import io.vertx.ext.unit.TestContext; 7 | import io.vertx.ext.unit.junit.VertxUnitRunner; 8 | import org.junit.Test; 9 | import org.junit.runner.RunWith; 10 | import org.swisspush.reststorage.redis.RedisStorageIntegrationTestCase; 11 | 12 | import static io.restassured.RestAssured.get; 13 | import static io.restassured.RestAssured.with; 14 | 15 | @RunWith(VertxUnitRunner.class) 16 | public class RedirectIntegrationTest extends RedisStorageIntegrationTestCase { 17 | 18 | @Test 19 | public void testGetHTMLResourceWithoutTrailingSlash(TestContext testContext) { 20 | Async async = testContext.async(); 21 | RestAssured.basePath = "/pages"; 22 | with().body("
nemo.html").put("nemo.html"); 23 | get("nemo.html").then().assertThat(). 24 | statusCode(200). 25 | assertThat(). 26 | contentType(ContentType.HTML); 27 | async.complete(); 28 | } 29 | 30 | @Test 31 | public void testGetHTMLResourceWithTrailingSlash(TestContext testContext) { 32 | Async async = testContext.async(); 33 | RestAssured.basePath = "/pages"; 34 | with().body("nemo.html
").put("nemo.html"); 35 | get("nemo.html/").then().assertThat(). 36 | statusCode(200). 37 | assertThat(). 38 | contentType(ContentType.HTML); 39 | async.complete(); 40 | } 41 | } 42 | -------------------------------------------------------------------------------- /src/test/java/org/swisspush/reststorage/Return200onDeleteNonExistingTest.java: -------------------------------------------------------------------------------- 1 | package org.swisspush.reststorage; 2 | 3 | import org.junit.Test; 4 | import org.swisspush.reststorage.redis.RedisStorageIntegrationTestCase; 5 | import org.swisspush.reststorage.util.ModuleConfiguration; 6 | 7 | import static io.restassured.RestAssured.when; 8 | 9 | public class Return200onDeleteNonExistingTest extends RedisStorageIntegrationTestCase { 10 | 11 | @Override 12 | protected void updateModuleConfiguration(ModuleConfiguration modConfig) { 13 | modConfig.return200onDeleteNonExisting(true); 14 | } 15 | 16 | @Test 17 | public void expect200() { 18 | when().delete("resources/does-not-exist").then().assertThat().statusCode(200); 19 | } 20 | } 21 | -------------------------------------------------------------------------------- /src/test/java/org/swisspush/reststorage/Return404onDeleteNonExistingTest.java: -------------------------------------------------------------------------------- 1 | package org.swisspush.reststorage; 2 | 3 | import org.junit.Test; 4 | import org.swisspush.reststorage.redis.RedisStorageIntegrationTestCase; 5 | 6 | import static io.restassured.RestAssured.when; 7 | 8 | public class Return404onDeleteNonExistingTest extends RedisStorageIntegrationTestCase { 9 | 10 | @Test 11 | public void expect404() { 12 | // default config response with 404 if we delete a non-existing resource 13 | when().delete("resources/does-not-exist").then().assertThat().statusCode(404); 14 | } 15 | } 16 | -------------------------------------------------------------------------------- /src/test/java/org/swisspush/reststorage/lua/AbstractLuaScriptTest.java: -------------------------------------------------------------------------------- 1 | /* 2 | * ------------------------------------------------------------------------------------------------ 3 | * Copyright 2014 by Swiss Post, Information Technology Services 4 | * ------------------------------------------------------------------------------------------------ 5 | * $Id$ 6 | * ------------------------------------------------------------------------------------------------ 7 | */ 8 | 9 | package org.swisspush.reststorage.lua; 10 | 11 | import io.vertx.ext.unit.junit.VertxUnitRunner; 12 | import org.junit.After; 13 | import org.junit.Before; 14 | import org.junit.runner.RunWith; 15 | import org.swisspush.reststorage.JedisFactory; 16 | import org.swisspush.reststorage.util.LockMode; 17 | import redis.clients.jedis.Jedis; 18 | 19 | import java.io.BufferedReader; 20 | import java.io.IOException; 21 | import java.io.InputStreamReader; 22 | import java.util.ArrayList; 23 | import java.util.UUID; 24 | 25 | /** 26 | * Abstract class containing common methods for LuaScript tests 27 | */ 28 | @RunWith(VertxUnitRunner.class) 29 | public abstract class AbstractLuaScriptTest { 30 | 31 | final static String prefixResources = "rest-storage:resources"; 32 | final static String prefixCollections = "rest-storage:collections"; 33 | final static String expirableSet = "rest-storage:expirable"; 34 | final static String prefixDeltaResources = "delta:resources"; 35 | final static String prefixDeltaEtags = "delta:etags"; 36 | final static 
String prefixLock = "redis-storage:locks"; 37 | 38 | static final String MAX_EXPIRE = "99999999999999"; 39 | 40 | Jedis jedis = null; 41 | 42 | @Before 43 | public void connect() { 44 | jedis = JedisFactory.createJedis(); 45 | } 46 | 47 | @After 48 | public void disconnect() { 49 | jedis.flushAll(); 50 | jedis.close(); 51 | } 52 | 53 | protected double getNowAsDouble() { 54 | return (double) System.currentTimeMillis(); 55 | } 56 | 57 | protected String getNowAsString() { 58 | return String.valueOf(System.currentTimeMillis()); 59 | } 60 | 61 | protected String readScript(String scriptFileName) { 62 | return readScript(scriptFileName, false); 63 | } 64 | 65 | protected String readScript(String scriptFileName, boolean stripLogNotice) { 66 | BufferedReader in = new BufferedReader(new InputStreamReader(this.getClass().getClassLoader().getResourceAsStream(scriptFileName))); 67 | StringBuilder sb; 68 | try { 69 | sb = new StringBuilder(); 70 | String line; 71 | while ((line = in.readLine()) != null) { 72 | if (stripLogNotice && line.contains("redis.LOG_NOTICE,")) { 73 | continue; 74 | } 75 | sb.append(line).append("\n"); 76 | } 77 | 78 | } catch (IOException e) { 79 | throw new RuntimeException(e); 80 | } finally { 81 | try { 82 | in.close(); 83 | } catch (IOException e) { 84 | // Ignore 85 | } 86 | } 87 | return sb.toString(); 88 | } 89 | 90 | protected String evalScriptPut(final String resourceName, final String resourceValue) { 91 | return evalScriptPut(resourceName, resourceValue, MAX_EXPIRE); 92 | } 93 | 94 | protected String evalScriptPut(final String resourceName, final String resourceValue, final String expire) { 95 | return evalScriptPut(resourceName, resourceValue, expire, UUID.randomUUID().toString()); 96 | } 97 | 98 | protected String evalScriptPut(final String resourceName, final String resourceValue, final String expire, final String etag) { 99 | return evalScriptPut(resourceName, resourceValue, expire, etag, "", LockMode.SILENT, 300); 100 | } 101 | 102 | protected String evalScriptPut(final String resourceName, final String resourceValue, final String expire, final String etag, boolean storeCompressed) { 103 | return evalScriptPut(resourceName, resourceValue, expire, etag, "", LockMode.SILENT, 300, storeCompressed); 104 | } 105 | 106 | protected String evalScriptPut(final String resourceName, final String resourceValue, final String expire, final String etag, final String lockOwner, final LockMode lockMode, final long lockExpire) { 107 | return evalScriptPut(resourceName, resourceValue, expire, etag, lockOwner, lockMode, lockExpire, false); 108 | } 109 | 110 | @SuppressWarnings({"rawtypes", "unchecked", "serial"}) 111 | protected String evalScriptPut(final String resourceName, final String resourceValue, final String expire, final String etag, final String lockOwner, final LockMode lockMode, final long lockExpire, boolean storeCompressed) { 112 | String putScript = readScript("put.lua"); 113 | String etagTmp; 114 | if (etag != null && !etag.isEmpty()) { 115 | etagTmp = etag; 116 | } else { 117 | etagTmp = UUID.randomUUID().toString(); 118 | } 119 | 120 | String lockExpireInMillis = String.valueOf(System.currentTimeMillis() + (lockExpire * 1000)); 121 | 122 | final String etagValue = etagTmp; 123 | return (String) jedis.eval(putScript, new ArrayList() { 124 | { 125 | add(resourceName); 126 | } 127 | }, new ArrayList() { 128 | { 129 | add(prefixResources); 130 | add(prefixCollections); 131 | add(expirableSet); 132 | add("false"); 133 | add(expire); 134 | add("99999999999999"); 135 | 
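// The order of these add(...) calls mirrors the ARGV list expected by put.lua: prefixes,
// expirable set, merge flag, expire, max expire, resource value, etag, lock
// prefix/owner/mode/expire and, last, the compressed flag. Conceptually this is the Jedis
// equivalent of: EVAL "<put.lua>" 1 <resourceName> <ARGV...>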
add(resourceValue); 136 | add(etagValue); 137 | add(prefixLock); 138 | add(lockOwner); 139 | add(lockMode.text()); 140 | add(lockExpireInMillis); 141 | add(storeCompressed ? "1" : "0"); 142 | } 143 | } 144 | ); 145 | } 146 | 147 | protected Object evalScriptGet(final String resourceName) { 148 | return evalScriptGet(resourceName, String.valueOf(System.currentTimeMillis())); 149 | } 150 | 151 | protected Object evalScriptGet(final String resourceName, final String timestamp) { 152 | return evalScriptGet(resourceName, timestamp, "", ""); 153 | } 154 | 155 | protected Object evalScriptGetOffsetCount(final String resourceName1, final String offset, final String count) { 156 | return evalScriptGet(resourceName1, String.valueOf(System.currentTimeMillis()), offset, count); 157 | } 158 | 159 | @SuppressWarnings({ "rawtypes", "unchecked", "serial" }) 160 | protected Object evalScriptGet(final String resourceName, final String timestamp, final String offset, final String count) { 161 | String getScript = readScript("get.lua"); 162 | return jedis.eval(getScript, new ArrayList() { 163 | { 164 | add(resourceName); 165 | } 166 | }, new ArrayList() { 167 | { 168 | add(prefixResources); 169 | add(prefixCollections); 170 | add(expirableSet); 171 | add(timestamp); 172 | add("99999999999999"); 173 | add(offset); 174 | add(count); 175 | } 176 | } 177 | ); 178 | } 179 | } 180 | -------------------------------------------------------------------------------- /src/test/java/org/swisspush/reststorage/mocks/FailFastRestStorage.java: -------------------------------------------------------------------------------- 1 | package org.swisspush.reststorage.mocks; 2 | 3 | import io.vertx.core.Handler; 4 | import org.swisspush.reststorage.DocumentResource; 5 | import org.swisspush.reststorage.Resource; 6 | import org.swisspush.reststorage.Storage; 7 | import org.swisspush.reststorage.util.LockMode; 8 | 9 | import java.util.List; 10 | import java.util.Optional; 11 | 12 | 13 | public class FailFastRestStorage implements Storage { 14 | 15 | protected final String msg; 16 | 17 | public FailFastRestStorage() { 18 | this("Method not implemented in mock. 
Override method to provide your behaviour."); 19 | } 20 | 21 | public FailFastRestStorage(String msg) { 22 | this.msg = msg; 23 | } 24 | 25 | @Override 26 | public Optional getCurrentMemoryUsage() { 27 | throw new UnsupportedOperationException(msg); } 28 | 29 | @Override 30 | public void get(String path, String etag, int offset, int count, Handler handler) { 31 | throw new UnsupportedOperationException(msg); 32 | } 33 | 34 | @Override 35 | public void storageExpand(String path, String etag, List subResources, Handler handler) { 36 | throw new UnsupportedOperationException(msg); 37 | } 38 | 39 | @Override 40 | public void put(String path, String etag, boolean merge, long expire, Handler handler) { 41 | throw new UnsupportedOperationException(msg); 42 | } 43 | 44 | @Override 45 | public void put(String path, String etag, boolean merge, long expire, String lockOwner, LockMode lockMode, long lockExpire, Handler handler) { 46 | throw new UnsupportedOperationException(msg); 47 | } 48 | 49 | @Override 50 | public void put(String path, String etag, boolean merge, long expire, String lockOwner, LockMode lockMode, long lockExpire, boolean storeCompressed, Handler handler) { 51 | throw new UnsupportedOperationException(msg); 52 | } 53 | 54 | @Override 55 | public void delete(String path, String lockOwner, LockMode lockMode, long lockExpire, boolean confirmCollectionDelete, boolean deleteRecursive, Handler handler) { 56 | throw new UnsupportedOperationException(msg); 57 | } 58 | 59 | @Override 60 | public void cleanup(Handler handler, String cleanupResourcesAmount) { 61 | throw new UnsupportedOperationException(msg); 62 | } 63 | } 64 | -------------------------------------------------------------------------------- /src/test/java/org/swisspush/reststorage/mocks/FailFastVertxAsyncFile.java: -------------------------------------------------------------------------------- 1 | package org.swisspush.reststorage.mocks; 2 | 3 | import io.vertx.core.AsyncResult; 4 | import io.vertx.core.Future; 5 | import io.vertx.core.Handler; 6 | import io.vertx.core.buffer.Buffer; 7 | import io.vertx.core.file.AsyncFile; 8 | import io.vertx.core.file.AsyncFileLock; 9 | 10 | 11 | public class FailFastVertxAsyncFile implements AsyncFile { 12 | 13 | protected final String msg; 14 | 15 | public FailFastVertxAsyncFile() { 16 | this("Override this to provide your behaviour."); 17 | } 18 | 19 | public FailFastVertxAsyncFile(String msg) { 20 | this.msg = msg; 21 | } 22 | 23 | @Override 24 | public AsyncFile handler(Handler handler) { 25 | throw new UnsupportedOperationException(msg); 26 | } 27 | 28 | @Override 29 | public AsyncFile pause() { 30 | throw new UnsupportedOperationException(msg); 31 | } 32 | 33 | @Override 34 | public AsyncFile resume() { 35 | throw new UnsupportedOperationException(msg); 36 | } 37 | 38 | @Override 39 | public AsyncFile endHandler(Handler handler) { 40 | throw new UnsupportedOperationException(msg); 41 | } 42 | 43 | 44 | @Override 45 | public AsyncFile setWriteQueueMaxSize(int i) { 46 | throw new UnsupportedOperationException(msg); 47 | } 48 | 49 | @Override 50 | public boolean writeQueueFull() { 51 | throw new UnsupportedOperationException(msg); 52 | } 53 | 54 | @Override 55 | public AsyncFile drainHandler(Handler handler) { 56 | throw new UnsupportedOperationException(msg); 57 | } 58 | 59 | @Override 60 | public AsyncFile exceptionHandler(Handler handler) { 61 | throw new UnsupportedOperationException(msg); 62 | } 63 | 64 | @Override 65 | public Future write(Buffer data) { 66 | throw new 
UnsupportedOperationException(msg); 67 | } 68 | 69 | @Override 70 | public void write(Buffer data, Handler> handler) { 71 | throw new UnsupportedOperationException(msg); 72 | } 73 | 74 | @Override 75 | public void end(Handler> handler) { 76 | throw new UnsupportedOperationException(msg); 77 | } 78 | 79 | @Override 80 | public AsyncFile fetch(long amount) { 81 | throw new UnsupportedOperationException(msg); 82 | } 83 | 84 | @Override 85 | public Future close() { 86 | throw new UnsupportedOperationException(msg); 87 | } 88 | 89 | 90 | @Override 91 | public void close(Handler> handler) { 92 | throw new UnsupportedOperationException(msg); 93 | } 94 | 95 | @Override 96 | public void write(Buffer buffer, long position, Handler> handler) { 97 | throw new UnsupportedOperationException(msg); 98 | } 99 | 100 | @Override 101 | public Future write(Buffer buffer, long position) { 102 | throw new UnsupportedOperationException(msg); 103 | } 104 | 105 | 106 | @Override 107 | public AsyncFile read(Buffer buffer, int i, long l, int i1, Handler> handler) { 108 | throw new UnsupportedOperationException(msg); 109 | } 110 | 111 | @Override 112 | public Future read(Buffer buffer, int offset, long position, int length) { 113 | throw new UnsupportedOperationException(msg); 114 | } 115 | 116 | @Override 117 | public Future flush() { 118 | throw new UnsupportedOperationException(msg); 119 | } 120 | 121 | 122 | @Override 123 | public AsyncFile flush(Handler> handler) { 124 | throw new UnsupportedOperationException(msg); 125 | } 126 | 127 | @Override 128 | public AsyncFile setReadPos(long l) { 129 | throw new UnsupportedOperationException(msg); 130 | } 131 | 132 | @Override 133 | public AsyncFile setReadLength(long readLength) { 134 | throw new UnsupportedOperationException(msg); 135 | } 136 | 137 | @Override 138 | public long getReadLength() { 139 | throw new UnsupportedOperationException(msg); 140 | } 141 | 142 | @Override 143 | public AsyncFile setWritePos(long l) { 144 | throw new UnsupportedOperationException(msg); 145 | } 146 | 147 | @Override 148 | public long getWritePos() { 149 | throw new UnsupportedOperationException(msg); 150 | } 151 | 152 | @Override 153 | public AsyncFile setReadBufferSize(int i) { 154 | throw new UnsupportedOperationException(msg); 155 | } 156 | 157 | @Override 158 | public long sizeBlocking() { 159 | throw new UnsupportedOperationException(msg); 160 | } 161 | 162 | @Override 163 | public Future size() { 164 | throw new UnsupportedOperationException(msg); 165 | } 166 | 167 | @Override 168 | public AsyncFileLock tryLock() { 169 | throw new UnsupportedOperationException(msg); 170 | } 171 | 172 | @Override 173 | public AsyncFileLock tryLock(long position, long size, boolean shared) { 174 | throw new UnsupportedOperationException(msg); 175 | } 176 | 177 | @Override 178 | public Future lock() { 179 | throw new UnsupportedOperationException(msg); 180 | } 181 | 182 | @Override 183 | public void lock(Handler> handler) { 184 | throw new UnsupportedOperationException(msg); 185 | } 186 | 187 | @Override 188 | public Future lock(long position, long size, boolean shared) { 189 | throw new UnsupportedOperationException(msg); 190 | } 191 | 192 | @Override 193 | public void lock(long position, long size, boolean shared, Handler> handler) { 194 | throw new UnsupportedOperationException(msg); 195 | } 196 | } 197 | -------------------------------------------------------------------------------- /src/test/java/org/swisspush/reststorage/mocks/FailFastVertxWriteStream.java: 
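The fail-fast mocks in this package (FailFastRestStorage and FailFastVertxAsyncFile above, FailFastVertxWriteStream below) all follow the same idea: every method throws UnsupportedOperationException, and a test subclasses the mock and overrides only the calls it expects, so any unexpected interaction fails loudly. A minimal, hypothetical usage sketch (not part of the repository; it assumes the generic type parameters stripped from this listing, i.e. FailFastVertxWriteStream<T> implements io.vertx.core.streams.WriteStream<T>):

package org.swisspush.reststorage.mocks;

import io.vertx.core.buffer.Buffer;
import io.vertx.core.streams.WriteStream;

// Hypothetical usage sketch, not part of the repository.
public class FailFastMockUsageExample {

    // Override only what the code under test actually calls; every other
    // method still fails fast with UnsupportedOperationException.
    WriteStream<Buffer> neverFullStream() {
        return new FailFastVertxWriteStream<Buffer>() {
            @Override
            public boolean writeQueueFull() {
                return false; // assumption: the caller only checks back-pressure
            }
        };
    }
}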
-------------------------------------------------------------------------------- 1 | package org.swisspush.reststorage.mocks; 2 | 3 | import io.vertx.core.AsyncResult; 4 | import io.vertx.core.Future; 5 | import io.vertx.core.Handler; 6 | import io.vertx.core.streams.WriteStream; 7 | 8 | 9 | public class FailFastVertxWriteStream implements WriteStream { 10 | 11 | protected final String msg; 12 | 13 | public FailFastVertxWriteStream() { 14 | this("Method not implemented in mock. Override method to provide your behaviour."); 15 | } 16 | 17 | public FailFastVertxWriteStream(String msg) { 18 | this.msg = msg; 19 | } 20 | 21 | @Override 22 | public WriteStream exceptionHandler(Handler handler) { 23 | throw new UnsupportedOperationException(msg); 24 | } 25 | 26 | @Override 27 | public Future write(T data) { 28 | throw new UnsupportedOperationException(msg); 29 | } 30 | 31 | @Override 32 | public void write(T data, Handler> handler) { 33 | throw new UnsupportedOperationException(msg); 34 | } 35 | 36 | @Override 37 | public void end(Handler> handler) { 38 | throw new UnsupportedOperationException(msg); 39 | } 40 | 41 | @Override 42 | public WriteStream setWriteQueueMaxSize(int i) { 43 | throw new UnsupportedOperationException(msg); 44 | } 45 | 46 | @Override 47 | public boolean writeQueueFull() { 48 | throw new UnsupportedOperationException(msg); 49 | } 50 | 51 | @Override 52 | public WriteStream drainHandler(Handler handler) { 53 | throw new UnsupportedOperationException(msg); 54 | } 55 | } 56 | -------------------------------------------------------------------------------- /src/test/java/org/swisspush/reststorage/redis/DefaultRedisReadyProviderTest.java: -------------------------------------------------------------------------------- 1 | package org.swisspush.reststorage.redis; 2 | 3 | import io.vertx.core.AsyncResult; 4 | import io.vertx.core.Future; 5 | import io.vertx.core.Vertx; 6 | import io.vertx.core.buffer.Buffer; 7 | import io.vertx.ext.unit.Async; 8 | import io.vertx.ext.unit.TestContext; 9 | import io.vertx.ext.unit.junit.VertxUnitRunner; 10 | import io.vertx.redis.client.RedisAPI; 11 | import io.vertx.redis.client.impl.types.BulkType; 12 | import org.junit.Before; 13 | import org.junit.Test; 14 | import org.junit.runner.RunWith; 15 | import org.mockito.Mockito; 16 | import org.swisspush.reststorage.util.ResourcesUtils; 17 | 18 | import static org.mockito.Mockito.*; 19 | 20 | /** 21 | * Tests for the {@link DefaultRedisReadyProvider} class 22 | * 23 | * @author https://github.com/mcweba [Marc-Andre Weber] 24 | */ 25 | @RunWith(VertxUnitRunner.class) 26 | public class DefaultRedisReadyProviderTest { 27 | 28 | private final String REDIS_INFO_LOADING = ResourcesUtils.loadResource("redis_info_persistance_loading_1", true); 29 | private final String REDIS_INFO_NOT_LOADING = ResourcesUtils.loadResource("redis_info_persistance_loading_0", true); 30 | 31 | private Vertx vertx; 32 | private RedisAPI redisAPI; 33 | private DefaultRedisReadyProvider readyProvider; 34 | 35 | @Before 36 | public void setUp() { 37 | this.vertx = Vertx.vertx(); 38 | redisAPI = Mockito.mock(RedisAPI.class); 39 | readyProvider = new DefaultRedisReadyProvider(vertx, 1000); 40 | } 41 | 42 | private void assertReadiness(TestContext testContext, AsyncResult event, Boolean expectedReadiness) { 43 | testContext.assertTrue(event.succeeded()); 44 | testContext.assertEquals(expectedReadiness, event.result()); 45 | } 46 | 47 | @Test 48 | public void testRedisReady(TestContext testContext) { 49 | Async async = 
testContext.async(); 50 | Mockito.when(redisAPI.info(any())).thenReturn(Future.succeededFuture(BulkType.create(Buffer.buffer(REDIS_INFO_NOT_LOADING), false))); 51 | 52 | readyProvider.ready(redisAPI).onComplete(event -> { 53 | assertReadiness(testContext, event, true); 54 | async.complete(); 55 | }); 56 | } 57 | 58 | @Test 59 | public void testRedisReadyMultipleCalls(TestContext testContext) { 60 | Async async = testContext.async(); 61 | Mockito.when(redisAPI.info(any())).thenReturn(Future.succeededFuture(BulkType.create(Buffer.buffer(REDIS_INFO_NOT_LOADING), false))); 62 | 63 | readyProvider.ready(redisAPI).onComplete(event -> { 64 | assertReadiness(testContext, event, true); 65 | readyProvider.ready(redisAPI).onComplete(event2 -> { 66 | assertReadiness(testContext, event2, true); 67 | async.complete(); 68 | }); 69 | }); 70 | 71 | verify(redisAPI, times(1)).info(any()); 72 | } 73 | 74 | @Test 75 | public void testRedisNotReady(TestContext testContext) { 76 | Async async = testContext.async(); 77 | Mockito.when(redisAPI.info(any())).thenReturn(Future.succeededFuture(BulkType.create(Buffer.buffer(REDIS_INFO_LOADING), false))); 78 | 79 | readyProvider.ready(redisAPI).onComplete(event -> { 80 | assertReadiness(testContext, event, false); 81 | async.complete(); 82 | }); 83 | } 84 | 85 | @Test 86 | public void testRedisNotReadyInvalidInfoResponse(TestContext testContext) { 87 | Async async = testContext.async(); 88 | Mockito.when(redisAPI.info(any())).thenReturn(Future.succeededFuture(BulkType.create(Buffer.buffer("some invalid info response"), false))); 89 | 90 | readyProvider.ready(redisAPI).onComplete(event -> { 91 | assertReadiness(testContext, event, false); 92 | async.complete(); 93 | }); 94 | } 95 | 96 | @Test 97 | public void testRedisNotReadyExceptionWhenAccessingRedisAPI(TestContext testContext) { 98 | Async async = testContext.async(); 99 | Mockito.when(redisAPI.info(any())).thenReturn(Future.failedFuture("Boooom")); 100 | 101 | readyProvider.ready(redisAPI).onComplete(event -> { 102 | assertReadiness(testContext, event, false); 103 | async.complete(); 104 | }); 105 | } 106 | } 107 | -------------------------------------------------------------------------------- /src/test/java/org/swisspush/reststorage/redis/RedisMonitorTest.java: -------------------------------------------------------------------------------- 1 | package org.swisspush.reststorage.redis; 2 | 3 | import io.vertx.core.Future; 4 | import io.vertx.core.Vertx; 5 | import io.vertx.core.buffer.Buffer; 6 | import io.vertx.ext.unit.TestContext; 7 | import io.vertx.ext.unit.junit.VertxUnitRunner; 8 | import io.vertx.redis.client.RedisAPI; 9 | import io.vertx.redis.client.impl.types.BulkType; 10 | import org.junit.Before; 11 | import org.junit.Test; 12 | import org.junit.runner.RunWith; 13 | import org.mockito.ArgumentCaptor; 14 | import org.mockito.Mockito; 15 | import org.swisspush.reststorage.util.ResourcesUtils; 16 | 17 | import java.util.List; 18 | 19 | import static org.mockito.Mockito.*; 20 | 21 | /** 22 | * Tests for the {@link RedisMonitor} class 23 | * 24 | * @author https://github.com/mcweba [Marc-Andre Weber] 25 | */ 26 | @RunWith(VertxUnitRunner.class) 27 | public class RedisMonitorTest { 28 | 29 | private Vertx vertx; 30 | private RedisAPI redisAPI; 31 | private RedisProvider redisProvider; 32 | private RedisMonitor redisMonitor; 33 | private RedisMetricsPublisher publisher; 34 | 35 | private final String REDIS_INFO = ResourcesUtils.loadResource("redis_info_output", true); 36 | 37 | @Before 38 | public void 
setUp() { 39 | vertx = Vertx.vertx(); 40 | 41 | redisAPI = Mockito.mock(RedisAPI.class); 42 | redisProvider = Mockito.mock(RedisProvider.class); 43 | when(redisProvider.redis()).thenReturn(Future.succeededFuture(redisAPI)); 44 | 45 | publisher = Mockito.mock(RedisMetricsPublisher.class); 46 | redisMonitor = new RedisMonitor(vertx, redisProvider, "main", 1, publisher); 47 | } 48 | 49 | @Test 50 | public void testRedisInfoParsing(TestContext testContext) { 51 | redisMonitor.start(); 52 | Mockito.when(redisAPI.info(any())).thenReturn(Future.succeededFuture(BulkType.create(Buffer.buffer(REDIS_INFO), false))); 53 | 54 | ArgumentCaptor keyCaptor = ArgumentCaptor.forClass(String.class); 55 | ArgumentCaptor valueCaptor = ArgumentCaptor.forClass(Long.class); 56 | 57 | verify(publisher, timeout(1200).times(59)).publishMetric(keyCaptor.capture(), valueCaptor.capture()); 58 | 59 | List keys = keyCaptor.getAllValues(); 60 | testContext.assertTrue(keys.containsAll(List.of("redis_git_sha1", "redis_git_dirty", 61 | "arch_bits", "process_id", "tcp_port", "uptime_in_seconds", "uptime_in_days", "hz", "lru_clock", 62 | "connected_clients", "client_longest_output_list", "client_biggest_input_buf", "blocked_clients", 63 | "used_memory", "used_memory_rss", "used_memory_peak", "used_memory_lua", "mem_fragmentation_ratio", "loading", 64 | "rdb_changes_since_last_save", "rdb_bgsave_in_progress", "rdb_last_save_time", "rdb_last_bgsave_time_sec", 65 | "rdb_current_bgsave_time_sec", "aof_enabled", "aof_rewrite_in_progress", "aof_rewrite_scheduled", 66 | "aof_last_rewrite_time_sec", "aof_current_rewrite_time_sec", "total_connections_received", "total_commands_processed", 67 | "instantaneous_ops_per_sec", "rejected_connections", "sync_full", "sync_partial_ok", "sync_partial_err", 68 | "expired_keys", "evicted_keys", "keyspace_hits", "keyspace_misses", "pubsub_channels", "pubsub_patterns", 69 | "latest_fork_usec", "connected_slaves", "master_repl_offset", "repl_backlog_active", "repl_backlog_size", 70 | "repl_backlog_first_byte_offset", "repl_backlog_histlen", "used_cpu_sys", "used_cpu_user", 71 | "used_cpu_sys_children", "used_cpu_user_children", "keyspace.db0.keys", "keyspace.db0.expires", "keyspace.db0.avg_ttl", 72 | "keyspace.db1.keys", "keyspace.db1.expires", "keyspace.db1.avg_ttl"))); 73 | 74 | // assert non numeric entries not published 75 | testContext.assertFalse(keys.containsAll(List.of("redis_version", "redis_build_id", "redis_mode", "os", 76 | "multiplexing_api", "gcc_version", "run_id", "executable", "config_file", "used_memory_human", 77 | "used_memory_peak_human", "mem_allocator", "rdb_last_bgsave_status", "aof_last_bgrewrite_status", 78 | "aof_last_write_status", "role"))); 79 | 80 | // assert some key value pairs 81 | List allValues = valueCaptor.getAllValues(); 82 | testContext.assertEquals(170L, allValues.get(keys.indexOf("connected_clients"))); 83 | testContext.assertEquals(1L, allValues.get(keys.indexOf("mem_fragmentation_ratio"))); 84 | testContext.assertEquals(423L, allValues.get(keys.indexOf("total_commands_processed"))); 85 | testContext.assertEquals(1048576L, allValues.get(keys.indexOf("repl_backlog_size"))); 86 | testContext.assertEquals(-1L, allValues.get(keys.indexOf("aof_last_rewrite_time_sec"))); 87 | testContext.assertEquals(36440L, allValues.get(keys.indexOf("used_cpu_sys"))); 88 | testContext.assertEquals(0L, allValues.get(keys.indexOf("used_cpu_sys_children"))); 89 | testContext.assertEquals(2L, allValues.get(keys.indexOf("keyspace.db0.keys"))); 90 | testContext.assertEquals(1L, 
allValues.get(keys.indexOf("keyspace.db0.expires"))); 91 | testContext.assertEquals(10235L, allValues.get(keys.indexOf("keyspace.db0.avg_ttl"))); 92 | 93 | } 94 | } 95 | -------------------------------------------------------------------------------- /src/test/java/org/swisspush/reststorage/redis/RedisStorageIntegrationTestCase.java: -------------------------------------------------------------------------------- 1 | package org.swisspush.reststorage.redis; 2 | 3 | import io.restassured.RestAssured; 4 | import io.restassured.parsing.Parser; 5 | import io.vertx.core.DeploymentOptions; 6 | import io.vertx.core.Vertx; 7 | import io.vertx.ext.unit.TestContext; 8 | import io.vertx.ext.unit.junit.VertxUnitRunner; 9 | import org.junit.After; 10 | import org.junit.Before; 11 | import org.junit.runner.RunWith; 12 | import org.swisspush.reststorage.ConfigurableTestCase; 13 | import org.swisspush.reststorage.JedisFactory; 14 | import org.swisspush.reststorage.RestStorageMod; 15 | import org.swisspush.reststorage.util.ModuleConfiguration; 16 | import redis.clients.jedis.Jedis; 17 | 18 | @RunWith(VertxUnitRunner.class) 19 | public abstract class RedisStorageIntegrationTestCase extends ConfigurableTestCase { 20 | 21 | Jedis jedis = null; 22 | 23 | @Before 24 | public void setUp(TestContext context) { 25 | vertx = Vertx.vertx(); 26 | jedis = JedisFactory.createJedis(); 27 | 28 | // RestAssured Configuration 29 | RestAssured.port = REST_STORAGE_PORT; 30 | RestAssured.requestSpecification = REQUEST_SPECIFICATION; 31 | RestAssured.registerParser("application/json; charset=utf-8", Parser.JSON); 32 | RestAssured.defaultParser = Parser.JSON; 33 | 34 | ModuleConfiguration modConfig = new ModuleConfiguration() 35 | .storageType(ModuleConfiguration.StorageType.redis) 36 | .confirmCollectionDelete(true) 37 | .maxStorageExpandSubresources(5) 38 | .storageAddress("rest-storage"); 39 | 40 | updateModuleConfiguration(modConfig); 41 | 42 | RestStorageMod restStorageMod = new RestStorageMod(); 43 | vertx.deployVerticle(restStorageMod, new DeploymentOptions().setConfig(modConfig.asJsonObject()), context.asyncAssertSuccess(stringAsyncResult1 -> { 44 | // standard code: will called @Before every test 45 | RestAssured.basePath = ""; 46 | })); 47 | } 48 | 49 | /** 50 | * chance for specific unit test classes to change config here 51 | */ 52 | protected void updateModuleConfiguration(ModuleConfiguration modConfig) { 53 | } 54 | 55 | @After 56 | public void tearDown(TestContext context) { 57 | jedis.flushAll(); 58 | jedis.close(); 59 | vertx.close(context.asyncAssertSuccess()); 60 | } 61 | 62 | protected void assertExpirableSetCount(TestContext testContext, Long count){ 63 | testContext.assertEquals(count, jedis.zcount("rest-storage:expirable", 0d, Double.MAX_VALUE)); 64 | } 65 | } -------------------------------------------------------------------------------- /src/test/java/org/swisspush/reststorage/redis/ResourceCompressionIntegrationTest.java: -------------------------------------------------------------------------------- 1 | 2 | package org.swisspush.reststorage.redis; 3 | 4 | import io.restassured.specification.RequestSpecification; 5 | import io.vertx.ext.unit.Async; 6 | import io.vertx.ext.unit.TestContext; 7 | import io.vertx.ext.unit.junit.VertxUnitRunner; 8 | import org.junit.Test; 9 | import org.junit.runner.RunWith; 10 | 11 | import static io.restassured.RestAssured.*; 12 | import static org.hamcrest.CoreMatchers.containsString; 13 | import static org.hamcrest.CoreMatchers.equalTo; 14 | import static 
org.swisspush.reststorage.util.HttpRequestHeader.COMPRESS_HEADER; 15 | import static org.swisspush.reststorage.util.HttpRequestHeader.IF_NONE_MATCH_HEADER; 16 | 17 | @RunWith(VertxUnitRunner.class) 18 | public class ResourceCompressionIntegrationTest extends RedisStorageIntegrationTestCase { 19 | 20 | @Test 21 | public void testPutGetWithCompression(TestContext context) { 22 | Async async = context.async(); 23 | putResource("{ \"foo\": \"bar\" }", true, 200); 24 | getResource("res", 200, "foo", "bar"); 25 | async.complete(); 26 | } 27 | 28 | /** 29 | * The resource has always to be overwritten, if the compression state differs between the sent resource 30 | * and the stored resource. This behaviour is, to prevent unexpected behaviour considering the etag mechanism. 31 | */ 32 | @Test 33 | public void testStoreCompressedAndUnCompressed() { 34 | 35 | String originalContent = "{\"content\": \"originalContent\"}"; 36 | String modifiedContent = "{\"content\": \"modifiedContent\"}"; 37 | 38 | // Scenario 1: PUT 1 = {uncompressed, etag1}, PUT 2 = {compressed, etag1} 39 | putResource(originalContent, false, 200); 40 | putResource(modifiedContent, true, 200); 41 | getResource("res", 200, "content", "modifiedContent"); 42 | 43 | // Scenario 2: PUT 1 = {compressed, etag1}, PUT 2 = {uncompressed, etag1} 44 | jedis.flushAll(); 45 | putResource(originalContent, true, 200); 46 | putResource(modifiedContent, false, 200); 47 | getResource("res", 200, "content", "modifiedContent"); 48 | 49 | // Scenario 3: PUT 1 = {compressed, etag1}, PUT 2 = {compressed, etag1} 50 | jedis.flushAll(); 51 | putResource(originalContent, true, 200); 52 | putResource(modifiedContent, true, 304); 53 | getResource("res", 200, "content", "originalContent"); 54 | 55 | // Scenario 4: PUT 1 = {uncompressed, etag1}, PUT 2 = {uncompressed, etag1} 56 | jedis.flushAll(); 57 | putResource(originalContent, false, 200); 58 | putResource(modifiedContent, false, 304); 59 | getResource("res", 200, "content", "originalContent"); 60 | } 61 | 62 | @Test 63 | public void testCompressAndMerge(TestContext context) { 64 | Async async = context.async(); 65 | with() 66 | .header(COMPRESS_HEADER.getName(), "true") 67 | .param("merge", "true") 68 | .body("{ \"foo\": \"bar2\" }") 69 | .put("res") 70 | .then().assertThat() 71 | .statusCode(400) 72 | .body(containsString("Invalid parameter/header combination: merge parameter and " + COMPRESS_HEADER.getName() + " header cannot be used concurrently")); 73 | async.complete(); 74 | } 75 | 76 | @Test 77 | public void testGetFailHandlingForCorruptCompressedData(TestContext context) { 78 | Async async = context.async(); 79 | putResource("{ \"foo\": \"bar\" }", true, 200); 80 | 81 | // cripple compressed data to make it impossible to decompress 82 | jedis.hset("rest-storage:resources:res", "resource", "xxx"); 83 | 84 | when() 85 | .get("res") 86 | .then().assertThat() 87 | .statusCode(500) 88 | .body(containsString("Error during decompression of resource: Not in GZIP format")); 89 | 90 | async.complete(); 91 | } 92 | 93 | private void putResource(String body, boolean storeCompressed, int statusCode){ 94 | RequestSpecification spec = given().header(IF_NONE_MATCH_HEADER.getName(), "etag1"); 95 | if(storeCompressed){ 96 | spec = spec.header(COMPRESS_HEADER.getName(), "true"); 97 | } 98 | spec.body(body).put("res").then().assertThat().statusCode(statusCode); 99 | } 100 | 101 | private void getResource(String path, int statusCode, String bodyProperty, String equalToValue){ 102 | when() 103 | .get(path) 104 | 
.then().assertThat() 105 | .statusCode(statusCode) 106 | .body(bodyProperty, equalTo(equalToValue)); 107 | } 108 | } 109 | -------------------------------------------------------------------------------- /src/test/java/org/swisspush/reststorage/util/GZIPUtilTest.java: -------------------------------------------------------------------------------- 1 | package org.swisspush.reststorage.util; 2 | 3 | import io.vertx.core.Vertx; 4 | import org.slf4j.Logger; 5 | import io.vertx.ext.unit.Async; 6 | import io.vertx.ext.unit.TestContext; 7 | import io.vertx.ext.unit.junit.VertxUnitRunner; 8 | import org.apache.commons.io.IOUtils; 9 | import org.junit.Test; 10 | import org.junit.runner.RunWith; 11 | import org.mockito.Mockito; 12 | 13 | import java.io.*; 14 | import java.nio.charset.StandardCharsets; 15 | import java.util.Arrays; 16 | import java.util.zip.GZIPInputStream; 17 | 18 | /** 19 | * Tests for {@link GZIPUtil} class. 20 | * 21 | * @author https://github.com/mcweba [Marc-Andre Weber] 22 | */ 23 | @RunWith(VertxUnitRunner.class) 24 | public class GZIPUtilTest { 25 | 26 | @Test 27 | public void testCompressResource(TestContext testContext) { 28 | Async async = testContext.async(); 29 | String uncompressedString = "My uncompresed Resource"; 30 | byte[] uncompressed = uncompressedString.getBytes(StandardCharsets.UTF_8); 31 | GZIPUtil.compressResource(Vertx.vertx(), Mockito.mock(Logger.class), uncompressed, compressResourceResult -> { 32 | testContext.assertTrue(compressResourceResult.succeeded()); 33 | testContext.assertNotEquals(uncompressed, compressResourceResult.result(), "Compressed and uncompressed Resource should not be equal"); 34 | 35 | try { 36 | InputStream input = new ByteArrayInputStream(compressResourceResult.result()); 37 | GZIPInputStream gzipInputStream = new GZIPInputStream(input); 38 | 39 | byte[] buffer = new byte[1024]; 40 | ByteArrayOutputStream out = new ByteArrayOutputStream(); 41 | 42 | int len; 43 | while ((len = gzipInputStream.read(buffer)) > 0) { 44 | out.write(buffer, 0, len); 45 | } 46 | 47 | gzipInputStream.close(); 48 | out.close(); 49 | byte[] decompressed = out.toByteArray(); 50 | 51 | testContext.assertTrue(Arrays.equals(uncompressed, decompressed), "Compressed and decompressed Resource should be equal"); 52 | testContext.assertEquals(uncompressedString, new String(decompressed, StandardCharsets.UTF_8)); 53 | } catch (Exception e) { 54 | testContext.fail(e); 55 | } 56 | async.complete(); 57 | }); 58 | } 59 | 60 | @Test 61 | public void testDecompressResource(TestContext testContext) throws Exception { 62 | Async async = testContext.async(); 63 | byte[] compressedData = IOUtils.toByteArray(this.getClass().getClassLoader().getResourceAsStream("testResource.gz")); 64 | GZIPUtil.decompressResource(Vertx.vertx(), Mockito.mock(Logger.class), compressedData, decompressResourceResult -> { 65 | testContext.assertEquals("This is an uncompressed content from a gzip file", new String(decompressResourceResult.result(), StandardCharsets.UTF_8)); 66 | async.complete(); 67 | }); 68 | } 69 | } 70 | -------------------------------------------------------------------------------- /src/test/java/org/swisspush/reststorage/util/HttpRequestParamTest.java: -------------------------------------------------------------------------------- 1 | package org.swisspush.reststorage.util; 2 | 3 | import io.vertx.core.MultiMap; 4 | import io.vertx.core.http.impl.headers.HeadersMultiMap; 5 | import io.vertx.ext.unit.TestContext; 6 | import io.vertx.ext.unit.junit.VertxUnitRunner; 7 | import 
org.junit.Before; 8 | import org.junit.Test; 9 | import org.junit.runner.RunWith; 10 | 11 | import static org.swisspush.reststorage.util.HttpRequestParam.*; 12 | 13 | /** 14 | *
15 | * Tests for the {@link HttpRequestParam} class 16 | *
17 | * 18 | * @author https://github.com/mcweba [Marc-Andre Weber] 19 | */ 20 | @RunWith(VertxUnitRunner.class) 21 | public class HttpRequestParamTest { 22 | 23 | MultiMap params; 24 | 25 | @Before 26 | public void setUp() { 27 | params = new HeadersMultiMap(); 28 | } 29 | 30 | @Test 31 | public void testContainsHeader(TestContext context){ 32 | context.assertFalse(HttpRequestParam.containsParam(null, STORAGE_EXPAND_PARAMETER)); 33 | context.assertFalse(HttpRequestParam.containsParam(params, STORAGE_EXPAND_PARAMETER)); 34 | 35 | params.set("storageExpand", "true"); 36 | context.assertTrue(HttpRequestParam.containsParam(params, STORAGE_EXPAND_PARAMETER)); 37 | 38 | params.clear(); 39 | params.set("storageexpand", "true"); 40 | context.assertTrue(HttpRequestParam.containsParam(params, STORAGE_EXPAND_PARAMETER)); 41 | 42 | params.clear(); 43 | params.set("STORAGEEXPAND", "true"); 44 | context.assertTrue(HttpRequestParam.containsParam(params, STORAGE_EXPAND_PARAMETER)); 45 | 46 | params.clear(); 47 | params.set("xstorageExpand", "true"); 48 | context.assertFalse(HttpRequestParam.containsParam(params, STORAGE_EXPAND_PARAMETER)); 49 | } 50 | 51 | @Test 52 | public void testGetString(TestContext context){ 53 | params.set(LIMIT_PARAMETER.getName(), "99"); 54 | context.assertEquals("99", getString(params, LIMIT_PARAMETER)); 55 | 56 | params.set(LIMIT_PARAMETER.getName(), "444"); 57 | context.assertEquals("444", getString(params, LIMIT_PARAMETER)); 58 | 59 | params.set(LIMIT_PARAMETER.getName(), "0"); 60 | context.assertEquals("0", getString(params, LIMIT_PARAMETER)); 61 | 62 | params.set(LIMIT_PARAMETER.getName(), "9999999999999999999"); 63 | context.assertEquals("9999999999999999999", getString(params, LIMIT_PARAMETER)); 64 | 65 | params.set(LIMIT_PARAMETER.getName(), ""); 66 | context.assertEquals("", getString(params, LIMIT_PARAMETER)); 67 | 68 | params.set(LIMIT_PARAMETER.getName(), "xyz"); 69 | context.assertEquals("xyz", getString(params, LIMIT_PARAMETER)); 70 | 71 | params.clear(); 72 | context.assertNull(getString(params, LIMIT_PARAMETER)); 73 | 74 | context.assertNull(getString(null, LIMIT_PARAMETER)); 75 | } 76 | 77 | @Test 78 | public void testGetBoolean(TestContext context){ 79 | params.set(STORAGE_EXPAND_PARAMETER.getName(), "true"); 80 | context.assertTrue(getBoolean(params, STORAGE_EXPAND_PARAMETER)); 81 | 82 | params.set(STORAGE_EXPAND_PARAMETER.getName(), "TRUE"); 83 | context.assertTrue(getBoolean(params, STORAGE_EXPAND_PARAMETER)); 84 | 85 | params.set(STORAGE_EXPAND_PARAMETER.getName(), "trUe"); 86 | context.assertTrue(getBoolean(params, STORAGE_EXPAND_PARAMETER)); 87 | 88 | params.set(STORAGE_EXPAND_PARAMETER.getName(), "false"); 89 | context.assertFalse(getBoolean(params, STORAGE_EXPAND_PARAMETER)); 90 | 91 | params.set(STORAGE_EXPAND_PARAMETER.getName(), "yes"); 92 | context.assertFalse(getBoolean(params, STORAGE_EXPAND_PARAMETER)); 93 | 94 | params.set(STORAGE_EXPAND_PARAMETER.getName(), "124"); 95 | context.assertFalse(getBoolean(params, STORAGE_EXPAND_PARAMETER)); 96 | 97 | params.set(STORAGE_EXPAND_PARAMETER.getName(), "foo"); 98 | context.assertFalse(getBoolean(params, STORAGE_EXPAND_PARAMETER)); 99 | 100 | params.set(STORAGE_EXPAND_PARAMETER.getName(), "1"); 101 | context.assertFalse(getBoolean(params, STORAGE_EXPAND_PARAMETER)); 102 | 103 | params.set(STORAGE_EXPAND_PARAMETER.getName(), "0"); 104 | context.assertFalse(getBoolean(params, STORAGE_EXPAND_PARAMETER)); 105 | 106 | params.set(STORAGE_EXPAND_PARAMETER.getName(), ""); 107 | 
context.assertFalse(getBoolean(params, STORAGE_EXPAND_PARAMETER)); 108 | 109 | params.clear(); 110 | context.assertFalse(getBoolean(params, STORAGE_EXPAND_PARAMETER)); 111 | 112 | context.assertFalse(getBoolean(null, STORAGE_EXPAND_PARAMETER)); 113 | } 114 | } 115 | -------------------------------------------------------------------------------- /src/test/java/org/swisspush/reststorage/util/ResourceNameUtilTest.java: -------------------------------------------------------------------------------- 1 | package org.swisspush.reststorage.util; 2 | 3 | import io.vertx.ext.unit.TestContext; 4 | import io.vertx.ext.unit.junit.VertxUnitRunner; 5 | import org.junit.Test; 6 | import org.junit.runner.RunWith; 7 | 8 | import java.util.Arrays; 9 | import java.util.List; 10 | 11 | /** 12 | * Tests for {@link ResourceNameUtil} class. 13 | * 14 | * @author https://github.com/mcweba [Marc-Andre Weber] 15 | */ 16 | @RunWith(VertxUnitRunner.class) 17 | public class ResourceNameUtilTest { 18 | 19 | @Test 20 | public void testReplaceColonsAndSemiColons(TestContext testContext) { 21 | testContext.assertEquals(null, ResourceNameUtil.replaceColonsAndSemiColons(null)); 22 | testContext.assertEquals("", ResourceNameUtil.replaceColonsAndSemiColons("")); 23 | testContext.assertEquals("StringWithoutColonsAndSemiColons", ResourceNameUtil.replaceColonsAndSemiColons("StringWithoutColonsAndSemiColons")); 24 | testContext.assertEquals("1_hello-@$&()*+,=-._~!'", ResourceNameUtil.replaceColonsAndSemiColons("1_hello-@$&()*+,=-._~!'")); 25 | testContext.assertEquals("1_hello_§_°_123", ResourceNameUtil.replaceColonsAndSemiColons("1_hello_:_;_123")); 26 | testContext.assertEquals("§§§°°°", ResourceNameUtil.replaceColonsAndSemiColons(":::;;;")); 27 | } 28 | 29 | @Test 30 | public void testReplaceColonsAndSemiColonsInList(TestContext testContext) { 31 | 32 | List resources = Arrays.asList("res_1", "res_:_;_2", "", ":::;;;"); 33 | 34 | ResourceNameUtil.replaceColonsAndSemiColonsInList(resources); 35 | 36 | testContext.assertEquals("res_1", resources.get(0)); 37 | testContext.assertEquals("res_§_°_2", resources.get(1)); 38 | testContext.assertEquals("", resources.get(2)); 39 | testContext.assertEquals("§§§°°°", resources.get(3)); 40 | } 41 | 42 | @Test 43 | public void testResetReplacedColonsAndSemiColons(TestContext testContext) { 44 | testContext.assertEquals(null, ResourceNameUtil.resetReplacedColonsAndSemiColons(null)); 45 | testContext.assertEquals("", ResourceNameUtil.resetReplacedColonsAndSemiColons("")); 46 | testContext.assertEquals("StringWithoutColonsAndSemiColons", ResourceNameUtil.resetReplacedColonsAndSemiColons("StringWithoutColonsAndSemiColons")); 47 | testContext.assertEquals("1_hello-@$&()*+,=-._~!'", ResourceNameUtil.resetReplacedColonsAndSemiColons("1_hello-@$&()*+,=-._~!'")); 48 | testContext.assertEquals("1_hello_:_;_123", ResourceNameUtil.resetReplacedColonsAndSemiColons("1_hello_§_°_123")); 49 | testContext.assertEquals(":::;;;", ResourceNameUtil.resetReplacedColonsAndSemiColons("§§§°°°")); 50 | } 51 | 52 | @Test 53 | public void testResetReplacedColonsAndSemiColonsInList(TestContext testContext) { 54 | 55 | List resources = Arrays.asList("res_1", "res_§_°_2", "", "§§§°°°"); 56 | 57 | ResourceNameUtil.resetReplacedColonsAndSemiColonsInList(resources); 58 | 59 | testContext.assertEquals("res_1", resources.get(0)); 60 | testContext.assertEquals("res_:_;_2", resources.get(1)); 61 | testContext.assertEquals("", resources.get(2)); 62 | testContext.assertEquals(":::;;;", resources.get(3)); 63 | } 64 | } 65 | 
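ResourceNameUtilTest above pins down the escaping scheme used for resource names: every ':' is replaced by '§' and every ';' by '°', and resetReplacedColonsAndSemiColons reverses the substitution, presumably because ':' already serves as the separator in the storage's Redis keys (for example rest-storage:resources:res seen earlier). A minimal, hypothetical round-trip sketch (not part of the repository):

package org.swisspush.reststorage.util;

// Hypothetical round-trip example, not part of the repository.
public class ResourceNameUtilExample {

    public static void main(String[] args) {
        // ':' -> '§' and ';' -> '°', as asserted by ResourceNameUtilTest
        String escaped = ResourceNameUtil.replaceColonsAndSemiColons("server:port;option");
        // escaped is now "server§port°option"
        String restored = ResourceNameUtil.resetReplacedColonsAndSemiColons(escaped);
        // restored is "server:port;option" again
        System.out.println(escaped + " -> " + restored);
    }
}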
-------------------------------------------------------------------------------- /src/test/resources/logging.properties: -------------------------------------------------------------------------------- 1 | #test log 2 | .level=SEVERE 3 | handlers= java.util.logging.ConsoleHandler 4 | java.util.logging.ConsoleHandler.level=ALL -------------------------------------------------------------------------------- /src/test/resources/redis_info_output: -------------------------------------------------------------------------------- 1 | # Server 2 | redis_version:2.8.7 3 | redis_git_sha1:00000000 4 | redis_git_dirty:0 5 | redis_build_id:f37a784f559f7f7c 6 | redis_mode:standalone 7 | os:CYGWIN_NT-10.0 1.7.28(0.271/5/3) x86_64 8 | arch_bits:64 9 | multiplexing_api:select 10 | gcc_version:4.8.2 11 | process_id:23796 12 | run_id:931e4e7a92fdb00664f19ea0485a9446d425efcc 13 | tcp_port:6379 14 | uptime_in_seconds:796 15 | uptime_in_days:0 16 | hz:10 17 | lru_clock:1234155 18 | executable:C:\work\tools\redis 3.2.1\redis-server.exe 19 | config_file:/redis/redis.conf 20 | 21 | # Clients 22 | connected_clients:170 23 | client_longest_output_list:0 24 | client_biggest_input_buf:0 25 | blocked_clients:0 26 | 27 | # Memory 28 | used_memory:5078712 29 | used_memory_human:4.84M 30 | used_memory_rss:5078712 31 | used_memory_peak:5142312 32 | used_memory_peak_human:4.90M 33 | used_memory_lua:79872 34 | mem_fragmentation_ratio:1.00 35 | mem_allocator:libc 36 | 37 | # Persistence 38 | loading:0 39 | rdb_changes_since_last_save:40 40 | rdb_bgsave_in_progress:0 41 | rdb_last_save_time:1648119323 42 | rdb_last_bgsave_status:ok 43 | rdb_last_bgsave_time_sec:-1 44 | rdb_current_bgsave_time_sec:-1 45 | aof_enabled:0 46 | aof_rewrite_in_progress:0 47 | aof_rewrite_scheduled:0 48 | aof_last_rewrite_time_sec:-1 49 | aof_current_rewrite_time_sec:-1 50 | aof_last_bgrewrite_status:ok 51 | aof_last_write_status:ok 52 | 53 | # Stats 54 | total_connections_received:170 55 | total_commands_processed:423 56 | instantaneous_ops_per_sec:0 57 | rejected_connections:0 58 | sync_full:0 59 | sync_partial_ok:0 60 | sync_partial_err:0 61 | expired_keys:39 62 | evicted_keys:0 63 | keyspace_hits:0 64 | keyspace_misses:52 65 | pubsub_channels:0 66 | pubsub_patterns:0 67 | latest_fork_usec:0 68 | 69 | # Replication 70 | role:master 71 | connected_slaves:0 72 | master_repl_offset:0 73 | repl_backlog_active:0 74 | repl_backlog_size:1048576 75 | repl_backlog_first_byte_offset:0 76 | repl_backlog_histlen:0 77 | 78 | # CPU 79 | used_cpu_sys:36.44 80 | used_cpu_user:39.33 81 | used_cpu_sys_children:0.00 82 | used_cpu_user_children:0.00 83 | 84 | # Keyspace 85 | db0:keys=2,expires=1,avg_ttl=10235 86 | db1:keys=44,expires=232,avg_ttl=111111 87 | dbX:99 88 | -------------------------------------------------------------------------------- /src/test/resources/redis_info_persistance_loading_0: -------------------------------------------------------------------------------- 1 | # Persistence 2 | loading:0 3 | async_loading:0 4 | current_cow_peak:0 5 | current_cow_size:0 6 | current_cow_size_age:0 7 | current_fork_perc:0.00 8 | current_save_keys_processed:0 9 | current_save_keys_total:0 10 | rdb_changes_since_last_save:108 11 | rdb_bgsave_in_progress:0 12 | rdb_last_save_time:1718713249 13 | rdb_last_bgsave_status:ok 14 | rdb_last_bgsave_time_sec:0 15 | rdb_current_bgsave_time_sec:-1 16 | rdb_saves:296 17 | rdb_last_cow_size:0 18 | rdb_last_load_keys_expired:0 19 | rdb_last_load_keys_loaded:0 20 | aof_enabled:0 21 | aof_rewrite_in_progress:0 22 | 
aof_rewrite_scheduled:0 23 | aof_last_rewrite_time_sec:-1 24 | aof_current_rewrite_time_sec:-1 25 | aof_last_bgrewrite_status:ok 26 | aof_rewrites:0 27 | aof_rewrites_consecutive_failures:0 28 | aof_last_write_status:ok 29 | aof_last_cow_size:0 30 | module_fork_in_progress:0 31 | module_fork_last_cow_size:0 32 | -------------------------------------------------------------------------------- /src/test/resources/redis_info_persistance_loading_1: -------------------------------------------------------------------------------- 1 | # Persistence 2 | loading:1 3 | async_loading:0 4 | current_cow_peak:0 5 | current_cow_size:0 6 | current_cow_size_age:0 7 | current_fork_perc:0.00 8 | current_save_keys_processed:0 9 | current_save_keys_total:0 10 | rdb_changes_since_last_save:108 11 | rdb_bgsave_in_progress:0 12 | rdb_last_save_time:1718713249 13 | rdb_last_bgsave_status:ok 14 | rdb_last_bgsave_time_sec:0 15 | rdb_current_bgsave_time_sec:-1 16 | rdb_saves:296 17 | rdb_last_cow_size:0 18 | rdb_last_load_keys_expired:0 19 | rdb_last_load_keys_loaded:0 20 | aof_enabled:0 21 | aof_rewrite_in_progress:0 22 | aof_rewrite_scheduled:0 23 | aof_last_rewrite_time_sec:-1 24 | aof_current_rewrite_time_sec:-1 25 | aof_last_bgrewrite_status:ok 26 | aof_rewrites:0 27 | aof_rewrites_consecutive_failures:0 28 | aof_last_write_status:ok 29 | aof_last_cow_size:0 30 | module_fork_in_progress:0 31 | module_fork_last_cow_size:0 32 | -------------------------------------------------------------------------------- /src/test/resources/testResource.gz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/swisspost/vertx-rest-storage/dc7248148201fc33a033be7a304af546de88f3d1/src/test/resources/testResource.gz --------------------------------------------------------------------------------
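The Redis-backed integration tests above all follow the same pattern: RedisStorageIntegrationTestCase deploys RestStorageMod against a local Redis, configures RestAssured, and flushes Redis after every test, so a concrete test class only issues HTTP calls and assertions. A minimal, hypothetical test in that style (not part of the repository; it relies on the inherited setup and assumes a Redis instance is running locally):

package org.swisspush.reststorage.redis;

import io.vertx.ext.unit.junit.VertxUnitRunner;
import org.junit.Test;
import org.junit.runner.RunWith;

import static io.restassured.RestAssured.given;
import static io.restassured.RestAssured.when;
import static org.hamcrest.CoreMatchers.equalTo;

// Hypothetical example, not part of the repository: a simple PUT/GET round trip
// reusing the verticle deployment, RestAssured configuration and Redis cleanup
// inherited from RedisStorageIntegrationTestCase.
@RunWith(VertxUnitRunner.class)
public class MinimalCrudExampleTest extends RedisStorageIntegrationTestCase {

    @Test
    public void putThenGetResource() {
        given().body("{ \"foo\": \"bar\" }").put("example").then().assertThat().statusCode(200);
        when().get("example").then().assertThat().statusCode(200).body("foo", equalTo("bar"));
    }
}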