├── .circleci
│   └── config.yml
├── .github
│   └── CODEOWNERS
├── .gitignore
├── CHANGES.adoc
├── ORIGINATOR
├── README.md
├── docs
│   └── cybersyn.jpg
├── jmh
│   ├── .gitignore
│   ├── README.md
│   ├── pom.xml
│   └── src
│       └── main
│           └── java
│               └── io
│                   └── aleph
│                       └── dirigiste
│                           └── Benchmarks.java
├── project.clj
├── src
│   └── io
│       └── aleph
│           └── dirigiste
│               ├── Executor.java
│               ├── Executors.java
│               ├── IPool.java
│               ├── Pool.java
│               ├── Pools.java
│               └── Stats.java
└── test
    ├── clojure
    │   └── dirigiste
    │       ├── executor_test.clj
    │       └── pool_test.clj
    └── java
        └── io
            └── aleph
                └── dirigiste
                    ├── ExecutorTest.java
                    ├── PoolTest.java
                    └── StatsTest.java
/.circleci/config.yml:
--------------------------------------------------------------------------------
1 | # Use the latest 2.1 version of CircleCI pipeline process engine.
2 | # See: https://circleci.com/docs/2.0/configuration-reference
3 | # For a detailed guide to building and testing with clojure, read the docs:
4 | # https://circleci.com/docs/2.0/language-clojure/ for more details
5 | version: 2.1
6 |
7 | # Define a job to be invoked later in a workflow.
8 | # See: https://circleci.com/docs/2.0/configuration-reference/#jobs
9 | jobs:
10 | build:
11 | # Specify the execution environment. You can specify an image from Dockerhub or use one of our Convenience Images from CircleCI's Developer Hub.
12 | # See: https://circleci.com/docs/2.0/configuration-reference/#docker-machine-macos-windows-executor
13 | docker:
14 | # specify the version you desire here
15 | - image: circleci/clojure:lein-2.9.5
16 |
17 | # Specify service dependencies here if necessary
18 | # CircleCI maintains a library of pre-built images
19 | # documented at https://circleci.com/docs/2.0/circleci-images/
20 | # - image: circleci/postgres:9.4
21 |
22 | working_directory: ~/repo
23 |
24 | environment:
25 | LEIN_ROOT: "true"
26 | # Customize the JVM maximum heap limit
27 | JVM_OPTS: -Xmx3200m
28 |
29 | # Add steps to the job
30 | # See: https://circleci.com/docs/2.0/configuration-reference/#steps
31 | steps:
32 | - checkout
33 |
34 | # Download and cache dependencies
35 | - restore_cache:
36 | keys:
37 | - v1-dependencies-{{ checksum "project.clj" }}
38 | # fallback to using the latest cache if no exact match is found
39 | - v1-dependencies-
40 |
41 | - run: lein deps
42 |
43 | - save_cache:
44 | paths:
45 | - ~/.m2
46 | key: v1-dependencies-{{ checksum "project.clj" }}
47 |
48 | # run clj tests!
49 | - run: lein test
50 |
51 | # run java tests!
52 | - run: lein junit
53 |
54 | # Invoke jobs via workflows
55 | # See: https://circleci.com/docs/2.0/configuration-reference/#workflows
56 | workflows:
57 | sample: # This is the name of the workflow, feel free to change it to better match your workflow.
58 | # Inside the workflow, you define the jobs you want to run.
59 | jobs:
60 | - build
61 |
--------------------------------------------------------------------------------
/.github/CODEOWNERS:
--------------------------------------------------------------------------------
1 | * @KingMob
2 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | /target
2 | /classes
3 | /checkouts
4 | pom.xml
5 | pom.xml.asc
6 | *.jar
7 | *.class
8 | /.lein-*
9 | /.nrepl-port
10 | /.idea
11 | /*.iml
12 | /doc
13 | push
14 | .DS_Store
15 | .java-version
16 | .clj-kondo/.cache/
17 | .lsp/.cache
18 | .portal/vs-code.edn
19 |
--------------------------------------------------------------------------------
/CHANGES.adoc:
--------------------------------------------------------------------------------
1 | == 1.0.4
2 |
3 | * https://github.com/clj-commons/dirigiste/pull/40[#40] Protect workers from
4 |   being shut down by racy interrupts
5 | * https://github.com/clj-commons/dirigiste/pull/34[#34] Allow pool size
6 | adjustment by 1
7 | * Config fix for lein-junit so Java tests will run, but not
8 | get added to the jar file
9 | * Updated CircleCI config to run Java tests
10 |
11 | Contributions by Timothy Dean, Matthew Davidson, and Oleksandr Yakushev
12 |
13 | == 1.0.3
14 |
15 | * Fixed a bug where queues were not properly removed
16 | * Fixed bug in `awaitTermination` return value
17 | * Fixed pool race condition
18 | * Added JMH benchmarks
19 | * Simplified locking strategy
20 | * Fixed Javadoc link
21 |
22 | Contributions by Arnaud Geiser
23 |
24 | == 1.0.2
25 |
26 | * Switch to ThreadLocalRandom
27 | * Switch to `Collections.addAll()` in `cleanup`
28 | * Call `getQueueLength` only once in control loop
29 |
30 | Contributions by Reynald Borer
31 |
32 | == 1.0.1
33 |
34 | * Add CircleCI support
35 | * Convert to clj-commons group
36 | * Mark `_isShutdown` as volatile
37 |
38 | Contributions by vemv, Erik Assum, and Matthew Davidson
39 |
40 | == <1.0.0
41 |
42 | Built by Zach Tellman
43 |
--------------------------------------------------------------------------------
/ORIGINATOR:
--------------------------------------------------------------------------------
1 | @ztellman
2 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | [](https://clojars.org/org.clj-commons/dirigiste)
2 | [](https://circleci.com/gh/clj-commons/dirigiste)
3 | 
4 |
5 | (pronounced deer-eh-jeest)
6 |
7 | In the default JVM thread pools, once a thread is created it will only be retired when it hasn't performed a task in the last minute. In practice, this means that there are as many threads as the peak historical number of concurrent tasks handled by the pool, forever. These thread pools are also poorly instrumented, making it difficult to tune their latency or throughput.
8 |
9 | Dirigiste provides a fast, richly instrumented version of a `java.util.concurrent.ExecutorService`, and provides a means to feed that instrumentation into a control mechanism that can grow or shrink the pool as needed. Default implementations that optimize the pool size for thread utilization are provided.
10 |
11 | It also provides an object pool mechanism that uses a similar feedback mechanism to resize itself, and is significantly simpler than the [Apache Commons object pool implementation](http://commons.apache.org/proper/commons-pool/).
12 |
13 | Javadocs can be found [here](https://clj-commons.github.io/dirigiste/).
14 |
15 | ### usage
16 |
17 | In Leiningen:
18 |
19 | ```clojure
20 | [org.clj-commons/dirigiste "1.0.3"]
21 | ```
22 |
23 | In Maven:
24 |
25 | ```xml
26 | <dependency>
27 |   <groupId>org.clj-commons</groupId>
28 |   <artifactId>dirigiste</artifactId>
29 |   <version>1.0.3</version>
30 | </dependency>
31 | ```
32 |
33 | ### executors
34 |
35 | Using the default utilization executor is simple, via [`Executors.utilizationExecutor(...)`](https://clj-commons.org/dirigiste/io/aleph/dirigiste/Executors.html#utilizationExecutor\(double,int\)):
36 |
37 | ```java
38 | import io.aleph.dirigiste.Executors;
39 |
40 | ...
41 |
42 | ExecutorService e = Executors.utilizationExecutor(0.9, 64);
43 | ```
44 |
45 | This will create an executor which will try to size the pool such that 90% of the threads are active, but will not grow beyond 64 threads.
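Because the returned executor is a standard `java.util.concurrent.ExecutorService`, it can be used as a drop-in replacement for the JDK pools. A minimal sketch (the submitted task is a placeholder):

```java
// `expensiveComputation` is a placeholder for whatever work you actually submit
Future<Integer> result = e.submit(() -> expensiveComputation());

// shut down like any other ExecutorService once it's no longer needed
e.shutdown();
```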
46 |
47 | This executor exposes [`getStats`](https://clj-commons.org/dirigiste/io/aleph/dirigiste/Executor.html#getStats\(\)) and [`getLastStats`](https://clj-commons.org/dirigiste/io/aleph/dirigiste/Executor.html#getLastStats\(\)) methods, which can be used to examine the performance characteristics of the executor. `getLastStats` uses the last value passed to the control loop, so can return immediately. `getStats` returns the statistics gathered since the last control update, and so may contain 0 or more samples, and requires some amount of computation.
48 |
49 | Since instrumentation will cause some small overhead, you may specify which dimensions you wish to collect, via the [`Metric`](https://clj-commons.org/dirigiste/io/aleph/dirigiste/Stats.Metric.html) class. The possible fields are as follows:
50 |
51 | | metric | description |
52 | |-------|-------------|
53 | | `QUEUE_LATENCY` | the time spent on the queue for each task, in nanoseconds |
54 | | `TASK_LATENCY` | the time taken to complete a task, including time spent on the queue, in nanoseconds |
55 | | `QUEUE_LENGTH` | the length of the queue |
56 | | `TASK_ARRIVAL_RATE` | the rate of incoming tasks per second |
57 | | `TASK_COMPLETION_RATE` | the rate of completed tasks per second |
58 | | `TASK_REJECTION_RATE` | the rate of rejected tasks per second |
59 | | `UTILIZATION` | the portion of threads which are active, from 0 to 1 |
60 |
61 | These metrics are surfaced via the [`Stats`](https://clj-commons.org/dirigiste/io/aleph/dirigiste/Stats.html) class, which provides `getMean...()` and `get...(double quantile)` for each metric. By default, the utilization executor will only measure utilization, but if we want to get the full range of metrics, we can instantiate it like this:
62 |
63 | ```java
64 | Executors.utilizationExecutor(0.9, 64, EnumSet.allOf(Stats.Metric.class));
65 | ```
66 |
67 | This will allow us to track metrics which aren't required for the control loop, but are useful elsewhere.
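For example, a monitoring loop might periodically read those statistics. A minimal sketch, assuming the executor is held as an `io.aleph.dirigiste.Executor` (where `getStats` lives) and that the getter names follow the `getMean...()`/`get...(quantile)` convention described above:

```java
Executor ex = Executors.utilizationExecutor(0.9, 64, EnumSet.allOf(Stats.Metric.class));

Stats stats = ex.getStats();
int workers = stats.getNumWorkers();
double p90Utilization = stats.getUtilization(0.9);           // 90th percentile utilization
double p99TaskLatencyMs = stats.getTaskLatency(0.99) / 1e6;   // latencies are reported in nanoseconds
double meanQueueLength = stats.getMeanQueueLength();
```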
68 |
69 | ### pools
70 |
71 | All pools are defined via their generator, which is used to create and destroy the pooled objects:
72 |
73 | ```java
74 | public interface Generator<K,V> {
75 |     V generate(K key) throws Exception;
76 |     void destroy(K key, V val);
77 | }
78 | ```
79 |
80 | All pooled objects have an associated key. If objects have no external resources that must be explicitly disposed, `destroy` can be a no-op.
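As a trivial example (adapted from the JMH benchmarks in this repository), here is a generator that hands out a fresh `UUID` for each `Integer` key, with a no-op `destroy`:

```java
Generator<Integer, UUID> generator = new Generator<Integer, UUID>() {
    @Override
    public UUID generate(Integer key) {
        return UUID.randomUUID();
    }

    @Override
    public void destroy(Integer key, UUID val) {
        // a UUID holds no external resources, so there is nothing to clean up
    }
};
```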
81 |
82 | Object pools have three major functions, `acquire`, `release`, and `dispose`. Typically, objects will be taken out of the pool via `acquire`, and returned back via `release` once they've served their purpose:
83 |
84 | ```java
85 | pool = Pools.utilizationPool(generator, 0.9, 4, 1024);
86 | Object obj = pool.acquire("foo");
87 | useObject(obj);
88 | pool.release("foo", obj);
89 | ```
90 |
91 | However, if the object has expired, we can `dispose` of it:
92 |
93 | ```java
94 | pool.dispose("foo", obj);
95 | ```
96 |
97 | A pooled object can be disposed of at any time, without having first been acquired.
98 |
99 | To support non-blocking code, we may also acquire an object via a callback mechanism:
100 |
101 | ```java
102 | pool.acquire("foo",
103 |     new AcquireCallback() {
104 |         public void handleObject(Object obj) {
105 |             useObject(obj);
106 |             pool.release("foo", obj);
107 |         }
108 |     });
109 | ```
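Since the library targets Java 8 and `AcquireCallback` has a single method, the callback can also be written as a lambda:

```java
pool.acquire("foo", obj -> {
    useObject(obj);
    pool.release("foo", obj);
});
```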
110 |
111 | ### creating a custom controller
112 |
113 | The [`Executor.Controller`](https://clj-commons.org/dirigiste/io/aleph/dirigiste/Executor.Controller.html) interface is fairly straightforward:
114 |
115 | ```java
116 | public interface Controller {
117 |     boolean shouldIncrement(int currThreads);
118 |     int adjustment(Stats stats);
119 | }
120 | ```
121 |
122 | The first method, `shouldIncrement`, controls whether a new thread should be spun up. This means that the thread limit can be dynamic, for instance dependent on the available memory. This method will be called whenever `adjustment` calls for more threads, or when a task is unable to be added to the queue.
123 |
124 | The second method, `adjustment`, takes a `Stats` object, and returns a number representing how the pool size should be adjusted. The frequency with which `adjustment` is called is dictated by the `controlPeriod` parameter to the [`Executor`](https://clj-commons.org/dirigiste/io/aleph/dirigiste/Executor.html#%3Cinit%3E\(java.util.concurrent.ThreadFactory,java.util.concurrent.BlockingQueue,io.aleph.dirigiste.Executor.Controller,int,java.util.EnumSet,long,long,java.util.concurrent.TimeUnit\)) constructor, and the number of samples in the `Stats` object is controlled by the `samplePeriod` parameter.
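A controller and the two periods are wired together through that constructor. A minimal sketch, using the same queue, 25 ms sample period, and 10 s control period as the built-in factory methods:

```java
Executor.Controller controller = Executors.utilizationController(0.9, 64);

Executor executor = new Executor(
    java.util.concurrent.Executors.defaultThreadFactory(),
    new SynchronousQueue<Runnable>(false),
    controller,
    1,                                      // initialThreadCount
    EnumSet.of(Stats.Metric.UTILIZATION),
    25,                                     // samplePeriod
    10000,                                  // controlPeriod
    TimeUnit.MILLISECONDS);
```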
125 |
126 | The utilization controller is quite simple:
127 |
128 | ```java
129 | Executor.Controller utilizationController(final double targetUtilization, final int maxThreadCount) {
130 |     return new Executor.Controller() {
131 |         public boolean shouldIncrement(int numWorkers) {
132 |             return numWorkers < maxThreadCount;
133 |         }
134 |
135 |         public int adjustment(Stats stats) {
136 |             double correction = stats.getUtilization(0.9) / targetUtilization;
137 |             return (int) Math.ceil(stats.getNumWorkers() * correction) - stats.getNumWorkers();
138 |         }
139 |     };
140 | }
141 | ```
142 |
143 | It adjusts the number of threads using the `targetUtilization` compared against the 90th percentile measured utilization over the last `controlPeriod`. Obviously more sophisticated methods are possible, but they're left as an exercise for the reader.
144 |
145 | [`IPool.Controller`](https://clj-commons.org/dirigiste/io/aleph/dirigiste/IPool.Controller.html) works much the same, except that `adjustment` takes a `Map` of keys onto `Stats` objects, and returns a `Map` of keys onto `Integer` objects. The utilization controller is otherwise much the same:
146 |
147 | ```java
148 | public IPool.Controller utilizationController(final double targetUtilization, final int maxObjectsPerKey, final int maxTotalObjects) {
149 |
150 |     return new IPool.Controller() {
151 |         public boolean shouldIncrement(Object key, int objectsForKey, int totalObjects) {
152 |             return (objectsForKey < maxObjectsPerKey) && (totalObjects < maxTotalObjects);
153 |         }
154 |
155 |         public Map adjustment(Map stats) {
156 |             Map adj = new HashMap();
157 |
158 |             for (Object e : stats.entrySet()) {
159 |                 Map.Entry entry = (Map.Entry) e;
160 |                 Stats s = (Stats) entry.getValue();
161 |                 int numWorkers = s.getNumWorkers();
162 |                 double correction = s.getUtilization(0.9) / targetUtilization;
163 |                 int n = (int) Math.ceil(s.getNumWorkers() * correction) - numWorkers;
164 |
165 |                 adj.put(entry.getKey(), new Integer(n));
166 |             }
167 |
168 |             return adj;
169 |         }
170 |     };
171 | }
172 | ```
173 |
174 | ### license
175 |
176 | Copyright © 2015 Zachary Tellman
177 |
178 | Distributed under the MIT License
179 |
--------------------------------------------------------------------------------
/docs/cybersyn.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/clj-commons/dirigiste/efae0cae2acfd6acfc7fdb637ed15f1abbd296e0/docs/cybersyn.jpg
--------------------------------------------------------------------------------
/jmh/.gitignore:
--------------------------------------------------------------------------------
1 | target/
2 |
--------------------------------------------------------------------------------
/jmh/README.md:
--------------------------------------------------------------------------------
1 | ### JMH
2 |
3 | These JMH benchmarks can be run from this directory with the following command:
4 |
5 | ``` sh
6 | mvn verify && java -jar target/benchmarks.jar
7 | ```
8 |
--------------------------------------------------------------------------------
/jmh/pom.xml:
--------------------------------------------------------------------------------
1 |
31 |
32 |
34 | 4.0.0
35 |
36 | io.aleph.dirigiste
37 | jmh
38 | 1.0
39 | jar
40 |
41 | JMH benchmark sample: Java
42 |
43 |
47 |
48 |
49 |
50 | org.openjdk.jmh
51 | jmh-core
52 | ${jmh.version}
53 |
54 |
55 | org.openjdk.jmh
56 | jmh-generator-annprocess
57 | ${jmh.version}
58 | provided
59 |
60 |
61 | org.clj-commons
62 | dirigiste
63 | 1.0.2
64 |
65 |
66 |
67 |
68 | UTF-8
69 |
70 |
73 | 1.35
74 |
75 |
78 | 1.8
79 |
80 |
83 | benchmarks
84 |
85 |
86 |
87 |
88 |
89 | org.apache.maven.plugins
90 | maven-compiler-plugin
91 | 3.8.0
92 |
93 | ${javac.target}
94 | ${javac.target}
95 | ${javac.target}
96 |
97 |
98 |
99 | org.apache.maven.plugins
100 | maven-shade-plugin
101 | 3.2.1
102 |
103 |
104 | package
105 |
106 | shade
107 |
108 |
109 | ${uberjar.name}
110 |
111 |
112 | org.openjdk.jmh.Main
113 |
114 |
115 |
116 |
117 |
118 |
122 | *:*
123 |
124 | META-INF/*.SF
125 | META-INF/*.DSA
126 | META-INF/*.RSA
127 |
128 |
129 |
130 |
131 |
132 |
133 |
134 |
135 |
136 |
137 |
138 | maven-clean-plugin
139 | 2.5
140 |
141 |
142 | maven-deploy-plugin
143 | 2.8.1
144 |
145 |
146 | maven-install-plugin
147 | 2.5.1
148 |
149 |
150 | maven-jar-plugin
151 | 2.4
152 |
153 |
154 | maven-javadoc-plugin
155 | 2.9.1
156 |
157 |
158 | maven-resources-plugin
159 | 2.6
160 |
161 |
162 | maven-site-plugin
163 | 3.3
164 |
165 |
166 | maven-source-plugin
167 | 2.2.1
168 |
169 |
170 | maven-surefire-plugin
171 | 2.17
172 |
173 |
174 |
175 |
176 |
177 |
178 |
--------------------------------------------------------------------------------
/jmh/src/main/java/io/aleph/dirigiste/Benchmarks.java:
--------------------------------------------------------------------------------
1 | package io.aleph.dirigiste;
2 |
3 | import io.aleph.dirigiste.IPool.Controller;
4 | import io.aleph.dirigiste.IPool.Generator;
5 | import java.util.UUID;
6 | import java.util.concurrent.TimeUnit;
7 | import org.openjdk.jmh.annotations.Benchmark;
8 | import org.openjdk.jmh.annotations.Fork;
9 | import org.openjdk.jmh.annotations.Measurement;
10 | import org.openjdk.jmh.annotations.Scope;
11 | import org.openjdk.jmh.annotations.Setup;
12 | import org.openjdk.jmh.annotations.State;
13 | import org.openjdk.jmh.annotations.Threads;
14 | import org.openjdk.jmh.annotations.Warmup;
15 |
16 | @State(Scope.Benchmark)
17 | @Warmup(iterations = 1, time = 5)
18 | @Fork(1)
19 | public class Benchmarks {
20 |
21 | private final Controller<Integer> controller = Pools.utilizationController(0.9, 8, 1024);
22 | private Generator<Integer, UUID> generator;
23 |
24 | private Pool<Integer, UUID> pool;
25 |
26 | @Setup
27 | public void setup() {
28 | generator = new Generator<Integer, UUID>() {
29 | @Override
30 | public UUID generate(Integer key) {
31 | return UUID.randomUUID();
32 | }
33 |
34 | @Override
35 | public void destroy(Integer key, UUID val) {
36 | }
37 | };
38 | pool = new Pool<>(generator, controller, 65536, 1, 100, TimeUnit.MICROSECONDS);
39 | }
40 |
41 | @Benchmark
42 | @Measurement(batchSize = 1, iterations = 3)
43 | @Threads(1)
44 | public void thread1_batchSize1() throws InterruptedException {
45 | UUID value = pool.acquire(1);
46 | pool.dispose(1,value);
47 | }
48 |
49 | @Benchmark
50 | @Measurement(batchSize = 1, iterations = 3)
51 | @Threads(5)
52 | public void thread5_batchSize1() throws InterruptedException {
53 | UUID value = pool.acquire(1);
54 | pool.dispose(1,value);
55 | }
56 |
57 | @Benchmark
58 | @Measurement(batchSize = 1, iterations = 3)
59 | @Threads(20)
60 | public void thread20_batchSize1() throws InterruptedException {
61 | UUID value = pool.acquire(1);
62 | pool.dispose(1,value);
63 | }
64 |
65 | @Benchmark
66 | @Measurement(batchSize = 1, iterations = 3)
67 | @Threads(100)
68 | public void thread100_batchSize1() throws InterruptedException {
69 | UUID value = pool.acquire(1);
70 | pool.dispose(1,value);
71 | }
72 |
73 | @Benchmark
74 | @Measurement(batchSize = 100, iterations = 3)
75 | @Threads(1)
76 | public void thread1_batchSize100() throws InterruptedException {
77 | UUID value = pool.acquire(1);
78 | pool.dispose(1,value);
79 | }
80 |
81 | @Benchmark
82 | @Measurement(batchSize = 100, iterations = 3)
83 | @Threads(5)
84 | public void thread5_batchSize100() throws InterruptedException {
85 | UUID value = pool.acquire(1);
86 | pool.dispose(1,value);
87 | }
88 |
89 | @Benchmark
90 | @Measurement(batchSize = 100, iterations = 3)
91 | @Threads(20)
92 | public void thread20_batchSize100() throws InterruptedException {
93 | UUID value = pool.acquire(1);
94 | pool.dispose(1,value);
95 | }
96 |
97 | @Benchmark
98 | @Measurement(batchSize = 100, iterations = 3)
99 | @Threads(100)
100 | public void thread100_batchSize100() throws InterruptedException {
101 | UUID value = pool.acquire(1);
102 | pool.dispose(1,value);
103 | }
104 | }
105 |
--------------------------------------------------------------------------------
/project.clj:
--------------------------------------------------------------------------------
1 | (defproject org.clj-commons/dirigiste "1.0.4"
2 | :deploy-repositories [["clojars" {:url "https://repo.clojars.org"
3 | :username :env/clojars_username
4 | :password :env/clojars_password
5 | :sign-releases true}]]
6 | :url "https://github.com/clj-commons/dirigiste"
7 | :description "Centrally-planned thread and object pools"
8 | :license {:name "MIT License"}
9 | :dependencies []
10 | :profiles {:dev {:dependencies [[org.clojure/clojure "1.11.1"]
11 | [junit/junit "4.13.2"]]
12 | :java-source-paths ["test/java"]}
13 | :test {:dependencies [[junit/junit "4.13.2"]]}}
14 | :java-source-paths ["src"]
15 | :test-paths ["test/clojure"]
16 | :javac-options ["-target" "1.8" "-source" "1.8"]
17 | :plugins [[lein-junit "1.1.9"]]
18 | :junit ["test/java"]
19 |
20 | ;; Maven properties for the Maven God
21 | :scm {:url "git@github.com:clj-commons/dirigiste.git"}
22 | :pom-plugins [[org.codehaus.mojo/build-helper-maven-plugin "1.7"
23 | {:executions
24 | ([:execution [:id "add-test-source"]
25 | [:phase "generate-test-sources"]
26 | [:goals [:goal "add-test-source"]]
27 | [:configuration [:sources [:source "test/java"]]]])}]]
28 | :pom-addition ([:properties
29 | [:maven.compiler.source 1.8]
30 | [:maven.compiler.target 1.8]]
31 | [:organization
32 | [:name "CLJ Commons"]
33 | [:url "http://clj-commons.org/"]]
34 | [:developers [:developer
35 | [:id "kingmob"]
36 | [:name "Matthew Davidson"]
37 | [:url "http://modulolotus.net"]
38 | [:email "matthew@modulolotus.net"]]])
39 | :classifiers {:javadoc {:java-source-paths ^:replace []
40 | :source-paths ^:replace []
41 | :resource-paths ^:replace []}
42 | :sources {:java-source-paths ^:replace ["src"]
43 | :resource-paths ^:replace []}})
44 |
--------------------------------------------------------------------------------
/src/io/aleph/dirigiste/Executor.java:
--------------------------------------------------------------------------------
1 | package io.aleph.dirigiste;
2 |
3 | import java.util.concurrent.*;
4 | import java.util.concurrent.atomic.*;
5 | import java.util.List;
6 | import java.util.ArrayList;
7 | import java.util.EnumSet;
8 |
9 | public class Executor extends AbstractExecutorService {
10 |
11 | public interface Controller {
12 |
13 | /**
14 | * @param currThreads the current number of active threads
15 | * @return whether an additional thread should be spun up; a return value of false may cause a RejectedExecutionException to be thrown elsewhere
16 | */
17 | boolean shouldIncrement(int currThreads);
18 |
19 | /**
20 | * @param stats the statistics gathered since the last call to 'adjustment'
21 | * @return if positive, the number of threads that should be spun up; if negative, the number of threads that should be spun down
22 | */
23 | int adjustment(Stats stats);
24 | }
25 |
26 | class Worker {
27 | public volatile Runnable _runnable;
28 | public volatile boolean _isShutdown = false;
29 |
30 | private final AtomicInteger _completed = new AtomicInteger(0);
31 |
32 | private long _birth = System.nanoTime();
33 | private final AtomicLong _start = new AtomicLong(0);
34 | private final AtomicLong _totalDuration = new AtomicLong(0);
35 |
36 | private final CountDownLatch _latch = new CountDownLatch(1);
37 | private final Thread _thread;
38 |
39 | Worker() {
40 |
41 | final boolean taskCompletionRate = _metrics.contains(Stats.Metric.TASK_COMPLETION_RATE);
42 | final boolean workerUtilization = _metrics.contains(Stats.Metric.UTILIZATION);
43 |
44 | Runnable runnable =
45 | new Runnable() {
46 | public void run() {
47 | _birth = System.nanoTime();
48 |
49 | while (!_isShutdown) {
50 | try {
51 | Runnable r = (Runnable) _queue.poll(1000, TimeUnit.MILLISECONDS);
52 |
53 | if (r != null) {
54 | _runnable = r;
55 |
56 | if (workerUtilization) {
57 | _start.set(System.nanoTime());
58 | }
59 |
60 | try {
61 | r.run();
62 | } catch (Throwable e) {
63 |
64 | } finally {
65 | _runnable = null;
66 |
67 | if (workerUtilization) {
68 | _totalDuration.addAndGet(System.nanoTime() - _start.getAndSet(0));
69 | }
70 |
71 | if (taskCompletionRate) {
72 | _completed.incrementAndGet();
73 | }
74 | }
75 | }
76 | } catch (InterruptedException e) {
77 | // The worker thread may occasionally catch a
78 | // stray interrupt targeted at the task that
79 | // raced through. We don't want such races to
80 | // kill the worker thread, so ignore and
81 | // continue with the loop. If the interrupt is
82 | // meant for the worker thread, then _isShutdown
83 | // would be armed as well.
84 | }
85 | }
86 | _workers.remove(Worker.this);
87 | _latch.countDown();
88 | }
89 | };
90 |
91 | _thread = _threadFactory.newThread(runnable);
92 | _thread.start();
93 | }
94 |
95 | public double utilization(long t0, long t1) {
96 | long start = _start.getAndSet(t1);
97 | if (start == 0) {
98 | _start.compareAndSet(t1, 0);
99 | }
100 | long active = _totalDuration.getAndSet(0) + (start == 0 ? 0 : t1 - start);
101 | long total = t1 - Math.max(t0, _birth);
102 |
103 | return (double) active / (double) total;
104 | }
105 |
106 | public boolean isActive() {
107 | return _runnable != null;
108 | }
109 |
110 | public boolean isShutdown() {
111 | return _isShutdown;
112 | }
113 |
114 | public boolean shutdown() {
115 | if (!_isShutdown) {
116 | _isShutdown = true;
117 | _numWorkers.decrementAndGet();
118 | return true;
119 | }
120 | return false;
121 | }
122 | }
123 |
124 | private static AtomicInteger _numExecutors = new AtomicInteger(0);
125 |
126 | private final ThreadFactory _threadFactory;
127 | private final BlockingQueue _queue;
128 | private final CopyOnWriteArrayList _workers = new CopyOnWriteArrayList();
129 | private final AtomicInteger _numWorkers = new AtomicInteger(0);
130 | private final AtomicInteger _incomingTasks = new AtomicInteger(0);
131 | private final AtomicInteger _rejectedTasks = new AtomicInteger(0);
132 | private final Controller _controller;
133 |
134 | private final EnumSet _metrics;
135 | private final boolean _measureQueueLatency;
136 | private final boolean _measureTaskLatency;
137 | private final boolean _measureTaskArrivalRate;
138 | private final boolean _measureTaskRejectionRate;
139 |
140 | private boolean _isShutdown = false;
141 |
142 | private final AtomicReference _queueLatencies =
143 | new AtomicReference(new Stats.UniformLongReservoir());
144 |
145 | private final AtomicReference _taskLatencies =
146 | new AtomicReference(new Stats.UniformLongReservoir());
147 |
148 | private final AtomicReference _queueLengths =
149 | new AtomicReference(new Stats.UniformLongReservoir());
150 |
151 | private final AtomicReference _utilizations =
152 | new AtomicReference(new Stats.UniformDoubleReservoir());
153 |
154 | private final AtomicReference _taskArrivalRates =
155 | new AtomicReference(new Stats.UniformDoubleReservoir());
156 |
157 | private final AtomicReference _taskCompletionRates =
158 | new AtomicReference(new Stats.UniformDoubleReservoir());
159 |
160 | private final AtomicReference _taskRejectionRates =
161 | new AtomicReference(new Stats.UniformDoubleReservoir());
162 |
163 | private volatile Stats _stats = Stats.EMPTY;
164 |
165 | /**
166 | * @param threadFactory the ThreadFactory used by the executor
167 | * @param queue the queue that holds Runnable objects waiting to be executed
168 | * @param controller the Controller object that updates the thread count
169 | * @param metrics the metrics that will be collected and delivered to the controller
170 | * @param initialThreadCount the number of threads that the executor will begin with
171 | * @param samplePeriod the period at which the executor's state will be sampled
172 | * @param controlPeriod the period at which the controller will be invoked with the gathered statistics
173 | * @param unit the time unit for the #samplePeriod and #controlPeriod
174 | */
175 | public Executor(ThreadFactory threadFactory, BlockingQueue queue, Executor.Controller controller, int initialThreadCount, EnumSet metrics, long samplePeriod, long controlPeriod, TimeUnit unit) {
176 |
177 | _threadFactory = threadFactory;
178 | _queue = queue;
179 | _controller = controller;
180 | _metrics = metrics;
181 |
182 | _measureQueueLatency = _metrics.contains(Stats.Metric.QUEUE_LATENCY);
183 | _measureTaskLatency = _metrics.contains(Stats.Metric.TASK_LATENCY);
184 | _measureTaskArrivalRate = _metrics.contains(Stats.Metric.TASK_ARRIVAL_RATE);
185 | _measureTaskRejectionRate = _metrics.contains(Stats.Metric.TASK_REJECTION_RATE);
186 |
187 | final int duration = (int) unit.toMillis(samplePeriod);
188 | final int iterations = (int) (controlPeriod / samplePeriod);
189 |
190 | Thread t =
191 | new Thread(new Runnable() {
192 | public void run() {
193 | startControlLoop(duration, iterations);
194 | }
195 | },
196 | "dirigiste-executor-controller-" + _numExecutors.getAndIncrement());
197 | t.setDaemon(true);
198 | t.start();
199 |
200 | for (int i = 0; i < Math.max(1, initialThreadCount); i++) {
201 | startWorker();
202 | }
203 | }
204 |
205 | /**
206 | * @return the metrics being gathered by the executor
207 | */
208 | public EnumSet getMetrics() {
209 | return _metrics;
210 | }
211 |
212 | /**
213 | * @return the last aggregate statistics given to the control loop.
214 | */
215 | public Stats getLastStats() {
216 | return _stats;
217 | }
218 |
219 | /**
220 | * @return the aggregate statistics for the executor since the last control loop update.
221 | */
222 | public Stats getStats() {
223 | return new Stats
224 | (_metrics,
225 | _numWorkers.get(),
226 | _utilizations.get().toArray(),
227 | _taskArrivalRates.get().toArray(),
228 | _taskCompletionRates.get().toArray(),
229 | _taskRejectionRates.get().toArray(),
230 | _queueLengths.get().toArray(),
231 | _queueLatencies.get().toArray(),
232 | _taskLatencies.get().toArray());
233 | }
234 |
235 | @Override
236 | public boolean awaitTermination(long timeout, TimeUnit unit) throws InterruptedException {
237 |
238 | long duration = unit.toMillis(timeout);
239 | long start = System.currentTimeMillis();
240 |
241 | for (Worker w : _workers) {
242 | long remaining = (start + duration) - System.currentTimeMillis();
243 | if (remaining < 0) {
244 | return false;
245 | }
246 | if(!w._latch.await(remaining, TimeUnit.MILLISECONDS)) {
247 | return false;
248 | };
249 | }
250 |
251 | return true;
252 | }
253 |
254 | /**
255 | * A version of execute which will simply block until the task is accepted, rather than
256 | * throwing a RejectedExecutionException.
257 | *
258 | * RejectedExecutionException will only be thrown if the executor is shut down.
259 | */
260 | public void executeWithoutRejection(Runnable runnable) throws NullPointerException, InterruptedException {
261 | if (runnable == null) {
262 | throw new NullPointerException();
263 | }
264 |
265 | if (_isShutdown) {
266 | throw new RejectedExecutionException("Executor is shutdown!");
267 | }
268 |
269 | if (_measureTaskArrivalRate) {
270 | _incomingTasks.incrementAndGet();
271 | }
272 |
273 | if (_measureTaskLatency || _measureQueueLatency) {
274 | final long enqueue = System.nanoTime();
275 | final Runnable r = runnable;
276 | runnable = new Runnable() {
277 | public void run() {
278 |
279 | if (_measureQueueLatency) {
280 | _queueLatencies.get().sample(System.nanoTime() - enqueue);
281 | }
282 |
283 | try {
284 | r.run();
285 | } finally {
286 | if (_measureTaskLatency) {
287 | _taskLatencies.get().sample(System.nanoTime() - enqueue);
288 | }
289 | }
290 | }
291 | };
292 | }
293 |
294 | if (!_queue.offer(runnable) || _workers.isEmpty()) {
295 | startWorker();
296 | _queue.put(runnable);
297 | }
298 | }
299 |
300 | @Override
301 | public void execute(Runnable runnable) throws NullPointerException, RejectedExecutionException {
302 | if (runnable == null) {
303 | throw new NullPointerException();
304 | }
305 |
306 | if (_isShutdown) {
307 | throw new RejectedExecutionException("Executor is shutdown!");
308 | }
309 |
310 | if (_measureTaskArrivalRate) {
311 | _incomingTasks.incrementAndGet();
312 | }
313 |
314 | if (_measureTaskLatency || _measureQueueLatency) {
315 | final long enqueue = System.nanoTime();
316 | final Runnable r = runnable;
317 | runnable = new Runnable() {
318 | public void run() {
319 |
320 | if (_measureQueueLatency) {
321 | _queueLatencies.get().sample(System.nanoTime() - enqueue);
322 | }
323 |
324 | try {
325 | r.run();
326 | } finally {
327 | if (_measureTaskLatency) {
328 | _taskLatencies.get().sample(System.nanoTime() - enqueue);
329 | }
330 | }
331 | }
332 | };
333 | }
334 |
335 | if (!_queue.offer(runnable) || _workers.isEmpty()) {
336 | if (startWorker()) {
337 | try {
338 | _queue.put(runnable);
339 | } catch (InterruptedException e) {
340 | if (_measureTaskRejectionRate) {
341 | _rejectedTasks.incrementAndGet();
342 | }
343 | throw new RejectedExecutionException();
344 | }
345 | } else {
346 | if (_measureTaskRejectionRate) {
347 | _rejectedTasks.incrementAndGet();
348 | }
349 | throw new RejectedExecutionException();
350 | }
351 | }
352 | }
353 |
354 | @Override
355 | public boolean isShutdown() {
356 | return _isShutdown;
357 | }
358 |
359 | @Override
360 | public boolean isTerminated() {
361 | return _isShutdown && _workers.isEmpty();
362 | }
363 |
364 | @Override
365 | public void shutdown() {
366 | synchronized (this) {
367 | _isShutdown = true;
368 | for (Worker w : _workers) {
369 | w.shutdown();
370 | }
371 | }
372 | }
373 |
374 | @Override
375 | public List shutdownNow() {
376 | synchronized (this) {
377 | _isShutdown = true;
378 | List rs = new ArrayList();
379 | _queue.drainTo(rs);
380 | for (Worker w : _workers) {
381 | Runnable r = w._runnable;
382 | w.shutdown();
383 | w._thread.interrupt();
384 |
385 | if (r != null) {
386 | rs.add(r);
387 | }
388 | }
389 | return rs;
390 | }
391 | }
392 |
393 | ///
394 |
395 | private Stats updateStats() {
396 | return new Stats
397 | (_metrics,
398 | _numWorkers.get(),
399 | _utilizations.getAndSet(new Stats.UniformDoubleReservoir()).toArray(),
400 | _taskArrivalRates.getAndSet(new Stats.UniformDoubleReservoir()).toArray(),
401 | _taskCompletionRates.getAndSet(new Stats.UniformDoubleReservoir()).toArray(),
402 | _taskRejectionRates.getAndSet(new Stats.UniformDoubleReservoir()).toArray(),
403 | _queueLengths.getAndSet(new Stats.UniformLongReservoir()).toArray(),
404 | _queueLatencies.getAndSet(new Stats.UniformLongReservoir()).toArray(),
405 | _taskLatencies.getAndSet(new Stats.UniformLongReservoir()).toArray());
406 | }
407 |
408 | private boolean startWorker() {
409 | while (true) {
410 | int numWorkers = _numWorkers.get();
411 | if (!_controller.shouldIncrement(numWorkers)) {
412 | return false;
413 | }
414 | if (_numWorkers.compareAndSet(numWorkers, numWorkers+1)) {
415 | _workers.add(new Worker());
416 | return true;
417 | }
418 | }
419 | }
420 |
421 | private void startControlLoop(int duration, int iterations) {
422 |
423 | boolean measureUtilization = _metrics.contains(Stats.Metric.UTILIZATION);
424 | boolean measureTaskArrivalRate = _metrics.contains(Stats.Metric.TASK_ARRIVAL_RATE);
425 | boolean measureTaskCompletionRate = _metrics.contains(Stats.Metric.TASK_COMPLETION_RATE);
426 | boolean measureTaskRejectionRate = _metrics.contains(Stats.Metric.TASK_REJECTION_RATE);
427 | boolean measureQueueLength = _metrics.contains(Stats.Metric.QUEUE_LENGTH);
428 |
429 | double samplesPerSecond = 1000.0 / duration;
430 | int iteration = 0;
431 | long utilizationSample = 0;
432 |
433 | try {
434 | while (!_isShutdown) {
435 | iteration = (iteration + 1) % iterations;
436 |
437 | long start = System.currentTimeMillis();
438 |
439 | // gather stats
440 | if (measureQueueLength) {
441 | _queueLengths.get().sample(_queue.size());
442 | }
443 |
444 | if (measureTaskArrivalRate) {
445 | _taskArrivalRates.get().sample(_incomingTasks.getAndSet(0) * samplesPerSecond);
446 | }
447 |
448 | if (measureTaskRejectionRate) {
449 | _taskRejectionRates.get().sample(_rejectedTasks.getAndSet(0) * samplesPerSecond);
450 | }
451 |
452 | int tasks = 0;
453 |
454 | int workerCount = 0;
455 | double utilizationSum = 0.0;
456 | long nextUtilizationSample = 0;
457 | if (measureUtilization) {
458 | nextUtilizationSample = System.nanoTime();
459 | }
460 |
461 | for (Worker w : _workers) {
462 | if (w.isShutdown()) {
463 | continue;
464 | }
465 |
466 | if (measureUtilization) {
467 | workerCount++;
468 | utilizationSum += w.utilization(utilizationSample, nextUtilizationSample);
469 | }
470 |
471 | if (measureTaskCompletionRate) {
472 | tasks += w._completed.getAndSet(0);
473 | }
474 | }
475 |
476 | if (measureUtilization) {
477 | utilizationSample = nextUtilizationSample;
478 | _utilizations.get().sample(utilizationSum / (double) workerCount);
479 | }
480 |
481 | if (measureTaskCompletionRate) {
482 | _taskCompletionRates.get().sample(tasks * samplesPerSecond);
483 | }
484 |
485 | // update worker count
486 | if (iteration == 0) {
487 | _stats = updateStats();
488 | int adjustment = _controller.adjustment(_stats);
489 |
490 | synchronized (this) {
491 | if (_isShutdown) {
492 | break;
493 | }
494 |
495 | if (adjustment < 0 && _queue.size() == 0) {
496 |
497 | // never let the number of workers drop below 1
498 | adjustment = Math.min(-adjustment, _numWorkers.get()-1);
499 |
500 | for (Worker w : _workers) {
501 | if (adjustment == 0) break;
502 | if (w.shutdown()) {
503 | adjustment--;
504 | }
505 | }
506 | } else if (adjustment > 0) {
507 |
508 | // create new workers
509 | for (int i = 0; i < adjustment; i++) {
510 | if (!startWorker()) {
511 | break;
512 | }
513 | }
514 | }
515 | }
516 | }
517 |
518 | Thread.sleep(Math.max(0, duration - (System.currentTimeMillis() - start)));
519 | }
520 | } catch (InterruptedException e) {
521 |
522 | }
523 | }
524 | }
525 |
--------------------------------------------------------------------------------
/src/io/aleph/dirigiste/Executors.java:
--------------------------------------------------------------------------------
1 | package io.aleph.dirigiste;
2 |
3 | import java.util.concurrent.*;
4 | import java.util.EnumSet;
5 |
6 | public class Executors {
7 |
8 | private static ThreadFactory threadFactory() {
9 | return new ThreadFactory() {
10 | public Thread newThread(Runnable r) {
11 | Thread t = java.util.concurrent.Executors.defaultThreadFactory().newThread(r);
12 | t.setDaemon(true);
13 | return t;
14 | }
15 | };
16 | }
17 |
18 | /**
19 | * @param numThreads the number of threads in the thread pool
20 | */
21 | public static Executor fixedExecutor(final int numThreads) {
22 | return fixedExecutor(numThreads, EnumSet.noneOf(Stats.Metric.class));
23 | }
24 |
25 | /**
26 | * @param numThreads the number of threads in the thread pool
27 | * @param metrics the metrics that will be gathered by the executor
28 | */
29 | public static Executor fixedExecutor(final int numThreads, EnumSet metrics) {
30 | return new Executor(threadFactory(), new SynchronousQueue(false), fixedController(numThreads), numThreads, metrics, 25, 10000, TimeUnit.MILLISECONDS);
31 | }
32 |
33 | /**
34 | * @param numThreads the number of threads in the thread pool
35 | */
36 | public static Executor.Controller fixedController(final int numThreads) {
37 | return new Executor.Controller() {
38 | public boolean shouldIncrement(int numWorkers) {
39 | return numWorkers < numThreads;
40 | }
41 |
42 | public int adjustment(Stats stats) {
43 | return stats.getNumWorkers() - numThreads;
44 | }
45 | };
46 | }
47 |
48 | /**
49 | * @param targetUtilization the target level of utilization, within [0, 1]
50 | * @param maxThreadCount the maximum number of threads
51 | */
52 | public static Executor utilizationExecutor(double targetUtilization, int maxThreadCount) {
53 | return utilizationExecutor(targetUtilization, maxThreadCount, EnumSet.of(Stats.Metric.UTILIZATION));
54 | }
55 |
56 | /**
57 | * @param targetUtilization the target level of utilization, within [0, 1]
58 | * @param maxThreadCount the maximum number of threads
59 | * @param metrics the metrics which should be gathered
60 | */
61 | public static Executor utilizationExecutor(double targetUtilization, int maxThreadCount, EnumSet metrics) {
62 | return new Executor(threadFactory(), new SynchronousQueue(false), utilizationController(targetUtilization, maxThreadCount), 1, metrics, 25, 10000, TimeUnit.MILLISECONDS);
63 | }
64 |
65 | /**
66 | * @param targetUtilization the target level of utilization, within [0, 1]
67 | * @param maxThreadCount the maximum number of threads that can be allocated
68 | */
69 | public static Executor.Controller utilizationController(final double targetUtilization, final int maxThreadCount) {
70 | return new Executor.Controller() {
71 | public boolean shouldIncrement(int numWorkers) {
72 | return numWorkers < maxThreadCount;
73 | }
74 |
75 | public int adjustment(Stats stats) {
76 | int numWorkers = stats.getNumWorkers();
77 | double correction = stats.getUtilization(1.0) / targetUtilization;
78 | int n = (int) Math.ceil(stats.getNumWorkers() * correction) - numWorkers;
79 |
80 | if (n < 0) {
81 | return Math.max(n, (int) -Math.ceil(numWorkers/4.0));
82 | } else if (n > 0) {
83 | return Math.min(n, (int) Math.ceil(numWorkers/4.0));
84 | } else {
85 | return 0;
86 | }
87 | }
88 | };
89 | }
90 |
91 | }
92 |
--------------------------------------------------------------------------------
/src/io/aleph/dirigiste/IPool.java:
--------------------------------------------------------------------------------
1 | package io.aleph.dirigiste;
2 |
3 | import java.util.Map;
4 |
5 | public interface IPool<K,V> {
6 |
7 | interface Controller<K> {
8 |
9 | /**
10 | * @param key the key which requires a new object
11 | * @param objectsForKey the number of currently existing objects for 'key'
12 | * @param totalObjects the total number of objects across every key
13 | * @return 'true' if a new object under 'key' should be created, false otherwise
14 | */
15 | boolean shouldIncrement(K key, int objectsForKey, int totalObjects);
16 |
17 | /**
18 | * @param stats a map of key onto stats for that key
19 | * @return a map of key onto how many objects should be created (if positive) or disposed (if negative)
20 | */
21 | Map<K,Integer> adjustment(Map<K,Stats> stats);
22 | }
23 |
24 | interface Generator<K,V> {
25 | /**
26 | * Creates a new instance of the pooled object, which must be
27 | * non-null, and non-equal to all other generated objects.
28 | *
29 | * @param key the key for which the object is being generated.
30 | */
31 | V generate(K key) throws Exception;
32 |
33 | /**
34 | * Disposes of the generated value. Should be idempotent.
35 | *
36 | * @param val an object which was previously created via 'generate'.
37 | */
38 | void destroy(K key, V val);
39 | }
40 |
41 | interface AcquireCallback<V> {
42 |
43 | /**
44 | * A callback that returns a pooled object.
45 | */
46 | void handleObject(V obj);
47 | }
48 |
49 | /**
50 | * Acquires an object from the pool, potentially creating one if none is available.
51 | *
52 | * @param key the key of the pooled object being acquired
53 | * @param callback the callback that will be invoked with the object once it's available
54 | */
55 | void acquire(K key, AcquireCallback<V> callback);
56 |
57 | /**
58 | * Acquires an object from the pool, potentially creating one if none is available.
59 | *
60 | * @param key the key of the pooled object being acquired
61 | * @return the object, once it's acquired
62 | */
63 | V acquire(K key) throws InterruptedException;
64 |
65 | /**
66 | * Releases an object that has been acquired back to the pool.
67 | *
68 | * @param key the key of the pooled object being released
69 | * @param obj the pooled object being released
70 | */
71 | void release(K key, V obj);
72 |
73 | /**
74 | * Disposes of an object, removing it from the pool.
75 | *
76 | * @param key the key of the pooled object being disposed
77 | * @param obj the pooled object being disposed
78 | */
79 | void dispose(K key, V obj);
80 |
81 | void shutdown();
82 | }
83 |
--------------------------------------------------------------------------------
/src/io/aleph/dirigiste/Pool.java:
--------------------------------------------------------------------------------
1 | package io.aleph.dirigiste;
2 |
3 | import java.util.*;
4 | import java.util.concurrent.*;
5 | import java.util.concurrent.atomic.*;
6 | import java.util.concurrent.locks.ReentrantLock;
7 |
8 | public class Pool<K,V> implements IPool<K,V> {
9 |
10 | // pooled object queue
11 | class Queue {
12 |
13 | private volatile boolean _isShutdown = false;
14 |
15 | private final Deque<AcquireCallback<V>> _takes;
16 | private final Deque _puts = new LinkedBlockingDeque<>();
17 | private final K _key;
18 |
19 | final AtomicLong incoming = new AtomicLong(0);
20 | final AtomicLong completed = new AtomicLong(0);
21 | final AtomicLong rejected = new AtomicLong(0);
22 | final AtomicInteger objects = new AtomicInteger(0);
23 |
24 | public Queue(K key, int queueSize) {
25 | _key = key;
26 | _takes = new LinkedBlockingDeque<>(queueSize);
27 | }
28 |
29 | public int getQueueLength() {
30 | return _takes.size();
31 | }
32 |
33 | public int availableObjectsCount() {
34 | return _puts.size();
35 | }
36 |
37 | public void cancelTake(AcquireCallback take) {
38 | _takes.remove(take);
39 | }
40 |
41 | public void release(V obj) {
42 | completed.incrementAndGet();
43 | put(obj);
44 | }
45 |
46 | public void destroy(V obj) {
47 | try {
48 | _generator.destroy(_key, obj);
49 | } finally {
50 | _numObjects.decrementAndGet();
51 | }
52 | }
53 |
54 | public void shutdown() {
55 | _lock.lock();
56 |
57 | int n = objects.get();
58 | for (int i = 0; i < n; i++) {
59 | drop();
60 | }
61 |
62 | _isShutdown = true;
63 |
64 | _lock.unlock();
65 | }
66 |
67 | public void drop() {
68 |
69 | _lock.lock();
70 |
71 | int n;
72 | while (true) {
73 | n = objects.get();
74 |
75 | // if we're already at zero, or have more work to do
76 | // it's a no-op
77 | if (n <= 0 || getQueueLength() > 0) {
78 | _lock.unlock();
79 | return;
80 | }
81 | if (objects.compareAndSet(n, n-1)) {
82 | break;
83 | }
84 | }
85 |
86 | try {
87 | take(this::destroy, true);
88 | } catch (RejectedExecutionException e) {
89 | throw new RuntimeException(e);
90 | } finally {
91 | _lock.unlock();
92 | }
93 | }
94 |
95 | private void put(V obj) {
96 | _lock.lock();
97 |
98 | if (_isShutdown) {
99 | _lock.unlock();
100 | throw new IllegalStateException("already shutdown");
101 | }
102 |
103 | if (_destroyedObjects.contains(obj)) {
104 | _destroyedObjects.remove(obj);
105 | objects.decrementAndGet();
106 | _lock.unlock();
107 | destroy(obj);
108 | return;
109 | }
110 |
111 | AcquireCallback c = _takes.poll();
112 | if (c != null) {
113 | _lock.unlock();
114 | c.handleObject(obj);
115 | } else {
116 | _puts.add(obj);
117 | _lock.unlock();
118 | }
119 | }
120 |
121 | public int cleanup() {
122 | _lock.lock();
123 |
124 | List live = new ArrayList<>();
125 | List dead = new ArrayList<>();
126 | V obj = _puts.poll();
127 | while (obj != null) {
128 | if (!_destroyedObjects.contains(obj)) {
129 | live.add(obj);
130 | } else {
131 | dead.add(obj);
132 | _destroyedObjects.remove(obj);
133 | objects.decrementAndGet();
134 | }
135 | obj = _puts.poll();
136 | }
137 |
138 | int numObjects = objects.get();
139 |
140 | if (! live.isEmpty()) {
141 | _puts.addAll(live);
142 | }
143 |
144 | _lock.unlock();
145 |
146 | for (V o : dead) {
147 | destroy(o);
148 | }
149 |
150 | return numObjects;
151 | }
152 |
153 | public boolean take(AcquireCallback c, boolean skipToFront) throws RejectedExecutionException {
154 | incoming.incrementAndGet();
155 | _lock.lock();
156 |
157 | if (_isShutdown) {
158 | _lock.unlock();
159 | throw new IllegalStateException("already shutdown");
160 | }
161 |
162 | V obj = _puts.poll();
163 | while (_destroyedObjects.contains(obj)) {
164 | // expired object, clean it up and try again
165 | _destroyedObjects.remove(obj);
166 | objects.decrementAndGet();
167 |
168 | _lock.unlock();
169 | destroy(obj);
170 | _lock.lock();
171 |
172 | obj = _puts.poll();
173 | }
174 |
175 | if (obj != null) {
176 |
177 | // we got one, send it out
178 | _lock.unlock();
179 | c.handleObject(obj);
180 | return true;
181 | } else {
182 |
183 | // we didn't get one, try to enqueue our request
184 | // or reject the request if there are too many already
185 | boolean success = (skipToFront ? _takes.offerFirst(c) : _takes.offerLast(c));
186 | _lock.unlock();
187 | if (!success) {
188 | rejected.incrementAndGet();
189 | throw new RejectedExecutionException();
190 | }
191 | return false;
192 | }
193 | }
194 | }
195 |
196 | // static field
197 | private static AtomicInteger _numPools = new AtomicInteger(0);
198 |
199 | // fields
200 | private final int _maxQueueSize;
201 | private final Generator _generator;
202 | private final Controller _controller;
203 | private final double _rateMultiplier;
204 |
205 | private volatile boolean _isShutdown = false;
206 |
207 | private final AtomicInteger _numObjects = new AtomicInteger(0);
208 | private final ReentrantLock _lock = new ReentrantLock();
209 | private final Set _destroyedObjects = Collections.synchronizedSet(Collections.newSetFromMap(new WeakHashMap<>()));
210 | private final ConcurrentHashMap _start = new ConcurrentHashMap();
211 | final ConcurrentHashMap _queues = new ConcurrentHashMap<>();
212 | private final ConcurrentHashMap _queueLockCount = new ConcurrentHashMap<>();
213 | private final Stats.UniformLongReservoirMap _queueLatencies = new Stats.UniformLongReservoirMap();
214 | private final Stats.UniformLongReservoirMap _taskLatencies = new Stats.UniformLongReservoirMap();
215 | private final Stats.UniformLongReservoirMap _queueLengths = new Stats.UniformLongReservoirMap();
216 | private final Stats.UniformDoubleReservoirMap _utilizations = new Stats.UniformDoubleReservoirMap();
217 | private final Stats.UniformDoubleReservoirMap _taskArrivalRates = new Stats.UniformDoubleReservoirMap();
218 | private final Stats.UniformDoubleReservoirMap _taskCompletionRates = new Stats.UniformDoubleReservoirMap();
219 | private final Stats.UniformDoubleReservoirMap _taskRejectionRates = new Stats.UniformDoubleReservoirMap();
220 |
221 | // private methods
222 |
223 | /**
224 | * Returns or creates the queue for the given key.
225 | */
226 | Queue queue(K key) {
227 | Queue q = _queues.get(key);
228 | if (q == null) {
229 | q = new Queue(key, _maxQueueSize);
230 | Queue prior = _queues.putIfAbsent(key, q);
231 | return prior == null ? q : prior;
232 | }
233 | return q;
234 | }
235 |
236 | private Map updateStats() {
237 | Map queueLatencies = _queueLatencies.toMap();
238 | Map taskLatencies = _taskLatencies.toMap();
239 | Map queueLengths = _queueLengths.toMap();
240 | Map utilizations = _utilizations.toMap();
241 | Map taskArrivalRates = _taskArrivalRates.toMap();
242 | Map taskCompletionRates = _taskCompletionRates.toMap();
243 | Map taskRejectionRates = _taskRejectionRates.toMap();
244 |
245 | Map stats = new HashMap<>();
246 | for (K key : _queues.keySet()) {
247 | stats.put(key,
248 | new Stats(EnumSet.allOf(Stats.Metric.class),
249 | queue(key).objects.get(),
250 | utilizations.get(key),
251 | taskArrivalRates.get(key),
252 | taskCompletionRates.get(key),
253 | taskRejectionRates.get(key),
254 | queueLengths.get(key),
255 | queueLatencies.get(key),
256 | taskLatencies.get(key)));
257 | }
258 | return stats;
259 | }
260 |
261 | private void addObject(K key) {
262 | Queue q = queue(key);
263 |
264 | _lock.lock();
265 | if (_controller.shouldIncrement(key, q.objects.get(), _numObjects.get())) {
266 |
267 | // get all of our numbers aligned before unlocking
268 | _numObjects.incrementAndGet();
269 | q.objects.incrementAndGet();
270 | _lock.unlock();
271 |
272 | try {
273 | q.put(_generator.generate(key));
274 | } catch (Exception e) {
275 | _numObjects.decrementAndGet();
276 | q.objects.decrementAndGet();
277 | throw new RuntimeException(e);
278 | }
279 | } else {
280 | _lock.unlock();
281 | }
282 | }
283 |
284 | /**
285 | * Returns the utilization according to the state of a queue.
286 | * @param available Number of objects available on the queue waiting to be used
287 | * @param queueLength Number of pending takes waiting to have access to an object
288 | * @param objects Number of objects created on the queue
289 | * @return The queue utilization:
290 | * - 0 means all the objects are available and there are no pending takes
291 | * - 1 means all the objects are in use (0 available)
292 | * - >1 means all the objects are in use (0 available) and there are pending takes
293 | */
294 | double getUtilization(int available, int queueLength, int objects) {
295 | if(objects==0 && queueLength==0) {
296 | return 0;
297 | }
298 | return 1.0 - ((available - queueLength) / Math.max(1.0, objects));
299 | }
300 |
301 | /**
302 | * Adjust the various queues according to {@link io.aleph.dirigiste.IPool.Controller#adjustment(java.util.Map)}.
303 | * Queues no longer in use are removed from the queues and shutdown.
304 | */
305 | private void adjust() {
306 | sample();
307 |
308 | final Map _stats = updateStats();
309 | final Map adjustment = _controller.adjustment(_stats);
310 |
311 | // clear out any unused queues
312 | for (Map.Entry entry : _stats.entrySet()) {
313 | K key = entry.getKey();
314 | if (entry.getValue().getUtilization(1) == 0
315 | && _queues.get(key).objects.get() == 0
316 | && _queueLockCount.getOrDefault(key, 0) == 0) {
317 |
318 | // Ensure we have an exclusive lock on the queue before it got shut down.
319 | _queueLockCount.compute(key, (__, useCount) -> {
320 | if(useCount == null || useCount == 0) {
321 | _queues.remove(key).shutdown();
322 |
323 | // clean up stats so they don't remain in memory forever
324 | _queueLatencies.remove(key);
325 | _taskLatencies.remove(key);
326 | _queueLengths.remove(key);
327 | _utilizations.remove(key);
328 | _taskArrivalRates.remove(key);
329 | _taskCompletionRates.remove(key);
330 | _taskRejectionRates.remove(key);
331 | return null;
332 | }
333 | return useCount;
334 | });
335 | }
336 | }
337 |
338 | // defer pool growth until we've reduced other pools
339 | List upward = new ArrayList();
340 |
341 | for (Map.Entry entry : adjustment.entrySet()) {
342 | int n = entry.getValue();
343 | if (n < 0) {
344 | Queue q = queue(entry.getKey());
345 | for (int i = 0; i < -n; i++) {
346 | q.drop();
347 | }
348 | q.cleanup();
349 | } else if (n > 0) {
350 | for (int i = 0; i < n; i++) {
351 | upward.add(entry.getKey());
352 | }
353 | }
354 | }
355 |
356 | // if we don't have room for everything, make sure we grow
357 | // a random subset
358 |
359 | Collections.shuffle(upward);
360 | for (K key : upward) {
361 | addObject(key);
362 | }
363 | }
364 |
365 | /**
366 | * Sample all the queues to compute their current:
367 | * - queueLength (pending takes)
368 | * - utilization
369 | * - taskArrivalRate
370 | * - taskCompletionRate
371 | * - taskRejectionRate
372 | */
373 | private void sample() {
374 | for (Map.Entry entry : _queues.entrySet()) {
375 | K key = entry.getKey();
376 | Queue q = entry.getValue();
377 | long completed = q.completed.getAndSet(0);
378 | long incoming = q.incoming.getAndSet(0);
379 | long rejected = q.rejected.getAndSet(0);
380 | int objects = q.objects.get();
381 | int queueLength = q.getQueueLength();
382 | int available = q.availableObjectsCount();
383 | double utilization = getUtilization(available, queueLength, objects);
384 |
385 | _queueLengths.sample(key, queueLength);
386 | _utilizations.sample(key, utilization);
387 | _taskArrivalRates.sample(key, incoming * _rateMultiplier);
388 | _taskCompletionRates.sample(key, completed * _rateMultiplier);
389 | _taskRejectionRates.sample(key, rejected * _rateMultiplier); }
390 | }
391 |
392 | private void startControlLoop(int duration, int iterations) {
393 |
394 | int iteration = 0;
395 |
396 | try {
397 | while (!_isShutdown) {
398 |
399 | iteration = (iteration + 1) % iterations;
400 | boolean isControlPeriod = iteration == 0;
401 |
402 | long start = System.currentTimeMillis();
403 |
404 | if(!isControlPeriod) {
405 | sample();
406 | }
407 |
408 | if (_isShutdown) {
409 | break;
410 | }
411 |
412 | if (isControlPeriod) {
413 | adjust();
414 | }
415 |
416 | Thread.sleep(Math.max(0, duration - (System.currentTimeMillis() - start)));
417 | }
418 | } catch (InterruptedException e) {
419 | }
420 | }
421 |
422 | // constructor
423 |
424 | public Pool(Generator generator, Controller controller, int maxQueueSize, long samplePeriod, long controlPeriod, TimeUnit unit) {
425 | _generator = generator;
426 | _controller = controller;
427 | _maxQueueSize = maxQueueSize;
428 |
429 | final int duration = (int) unit.toMillis(samplePeriod);
430 | final int iterations = (int) (controlPeriod / samplePeriod);
431 | _rateMultiplier = (double) unit.toMillis(1000) / duration;
432 |
433 | Thread t =
434 | new Thread(() -> startControlLoop(duration, iterations),
435 | "dirigiste-pool-controller-" + _numPools.getAndIncrement());
436 | t.setDaemon(true);
437 | t.start();
438 |
439 | }
440 |
441 | // public methods
442 |
443 | @Override
444 | public void acquire(final K key, final AcquireCallback callback) {
445 | final long start = System.nanoTime();
446 |
447 | // To prevent the queue from being deleted by the startControlLoop method (which runs on
448 | // another thread) as soon as it has been created, we need to mark the Queue as in use.
449 | _queueLockCount.compute(key, (__, useCount) -> useCount == null ? 1 : useCount + 1);
450 |
451 | Queue q = queue(key);
452 | AcquireCallback<V> wrapper =
453 | obj -> {
454 | // do all the latency bookkeeping
455 | long acquire = System.nanoTime();
456 | _queueLatencies.sample(key, acquire - start);
457 | _start.put(obj, start);
458 |
459 | callback.handleObject(obj);
460 | };
461 | boolean success = q.take(wrapper, false);
462 |
463 | // if we didn't immediately get an object, try to create one
464 | if (!success) {
465 | try {
466 | addObject(key);
467 | } catch (Throwable e) {
468 | q.cancelTake(wrapper);
469 | _queueLockCount.compute(key, (__, useCount) -> useCount - 1);
470 | throw new RuntimeException(e);
471 | }
472 | }
473 | }
474 |
475 | @Override
476 | public V acquire(K key) throws InterruptedException {
477 | final AtomicReference<V> ref = new AtomicReference<>(null);
478 | final CountDownLatch latch = new CountDownLatch(1);
479 |
480 | acquire(key, obj -> {
481 | ref.set(obj);
482 | latch.countDown();
483 | });
484 |
485 | latch.await();
486 | return ref.get();
487 | }
488 |
489 | @Override
490 | public void release(K key, V obj) {
491 | long end = System.nanoTime();
492 | Long start = _start.remove(obj);
493 |
494 | if (start != null) {
495 | _taskLatencies.sample(key, end - start);
496 | queue(key).release(obj);
497 | _queueLockCount.compute(key, (__, useCount) -> useCount - 1);
498 | }
499 | }
500 |
501 | @Override
502 | public void dispose(K key, V obj) {
503 | Queue q = queue(key);
504 |
505 | _lock.lock();
506 | _destroyedObjects.add(obj);
507 | int pendingTakes = q._takes.size();
508 | Long start = _start.remove(obj);
509 | _lock.unlock();
510 |
511 | // if it's been taken, "put" it back so it can be cleaned up
512 | if (start != null) {
513 | q.put(obj);
514 | } else {
515 | q.cleanup();
516 | }
517 |
518 | if (pendingTakes > 0) {
519 | // Objects can be disposed while there are still pending takes. In that case, a
520 | // new object has to be created to replace the one that just got disposed.
521 | addObject(key);
522 | }
523 |
524 | _queueLockCount.compute(key, (__, useCount) -> useCount - 1);
525 | }
526 |
527 | @Override
528 | public void shutdown() {
529 | _isShutdown = true;
530 | for (Map.Entry<K,Queue> entry : _queues.entrySet()) {
531 | entry.getValue().shutdown();
532 | }
533 | }
534 | }
535 |
--------------------------------------------------------------------------------
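The acquire/release/dispose flow implemented above is easiest to see end to end. A minimal usage sketch follows; only Pools.utilizationPool (defined in Pools.java below) and the IPool methods overridden above come from the library, while the generator, class name, and key are illustrative:

import io.aleph.dirigiste.IPool;
import io.aleph.dirigiste.Pools;

public class PoolUsageSketch {
    public static void main(String[] args) throws InterruptedException {
        // illustrative generator: build one StringBuilder per key
        IPool.Generator<String, StringBuilder> generator = new IPool.Generator<String, StringBuilder>() {
            public StringBuilder generate(String key) {
                return new StringBuilder(key);
            }
            public void destroy(String key, StringBuilder val) {
                // nothing to clean up for a plain StringBuilder
            }
        };

        // target 90% utilization, at most 4 objects per key and 1024 overall
        IPool<String, StringBuilder> pool =
            Pools.utilizationPool(generator, 0.9, 4, 1024);

        StringBuilder sb = pool.acquire("foo");   // blocks until an object is available
        try {
            sb.append("-in-use");
        } finally {
            pool.release("foo", sb);              // records task latency, returns object to the queue
        }
        pool.shutdown();
    }
}

--------------------------------------------------------------------------------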
/src/io/aleph/dirigiste/Pools.java:
--------------------------------------------------------------------------------
1 | package io.aleph.dirigiste;
2 |
3 | import java.util.*;
4 | import java.util.concurrent.TimeUnit;
5 |
6 | public class Pools {
7 |
8 | /**
9 | * @param maxObjectsPerKey the maximum number of pooled objects per key
10 | * @param maxTotalObjects the total number of objects that the pool can contain
11 | */
12 | public static IPool.Controller fixedController(final int maxObjectsPerKey, final int maxTotalObjects) {
13 | return new IPool.Controller() {
14 | public boolean shouldIncrement(Object key, int objectsForKey, int totalObjects) {
15 | return (objectsForKey < maxObjectsPerKey) && (totalObjects < maxTotalObjects);
16 | }
17 |
18 | public Map adjustment(Map stats) {
19 | return new HashMap();
20 | }
21 | };
22 | }
23 |
24 | /**
25 | * @param targetUtilization the target utilization per key, within [0, 1]
26 | * @param maxObjectsPerKey the maximum number of pooled objects per key
27 | * @param maxTotalObjects the total number of objects that the pool can contain
28 | */
29 | public static IPool.Controller utilizationController(final double targetUtilization, final int maxObjectsPerKey, final int maxTotalObjects) {
30 |
31 | return new IPool.Controller() {
32 | public boolean shouldIncrement(Object key, int objectsForKey, int totalObjects) {
33 | return (objectsForKey < maxObjectsPerKey) && (totalObjects < maxTotalObjects);
34 | }
35 |
36 | public Map adjustment(Map stats) {
37 | Map adj = new HashMap();
38 |
39 | for (Object e : stats.entrySet()) {
40 | Map.Entry entry = (Map.Entry) e;
41 | Stats s = (Stats) entry.getValue();
42 | int numWorkers = s.getNumWorkers();
43 | double correction = s.getUtilization(1.0) / targetUtilization;
44 | int n = (int) Math.ceil(s.getNumWorkers() * correction) - numWorkers;
45 |
46 | adj.put(entry.getKey(), n);
47 | }
48 | return adj;
49 | }
50 | };
51 | }
52 |
53 | /**
54 | * @param generator the pooled object generator
55 | * @param targetUtilization the target utilization per key, within [0, 1]
56 | * @param maxObjectsPerKey the maximum number of pooled objects per key
57 | * @param maxTotalObjects the total number of objects that the pool can contain
58 | */
59 | public static IPool utilizationPool(IPool.Generator generator, double targetUtilization, int maxObjectsPerKey, int maxTotalObjects) {
60 | return utilizationPool(generator, 65536, targetUtilization, maxObjectsPerKey, maxTotalObjects);
61 | }
62 |
63 | /**
64 | * @param generator the pooled object generator
65 | * @param maxQueueLength the maximum number of acquire requests that can be queued
66 | * @param targetUtilization the target utilization per key, within [0, 1]
67 | * @param maxObjectsPerKey the maximum number of pooled objects per key
68 | * @param maxTotalObjects the total number of objects that the pool can contain
69 | */
70 | public static IPool utilizationPool(IPool.Generator generator, int maxQueueLength, double targetUtilization, int maxObjectsPerKey, int maxTotalObjects) {
71 | return new Pool(generator, utilizationController(targetUtilization, maxObjectsPerKey, maxTotalObjects), maxQueueLength, 25, 1000, TimeUnit.MILLISECONDS);
72 | }
73 |
74 | /**
75 | * @param generator the pooled object generator
76 | * @param maxObjectsPerKey the maximum number of pooled objects per key
77 | * @param maxTotalObjects the total number of objects that the pool can contain
78 | */
79 | public static IPool fixedPool(IPool.Generator generator, int maxObjectsPerKey, int maxTotalObjects) {
80 | return fixedPool(generator, 65536, maxObjectsPerKey, maxTotalObjects);
81 | }
82 |
83 | /**
84 | * @param generator the pooled object generator
85 | * @param maxQueueLength the maximum number of acquire requests that can be queued
86 | * @param maxObjectsPerKey the maximum number of pooled objects per key
87 | * @param maxTotalObjects the total number of objects that the pool can contain
88 | */
89 | public static IPool fixedPool(IPool.Generator generator, int maxQueueLength, int maxObjectsPerKey, int maxTotalObjects) {
90 | return new Pool(generator, fixedController(maxObjectsPerKey, maxTotalObjects), maxQueueLength, 25, 1000, TimeUnit.MILLISECONDS);
91 | }
92 | }
93 |
--------------------------------------------------------------------------------
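utilizationController's adjustment above is a simple proportional correction: scale the current worker count by observed utilization over target utilization, round up, and return the difference. A small worked example with made-up numbers; the helper and class name are illustrative and just mirror the loop body above:

public class AdjustmentMath {
    // mirrors the per-key body of utilizationController.adjustment above
    static int adjustment(int numWorkers, double utilization, double targetUtilization) {
        double correction = utilization / targetUtilization;
        return (int) Math.ceil(numWorkers * correction) - numWorkers;
    }

    public static void main(String[] args) {
        // 4 workers running hot (utilization sample 0.95, target 0.9): ceil(4 * 1.06) - 4 = +1
        System.out.println(adjustment(4, 0.95, 0.9));

        // 8 workers mostly idle (utilization sample 0.45, target 0.9): ceil(8 * 0.5) - 8 = -4
        System.out.println(adjustment(8, 0.45, 0.9));
    }
}

--------------------------------------------------------------------------------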
/src/io/aleph/dirigiste/Stats.java:
--------------------------------------------------------------------------------
1 | package io.aleph.dirigiste;
2 |
3 | import java.util.concurrent.*;
4 | import java.util.concurrent.atomic.*;
5 | import java.util.*;
6 |
7 | public class Stats {
8 |
9 | public enum Metric {
10 | QUEUE_LENGTH,
11 | QUEUE_LATENCY,
12 | TASK_LATENCY,
13 | TASK_ARRIVAL_RATE,
14 | TASK_COMPLETION_RATE,
15 | TASK_REJECTION_RATE,
16 | UTILIZATION
17 | }
18 |
19 | static final int RESERVOIR_SIZE = 4096;
20 |
21 | public static class UniformLongReservoir {
22 |
23 | private final AtomicInteger _count = new AtomicInteger();
24 | final AtomicLongArray _values = new AtomicLongArray(RESERVOIR_SIZE);
25 |
26 | UniformLongReservoir() {
27 | }
28 |
29 | public void sample(long n) {
30 | int cnt = _count.incrementAndGet();
31 | if (cnt <= RESERVOIR_SIZE) {
32 | _values.set(cnt-1, n);
33 | } else {
34 | int idx = ThreadLocalRandom.current().nextInt(cnt);
35 | if (idx < RESERVOIR_SIZE) {
36 | _values.set(idx, n);
37 | }
38 | }
39 | }
40 |
41 | public long[] toArray() {
42 | int cnt = Math.min(RESERVOIR_SIZE, _count.get());
43 |
44 | long[] vals = new long[cnt];
45 | for (int i = 0; i < cnt; i++) {
46 | vals[i] = _values.get(i);
47 | }
48 | Arrays.sort(vals);
49 |
50 | return vals;
51 | }
52 | }
53 |
54 | public static class UniformDoubleReservoir {
55 | private final AtomicInteger _count = new AtomicInteger();
56 | final AtomicLongArray _values = new AtomicLongArray(RESERVOIR_SIZE);
57 |
58 | UniformDoubleReservoir() {
59 | }
60 |
61 | public void sample(double n) {
62 | int cnt = _count.incrementAndGet();
63 | if (cnt <= RESERVOIR_SIZE) {
64 | _values.set(cnt-1, Double.doubleToLongBits(n));
65 | } else {
66 | int idx = ThreadLocalRandom.current().nextInt(cnt);
67 | if (idx < RESERVOIR_SIZE) {
68 | _values.set(idx, Double.doubleToLongBits(n));
69 | }
70 | }
71 | }
72 |
73 | public double[] toArray() {
74 | int cnt = Math.min(RESERVOIR_SIZE, _count.get());
75 |
76 | double[] vals = new double[cnt];
77 | for (int i = 0; i < cnt; i++) {
78 | vals[i] = Double.longBitsToDouble(_values.get(i));
79 | }
80 | Arrays.sort(vals);
81 |
82 | return vals;
83 | }
84 | }
85 |
86 | public static class UniformLongReservoirMap<K> {
87 | ConcurrentHashMap<K,UniformLongReservoir> _reservoirs =
88 | new ConcurrentHashMap<>();
89 |
90 | public void sample(K key, long n) {
91 | UniformLongReservoir r = _reservoirs.get(key);
92 | if (r == null) {
93 | r = new UniformLongReservoir();
94 | UniformLongReservoir prior = _reservoirs.putIfAbsent(key, r);
95 | r = (prior == null ? r : prior);
96 | }
97 | r.sample(n);
98 | }
99 |
100 | public Map<K,long[]> toMap() {
101 | Map<K,long[]> m = new HashMap<>();
102 | for (K k : _reservoirs.keySet()) {
103 | m.put(k, _reservoirs.put(k, new UniformLongReservoir()).toArray());
104 | }
105 | return m;
106 | }
107 |
108 | public void remove(K key) {
109 | _reservoirs.remove(key);
110 | }
111 | }
112 |
113 | public static class UniformDoubleReservoirMap<K> {
114 | ConcurrentHashMap<K,UniformDoubleReservoir> _reservoirs =
115 | new ConcurrentHashMap<>();
116 |
117 | public void sample(K key, double n) {
118 | UniformDoubleReservoir r = _reservoirs.get(key);
119 | if (r == null) {
120 | r = new UniformDoubleReservoir();
121 | UniformDoubleReservoir prior = _reservoirs.putIfAbsent(key, r);
122 | r = (prior == null ? r : prior);
123 | }
124 | r.sample(n);
125 | }
126 |
127 | public Map<K,double[]> toMap() {
128 | Map<K,double[]> m = new HashMap<>();
129 | for (K k : _reservoirs.keySet()) {
130 | m.put(k, _reservoirs.remove(k).toArray());
131 | }
132 | return m;
133 | }
134 |
135 | public void remove(K key) {
136 | _reservoirs.remove(key);
137 | }
138 | }
139 |
140 | public static double lerp(long low, long high, double t) {
141 | return low + (high - low) * t;
142 | }
143 |
144 | public static double lerp(double low, double high, double t) {
145 | return low + (high - low) * t;
146 | }
147 |
148 | public static double lerp(long[] vals, double t) {
149 |
150 | if (vals == null) {
151 | return 0;
152 | }
153 |
154 | if (t < 0 || 1 < t) {
155 | throw new IllegalArgumentException(Double.toString(t));
156 | }
157 |
158 | int cnt = vals.length;
159 |
160 | switch (cnt) {
161 | case 0:
162 | return 0.0;
163 | case 1:
164 | return (double) vals[0];
165 | default:
166 | if (t == 1.0) {
167 | return (double) vals[cnt-1];
168 | }
169 | double idx = (cnt-1) * t;
170 | int iidx = (int) idx;
171 | return lerp(vals[iidx], vals[iidx + 1], idx - iidx);
172 | }
173 | }
174 |
175 | public static double lerp(double[] vals, double t) {
176 |
177 | if (vals == null) {
178 | return 0;
179 | }
180 |
181 | if (t < 0 || 1 < t) {
182 | throw new IllegalArgumentException(Double.toString(t));
183 | }
184 |
185 | int cnt = vals.length;
186 |
187 | switch (cnt) {
188 | case 0:
189 | return 0.0;
190 | case 1:
191 | return vals[0];
192 | default:
193 | if (t == 1.0) {
194 | return vals[cnt-1];
195 | }
196 | double idx = (cnt-1) * t;
197 | int iidx = (int) idx;
198 | return lerp(vals[iidx], vals[iidx + 1], idx - iidx);
199 | }
200 | }
201 |
202 | public static double mean(double[] vals) {
203 | if (vals == null || vals.length == 0) {
204 | return 0;
205 | }
206 |
207 | double sum = 0;
208 | for (double val : vals) {
209 | sum += val;
210 | }
211 | return sum/vals.length;
212 | }
213 |
214 | public static double mean(long[] vals) {
215 | if (vals == null || vals.length == 0) {
216 | return 0;
217 | }
218 |
219 | long sum = 0;
220 | for (long val : vals) {
221 | sum += val;
222 | }
223 | return sum / (double) vals.length;
224 | }
225 |
226 | //
227 |
228 | private final EnumSet<Metric> _metrics;
229 |
230 | private final int _numWorkers;
231 | private final double[] _utilizations;
232 | private final double[] _taskArrivalRates;
233 | private final double[] _taskCompletionRates;
234 | private final double[] _taskRejectionRates;
235 | private final long[] _queueLengths;
236 | private final long[] _queueLatencies;
237 | private final long[] _taskLatencies;
238 |
239 | public static final Stats EMPTY = new Stats(EnumSet.noneOf(Metric.class), 0, new double[] {}, new double[] {}, new double[] {}, new double[] {}, new long[] {}, new long[] {}, new long[] {});
240 |
241 | public Stats(EnumSet<Metric> metrics, int numWorkers, double[] utilizations, double[] taskArrivalRates, double[] taskCompletionRates, double[] taskRejectionRates, long[] queueLengths, long[] queueLatencies, long[] taskLatencies) {
242 | _metrics = metrics;
243 | _numWorkers = numWorkers;
244 | _utilizations = utilizations;
245 | _taskArrivalRates = taskArrivalRates;
246 | _taskCompletionRates = taskCompletionRates;
247 | _taskRejectionRates = taskRejectionRates;
248 | _queueLengths = queueLengths;
249 | _queueLatencies = queueLatencies;
250 | _taskLatencies = taskLatencies;
251 | }
252 |
253 | /**
254 | * @return the provided metrics
255 | */
256 | public EnumSet<Metric> getMetrics() {
257 | return _metrics;
258 | }
259 |
260 | /**
261 | * @return the number of active workers in the pool.
262 | */
263 | public int getNumWorkers() {
264 | return _numWorkers;
265 | }
266 |
267 | /**
268 | * @return the mean utilization of the workers as a value between 0 and 1.
269 | */
270 | public double getMeanUtilization() {
271 | return mean(_utilizations);
272 | }
273 |
274 | /**
275 | * @param quantile the point within the distribution to look up, 0.5 returns the median, 0.9 the 90th percentile
276 | * @return the utilization of the workers as a value between 0 and 1
277 | */
278 | public double getUtilization(double quantile) {
279 | return lerp(_utilizations, quantile);
280 | }
281 |
282 | /**
283 | * @return the mean task arrival rate of the executor, in tasks per second
284 | */
285 | public double getMeanTaskArrivalRate() {
286 | return mean(_taskArrivalRates);
287 | }
288 |
289 | /**
290 | * @param quantile the point within the distribution to look up, 0.5 returns the median, 0.9 the 90th percentile
291 | * @return the task arrival rate of the executor, in tasks per second
292 | */
293 | public double getTaskArrivalRate(double quantile) {
294 | return lerp(_taskArrivalRates, quantile);
295 | }
296 |
297 | /**
298 | * @return the mean task completion rate of the executor, in tasks per second
299 | */
300 | public double getMeanTaskCompletionRate() {
301 | return mean(_taskCompletionRates);
302 | }
303 |
304 | /**
305 | * @param quantile the point within the distribution to look up, 0.5 returns the median, 0.9 the 90th percentile
306 | * @return the task completion rate of the executor, in tasks per second
307 | */
308 | public double getTaskCompletionRate(double quantile) {
309 | return lerp(_taskCompletionRates, quantile);
310 | }
311 |
312 | /**
313 | * @return the mean task rejection rate of the executor, in tasks per second
314 | */
315 | public double getMeanTaskRejectionRate() {
316 | return mean(_taskRejectionRates);
317 | }
318 |
319 | /**
320 | * @param quantile the point within the distribution to look up, 0.5 returns the median, 0.9 the 90th percentile
321 | * @return the task rejection rate of the executor, in tasks per second
322 | */
323 | public double getTaskRejectionRate(double quantile) {
324 | return lerp(_taskRejectionRates, quantile);
325 | }
326 |
327 | /**
328 | * @return the mean length of the queue
329 | */
330 | public double getMeanQueueLength() {
331 | return mean(_queueLengths);
332 | }
333 |
334 | /**
335 | * @param quantile the point within the distribution to look up, 0.5 returns the median, 0.9 the 90th percentile
336 | * @return the length of the queue
337 | */
338 | public double getQueueLength(double quantile) {
339 | return lerp(_queueLengths, quantile);
340 | }
341 |
342 | /**
343 | * @return the mean time each task spends on the queue, in nanoseconds
344 | */
345 | public double getMeanQueueLatency() {
346 | return mean(_queueLatencies);
347 | }
348 |
349 | /**
350 | * @param quantile the point within the distribution to look up, 0.5 returns the median, 0.9 the 90th percentile
351 | * @return the time each task spends on the queue, in nanoseconds
352 | */
353 | public double getQueueLatency(double quantile) {
354 | return lerp(_queueLatencies, quantile);
355 | }
356 |
357 | /**
358 | * @return the mean time each task takes to complete, including time on the queue, in nanoseconds
359 | */
360 | public double getMeanTaskLatency() {
361 | return mean(_taskLatencies);
362 | }
363 |
364 | /**
365 | * @param quantile the point within the distribution to look up, 0.5 returns the median, 0.9 the 90th percentile
366 | * @return the time each task takes to complete, including time on the queue, in nanoseconds
367 | */
368 | public double getTaskLatency(double quantile) {
369 | return lerp(_taskLatencies, quantile);
370 | }
371 | }
372 |
--------------------------------------------------------------------------------
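The quantile getters above all go through lerp, which linearly interpolates over a sorted reservoir snapshot: the index is (cnt - 1) * quantile, interpolated between the two neighbouring samples. A minimal sketch using the same six-sample array the tests below use; the class name is illustrative:

import io.aleph.dirigiste.Stats;

public class QuantileSketch {
    public static void main(String[] args) {
        long[] sorted = {0, 0, 1, 2, 4, 5};          // a sorted reservoir snapshot

        System.out.println(Stats.lerp(sorted, 0.5)); // 1.5: halfway between vals[2]=1 and vals[3]=2
        System.out.println(Stats.lerp(sorted, 0.9)); // 4.5: halfway between vals[4]=4 and vals[5]=5
        System.out.println(Stats.lerp(sorted, 1.0)); // 5.0: the maximum sample
        System.out.println(Stats.mean(sorted));      // 2.0: (0+0+1+2+4+5) / 6
    }
}

--------------------------------------------------------------------------------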
/test/clojure/dirigiste/executor_test.clj:
--------------------------------------------------------------------------------
1 | (ns dirigiste.executor-test
2 | (:require
3 | [clojure.test :refer :all])
4 | (:import
5 | [java.util
6 | EnumSet]
7 | [java.util.concurrent
8 | ArrayBlockingQueue
9 | FutureTask
10 | CompletableFuture
11 | ScheduledExecutorService
12 | ExecutorService
13 | RejectedExecutionException
14 | CountDownLatch
15 | SynchronousQueue
16 | TimeUnit]
17 | [io.aleph.dirigiste
18 | Executors
19 | Executor
20 | Executor$Controller
21 | Stats$Metric]))
22 |
23 | (defn run-producer [^java.util.concurrent.Executor ex n interval]
24 | (dotimes [_ n]
25 | (let [latch (CountDownLatch. 1)]
26 | (try
27 |
28 | (.execute ex
29 | #(do (Thread/sleep interval) (.countDown latch)))
30 | (.await latch)
31 |
32 | (catch Throwable e
33 | )))))
34 |
35 | (defn stress-executor [ex n m interval]
36 | (is
37 | (->> (range n)
38 | (map (fn [_] (future (run-producer ex m interval))))
39 | doall
40 | (map #(deref % 3e5 ::timeout))
41 | doall
42 | (remove nil?)
43 | empty?))
44 | (let [s (.getStats ex)]
45 | (prn (.getUtilization s 0.5) (.getUtilization s 0.9) (.getUtilization s 0.99))
46 | s))
47 |
48 | (defn run-executor-test [pause]
49 | (let [ex (Executor.
50 | (java.util.concurrent.Executors/defaultThreadFactory)
51 | (SynchronousQueue. false)
52 | (Executors/utilizationController 0.9 64)
53 | 1
54 | (EnumSet/allOf Stats$Metric)
55 | 10
56 | 1000
57 | TimeUnit/MILLISECONDS)]
58 | (try
59 | (dotimes [_ 1]
60 | (is (< 30 (.getNumWorkers (stress-executor ex 32 1e5 pause)) 40))
61 | (is (< 15 (.getNumWorkers (stress-executor ex 16 1e5 pause)) 20))
62 | (is (< 5 (.getNumWorkers (stress-executor ex 8 1e5 pause)) 15))
63 | (Thread/sleep (* 1000 10))
64 | (is (= 1 (-> ex .getStats .getNumWorkers))))
65 | (finally
66 | (.shutdown ex)))))
67 |
68 | (deftest test-executor
69 | (run-executor-test 0)
70 | #_(run-executor-test 1)
71 | #_(run-executor-test 10))
72 |
73 | (deftest ^:stress test-utilization-metric
74 | )
75 |
76 | (defn- shutdown-task [times task-timeout-ms]
77 | (let [res (atom 0)]
78 | [res (fn [] (loop [cnt 1]
79 | (swap! res inc)
80 | (Thread/sleep task-timeout-ms)
81 | (when (< cnt times)
82 | (recur (inc cnt)))))]))
83 |
84 | (defn- check-shutdown-after [ex task-timeout-ms]
85 | (let [times 3
86 | [res task] (shutdown-task times task-timeout-ms)]
87 | (.execute ex task)
88 | (Thread/sleep 10)
89 | (.shutdown ex)
90 | (is (= 1 @res))
91 |
92 | (Thread/sleep (+ 10 (* task-timeout-ms times)))
93 | (is (= times @res))
94 |
95 | (is (thrown? RejectedExecutionException
96 | (.execute ex task)))
97 | (Thread/sleep (+ task-timeout-ms 10))
98 | (is (= times @res))
99 |
100 | (is (.isShutdown ex))
101 | (is (.isTerminated ex))))
102 |
103 | (defn- check-shutdown-now-after [ex task-timeout-ms]
104 | (let [times 3
105 | [res task] (shutdown-task times task-timeout-ms)]
106 | (.execute ex task)
107 | (Thread/sleep 10)
108 | (is (= 1 (count (.shutdownNow ex))))
109 | (is (= 1 @res))
110 |
111 | ;; task must have been interrupted
112 | (Thread/sleep (+ 10 (* task-timeout-ms (inc times))))
113 | (is (= 1 @res))
114 |
115 | (is (thrown? RejectedExecutionException
116 | (.execute ex task)))
117 | (Thread/sleep (+ task-timeout-ms 10))
118 | (is (= 1 @res))
119 |
120 | (is (.isShutdown ex))
121 | (is (.isTerminated ex))))
122 |
123 | (defn- custom-fixed-executor [controller]
124 | (Executor. (java.util.concurrent.Executors/defaultThreadFactory)
125 | (java.util.concurrent.SynchronousQueue. false)
126 | controller 1 (EnumSet/noneOf Stats$Metric)
127 | 10 100 TimeUnit/MILLISECONDS))
128 |
129 | #_(deftest shutdown-custom-fixed-executor
130 | (let [controller (Executors/fixedController 1)]
131 | (check-shutdown-now-after (custom-fixed-executor controller) 100)
132 | (check-shutdown-after (custom-fixed-executor controller) 100)))
133 |
134 | (deftest shutdown-manifold-fixed-executor
135 | (let [controller (reify Executor$Controller
136 | (shouldIncrement [_ n]
137 | (< n 1))
138 | (adjustment [_ s]
139 | (- 1 (.getNumWorkers s))))]
140 | (check-shutdown-now-after (custom-fixed-executor controller) 30)
141 | (check-shutdown-after (custom-fixed-executor controller) 30)))
142 |
143 | (defn- real-workers-count [ex]
144 | (count (.get (doto (.getDeclaredField Executor "_workers")
145 | (.setAccessible true)) ex)))
146 |
147 | (deftest task-interruption-should-not-kill-workers
148 | (let [num-threads (* (.availableProcessors (Runtime/getRuntime)) 4)
149 |
150 | ^ScheduledExecutorService sch-ex
151 | (java.util.concurrent.Executors/newSingleThreadScheduledExecutor)
152 |
153 | test-ex (Executor.
154 | (java.util.concurrent.Executors/defaultThreadFactory)
155 | (ArrayBlockingQueue. 60000 false)
156 | (reify Executor$Controller
157 | (shouldIncrement [_ n] (< n num-threads))
158 | (adjustment [_ s] 0))
159 | num-threads
160 | (EnumSet/noneOf Stats$Metric)
161 | 25
162 | 10000
163 | TimeUnit/MILLISECONDS)
164 |
165 | cancel-task (fn [^FutureTask f]
166 | (when-not (.isDone f)
167 | (.cancel f true)))
168 |
169 | run-with-timeout
170 | (fn [^long timeout-ms, f]
171 | (let [ex-fut (volatile! nil)
172 | f' (fn []
173 | (while (not @ex-fut))
174 | (.schedule sch-ex ^Callable #(cancel-task @ex-fut)
175 | timeout-ms TimeUnit/MILLISECONDS)
176 | (f))]
177 | (vreset! ex-fut (.submit ^ExecutorService test-ex ^Callable f'))))]
178 | (is (= num-threads (real-workers-count test-ex)))
179 | (try (try (dotimes [_ 50000]
180 | (run-with-timeout 100 ^Callable #(Thread/sleep 100)))
181 | (catch Exception _))
182 | (Thread/sleep 1000)
183 | (is (= num-threads (real-workers-count test-ex)))
184 | (finally (.shutdown test-ex)
185 | (.shutdown sch-ex)))))
186 |
--------------------------------------------------------------------------------
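shutdown-manifold-fixed-executor above reifies Executor$Controller to pin the executor at a single worker. The same controller written in Java, as a sketch; the method signatures are inferred from how the interface is used in these tests, so treat them as an assumption rather than a reference:

import io.aleph.dirigiste.Executor;
import io.aleph.dirigiste.Stats;

public class OneWorkerControllerSketch {
    static Executor.Controller oneWorkerController() {
        return new Executor.Controller() {
            // never grow beyond a single worker
            public boolean shouldIncrement(int numWorkers) {
                return numWorkers < 1;
            }
            // steer the worker count back towards exactly one
            public int adjustment(Stats stats) {
                return 1 - stats.getNumWorkers();
            }
        };
    }
}

--------------------------------------------------------------------------------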
/test/clojure/dirigiste/pool_test.clj:
--------------------------------------------------------------------------------
1 | (ns dirigiste.pool-test
2 | (:require
3 | [clojure.test :refer :all])
4 | (:import
5 | [java.util.concurrent
6 | TimeUnit]
7 | [io.aleph.dirigiste
8 | Pools
9 | Pool
10 | IPool$Generator
11 | IPool$Controller]))
12 |
13 | (defn generator [disposed]
14 | (let [cnt (atom 0)]
15 | (reify IPool$Generator
16 | (generate [_ k]
17 | (swap! cnt inc))
18 | (destroy [_ k v]
19 | (swap! disposed conj [k v])))))
20 |
21 | (defn controller [f max-objects]
22 | (reify IPool$Controller
23 | (shouldIncrement [_ key objects-for-key total-objects]
24 | (< total-objects max-objects))
25 | (adjustment [_ key->stats]
26 | (f key->stats))))
27 |
28 | (defn pool
29 | ([generator controller]
30 | (pool generator controller 1e5))
31 | ([generator controller max-queue-size]
32 | (Pool. generator controller max-queue-size 25 1e4 TimeUnit/MILLISECONDS)))
33 |
34 | (deftest test-basic-pool-ops
35 | (let [disposed (atom #{})
36 | p (pool (generator disposed) (controller (constantly {}) 3))]
37 | (try
38 | (is (= 1 (.acquire p :foo)))
39 | (is (= 2 (.acquire p :bar)))
40 | (.release p :foo 1)
41 | (is (= 1 (.acquire p :foo)))
42 | (is (= 3 (.acquire p :foo)))
43 | (.release p :bar 2)
44 | (.release p :foo 1)
45 | (is (= 2 (.acquire p :bar)))
46 | (.dispose p :bar 2)
47 | (.dispose p :foo 1)
48 | (.dispose p :foo 3)
49 | (is (= #{[:foo 1] [:bar 2] [:foo 3]} @disposed))
50 | (finally
51 | (.shutdown p)))))
52 |
53 | (defn adjustment-stats [f]
54 | (let [stats (promise)
55 | p (pool
56 | (generator (atom #{}))
57 | (controller #(do (deliver stats %) {}) 1e4))]
58 | (try
59 | (f p)
60 | @stats
61 | (finally
62 | (.shutdown p)))))
63 |
64 | (defn simple-generator [generate-fn]
65 | (reify IPool$Generator
66 | (generate [_ k]
67 | (generate-fn k))
68 | (destroy [_ k v])))
69 |
70 | ;; Test for: https://github.com/ztellman/dirigiste/issues/7
71 | (deftest test-acquire-error-after-throw-in-addobject
72 | (let [p (pool
73 | (simple-generator (fn [k] (if (= :fail k)
74 | (throw (Exception. "Failed"))
75 | k)))
76 | (Pools/fixedController 1 1)
77 | 1)]
78 | (is (= :ok (.acquire p :ok)))
79 | (.dispose p :ok :ok)
80 |
81 | (is (thrown-with-msg? Exception #"Failed"
82 | (.acquire p :fail)))
83 | (is (thrown-with-msg? Exception #"Failed"
84 | (.acquire p :fail)))))
85 |
86 | (deftest test-adjustment
87 | (let [stats (:foo
88 | (adjustment-stats
89 | (fn [p]
90 | (dotimes [_ 1e2]
91 | (dotimes [i 10]
92 | (let [x (.acquire p :foo)]
93 | (Thread/sleep i)
94 | (.release p :foo x)))))))]
95 | (is (< 0 (.getTaskLatency stats 0.1) 3e6))
96 | (is (< 4e6 (.getTaskLatency stats 0.5) 7e6))
97 | (is (< 9e6 (.getTaskLatency stats 0.9) 12e6))))
98 |
--------------------------------------------------------------------------------
/test/java/io/aleph/dirigiste/ExecutorTest.java:
--------------------------------------------------------------------------------
1 | package io.aleph.dirigiste;
2 |
3 | import static org.junit.Assert.assertFalse;
4 | import static org.junit.Assert.assertTrue;
5 | import org.junit.Test;
6 |
7 | import java.util.concurrent.TimeUnit;
8 |
9 | public class ExecutorTest {
10 | @Test
11 | public void testAwaitTerminationShouldReturnFalse() throws InterruptedException {
12 | Executor executor = Executors.fixedExecutor(1);
13 | executor.executeWithoutRejection(() -> {});
14 | executor.shutdown();
15 | boolean result = executor.awaitTermination(500, TimeUnit.MICROSECONDS);
16 | assertFalse(result);
17 | }
18 |
19 | @Test
20 | public void testAwaitTerminationShouldReturnTrue() throws InterruptedException {
21 | Executor executor = Executors.fixedExecutor(1);
22 | executor.executeWithoutRejection(() -> {});
23 | executor.shutdown();
24 | boolean result = executor.awaitTermination(1200, TimeUnit.MICROSECONDS);
25 | assertTrue(result);
26 | }
27 | }
28 |
--------------------------------------------------------------------------------
/test/java/io/aleph/dirigiste/PoolTest.java:
--------------------------------------------------------------------------------
1 | package io.aleph.dirigiste;
2 |
3 | import io.aleph.dirigiste.IPool.Controller;
4 | import io.aleph.dirigiste.IPool.Generator;
5 | import static java.util.stream.Collectors.toMap;
6 | import static org.junit.Assert.assertEquals;
7 | import static org.junit.Assert.assertNull;
8 | import static org.junit.Assert.assertThrows;
9 | import static org.junit.Assert.assertTrue;
10 | import org.junit.Test;
11 |
12 | import java.util.List;
13 | import java.util.Map;
14 | import java.util.UUID;
15 | import java.util.concurrent.ExecutorService;
16 | import java.util.concurrent.Executors;
17 | import java.util.concurrent.Future;
18 | import java.util.concurrent.RejectedExecutionException;
19 | import java.util.concurrent.TimeUnit;
20 | import java.util.stream.Collectors;
21 | import java.util.stream.IntStream;
22 |
23 | public class PoolTest {
24 |
25 | private static final Key KEY = new Key("foo");
26 |
27 | @Test
28 | public void testPoolWithOneAcquire() throws InterruptedException {
29 | Pool<Key, Value> pool = newPool();
30 | pool.acquire(KEY);
31 | assertEquals(0, pool.queue(KEY).availableObjectsCount());
32 | assertEquals(1, pool.queue(KEY).objects.get());
33 | assertEquals(1, getUtilization(pool), 0);
34 | }
35 |
36 | @Test
37 | public void testPoolWithOneAcquireOneReleaseOneAcquire() throws InterruptedException {
38 | Pool<Key, Value> pool = newPool();
39 | Value val = pool.acquire(KEY);
40 | pool.release(KEY, val);
41 | pool.acquire(KEY);
42 | assertEquals(0, pool.queue(KEY).availableObjectsCount());
43 | assertEquals(1, pool.queue(KEY).objects.get());
44 | assertEquals(1, getUtilization(pool), 0);
45 | }
46 |
47 | @Test
48 | public void testPoolWithThreeAcquire() throws InterruptedException {
49 | Pool<Key, Value> pool = newPool();
50 | pool.acquire(KEY);
51 | pool.acquire(KEY);
52 | pool.acquire(KEY);
53 | Thread.sleep(500);
54 | assertEquals(0, pool.queue(KEY).availableObjectsCount());
55 | assertEquals(3, pool.queue(KEY).objects.get());
56 | assertEquals(1, getUtilization(pool), 0);
57 | }
58 |
59 | @Test
60 | public void testPoolWithTwoAcquireOneDispose() throws InterruptedException {
61 | Pool<Key, Value> pool = newPool();
62 | pool.acquire(KEY);
63 | Value val = pool.acquire(KEY);
64 | pool.dispose(KEY, val);
65 | assertEquals(0, pool.queue(KEY).availableObjectsCount());
66 | assertEquals(1, pool.queue(KEY).objects.get());
67 | assertEquals(1, getUtilization(pool), 0);
68 | }
69 |
70 | @Test
71 | public void testPoolWithOneAcquireOneDispose() throws InterruptedException {
72 | Pool<Key, Value> pool = newPool();
73 | Value val = pool.acquire(KEY);
74 | pool.dispose(KEY, val);
75 | assertEquals(0, pool.queue(KEY).availableObjectsCount());
76 | assertEquals(0, pool.queue(KEY).objects.get());
77 | assertEquals(0, getUtilization(pool), 0);
78 | }
79 |
80 | @Test
81 | public void testPoolWithTwoAcquireOneRelease() throws InterruptedException {
82 | Pool<Key, Value> pool = newPool();
83 | pool.acquire(KEY);
84 | Value val = pool.acquire(KEY);
85 | pool.release(KEY, val);
86 | assertEquals(1, pool.queue(KEY).availableObjectsCount());
87 | assertEquals(2, pool.queue(KEY).objects.get());
88 | assertEquals(0.5, getUtilization(pool), 0);
89 | }
90 |
91 | @Test
92 | public void testPoolWithTwoAcquireTwoRelease() throws InterruptedException {
93 | Pool<Key, Value> pool = newPool();
94 | Value val = pool.acquire(KEY);
95 | Value val2 = pool.acquire(KEY);
96 | pool.release(KEY, val);
97 | pool.release(KEY, val2);
98 | assertEquals(2, pool.queue(KEY).availableObjectsCount());
99 | assertEquals(2, pool.queue(KEY).objects.get());
100 | assertEquals(0, getUtilization(pool), 0);
101 | }
102 |
103 | @Test
104 | public void testPoolWithTwoAcquireOneReleaseOneDispose() throws InterruptedException {
105 | Pool<Key, Value> pool = newPool();
106 | Value val = pool.acquire(KEY);
107 | Value val2 = pool.acquire(KEY);
108 | pool.release(KEY, val);
109 | pool.dispose(KEY, val2);
110 | assertEquals(1, pool.queue(KEY).availableObjectsCount());
111 | assertEquals(1, pool.queue(KEY).objects.get());
112 | assertEquals(0, getUtilization(pool), 0);
113 | }
114 |
115 | @Test
116 | public void testFullPoolWithOneAcquire() {
117 | Pool<Key, Value> pool = newPool(fullController());
118 | pool.acquire(KEY, __ -> {});
119 | assertEquals(0, pool.queue(KEY).availableObjectsCount());
120 | assertEquals(0, pool.queue(KEY).objects.get());
121 | assertEquals(2, getUtilization(pool), 0);
122 | }
123 |
124 | @Test
125 | public void testFullPoolWithTwoAcquire() throws InterruptedException {
126 | Pool<Key, Value> pool = newPool(fullController());
127 | pool.acquire(KEY, __ -> {});
128 | pool.acquire(KEY, __ -> {});
129 | Thread.sleep(200);
130 | assertEquals(0, pool.queue(KEY).availableObjectsCount());
131 | assertEquals(0, pool.queue(KEY).objects.get());
132 | assertEquals(3, getUtilization(pool), 0);
133 | }
134 |
135 | @Test
136 | public void testFullPoolWithThreeAcquire() throws InterruptedException {
137 | // This test is run multiple times because it has exposed race conditions in
138 | // the past; it verifies that queues are not removed while still in use.
139 | for(int i=0;i<100;i++) {
140 | Pool<Key, Value> pool = newPool(fullController());
141 | pool.acquire(KEY, __ -> {});
142 | pool.acquire(KEY, __ -> {});
143 | pool.acquire(KEY, __ -> {});
144 | Thread.sleep(50);
145 | assertEquals(0, pool.queue(KEY).availableObjectsCount());
146 | assertEquals(0, pool.queue(KEY).objects.get());
147 | assertEquals(4, getUtilization(pool), 0);
148 | }
149 | }
150 |
151 | @Test
152 | public void testPoolWithMisbehavingGenerator() {
153 | Pool<Key, Value> pool = newPool(noopController(), misbehavingGenerator());
154 | assertThrows(Exception.class, () -> pool.acquire(KEY));
155 | }
156 |
157 | @Test
158 | public void testPoolWithMisbehavingDestroyGenerator() throws InterruptedException {
159 | Pool<Key, Value> pool = newPool(noopController(), misbehavingDestroyGenerator());
160 | Value val = pool.acquire(KEY);
161 | assertThrows(Exception.class, () -> pool.dispose(KEY, val));
162 | }
163 |
164 | @Test
165 | public void testPoolDisposeWhenShutdown() throws InterruptedException {
166 | Pool<Key, Value> pool = newPool(noopController(), misbehavingDestroyGenerator());
167 | Value val = pool.acquire(KEY);
168 | pool.shutdown();
169 | assertThrows(Exception.class, () -> pool.dispose(KEY, val));
170 | }
171 |
172 | @Test
173 | public void testPoolAcquireWhenQueueFull() {
174 | Pool<Key, Value> pool = newPool(fullController(), generator(), 1);
175 | pool.acquire(KEY, __ -> {});
176 | assertThrows(RejectedExecutionException.class, () -> pool.acquire(KEY));
177 | }
178 |
179 | @Test
180 | public void testPoolShutdown() throws InterruptedException {
181 | Pool<Key, Value> pool = newPool(noopController());
182 | Value val = pool.acquire(KEY);
183 | pool.release(KEY, val);
184 | pool.shutdown();
185 | assertEquals(0, pool.queue(KEY).availableObjectsCount());
186 | assertEquals(0, pool.queue(KEY).objects.get());
187 | assertEquals(0, getUtilization(pool), 0);
188 | }
189 |
190 | @Test
191 | public void testPoolQueueRemovalWhenNotInUseWithRelease() throws InterruptedException {
192 | Pool<Key, Value> pool = newPool(utilizationController());
193 | Value val = pool.acquire(KEY);
194 | pool.release(KEY, val);
195 | // Wait for the controlPeriod
196 | Thread.sleep(300);
197 | assertNull(pool._queues.get(KEY));
198 | }
199 |
200 | @Test
201 | public void testPoolQueueRemovalWhenNotInUseWithDispose() throws InterruptedException {
202 | Pool<Key, Value> pool = newPool(utilizationController());
203 | Value val = pool.acquire(KEY);
204 | pool.dispose(KEY, val);
205 | // Wait for the controlPeriod
206 | Thread.sleep(300);
207 | assertNull(pool._queues.get(KEY));
208 | }
209 |
210 | @Test
211 | public void testPoolWithSimpleUtilizationExecutor() throws InterruptedException {
212 | Pool<Key, Value> pool = newPool(utilizationController());
213 | Value val1 = pool.acquire(KEY);
214 | Value val2 = pool.acquire(KEY);
215 | pool.release(KEY, val1);
216 | pool.dispose(KEY, val2);
217 | // Wait for the controlPeriod
218 | Thread.sleep(300);
219 | assertNull(pool._queues.get(KEY));
220 | }
221 |
222 | @Test
223 | public void testPoolIncByOne() throws InterruptedException {
224 | Pool<Key, Value> pool = newPool(incByOneController(), generator(), 1, 10, 100);
225 | pool.acquire(KEY);
226 | // Wait a little while until the pool has been adjusted
227 | Thread.sleep(150);
228 | assertEquals(2, pool._queues.get(KEY).objects.get());
229 | }
230 |
231 | @Test
232 | public void testPoolOnAHighlyConcurrentEnvironment() throws InterruptedException {
233 | ExecutorService executorService = Executors.newFixedThreadPool(30);
234 | Pool<Key, Value> pool = newPool(utilizationController());
235 |
236 | List<Future<Boolean>> futures = IntStream.range(0, 1000).mapToObj(__ -> executorService.submit(() -> {
237 | try {
238 | Value val = pool.acquire(KEY);
239 | pool.dispose(KEY, val);
240 | return true;
241 | } catch (InterruptedException e) {
242 | throw new RuntimeException(e);
243 | }
244 | })).collect(Collectors.toList());
245 |
246 | assertTrue(futures.stream().allMatch(f -> {
247 | try {
248 | return f.get();
249 | } catch (Exception e) {
250 | throw new RuntimeException(e);
251 | }
252 | }));
253 |
254 | int available = pool.queue(KEY).availableObjectsCount();
255 | int objects = pool.queue(KEY).objects.get();
256 | double utilization = getUtilization(pool);
257 |
258 | // We should have between 0 and 1000 objects, depending on whether they
259 | // have been destroyed yet
260 | assertTrue(available >= 0 && available <= 1000);
261 | assertTrue(objects >= 0 && objects <= 1000);
262 | // Utilization can go below 0
263 | assertTrue(utilization >= -1 && utilization <= 1);
264 |
265 | // Wait for the controlPeriod
266 | Thread.sleep(300);
267 | assertNull(pool._queues.get(KEY));
268 | }
269 |
270 | @Test
271 | public void testPoolOnASmallConcurrentEnvironment() throws InterruptedException {
272 | ExecutorService executorService = Executors.newFixedThreadPool(30);
273 | Pool<Key, Value> pool = newPool(utilizationController());
274 |
275 | List<Future<Boolean>> futures = IntStream.range(0, 5).mapToObj(__ -> executorService.submit(() -> {
276 | try {
277 | for(int i = 0;i<=100000;i++) {
278 | Value val = pool.acquire(KEY);
279 | pool.dispose(KEY, val);
280 | }
281 | return true;
282 | } catch (InterruptedException e) {
283 | throw new RuntimeException(e);
284 | }
285 | })).collect(Collectors.toList());
286 |
287 | assertTrue(futures.stream().allMatch(f -> {
288 | try {
289 | return f.get();
290 | } catch (Exception e) {
291 | throw new RuntimeException(e);
292 | }
293 | }));
294 |
295 | int available = pool.queue(KEY).availableObjectsCount();
296 | int objects = pool.queue(KEY).objects.get();
297 | double utilization = getUtilization(pool);
298 |
299 | // We should have between 0 and 5 objects, depending on whether they
300 | // have been destroyed yet
301 | assertTrue(available >= 0 && available <= 5);
302 | assertTrue(objects >= 0 && objects <= 5);
303 | // Utilization can go below 0
304 | assertTrue(utilization >= -1 && utilization <= 1);
305 |
306 | // Wait for the controlPeriod
307 | Thread.sleep(300);
308 | assertNull(pool._queues.get(KEY));
309 | }
310 |
311 | @Test
312 | public void testPoolAcquireReleaseMultipleTimes() {
313 | Pool<Key, Value> pool = newPool(utilizationController());
314 | assertTrue(IntStream.range(0, 10000).mapToObj(i -> {
315 | try {
316 | Value val = pool.acquire(KEY);
317 | pool.dispose(KEY, val);
318 | return true;
319 | } catch (InterruptedException e) {
320 | throw new RuntimeException(e);
321 | }
322 | }).allMatch(v -> v));
323 | }
324 |
325 | private Pool<Key, Value> newPool() {
326 | return newPool(noopController(), generator());
327 | }
328 |
329 | private Pool<Key, Value> newPool(Controller<Key> controller) {
330 | return new Pool<>(generator(), controller, 65536, 1, 10, TimeUnit.MICROSECONDS);
331 | }
332 |
333 | private Pool<Key, Value> newPool(Controller<Key> controller, Generator<Key, Value> generator) {
334 | return new Pool<>(generator, controller, 65536, 10, 1000, TimeUnit.MICROSECONDS);
335 | }
336 |
337 | private Pool<Key, Value> newPool(Controller<Key> controller, Generator<Key, Value> generator, int maxQueueSize) {
338 | return new Pool<>(generator, controller, maxQueueSize, 10, 1000, TimeUnit.MICROSECONDS);
339 | }
340 |
341 | private Pool<Key, Value> newPool(Controller<Key> controller, Generator<Key, Value> generator, int maxQueueSize, int samplePeriod, int controlPeriod) {
342 | return new Pool<>(generator, controller, maxQueueSize, samplePeriod, controlPeriod, TimeUnit.MILLISECONDS);
343 | }
344 |
345 | private double getUtilization(Pool<Key, Value> pool) {
346 | return pool.getUtilization(pool.queue(KEY).availableObjectsCount(), pool.queue(KEY).getQueueLength(), pool.queue(KEY).objects.get());
347 | }
348 |
349 | private Generator<Key, Value> generator() {
350 | return new Generator<Key, Value>() {
351 | @Override
352 | public Value generate(Key key) {
353 | return new Value(UUID.randomUUID().toString());
354 | }
355 |
356 | @Override
357 | public void destroy(Key key, Value val) {
358 | // Nothing to clean up
359 | }
360 | };
361 | }
362 |
363 | private Generator<Key, Value> misbehavingGenerator() {
364 | return new Generator<Key, Value>() {
365 | @Override
366 | public Value generate(Key key) {
367 | throw new RuntimeException("BOOM!");
368 | }
369 |
370 | @Override
371 | public void destroy(Key key, Value val) {
372 | throw new RuntimeException("BOOM!");
373 | }
374 | };
375 | }
376 |
377 | private Generator<Key, Value> misbehavingDestroyGenerator() {
378 | return new Generator<Key, Value>() {
379 | @Override
380 | public Value generate(Key key) {
381 | return new Value(UUID.randomUUID().toString());
382 | }
383 |
384 | @Override
385 | public void destroy(Key key, Value val) {
386 | throw new RuntimeException("BOOM!");
387 | }
388 | };
389 | }
390 |
391 | private Controller<Key> noopController() {
392 | return new Controller<Key>() {
393 | @Override
394 | public boolean shouldIncrement(Key key, int objectsForKey, int totalObjects) {
395 | return true;
396 | }
397 |
398 | @Override
399 | public Map<Key, Integer> adjustment(Map<Key, Stats> stats) {
400 | return stats.entrySet().stream().collect(toMap(Map.Entry::getKey, __ -> 0));
401 | }
402 | };
403 | }
404 |
405 | private Controller<Key> incByOneController() {
406 | return new Controller<Key>() {
407 | @Override
408 | public boolean shouldIncrement(Key key, int objectsForKey, int totalObjects) {
409 | return true;
410 | }
411 |
412 | @Override
413 | public Map<Key, Integer> adjustment(Map<Key, Stats> stats) {
414 | return stats.entrySet().stream().collect(toMap(Map.Entry::getKey, __ -> 1));
415 | }
416 | };
417 | }
418 |
419 | private Controller<Key> fullController() {
420 | return new Controller<Key>() {
421 | @Override
422 | public boolean shouldIncrement(Key key, int objectsForKey, int totalObjects) {
423 | return false;
424 | }
425 |
426 | @Override
427 | public Map<Key, Integer> adjustment(Map<Key, Stats> stats) {
428 | return stats.entrySet().stream().collect(toMap(Map.Entry::getKey, __ -> 0));
429 | }
430 | };
431 | }
432 |
433 | private Controller<Key> utilizationController() {
434 | return Pools.utilizationController(0.9, 8, 1024);
435 | }
436 |
437 | // An identity-based key
438 | private static class Key {
439 | public final String value;
440 |
441 | Key(String value) {
442 | this.value = value;
443 | }
444 | }
445 |
446 | // An identity-based value
447 | private static class Value {
448 | public final String value;
449 |
450 | Value(String value) {
451 | this.value = value;
452 | }
453 | }
454 | }
455 |
--------------------------------------------------------------------------------
/test/java/io/aleph/dirigiste/StatsTest.java:
--------------------------------------------------------------------------------
1 | package io.aleph.dirigiste;
2 |
3 | import io.aleph.dirigiste.Stats.UniformDoubleReservoir;
4 | import io.aleph.dirigiste.Stats.UniformDoubleReservoirMap;
5 | import io.aleph.dirigiste.Stats.UniformLongReservoir;
6 | import io.aleph.dirigiste.Stats.UniformLongReservoirMap;
7 | import static org.junit.Assert.assertArrayEquals;
8 | import static org.junit.Assert.assertEquals;
9 | import static org.junit.Assert.assertSame;
10 | import static org.junit.Assert.assertThrows;
11 | import static org.junit.Assert.assertTrue;
12 | import org.junit.Test;
13 |
14 | import java.util.EnumSet;
15 | import java.util.Map;
16 | import java.util.UUID;
17 | import java.util.concurrent.ThreadLocalRandom;
18 | import java.util.function.Function;
19 | import java.util.stream.IntStream;
20 |
21 | public class StatsTest {
22 | @Test
23 | public void testUniformLongReservoirWithReservoirSize() {
24 | UniformLongReservoir uniformLongReservoir = new UniformLongReservoir();
25 | IntStream.rangeClosed(1, Stats.RESERVOIR_SIZE).forEach(uniformLongReservoir::sample);
26 | long[] values = IntStream.range(0, Stats.RESERVOIR_SIZE)
27 | .mapToLong(uniformLongReservoir._values::get)
28 | .toArray();
29 | assertArrayEquals(values, uniformLongReservoir.toArray());
30 | }
31 |
32 | @Test
33 | public void testSampleOnUniformLongReservoirWithReservoirSizePlus1() {
34 | UniformLongReservoir uniformLongReservoir = new UniformLongReservoir();
35 | IntStream.rangeClosed(1, Stats.RESERVOIR_SIZE+1).forEach(uniformLongReservoir::sample);
36 | boolean existsSomewhere = IntStream.range(0, Stats.RESERVOIR_SIZE)
37 | .anyMatch(i -> Stats.RESERVOIR_SIZE + 1 == uniformLongReservoir._values.get(i));
38 | assertTrue(existsSomewhere);
39 | assertEquals(Stats.RESERVOIR_SIZE+1, uniformLongReservoir.toArray()[Stats.RESERVOIR_SIZE-1], 0);
40 | }
41 |
42 | @Test
43 | public void testUniformDoubleReservoirWithReservoirSize() {
44 | UniformDoubleReservoir uniformDoubleReservoir = new UniformDoubleReservoir();
45 | IntStream.rangeClosed(1, Stats.RESERVOIR_SIZE).forEach(uniformDoubleReservoir::sample);
46 | double[] values = IntStream.range(0, Stats.RESERVOIR_SIZE)
47 | .mapToDouble(i -> Double.longBitsToDouble(uniformDoubleReservoir._values.get(i)))
48 | .toArray();
49 | assertArrayEquals(values, uniformDoubleReservoir.toArray(),0.0);
50 | }
51 |
52 | @Test
53 | public void testUniformDoubleReservoirWithReservoirSizePlus1() {
54 | UniformDoubleReservoir uniformDoubleReservoir = new UniformDoubleReservoir();
55 | IntStream.rangeClosed(1, Stats.RESERVOIR_SIZE+1).forEach(uniformDoubleReservoir::sample);
56 | boolean existsSomewhere = IntStream.range(0, Stats.RESERVOIR_SIZE)
57 | .anyMatch(i -> Stats.RESERVOIR_SIZE + 1 == Double.longBitsToDouble(uniformDoubleReservoir._values.get(i)));
58 | assertTrue(existsSomewhere);
59 | assertEquals(Stats.RESERVOIR_SIZE+1, uniformDoubleReservoir.toArray()[Stats.RESERVOIR_SIZE-1], 0);
60 | }
61 |
62 | @Test
63 | public void testUniformLongReservoirMap() {
64 | UniformLongReservoirMap<Key> uniformLongReservoirMap = new UniformLongReservoirMap<>();
65 | IntStream.range(0, 20).forEach(i -> uniformLongReservoirMap.sample(new Key(UUID.randomUUID().toString()), ThreadLocalRandom.current().nextInt(100)));
66 | Map<Key, UniformLongReservoir> reservoirs = uniformLongReservoirMap._reservoirs;
67 | assertEquals(20, reservoirs.size());
68 | assertEquals(20, uniformLongReservoirMap.toMap().size());
69 | assertEquals(20, reservoirs.size());
70 | reservoirs.keySet().forEach(k -> assertSame(reservoirs.get(k), uniformLongReservoirMap._reservoirs.get(k)));
71 | uniformLongReservoirMap._reservoirs.keySet().forEach(uniformLongReservoirMap::remove);
72 | assertTrue(uniformLongReservoirMap._reservoirs.isEmpty());
73 | }
74 |
75 | @Test
76 | public void testUniformDoubleReservoirMap() {
77 | UniformDoubleReservoirMap<Key> uniformDoubleReservoirMap = new UniformDoubleReservoirMap<>();
78 | IntStream.range(0, 20).forEach(i -> uniformDoubleReservoirMap.sample(new Key(UUID.randomUUID().toString()), ThreadLocalRandom.current().nextInt(100)));
79 | Map<Key, UniformDoubleReservoir> reservoirs = uniformDoubleReservoirMap._reservoirs;
80 | assertEquals(20, reservoirs.size());
81 | uniformDoubleReservoirMap.remove(uniformDoubleReservoirMap._reservoirs.keySet().iterator().next());
82 | assertEquals(19, uniformDoubleReservoirMap.toMap().size());
83 | assertEquals(0, reservoirs.size());
84 | assertTrue(uniformDoubleReservoirMap._reservoirs.isEmpty());
85 | }
86 |
87 | @Test
88 | public void testGetMetrics() {
89 | Stats stats = new Stats(EnumSet.of(Stats.Metric.UTILIZATION), 5, new double[] {}, new double[] {}, new double[] {},
90 | new double[] {}, new long[] {}, new long[] {}, new long[] {});
91 | assertEquals(EnumSet.of(Stats.Metric.UTILIZATION), stats.getMetrics());
92 | }
93 |
94 | @Test
95 | public void testGetNumWorkers() {
96 | Stats stats = new Stats(EnumSet.allOf(Stats.Metric.class), 5, new double[] {}, new double[] {}, new double[] {},
97 | new double[] {}, new long[] {}, new long[] {}, new long[] {});
98 | assertEquals(5, stats.getNumWorkers());
99 | }
100 |
101 | @Test
102 | public void testGetUtilizationWithEmptyStats() {
103 | assertEquals(0, Stats.EMPTY.getUtilization(1), 0);
104 | }
105 |
106 | @Test
107 | public void testGetUtilizationWithNullStat() {
108 | Stats stats = new Stats(EnumSet.allOf(Stats.Metric.class), 0, null, new double[] {}, new double[] {},
109 | new double[] {}, new long[] {}, new long[] {}, new long[] {});
110 | lerpBehaviorWithNullStat(stats::getUtilization);
111 | }
112 |
113 | @Test
114 | public void testGetUtilizationWithSingleStat() {
115 | Stats stats = new Stats(EnumSet.allOf(Stats.Metric.class), 0, new double[] {5}, new double[] {}, new double[] {},
116 | new double[] {}, new long[] {}, new long[] {}, new long[] {});
117 | lerpBehaviorWithSingleStat(stats::getUtilization);
118 | }
119 |
120 | @Test
121 | public void testGetUtilizationWithSomeStats() {
122 | Stats stats = new Stats(EnumSet.allOf(Stats.Metric.class), 0, new double[] {0, 0, 1, 2, 4, 5}, new double[] {}, new double[] {},
123 | new double[] {}, new long[] {}, new long[] {}, new long[] {});
124 | lerpBehaviorWithSomeStats(stats::getUtilization);
125 | }
126 |
127 | @Test
128 | public void testGetMeanUtilizationWithEmptyStats() {
129 | assertEquals(0, Stats.EMPTY.getMeanUtilization(), 0);
130 | }
131 |
132 | @Test
133 | public void testGetMeanUtilizationWithNullStat() {
134 | Stats stats = new Stats(EnumSet.allOf(Stats.Metric.class), 0, null, new double[] {}, new double[] {},
135 | new double[] {}, new long[] {}, new long[] {}, new long[] {});
136 | assertEquals(0, stats.getMeanUtilization(), 0);
137 | }
138 |
139 | @Test
140 | public void testGetMeanUtilizationWithSingleStat() {
141 | Stats stats = new Stats(EnumSet.allOf(Stats.Metric.class), 0, new double[] {5}, new double[] {}, new double[] {},
142 | new double[] {}, new long[] {}, new long[] {}, new long[] {});
143 | assertEquals(5, stats.getMeanUtilization(), 0);
144 | }
145 |
146 | @Test
147 | public void testGetMeanUtilizationWithSomeStats() {
148 | Stats stats = new Stats(EnumSet.allOf(Stats.Metric.class), 0, new double[] {0, 0, 1, 2, 4, 5}, new double[] {}, new double[] {},
149 | new double[] {}, new long[] {}, new long[] {}, new long[] {});
150 | assertEquals(2, stats.getMeanUtilization(), 0);
151 | }
152 |
153 | @Test
154 | public void testGetTaskArrivalRateWithEmptyStats() {
155 | assertEquals(0, Stats.EMPTY.getTaskArrivalRate(1), 0);
156 | }
157 |
158 | @Test
159 | public void testGetTaskArrivalRateWithNullStat() {
160 | Stats stats = new Stats(EnumSet.allOf(Stats.Metric.class), 0, new double[] {}, null, new double[] {},
161 | new double[] {}, new long[] {}, new long[] {}, new long[] {});
162 | lerpBehaviorWithNullStat(stats::getTaskArrivalRate);
163 | }
164 |
165 | @Test
166 | public void testGetTaskArrivalRateWithSingleStat() {
167 | Stats stats = new Stats(EnumSet.allOf(Stats.Metric.class), 0, new double[] {}, new double[] {5}, new double[] {},
168 | new double[] {}, new long[] {}, new long[] {}, new long[] {});
169 | lerpBehaviorWithSingleStat(stats::getTaskArrivalRate);
170 | }
171 |
172 | @Test
173 | public void testGetMeanTaskArrivalRateWithEmptyStats() {
174 | assertEquals(0, Stats.EMPTY.getMeanTaskArrivalRate(), 0);
175 | }
176 |
177 | @Test
178 | public void testGetMeanTaskArrivalRateWithNullStat() {
179 | Stats stats = new Stats(EnumSet.allOf(Stats.Metric.class), 0, new double[] {}, null, new double[] {},
180 | new double[] {}, new long[] {}, new long[] {}, new long[] {});
181 | assertEquals(0, stats.getMeanTaskArrivalRate(), 0);
182 | }
183 |
184 | @Test
185 | public void testGetMeanTaskArrivalRateWithSingleStat() {
186 | Stats stats = new Stats(EnumSet.allOf(Stats.Metric.class), 0, new double[] {}, new double[] {5}, new double[] {},
187 | new double[] {}, new long[] {}, new long[] {}, new long[] {});
188 | assertEquals(5, stats.getMeanTaskArrivalRate(), 0);
189 | }
190 |
191 | @Test
192 | public void testGetMeanTaskArrivalRateWithSomeStats() {
193 | Stats stats = new Stats(EnumSet.allOf(Stats.Metric.class), 0, new double[] {}, new double[] {0, 0, 1, 2, 4, 5}, new double[] {},
194 | new double[] {}, new long[] {}, new long[] {}, new long[] {});
195 | assertEquals(2, stats.getMeanTaskArrivalRate(), 0);
196 | }
197 |
198 | @Test
199 | public void testGetTaskArrivalRateWithSomeStats() {
200 | Stats stats = new Stats(EnumSet.allOf(Stats.Metric.class), 0, new double[] {}, new double[] {0, 0, 1, 2, 4, 5}, new double[] {},
201 | new double[] {}, new long[] {}, new long[] {}, new long[] {});
202 | lerpBehaviorWithSomeStats(stats::getTaskArrivalRate);
203 | }
204 |
205 | @Test
206 | public void testGetTaskCompletionRateWithEmptyStats() {
207 | assertEquals(0, Stats.EMPTY.getTaskCompletionRate(1), 0);
208 | }
209 |
210 | @Test
211 | public void testGetTaskCompletionRateWithNullStat() {
212 | Stats stats = new Stats(EnumSet.allOf(Stats.Metric.class), 0, new double[] {}, new double[] {}, null,
213 | new double[] {}, new long[] {}, new long[] {}, new long[] {});
214 | lerpBehaviorWithNullStat(stats::getTaskCompletionRate);
215 | }
216 |
217 | @Test
218 | public void testGetTaskCompletionRateWithSingleStat() {
219 | Stats stats = new Stats(EnumSet.allOf(Stats.Metric.class), 0, new double[] {}, new double[] {}, new double[] {5},
220 | new double[] {}, new long[] {}, new long[] {}, new long[] {});
221 | lerpBehaviorWithSingleStat(stats::getTaskCompletionRate);
222 | }
223 |
224 | @Test
225 | public void testGetTaskCompletionRateWithSomeStats() {
226 | Stats stats = new Stats(EnumSet.allOf(Stats.Metric.class), 0, new double[] {}, new double[] {}, new double[] {0, 0, 1, 2, 4, 5},
227 | new double[] {}, new long[] {}, new long[] {}, new long[] {});
228 | lerpBehaviorWithSomeStats(stats::getTaskCompletionRate);
229 | }
230 |
231 | @Test
232 | public void testGetMeanTaskCompletionRateWithEmptyStats() {
233 | assertEquals(0, Stats.EMPTY.getMeanTaskCompletionRate(), 0);
234 | }
235 |
236 | @Test
237 | public void testGetMeanTaskCompletionRateWithNullStat() {
238 | Stats stats = new Stats(EnumSet.allOf(Stats.Metric.class), 0, new double[] {}, new double[] {}, null,
239 | new double[] {}, new long[] {}, new long[] {}, new long[] {});
240 | assertEquals(0, stats.getMeanTaskCompletionRate(), 0);
241 | }
242 |
243 | @Test
244 | public void testGetMeanTaskCompletionRateWithSingleStat() {
245 | Stats stats = new Stats(EnumSet.allOf(Stats.Metric.class), 0, new double[] {}, new double[] {}, new double[] {5},
246 | new double[] {}, new long[] {}, new long[] {}, new long[] {});
247 | assertEquals(5, stats.getMeanTaskCompletionRate(), 0);
248 | }
249 |
250 | @Test
251 | public void testGetMeanTaskCompletionRateWithSomeStats() {
252 | Stats stats = new Stats(EnumSet.allOf(Stats.Metric.class), 0, new double[] {}, new double[] {}, new double[] {0, 0, 1, 2, 4, 5},
253 | new double[] {}, new long[] {}, new long[] {}, new long[] {});
254 | assertEquals(2, stats.getMeanTaskCompletionRate(), 0);
255 | }
256 |
257 | @Test
258 | public void testGetTaskRejectionRateWithEmptyStats() {
259 | assertEquals(0, Stats.EMPTY.getTaskRejectionRate(1), 0);
260 | }
261 |
262 | @Test
263 | public void testGetTaskRejectionRateWithNullStat() {
264 | Stats stats = new Stats(EnumSet.allOf(Stats.Metric.class), 0, new double[] {}, new double[] {}, new double[] {},
265 | null, new long[] {}, new long[] {}, new long[] {});
266 | lerpBehaviorWithNullStat(stats::getTaskRejectionRate);
267 | }
268 |
269 | @Test
270 | public void testGetTaskRejectionRateWithSingleStat() {
271 | Stats stats = new Stats(EnumSet.allOf(Stats.Metric.class), 0, new double[] {}, new double[] {}, new double[] {},
272 | new double[] {5}, new long[] {}, new long[] {}, new long[] {});
273 | lerpBehaviorWithSingleStat(stats::getTaskRejectionRate);
274 | }
275 |
276 | @Test
277 | public void testGetTaskRejectionRateWithSomeStats() {
278 | Stats stats = new Stats(EnumSet.allOf(Stats.Metric.class), 0, new double[] {}, new double[] {}, new double[] {},
279 | new double[] {0, 0, 1, 2, 4, 5}, new long[] {}, new long[] {}, new long[] {});
280 | lerpBehaviorWithSomeStats(stats::getTaskRejectionRate);
281 | }
282 |
283 | @Test
284 | public void testGetMeanTaskRejectionRateWithEmptyStats() {
285 | assertEquals(0, Stats.EMPTY.getMeanTaskRejectionRate(), 0);
286 | }
287 |
288 | @Test
289 | public void testGetMeanTaskRejectionRateWithNullStat() {
290 | Stats stats = new Stats(EnumSet.allOf(Stats.Metric.class), 0, new double[] {}, new double[] {}, new double[] {},
291 | null, new long[] {}, new long[] {}, new long[] {});
292 | assertEquals(0, stats.getMeanTaskRejectionRate(), 0);
293 | }
294 |
295 | @Test
296 | public void testGetMeanTaskRejectionRateWithSingleStat() {
297 | Stats stats = new Stats(EnumSet.allOf(Stats.Metric.class), 0, new double[] {}, new double[] {}, new double[] {},
298 | new double[] {5}, new long[] {}, new long[] {}, new long[] {});
299 | assertEquals(5, stats.getMeanTaskRejectionRate(), 0);
300 | }
301 |
302 | @Test
303 | public void testGetMeanTaskRejectionRateWithSomeStats() {
304 | Stats stats = new Stats(EnumSet.allOf(Stats.Metric.class), 0, new double[] {}, new double[] {}, new double[] {},
305 | new double[] {0, 0, 1, 2, 4, 5}, new long[] {}, new long[] {}, new long[] {});
306 | assertEquals(2, stats.getMeanTaskRejectionRate(), 0);
307 | }
308 |
309 | @Test
310 | public void testGetQueueLengthWithEmptyStats() {
311 | assertEquals(0, Stats.EMPTY.getQueueLength(1), 0);
312 | }
313 |
314 | @Test
315 | public void testGetQueueLengthWithNullStat() {
316 | Stats stats = new Stats(EnumSet.allOf(Stats.Metric.class), 0, new double[] {}, new double[] {}, new double[] {},
317 | new double[] {}, null, new long[] {}, new long[] {});
318 | lerpBehaviorWithNullStat(stats::getQueueLength);
319 | }
320 |
321 | @Test
322 | public void testGetQueueLengthWithSingleStat() {
323 | Stats stats = new Stats(EnumSet.allOf(Stats.Metric.class), 0, new double[] {}, new double[] {}, new double[] {},
324 | new double[] {}, new long[] {5}, new long[] {}, new long[] {});
325 | lerpBehaviorWithSingleStat(stats::getQueueLength);
326 | }
327 |
328 | @Test
329 | public void testGetQueueLengthWithSomeStats() {
330 | Stats stats = new Stats(EnumSet.allOf(Stats.Metric.class), 0, new double[] {}, new double[] {}, new double[] {},
331 | new double[] {}, new long[] {0, 0, 1, 2, 4, 5}, new long[] {}, new long[] {});
332 | lerpBehaviorWithSomeStats(stats::getQueueLength);
333 | }
334 |
335 | @Test
336 | public void testGetMeanQueueLengthWithEmptyStats() {
337 | assertEquals(0, Stats.EMPTY.getMeanQueueLength(), 0);
338 | }
339 |
340 | @Test
341 | public void testGetMeanQueueLengthWithNullStat() {
342 | Stats stats = new Stats(EnumSet.allOf(Stats.Metric.class), 0, new double[] {}, new double[] {}, new double[] {},
343 | new double[] {}, null, new long[] {}, new long[] {});
344 | assertEquals(0, stats.getMeanQueueLength(), 0);
345 | }
346 |
347 | @Test
348 | public void testGetMeanQueueLengthWithSingleStat() {
349 | Stats stats = new Stats(EnumSet.allOf(Stats.Metric.class), 0, new double[] {}, new double[] {}, new double[] {},
350 | new double[] {}, new long[] {5}, new long[] {}, new long[] {});
351 | assertEquals(5, stats.getMeanQueueLength(), 0);
352 | }
353 |
354 | @Test
355 | public void testGetMeanQueueLengthWithSomeStats() {
356 | Stats stats = new Stats(EnumSet.allOf(Stats.Metric.class), 0, new double[] {}, new double[] {}, new double[] {},
357 | new double[] {}, new long[] {0, 0, 1, 2, 4, 5}, new long[] {}, new long[] {});
358 | assertEquals(2, stats.getMeanQueueLength(), 0);
359 | }
360 |
361 | @Test
362 | public void testGetQueueLatencyWithEmptyStats() {
363 | assertEquals(0, Stats.EMPTY.getQueueLatency(1), 0);
364 | }
365 |
366 | @Test
367 | public void testGetQueueLatencyWithNullStat() {
368 | Stats stats = new Stats(EnumSet.allOf(Stats.Metric.class), 0, new double[] {}, new double[] {}, new double[] {},
369 | new double[] {}, new long[] {}, null, new long[] {});
370 | lerpBehaviorWithNullStat(stats::getQueueLatency);
371 | }
372 |
373 | @Test
374 | public void testGetQueueLatencyWithSingleStat() {
375 | Stats stats = new Stats(EnumSet.allOf(Stats.Metric.class), 0, new double[] {}, new double[] {}, new double[] {},
376 | new double[] {}, new long[] {}, new long[] {5}, new long[] {});
377 | lerpBehaviorWithSingleStat(stats::getQueueLatency);
378 | }
379 |
380 | @Test
381 | public void testGetQueueLatencyWithSomeStats() {
382 | Stats stats = new Stats(EnumSet.allOf(Stats.Metric.class), 0, new double[] {}, new double[] {}, new double[] {},
383 | new double[] {}, new long[] {}, new long[] {0, 0, 1, 2, 4, 5}, new long[] {});
384 | lerpBehaviorWithSomeStats(stats::getQueueLatency);
385 | }
386 |
387 | @Test
388 | public void testGetMeanQueueLatencyWithEmptyStats() {
389 | assertEquals(0, Stats.EMPTY.getMeanQueueLatency(), 0);
390 | }
391 |
392 | @Test
393 | public void testGetMeanQueueLatencyWithNullStat() {
394 | Stats stats = new Stats(EnumSet.allOf(Stats.Metric.class), 0, new double[] {}, new double[] {}, new double[] {},
395 | new double[] {}, new long[] {}, null, new long[] {});
396 | assertEquals(0, stats.getMeanQueueLatency(), 0);
397 | }
398 |
399 | @Test
400 | public void testGetMeanQueueLatencyWithSingleStat() {
401 | Stats stats = new Stats(EnumSet.allOf(Stats.Metric.class), 0, new double[] {}, new double[] {}, new double[] {},
402 | new double[] {}, new long[] {}, new long[] {5}, new long[] {});
403 | assertEquals(5, stats.getMeanQueueLatency(), 0);
404 | }
405 |
406 | @Test
407 | public void testGetMeanQueueLatencyWithSomeStats() {
408 | Stats stats = new Stats(EnumSet.allOf(Stats.Metric.class), 0, new double[] {}, new double[] {}, new double[] {},
409 | new double[] {}, new long[] {}, new long[] {0, 0, 1, 2, 4, 5}, new long[] {});
410 | assertEquals(2, stats.getMeanQueueLatency(), 0);
411 | }
412 |
413 | @Test
414 | public void testGetTaskLatencyWithEmptyStats() {
415 | assertEquals(0, Stats.EMPTY.getTaskLatency(1), 0);
416 | }
417 |
418 | @Test
419 | public void testGetTaskLatencyWithNullStat() {
420 | Stats stats = new Stats(EnumSet.allOf(Stats.Metric.class), 0, new double[] {}, new double[] {}, new double[] {},
421 | new double[] {}, new long[] {}, new long[] {}, null);
422 | lerpBehaviorWithNullStat(stats::getTaskLatency);
423 | }
424 |
425 | @Test
426 | public void testGetTaskLatencyWithSingleStat() {
427 | Stats stats = new Stats(EnumSet.allOf(Stats.Metric.class), 0, new double[] {}, new double[] {}, new double[] {},
428 | new double[] {}, new long[] {}, new long[] {}, new long[] {5});
429 | lerpBehaviorWithSingleStat(stats::getTaskLatency);
430 | }
431 |
432 | @Test
433 | public void testGetTaskLatencyWithSomeStats() {
434 | Stats stats = new Stats(EnumSet.allOf(Stats.Metric.class), 0, new double[] {}, new double[] {}, new double[] {},
435 | new double[] {}, new long[] {}, new long[] {}, new long[] {0, 0, 1, 2, 4, 5});
436 | lerpBehaviorWithSomeStats(stats::getTaskLatency);
437 | }
438 |
439 | @Test
440 | public void testGetMeanTaskLatencyWithEmptyStats() {
441 | assertEquals(0, Stats.EMPTY.getMeanTaskLatency(), 0);
442 | }
443 |
444 | @Test
445 | public void testGetMeanTaskLatencyWithNullStat() {
446 | Stats stats = new Stats(EnumSet.allOf(Stats.Metric.class), 0, new double[] {}, new double[] {}, new double[] {},
447 | new double[] {}, new long[] {}, new long[] {}, null);
448 | assertEquals(0, stats.getMeanTaskLatency(), 0);
449 | }
450 |
451 | @Test
452 | public void testGetMeanTaskLatencyWithSingleStat() {
453 | Stats stats = new Stats(EnumSet.allOf(Stats.Metric.class), 0, new double[] {}, new double[] {}, new double[] {},
454 | new double[] {}, new long[] {}, new long[] {}, new long[] {5});
455 | assertEquals(5, stats.getMeanTaskLatency(), 0);
456 | }
457 |
458 | @Test
459 | public void testGetMeanTaskLatencyWithSomeStats() {
460 | Stats stats = new Stats(EnumSet.allOf(Stats.Metric.class), 0, new double[] {}, new double[] {}, new double[] {},
461 | new double[] {}, new long[] {}, new long[] {}, new long[] {0, 0, 1, 2, 4, 5});
462 | assertEquals(2, stats.getMeanTaskLatency(), 0);
463 | }
464 |
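// Shared lerp (linear interpolation) assertions for the quantile accessors exercised above.
// Each helper receives a quantile function such as stats::getTaskLatency and checks that:
//  - a null sample array yields 0 for every quantile, including out-of-range ones;
//  - a single sample is returned for every valid quantile, and out-of-range quantiles throw;
//  - multiple samples are linearly interpolated between neighbouring values.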
465 | private void lerpBehaviorWithNullStat(Function<Double, Double> f) {
466 | assertEquals(0, f.apply(1.0), 0);
467 | assertEquals(0, f.apply(0.75), 0);
468 | assertEquals(0, f.apply(0.5), 0);
469 | assertEquals(0, f.apply(0.25), 0);
470 | assertEquals(0, f.apply(0.0), 0);
471 | assertEquals(0, f.apply(-1.0), 0);
472 | assertEquals(0, f.apply(2.0), 0);
473 | }
474 |
475 | private void lerpBehaviorWithSingleStat(Function<Double, Double> f) {
476 | assertEquals(5, f.apply(1.0), 0);
477 | assertEquals(5, f.apply(0.75), 0);
478 | assertEquals(5, f.apply(0.5), 0);
479 | assertEquals(5, f.apply(0.25), 0);
480 | assertEquals(5, f.apply(0.0), 0);
481 | assertThrows(IllegalArgumentException.class, () -> f.apply(-1.0));
482 | assertThrows(IllegalArgumentException.class, () -> f.apply(2.0));
483 | }
484 |
485 | private void lerpBehaviorWithSomeStats(Function<Double, Double> f) {
486 | assertEquals(5, f.apply(1.0), 0);
487 | assertEquals(3.5, f.apply(0.75), 0);
488 | assertEquals(1.5, f.apply(0.5), 0);
489 | assertEquals(0.25, f.apply(0.25), 0);
490 | assertEquals(0, f.apply(0.0), 0);
491 | assertThrows(IllegalArgumentException.class, () -> f.apply(-1.0));
492 | assertThrows(IllegalArgumentException.class, () -> f.apply(2.0));
493 | }
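// Worked example for the {0, 0, 1, 2, 4, 5} sample used above, assuming the quantile q is
// mapped to index q * (length - 1) and linearly interpolated between the two nearest values
// (a sketch of the expected arithmetic, not the Stats implementation itself):
//   q = 0.75 -> index 3.75 -> between 2 and 4 -> 2 + 0.75 * (4 - 2) = 3.5
//   q = 0.50 -> index 2.50 -> between 1 and 2 -> 1 + 0.50 * (2 - 1) = 1.5
//   q = 0.25 -> index 1.25 -> between 0 and 1 -> 0 + 0.25 * (1 - 0) = 0.25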
494 |
495 | // An identity-based key: equals/hashCode are not overridden, so distinct instances never compare equal
496 | private static class Key {
497 | public final String value;
498 |
499 | Key(String value) {
500 | this.value = value;
501 | }
502 | }
503 | }
504 |
--------------------------------------------------------------------------------