├── .gitignore
├── .mvn
│   └── wrapper
│       ├── maven-wrapper.jar
│       └── maven-wrapper.properties
├── .travis.yml
├── LICENSE
├── README.md
├── mvnw
├── mvnw.cmd
├── pom.xml
└── src
    ├── main
    │   ├── java
    │   │   └── com
    │   │       └── spr
    │   │           ├── ReactorExampleApplication.java
    │   │           ├── controller
    │   │           │   └── BlogController.java
    │   │           ├── dao
    │   │           │   ├── BlogPost.java
    │   │           │   └── BlogRepository.java
    │   │           ├── model
    │   │           │   └── PostContent.java
    │   │           ├── service
    │   │           │   └── BlogService.java
    │   │           └── setup
    │   │               └── CassandraSetup.java
    │   └── resources
    │       ├── application.properties
    │       └── cassandra.yaml
    └── test
        └── java
            └── com
                └── spr
                    └── ReactorExampleApplicationTests.java
/.gitignore:
--------------------------------------------------------------------------------
1 | target/
2 | !.mvn/wrapper/maven-wrapper.jar
3 |
4 | ### STS ###
5 | .apt_generated
6 | .classpath
7 | .factorypath
8 | .project
9 | .settings
10 | .springBeans
11 |
12 | ### IntelliJ IDEA ###
13 | .idea
14 | *.iws
15 | *.iml
16 | *.ipr
17 |
18 | ### NetBeans ###
19 | nbproject/private/
20 | build/
21 | nbbuild/
22 | dist/
23 | nbdist/
24 | .nb-gradle/
--------------------------------------------------------------------------------
/.mvn/wrapper/maven-wrapper.jar:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jvz/reactor-spring5-example/a5a820cfc73aa4c220ba96376188c117272c56b1/.mvn/wrapper/maven-wrapper.jar
--------------------------------------------------------------------------------
/.mvn/wrapper/maven-wrapper.properties:
--------------------------------------------------------------------------------
1 | distributionUrl=https://repo1.maven.org/maven2/org/apache/maven/apache-maven/3.3.9/apache-maven-3.3.9-bin.zip
2 |
--------------------------------------------------------------------------------
/.travis.yml:
--------------------------------------------------------------------------------
1 | language: java
2 | jdk:
3 | - oraclejdk8
4 |
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | Apache License
2 | Version 2.0, January 2004
3 | http://www.apache.org/licenses/
4 |
5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
6 |
7 | 1. Definitions.
8 |
9 | "License" shall mean the terms and conditions for use, reproduction,
10 | and distribution as defined by Sections 1 through 9 of this document.
11 |
12 | "Licensor" shall mean the copyright owner or entity authorized by
13 | the copyright owner that is granting the License.
14 |
15 | "Legal Entity" shall mean the union of the acting entity and all
16 | other entities that control, are controlled by, or are under common
17 | control with that entity. For the purposes of this definition,
18 | "control" means (i) the power, direct or indirect, to cause the
19 | direction or management of such entity, whether by contract or
20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the
21 | outstanding shares, or (iii) beneficial ownership of such entity.
22 |
23 | "You" (or "Your") shall mean an individual or Legal Entity
24 | exercising permissions granted by this License.
25 |
26 | "Source" form shall mean the preferred form for making modifications,
27 | including but not limited to software source code, documentation
28 | source, and configuration files.
29 |
30 | "Object" form shall mean any form resulting from mechanical
31 | transformation or translation of a Source form, including but
32 | not limited to compiled object code, generated documentation,
33 | and conversions to other media types.
34 |
35 | "Work" shall mean the work of authorship, whether in Source or
36 | Object form, made available under the License, as indicated by a
37 | copyright notice that is included in or attached to the work
38 | (an example is provided in the Appendix below).
39 |
40 | "Derivative Works" shall mean any work, whether in Source or Object
41 | form, that is based on (or derived from) the Work and for which the
42 | editorial revisions, annotations, elaborations, or other modifications
43 | represent, as a whole, an original work of authorship. For the purposes
44 | of this License, Derivative Works shall not include works that remain
45 | separable from, or merely link (or bind by name) to the interfaces of,
46 | the Work and Derivative Works thereof.
47 |
48 | "Contribution" shall mean any work of authorship, including
49 | the original version of the Work and any modifications or additions
50 | to that Work or Derivative Works thereof, that is intentionally
51 | submitted to Licensor for inclusion in the Work by the copyright owner
52 | or by an individual or Legal Entity authorized to submit on behalf of
53 | the copyright owner. For the purposes of this definition, "submitted"
54 | means any form of electronic, verbal, or written communication sent
55 | to the Licensor or its representatives, including but not limited to
56 | communication on electronic mailing lists, source code control systems,
57 | and issue tracking systems that are managed by, or on behalf of, the
58 | Licensor for the purpose of discussing and improving the Work, but
59 | excluding communication that is conspicuously marked or otherwise
60 | designated in writing by the copyright owner as "Not a Contribution."
61 |
62 | "Contributor" shall mean Licensor and any individual or Legal Entity
63 | on behalf of whom a Contribution has been received by Licensor and
64 | subsequently incorporated within the Work.
65 |
66 | 2. Grant of Copyright License. Subject to the terms and conditions of
67 | this License, each Contributor hereby grants to You a perpetual,
68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable
69 | copyright license to reproduce, prepare Derivative Works of,
70 | publicly display, publicly perform, sublicense, and distribute the
71 | Work and such Derivative Works in Source or Object form.
72 |
73 | 3. Grant of Patent License. Subject to the terms and conditions of
74 | this License, each Contributor hereby grants to You a perpetual,
75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable
76 | (except as stated in this section) patent license to make, have made,
77 | use, offer to sell, sell, import, and otherwise transfer the Work,
78 | where such license applies only to those patent claims licensable
79 | by such Contributor that are necessarily infringed by their
80 | Contribution(s) alone or by combination of their Contribution(s)
81 | with the Work to which such Contribution(s) was submitted. If You
82 | institute patent litigation against any entity (including a
83 | cross-claim or counterclaim in a lawsuit) alleging that the Work
84 | or a Contribution incorporated within the Work constitutes direct
85 | or contributory patent infringement, then any patent licenses
86 | granted to You under this License for that Work shall terminate
87 | as of the date such litigation is filed.
88 |
89 | 4. Redistribution. You may reproduce and distribute copies of the
90 | Work or Derivative Works thereof in any medium, with or without
91 | modifications, and in Source or Object form, provided that You
92 | meet the following conditions:
93 |
94 | (a) You must give any other recipients of the Work or
95 | Derivative Works a copy of this License; and
96 |
97 | (b) You must cause any modified files to carry prominent notices
98 | stating that You changed the files; and
99 |
100 | (c) You must retain, in the Source form of any Derivative Works
101 | that You distribute, all copyright, patent, trademark, and
102 | attribution notices from the Source form of the Work,
103 | excluding those notices that do not pertain to any part of
104 | the Derivative Works; and
105 |
106 | (d) If the Work includes a "NOTICE" text file as part of its
107 | distribution, then any Derivative Works that You distribute must
108 | include a readable copy of the attribution notices contained
109 | within such NOTICE file, excluding those notices that do not
110 | pertain to any part of the Derivative Works, in at least one
111 | of the following places: within a NOTICE text file distributed
112 | as part of the Derivative Works; within the Source form or
113 | documentation, if provided along with the Derivative Works; or,
114 | within a display generated by the Derivative Works, if and
115 | wherever such third-party notices normally appear. The contents
116 | of the NOTICE file are for informational purposes only and
117 | do not modify the License. You may add Your own attribution
118 | notices within Derivative Works that You distribute, alongside
119 | or as an addendum to the NOTICE text from the Work, provided
120 | that such additional attribution notices cannot be construed
121 | as modifying the License.
122 |
123 | You may add Your own copyright statement to Your modifications and
124 | may provide additional or different license terms and conditions
125 | for use, reproduction, or distribution of Your modifications, or
126 | for any such Derivative Works as a whole, provided Your use,
127 | reproduction, and distribution of the Work otherwise complies with
128 | the conditions stated in this License.
129 |
130 | 5. Submission of Contributions. Unless You explicitly state otherwise,
131 | any Contribution intentionally submitted for inclusion in the Work
132 | by You to the Licensor shall be under the terms and conditions of
133 | this License, without any additional terms or conditions.
134 | Notwithstanding the above, nothing herein shall supersede or modify
135 | the terms of any separate license agreement you may have executed
136 | with Licensor regarding such Contributions.
137 |
138 | 6. Trademarks. This License does not grant permission to use the trade
139 | names, trademarks, service marks, or product names of the Licensor,
140 | except as required for reasonable and customary use in describing the
141 | origin of the Work and reproducing the content of the NOTICE file.
142 |
143 | 7. Disclaimer of Warranty. Unless required by applicable law or
144 | agreed to in writing, Licensor provides the Work (and each
145 | Contributor provides its Contributions) on an "AS IS" BASIS,
146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
147 | implied, including, without limitation, any warranties or conditions
148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
149 | PARTICULAR PURPOSE. You are solely responsible for determining the
150 | appropriateness of using or redistributing the Work and assume any
151 | risks associated with Your exercise of permissions under this License.
152 |
153 | 8. Limitation of Liability. In no event and under no legal theory,
154 | whether in tort (including negligence), contract, or otherwise,
155 | unless required by applicable law (such as deliberate and grossly
156 | negligent acts) or agreed to in writing, shall any Contributor be
157 | liable to You for damages, including any direct, indirect, special,
158 | incidental, or consequential damages of any character arising as a
159 | result of this License or out of the use or inability to use the
160 | Work (including but not limited to damages for loss of goodwill,
161 | work stoppage, computer failure or malfunction, or any and all
162 | other commercial damages or losses), even if such Contributor
163 | has been advised of the possibility of such damages.
164 |
165 | 9. Accepting Warranty or Additional Liability. While redistributing
166 | the Work or Derivative Works thereof, You may choose to offer,
167 | and charge a fee for, acceptance of support, warranty, indemnity,
168 | or other liability obligations and/or rights consistent with this
169 | License. However, in accepting such obligations, You may act only
170 | on Your own behalf and on Your sole responsibility, not on behalf
171 | of any other Contributor, and only if You agree to indemnify,
172 | defend, and hold each Contributor harmless for any liability
173 | incurred by, or claims asserted against, such Contributor by reason
174 | of your accepting any such warranty or additional liability.
175 |
176 | END OF TERMS AND CONDITIONS
177 |
178 | APPENDIX: How to apply the Apache License to your work.
179 |
180 | To apply the Apache License to your work, attach the following
181 | boilerplate notice, with the fields enclosed by brackets "{}"
182 | replaced with your own identifying information. (Don't include
183 | the brackets!) The text should be enclosed in the appropriate
184 | comment syntax for the file format. We also recommend that a
185 | file or class name and description of purpose be included on the
186 | same "printed page" as the copyright notice for easier
187 | identification within third-party archives.
188 |
189 | Copyright {yyyy} {name of copyright owner}
190 |
191 | Licensed under the Apache License, Version 2.0 (the "License");
192 | you may not use this file except in compliance with the License.
193 | You may obtain a copy of the License at
194 |
195 | http://www.apache.org/licenses/LICENSE-2.0
196 |
197 | Unless required by applicable law or agreed to in writing, software
198 | distributed under the License is distributed on an "AS IS" BASIS,
199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
200 | See the License for the specific language governing permissions and
201 | limitations under the License.
202 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | This repository contains sample code implementing a simple blog microservice
2 | using [Spring 5][spring], [Reactor][reactor], and [Cassandra][cassandra].
3 | As of this writing, it uses Spring Boot 2.0.0-SNAPSHOT, since Spring 5 and
4 | Spring Boot 2 have not been released yet.
5 |
6 | The code is explained in [my blog post about Reactor][blog].
7 |
8 | To run the project, execute:
9 |
10 |     mvn spring-boot:run
11 |
12 | (or use the bundled Maven wrapper, `./mvnw spring-boot:run`). You can also run the standard
13 |
14 |     mvn package
15 |
16 | and then execute the resulting jar file as usual for a Spring Boot app.
17 |
18 | [spring]: http://projects.spring.io/spring-framework/
19 | [reactor]: https://projectreactor.io/
20 | [cassandra]: https://cassandra.apache.org/
21 | [blog]: http://musigma.org/java/2016/11/21/reactor.html
22 |
--------------------------------------------------------------------------------
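Once the app is up, the endpoints defined in BlogController can be smoke-tested with plain JDK classes. A minimal sketch, assuming the default Spring Boot port 8080 and hypothetical field values (the JSON shape mirrors PostContent, and the POST endpoint is assumed to return the new post's UUID as a quoted JSON string):

    import java.io.InputStream;
    import java.io.OutputStream;
    import java.net.HttpURLConnection;
    import java.net.URL;
    import java.nio.charset.StandardCharsets;
    import java.util.Scanner;

    public class BlogSmokeTest {
        public static void main(String[] args) throws Exception {
            // POST /api/blog creates a post; fields mirror com.spr.model.PostContent.
            HttpURLConnection post = (HttpURLConnection)
                    new URL("http://localhost:8080/api/blog").openConnection();
            post.setRequestMethod("POST");
            post.setRequestProperty("Content-Type", "application/json");
            post.setDoOutput(true);
            String json = "{\"title\":\"Hello\",\"author\":\"Matt\",\"body\":\"First post\"}";
            try (OutputStream out = post.getOutputStream()) {
                out.write(json.getBytes(StandardCharsets.UTF_8));
            }
            // Assumed response body: the generated UUID as a quoted JSON string.
            String id = read(post.getInputStream()).replace("\"", "");
            System.out.println("created post " + id);

            // GET /api/blog/{id} reads the post back as JSON.
            HttpURLConnection get = (HttpURLConnection)
                    new URL("http://localhost:8080/api/blog/" + id).openConnection();
            System.out.println(read(get.getInputStream()));
        }

        private static String read(InputStream in) {
            try (Scanner scanner = new Scanner(in, StandardCharsets.UTF_8.name()).useDelimiter("\\A")) {
                return scanner.hasNext() ? scanner.next() : "";
            }
        }
    }
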
/mvnw:
--------------------------------------------------------------------------------
1 | #!/bin/sh
2 | # ----------------------------------------------------------------------------
3 | # Licensed to the Apache Software Foundation (ASF) under one
4 | # or more contributor license agreements. See the NOTICE file
5 | # distributed with this work for additional information
6 | # regarding copyright ownership. The ASF licenses this file
7 | # to you under the Apache License, Version 2.0 (the
8 | # "License"); you may not use this file except in compliance
9 | # with the License. You may obtain a copy of the License at
10 | #
11 | # http://www.apache.org/licenses/LICENSE-2.0
12 | #
13 | # Unless required by applicable law or agreed to in writing,
14 | # software distributed under the License is distributed on an
15 | # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
16 | # KIND, either express or implied. See the License for the
17 | # specific language governing permissions and limitations
18 | # under the License.
19 | # ----------------------------------------------------------------------------
20 |
21 | # ----------------------------------------------------------------------------
22 | # Maven2 Start Up Batch script
23 | #
24 | # Required ENV vars:
25 | # ------------------
26 | # JAVA_HOME - location of a JDK home dir
27 | #
28 | # Optional ENV vars
29 | # -----------------
30 | # M2_HOME - location of maven2's installed home dir
31 | # MAVEN_OPTS - parameters passed to the Java VM when running Maven
32 | # e.g. to debug Maven itself, use
33 | # set MAVEN_OPTS=-Xdebug -Xrunjdwp:transport=dt_socket,server=y,suspend=y,address=8000
34 | # MAVEN_SKIP_RC - flag to disable loading of mavenrc files
35 | # ----------------------------------------------------------------------------
36 |
37 | if [ -z "$MAVEN_SKIP_RC" ] ; then
38 |
39 | if [ -f /etc/mavenrc ] ; then
40 | . /etc/mavenrc
41 | fi
42 |
43 | if [ -f "$HOME/.mavenrc" ] ; then
44 | . "$HOME/.mavenrc"
45 | fi
46 |
47 | fi
48 |
49 | # OS specific support. $var _must_ be set to either true or false.
50 | cygwin=false;
51 | darwin=false;
52 | mingw=false
53 | case "`uname`" in
54 | CYGWIN*) cygwin=true ;;
55 | MINGW*) mingw=true;;
56 | Darwin*) darwin=true
57 | #
58 | # Look for the Apple JDKs first to preserve the existing behaviour, and then look
59 | # for the new JDKs provided by Oracle.
60 | #
61 | if [ -z "$JAVA_HOME" ] && [ -L /System/Library/Frameworks/JavaVM.framework/Versions/CurrentJDK ] ; then
62 | #
63 | # Apple JDKs
64 | #
65 | export JAVA_HOME=/System/Library/Frameworks/JavaVM.framework/Versions/CurrentJDK/Home
66 | fi
67 |
68 | if [ -z "$JAVA_HOME" ] && [ -L /System/Library/Java/JavaVirtualMachines/CurrentJDK ] ; then
69 | #
70 | # Apple JDKs
71 | #
72 | export JAVA_HOME=/System/Library/Java/JavaVirtualMachines/CurrentJDK/Contents/Home
73 | fi
74 |
75 | if [ -z "$JAVA_HOME" ] && [ -L "/Library/Java/JavaVirtualMachines/CurrentJDK" ] ; then
76 | #
77 | # Oracle JDKs
78 | #
79 | export JAVA_HOME=/Library/Java/JavaVirtualMachines/CurrentJDK/Contents/Home
80 | fi
81 |
82 | if [ -z "$JAVA_HOME" ] && [ -x "/usr/libexec/java_home" ]; then
83 | #
84 | # Apple JDKs
85 | #
86 | export JAVA_HOME=`/usr/libexec/java_home`
87 | fi
88 | ;;
89 | esac
90 |
91 | if [ -z "$JAVA_HOME" ] ; then
92 | if [ -r /etc/gentoo-release ] ; then
93 | JAVA_HOME=`java-config --jre-home`
94 | fi
95 | fi
96 |
97 | if [ -z "$M2_HOME" ] ; then
98 | ## resolve links - $0 may be a link to maven's home
99 | PRG="$0"
100 |
101 | # need this for relative symlinks
102 | while [ -h "$PRG" ] ; do
103 | ls=`ls -ld "$PRG"`
104 | link=`expr "$ls" : '.*-> \(.*\)$'`
105 | if expr "$link" : '/.*' > /dev/null; then
106 | PRG="$link"
107 | else
108 | PRG="`dirname "$PRG"`/$link"
109 | fi
110 | done
111 |
112 | saveddir=`pwd`
113 |
114 | M2_HOME=`dirname "$PRG"`/..
115 |
116 | # make it fully qualified
117 | M2_HOME=`cd "$M2_HOME" && pwd`
118 |
119 | cd "$saveddir"
120 | # echo Using m2 at $M2_HOME
121 | fi
122 |
123 | # For Cygwin, ensure paths are in UNIX format before anything is touched
124 | if $cygwin ; then
125 | [ -n "$M2_HOME" ] &&
126 | M2_HOME=`cygpath --unix "$M2_HOME"`
127 | [ -n "$JAVA_HOME" ] &&
128 | JAVA_HOME=`cygpath --unix "$JAVA_HOME"`
129 | [ -n "$CLASSPATH" ] &&
130 | CLASSPATH=`cygpath --path --unix "$CLASSPATH"`
131 | fi
132 |
133 | # For MinGW, ensure paths are in UNIX format before anything is touched
134 | if $mingw ; then
135 | [ -n "$M2_HOME" ] &&
136 | M2_HOME="`(cd "$M2_HOME"; pwd)`"
137 | [ -n "$JAVA_HOME" ] &&
138 | JAVA_HOME="`(cd "$JAVA_HOME"; pwd)`"
139 | # TODO classpath?
140 | fi
141 |
142 | if [ -z "$JAVA_HOME" ]; then
143 | javaExecutable="`which javac`"
144 | if [ -n "$javaExecutable" ] && ! [ "`expr \"$javaExecutable\" : '\([^ ]*\)'`" = "no" ]; then
145 | # readlink(1) is not available as standard on Solaris 10.
146 | readLink=`which readlink`
147 | if [ ! `expr "$readLink" : '\([^ ]*\)'` = "no" ]; then
148 | if $darwin ; then
149 | javaHome="`dirname \"$javaExecutable\"`"
150 | javaExecutable="`cd \"$javaHome\" && pwd -P`/javac"
151 | else
152 | javaExecutable="`readlink -f \"$javaExecutable\"`"
153 | fi
154 | javaHome="`dirname \"$javaExecutable\"`"
155 | javaHome=`expr "$javaHome" : '\(.*\)/bin'`
156 | JAVA_HOME="$javaHome"
157 | export JAVA_HOME
158 | fi
159 | fi
160 | fi
161 |
162 | if [ -z "$JAVACMD" ] ; then
163 | if [ -n "$JAVA_HOME" ] ; then
164 | if [ -x "$JAVA_HOME/jre/sh/java" ] ; then
165 | # IBM's JDK on AIX uses strange locations for the executables
166 | JAVACMD="$JAVA_HOME/jre/sh/java"
167 | else
168 | JAVACMD="$JAVA_HOME/bin/java"
169 | fi
170 | else
171 | JAVACMD="`which java`"
172 | fi
173 | fi
174 |
175 | if [ ! -x "$JAVACMD" ] ; then
176 | echo "Error: JAVA_HOME is not defined correctly." >&2
177 | echo " We cannot execute $JAVACMD" >&2
178 | exit 1
179 | fi
180 |
181 | if [ -z "$JAVA_HOME" ] ; then
182 | echo "Warning: JAVA_HOME environment variable is not set."
183 | fi
184 |
185 | CLASSWORLDS_LAUNCHER=org.codehaus.plexus.classworlds.launcher.Launcher
186 |
187 | # For Cygwin, switch paths to Windows format before running java
188 | if $cygwin; then
189 | [ -n "$M2_HOME" ] &&
190 | M2_HOME=`cygpath --path --windows "$M2_HOME"`
191 | [ -n "$JAVA_HOME" ] &&
192 | JAVA_HOME=`cygpath --path --windows "$JAVA_HOME"`
193 | [ -n "$CLASSPATH" ] &&
194 | CLASSPATH=`cygpath --path --windows "$CLASSPATH"`
195 | fi
196 |
197 | # traverses directory structure from process work directory to filesystem root
198 | # first directory with .mvn subdirectory is considered project base directory
199 | find_maven_basedir() {
200 | local basedir=$(pwd)
201 | local wdir=$(pwd)
202 | while [ "$wdir" != '/' ] ; do
203 | if [ -d "$wdir"/.mvn ] ; then
204 | basedir=$wdir
205 | break
206 | fi
207 | wdir=$(cd "$wdir/.."; pwd)
208 | done
209 | echo "${basedir}"
210 | }
211 |
212 | # concatenates all lines of a file
213 | concat_lines() {
214 | if [ -f "$1" ]; then
215 | echo "$(tr -s '\n' ' ' < "$1")"
216 | fi
217 | }
218 |
219 | export MAVEN_PROJECTBASEDIR=${MAVEN_BASEDIR:-$(find_maven_basedir)}
220 | MAVEN_OPTS="$(concat_lines "$MAVEN_PROJECTBASEDIR/.mvn/jvm.config") $MAVEN_OPTS"
221 |
222 | # Provide a "standardized" way to retrieve the CLI args that will
223 | # work with both Windows and non-Windows executions.
224 | MAVEN_CMD_LINE_ARGS="$MAVEN_CONFIG $@"
225 | export MAVEN_CMD_LINE_ARGS
226 |
227 | WRAPPER_LAUNCHER=org.apache.maven.wrapper.MavenWrapperMain
228 |
229 | exec "$JAVACMD" \
230 | $MAVEN_OPTS \
231 | -classpath "$MAVEN_PROJECTBASEDIR/.mvn/wrapper/maven-wrapper.jar" \
232 | "-Dmaven.home=${M2_HOME}" "-Dmaven.multiModuleProjectDirectory=${MAVEN_PROJECTBASEDIR}" \
233 | ${WRAPPER_LAUNCHER} "$@"
234 |
--------------------------------------------------------------------------------
/mvnw.cmd:
--------------------------------------------------------------------------------
1 | @REM ----------------------------------------------------------------------------
2 | @REM Licensed to the Apache Software Foundation (ASF) under one
3 | @REM or more contributor license agreements. See the NOTICE file
4 | @REM distributed with this work for additional information
5 | @REM regarding copyright ownership. The ASF licenses this file
6 | @REM to you under the Apache License, Version 2.0 (the
7 | @REM "License"); you may not use this file except in compliance
8 | @REM with the License. You may obtain a copy of the License at
9 | @REM
10 | @REM http://www.apache.org/licenses/LICENSE-2.0
11 | @REM
12 | @REM Unless required by applicable law or agreed to in writing,
13 | @REM software distributed under the License is distributed on an
14 | @REM "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
15 | @REM KIND, either express or implied. See the License for the
16 | @REM specific language governing permissions and limitations
17 | @REM under the License.
18 | @REM ----------------------------------------------------------------------------
19 |
20 | @REM ----------------------------------------------------------------------------
21 | @REM Maven2 Start Up Batch script
22 | @REM
23 | @REM Required ENV vars:
24 | @REM JAVA_HOME - location of a JDK home dir
25 | @REM
26 | @REM Optional ENV vars
27 | @REM M2_HOME - location of maven2's installed home dir
28 | @REM MAVEN_BATCH_ECHO - set to 'on' to enable the echoing of the batch commands
29 | @REM MAVEN_BATCH_PAUSE - set to 'on' to wait for a key stroke before ending
30 | @REM MAVEN_OPTS - parameters passed to the Java VM when running Maven
31 | @REM e.g. to debug Maven itself, use
32 | @REM set MAVEN_OPTS=-Xdebug -Xrunjdwp:transport=dt_socket,server=y,suspend=y,address=8000
33 | @REM MAVEN_SKIP_RC - flag to disable loading of mavenrc files
34 | @REM ----------------------------------------------------------------------------
35 |
36 | @REM Begin all REM lines with '@' in case MAVEN_BATCH_ECHO is 'on'
37 | @echo off
38 | @REM enable echoing by setting MAVEN_BATCH_ECHO to 'on'
39 | @if "%MAVEN_BATCH_ECHO%" == "on" echo %MAVEN_BATCH_ECHO%
40 |
41 | @REM set %HOME% to equivalent of $HOME
42 | if "%HOME%" == "" (set "HOME=%HOMEDRIVE%%HOMEPATH%")
43 |
44 | @REM Execute a user defined script before this one
45 | if not "%MAVEN_SKIP_RC%" == "" goto skipRcPre
46 | @REM check for pre script, once with legacy .bat ending and once with .cmd ending
47 | if exist "%HOME%\mavenrc_pre.bat" call "%HOME%\mavenrc_pre.bat"
48 | if exist "%HOME%\mavenrc_pre.cmd" call "%HOME%\mavenrc_pre.cmd"
49 | :skipRcPre
50 |
51 | @setlocal
52 |
53 | set ERROR_CODE=0
54 |
55 | @REM To isolate internal variables from possible post scripts, we use another setlocal
56 | @setlocal
57 |
58 | @REM ==== START VALIDATION ====
59 | if not "%JAVA_HOME%" == "" goto OkJHome
60 |
61 | echo.
62 | echo Error: JAVA_HOME not found in your environment. >&2
63 | echo Please set the JAVA_HOME variable in your environment to match the >&2
64 | echo location of your Java installation. >&2
65 | echo.
66 | goto error
67 |
68 | :OkJHome
69 | if exist "%JAVA_HOME%\bin\java.exe" goto init
70 |
71 | echo.
72 | echo Error: JAVA_HOME is set to an invalid directory. >&2
73 | echo JAVA_HOME = "%JAVA_HOME%" >&2
74 | echo Please set the JAVA_HOME variable in your environment to match the >&2
75 | echo location of your Java installation. >&2
76 | echo.
77 | goto error
78 |
79 | @REM ==== END VALIDATION ====
80 |
81 | :init
82 |
83 | set MAVEN_CMD_LINE_ARGS=%*
84 |
85 | @REM Find the project base dir, i.e. the directory that contains the folder ".mvn".
86 | @REM Fallback to current working directory if not found.
87 |
88 | set MAVEN_PROJECTBASEDIR=%MAVEN_BASEDIR%
89 | IF NOT "%MAVEN_PROJECTBASEDIR%"=="" goto endDetectBaseDir
90 |
91 | set EXEC_DIR=%CD%
92 | set WDIR=%EXEC_DIR%
93 | :findBaseDir
94 | IF EXIST "%WDIR%"\.mvn goto baseDirFound
95 | cd ..
96 | IF "%WDIR%"=="%CD%" goto baseDirNotFound
97 | set WDIR=%CD%
98 | goto findBaseDir
99 |
100 | :baseDirFound
101 | set MAVEN_PROJECTBASEDIR=%WDIR%
102 | cd "%EXEC_DIR%"
103 | goto endDetectBaseDir
104 |
105 | :baseDirNotFound
106 | set MAVEN_PROJECTBASEDIR=%EXEC_DIR%
107 | cd "%EXEC_DIR%"
108 |
109 | :endDetectBaseDir
110 |
111 | IF NOT EXIST "%MAVEN_PROJECTBASEDIR%\.mvn\jvm.config" goto endReadAdditionalConfig
112 |
113 | @setlocal EnableExtensions EnableDelayedExpansion
114 | for /F "usebackq delims=" %%a in ("%MAVEN_PROJECTBASEDIR%\.mvn\jvm.config") do set JVM_CONFIG_MAVEN_PROPS=!JVM_CONFIG_MAVEN_PROPS! %%a
115 | @endlocal & set JVM_CONFIG_MAVEN_PROPS=%JVM_CONFIG_MAVEN_PROPS%
116 |
117 | :endReadAdditionalConfig
118 |
119 | SET MAVEN_JAVA_EXE="%JAVA_HOME%\bin\java.exe"
120 |
121 | set WRAPPER_JAR="".\.mvn\wrapper\maven-wrapper.jar""
122 | set WRAPPER_LAUNCHER=org.apache.maven.wrapper.MavenWrapperMain
123 |
124 | %MAVEN_JAVA_EXE% %JVM_CONFIG_MAVEN_PROPS% %MAVEN_OPTS% %MAVEN_DEBUG_OPTS% -classpath %WRAPPER_JAR% "-Dmaven.multiModuleProjectDirectory=%MAVEN_PROJECTBASEDIR%" %WRAPPER_LAUNCHER% %MAVEN_CMD_LINE_ARGS%
125 | if ERRORLEVEL 1 goto error
126 | goto end
127 |
128 | :error
129 | set ERROR_CODE=1
130 |
131 | :end
132 | @endlocal & set ERROR_CODE=%ERROR_CODE%
133 |
134 | if not "%MAVEN_SKIP_RC%" == "" goto skipRcPost
135 | @REM check for post script, once with legacy .bat ending and once with .cmd ending
136 | if exist "%HOME%\mavenrc_post.bat" call "%HOME%\mavenrc_post.bat"
137 | if exist "%HOME%\mavenrc_post.cmd" call "%HOME%\mavenrc_post.cmd"
138 | :skipRcPost
139 |
140 | @REM pause the script if MAVEN_BATCH_PAUSE is set to 'on'
141 | if "%MAVEN_BATCH_PAUSE%" == "on" pause
142 |
143 | if "%MAVEN_TERMINATE_CMD%" == "on" exit %ERROR_CODE%
144 |
145 | exit /B %ERROR_CODE%
--------------------------------------------------------------------------------
/pom.xml:
--------------------------------------------------------------------------------
1 | <?xml version="1.0" encoding="UTF-8"?>
2 | <project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
3 |          xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
4 |     <modelVersion>4.0.0</modelVersion>
5 |
6 |     <groupId>com.spr</groupId>
7 |     <artifactId>reactor-example</artifactId>
8 |     <version>0.0.1-SNAPSHOT</version>
9 |     <packaging>jar</packaging>
10 |
11 |     <name>reactor-example</name>
12 |     <description>Demo project for Spring Boot</description>
13 |
14 |     <parent>
15 |         <groupId>org.springframework.boot</groupId>
16 |         <artifactId>spring-boot-starter-parent</artifactId>
17 |         <version>2.0.0.BUILD-SNAPSHOT</version>
18 |         <relativePath/> <!-- lookup parent from repository -->
19 |     </parent>
20 |
21 |     <properties>
22 |         <project.build.sourceEncoding>UTF-8</project.build.sourceEncoding>
23 |         <project.reporting.outputEncoding>UTF-8</project.reporting.outputEncoding>
24 |         <java.version>1.8</java.version>
25 |     </properties>
26 |
27 |     <dependencies>
28 |         <dependency>
29 |             <groupId>org.springframework.boot.experimental</groupId>
30 |             <artifactId>spring-boot-starter-web-reactive</artifactId>
31 |         </dependency>
32 |         <dependency>
33 |             <groupId>org.springframework.boot</groupId>
34 |             <artifactId>spring-boot-starter-data-cassandra</artifactId>
35 |         </dependency>
36 |         <dependency>
37 |             <groupId>org.springframework.boot</groupId>
38 |             <artifactId>spring-boot-starter-test</artifactId>
39 |             <scope>test</scope>
40 |         </dependency>
41 |         <dependency>
42 |             <groupId>org.projectlombok</groupId>
43 |             <artifactId>lombok</artifactId>
44 |         </dependency>
45 |         <dependency>
46 |             <groupId>io.projectreactor.ipc</groupId>
47 |             <artifactId>reactor-netty</artifactId>
48 |         </dependency>
49 |         <dependency>
50 |             <groupId>org.apache.cassandra</groupId>
51 |             <artifactId>cassandra-all</artifactId>
52 |             <version>3.9</version>
53 |         </dependency>
54 |     </dependencies>
55 |
56 |     <dependencyManagement>
57 |         <dependencies>
58 |             <dependency>
59 |                 <groupId>org.springframework.boot.experimental</groupId>
60 |                 <artifactId>spring-boot-dependencies-web-reactive</artifactId>
61 |                 <version>0.1.0.BUILD-SNAPSHOT</version>
62 |                 <type>pom</type>
63 |                 <scope>import</scope>
64 |             </dependency>
65 |         </dependencies>
66 |     </dependencyManagement>
67 |
68 |     <build>
69 |         <plugins>
70 |             <plugin>
71 |                 <groupId>org.springframework.boot</groupId>
72 |                 <artifactId>spring-boot-maven-plugin</artifactId>
73 |             </plugin>
74 |         </plugins>
75 |     </build>
76 |
77 |     <repositories>
78 |         <repository>
79 |             <id>spring-snapshots</id>
80 |             <name>Spring Snapshots</name>
81 |             <url>https://repo.spring.io/snapshot</url>
82 |             <snapshots>
83 |                 <enabled>true</enabled>
84 |             </snapshots>
85 |         </repository>
86 |         <repository>
87 |             <id>spring-milestones</id>
88 |             <name>Spring Milestones</name>
89 |             <url>https://repo.spring.io/milestone</url>
90 |             <snapshots>
91 |                 <enabled>false</enabled>
92 |             </snapshots>
93 |         </repository>
94 |     </repositories>
95 |
96 |     <pluginRepositories>
97 |         <pluginRepository>
98 |             <id>spring-snapshots</id>
99 |             <name>Spring Snapshots</name>
100 |             <url>https://repo.spring.io/snapshot</url>
101 |             <snapshots>
102 |                 <enabled>true</enabled>
103 |             </snapshots>
104 |         </pluginRepository>
105 |         <pluginRepository>
106 |             <id>spring-milestones</id>
107 |             <name>Spring Milestones</name>
108 |             <url>https://repo.spring.io/milestone</url>
109 |             <snapshots>
110 |                 <enabled>false</enabled>
111 |             </snapshots>
112 |         </pluginRepository>
113 |     </pluginRepositories>
114 |
115 | </project>
--------------------------------------------------------------------------------
/src/main/java/com/spr/ReactorExampleApplication.java:
--------------------------------------------------------------------------------
1 | package com.spr;
2 |
3 | import java.io.IOException;
4 |
5 | import com.spr.setup.CassandraSetup;
6 | import org.springframework.boot.SpringApplication;
7 | import org.springframework.boot.autoconfigure.SpringBootApplication;
8 |
9 | @SpringBootApplication
10 | public class ReactorExampleApplication {
11 |
12 | public static void main(String[] args) throws IOException {
13 | CassandraSetup.init();
14 | SpringApplication.run(ReactorExampleApplication.class, args);
15 | }
16 |
17 | }
18 |
--------------------------------------------------------------------------------
/src/main/java/com/spr/controller/BlogController.java:
--------------------------------------------------------------------------------
1 | package com.spr.controller;
2 |
3 | import java.util.UUID;
4 |
5 | import com.spr.model.PostContent;
6 | import com.spr.service.BlogService;
7 | import lombok.AllArgsConstructor;
8 | import org.springframework.beans.factory.annotation.Autowired;
9 | import org.springframework.http.MediaType;
10 | import org.springframework.web.bind.annotation.CrossOrigin;
11 | import org.springframework.web.bind.annotation.GetMapping;
12 | import org.springframework.web.bind.annotation.PathVariable;
13 | import org.springframework.web.bind.annotation.PostMapping;
14 | import org.springframework.web.bind.annotation.PutMapping;
15 | import org.springframework.web.bind.annotation.RequestBody;
16 | import org.springframework.web.bind.annotation.RequestMapping;
17 | import org.springframework.web.bind.annotation.RestController;
18 | import reactor.core.publisher.Mono;
19 |
20 | /**
21 | * REST controller for the blog microservice.
22 | *
23 | * @author Matt Sicker
24 | */
25 | @RestController
26 | @CrossOrigin("http://localhost:8000")
27 | @RequestMapping(produces = MediaType.APPLICATION_JSON_VALUE)
28 | @AllArgsConstructor(onConstructor = @__(@Autowired))
29 | public class BlogController {
30 |
31 | private final BlogService service;
32 |
33 | @GetMapping("/api/blog/{id}")
34 | public Mono<PostContent> getPost(@PathVariable final UUID id) {
35 | return service.getPost(id);
36 | }
37 |
38 | @PostMapping("/api/blog")
39 | public Mono<UUID> addPost(@RequestBody Mono<PostContent> content) {
40 | return service.addPost(content);
41 | }
42 |
43 | @PutMapping("/api/blog/{id}")
44 | public Mono<Void> updatePost(@PathVariable final UUID id, @RequestBody final Mono<PostContent> content) {
45 | return service.updatePost(id, content);
46 | }
47 | }
48 |
--------------------------------------------------------------------------------
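A note on the Lombok incantation used above: @AllArgsConstructor(onConstructor = @__(@Autowired)) asks Lombok to copy @Autowired onto the generated constructor (the odd @__ syntax is Lombok's javac7-compatible escape). Written out by hand, the controller is roughly equivalent to this sketch (handler methods elided):

    import com.spr.service.BlogService;
    import org.springframework.beans.factory.annotation.Autowired;
    import org.springframework.web.bind.annotation.RestController;

    @RestController
    public class BlogController {

        private final BlogService service;

        @Autowired // placed on the constructor by onConstructor = @__(@Autowired)
        public BlogController(final BlogService service) {
            this.service = service;
        }

        // ... handler methods as above
    }
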
/src/main/java/com/spr/dao/BlogPost.java:
--------------------------------------------------------------------------------
1 | package com.spr.dao;
2 |
3 | import java.util.UUID;
4 |
5 | import com.fasterxml.jackson.annotation.JsonCreator;
6 | import lombok.AllArgsConstructor;
7 | import lombok.NonNull;
8 | import lombok.Value;
9 | import org.springframework.data.cassandra.mapping.PrimaryKey;
10 | import org.springframework.data.cassandra.mapping.Table;
11 |
12 | /**
13 | * A blog post entity for storage and retrieval in Cassandra.
14 | *
15 | * @author Matt Sicker
16 | */
17 | @Value
18 | @AllArgsConstructor(onConstructor = @__(@JsonCreator))
19 | @Table
20 | public class BlogPost {
21 | @PrimaryKey
22 | @NonNull
23 | UUID id;
24 | @NonNull
25 | String title;
26 | @NonNull
27 | String author;
28 | @NonNull
29 | String body;
30 | }
31 |
--------------------------------------------------------------------------------
/src/main/java/com/spr/dao/BlogRepository.java:
--------------------------------------------------------------------------------
1 | package com.spr.dao;
2 |
3 | import java.util.UUID;
4 |
5 | import org.springframework.data.repository.CrudRepository;
6 |
7 | /**
8 | * Simple Spring Data repository for blog posts.
9 | *
10 | * @author Matt Sicker
11 | */
12 | public interface BlogRepository extends CrudRepository<BlogPost, UUID> {
13 | }
14 |
--------------------------------------------------------------------------------
/src/main/java/com/spr/model/PostContent.java:
--------------------------------------------------------------------------------
1 | package com.spr.model;
2 |
3 | import com.fasterxml.jackson.annotation.JsonCreator;
4 | import lombok.AllArgsConstructor;
5 | import lombok.Builder;
6 | import lombok.NonNull;
7 | import lombok.Value;
8 | import lombok.experimental.Wither;
9 |
10 | /**
11 | * REST API data format for blog posts.
12 | *
13 | * @author Matt Sicker
14 | */
15 | @Value
16 | @Builder
17 | @Wither
18 | @AllArgsConstructor(onConstructor = @__(@JsonCreator))
19 | public class PostContent {
20 | @NonNull
21 | String title;
22 | @NonNull
23 | String author;
24 | @NonNull
25 | String body;
26 | }
27 |
--------------------------------------------------------------------------------
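Since PostContent combines Lombok's @Value, @Builder, and @Wither, instances are immutable: the generated builder constructs them, and each generated withX method returns a modified copy rather than mutating in place. A usage sketch with hypothetical values:

    import com.spr.model.PostContent;

    public class PostContentDemo {
        public static void main(String[] args) {
            // @Builder generates the fluent builder.
            PostContent original = PostContent.builder()
                    .title("Hello")
                    .author("Matt")
                    .body("First post")
                    .build();

            // @Wither generates copy-on-write accessors; original is untouched.
            PostContent retitled = original.withTitle("Hello, again");

            System.out.println(original.getTitle());  // Hello
            System.out.println(retitled.getTitle());  // Hello, again
        }
    }
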
/src/main/java/com/spr/service/BlogService.java:
--------------------------------------------------------------------------------
1 | package com.spr.service;
2 |
3 | import java.util.UUID;
4 |
5 | import com.spr.dao.BlogPost;
6 | import com.spr.dao.BlogRepository;
7 | import com.spr.model.PostContent;
8 | import lombok.AllArgsConstructor;
9 | import org.springframework.beans.factory.annotation.Autowired;
10 | import org.springframework.stereotype.Service;
11 | import reactor.core.publisher.Mono;
12 | import reactor.core.scheduler.Schedulers;
13 |
14 | /**
15 | * Service to manage blog posts asynchronously via reactive stream APIs.
16 | *
17 | * @author Matt Sicker
18 | */
19 | @Service
20 | @AllArgsConstructor(onConstructor = @__(@Autowired)) // may not be necessary anymore in new versions of spring?
21 | public class BlogService {
22 |
23 | private final BlogRepository repository;
24 |
25 | public Mono<PostContent> getPost(final UUID id) {
26 | return Mono.defer(() -> Mono.justOrEmpty(repository.findOne(id)))
27 | .subscribeOn(Schedulers.elastic())
28 | .map(post -> new PostContent(post.getTitle(), post.getAuthor(), post.getBody()));
29 | }
30 |
31 | public Mono<UUID> addPost(final Mono<PostContent> contentMono) {
32 | return contentMono
33 | .map(content -> new BlogPost(UUID.randomUUID(), content.getTitle(), content.getAuthor(), content.getBody()))
34 | .publishOn(Schedulers.parallel())
35 | .doOnNext(repository::save)
36 | .map(BlogPost::getId);
37 | }
38 |
39 | public Mono<Void> updatePost(final UUID id, final Mono<PostContent> contentMono) {
40 | return contentMono
41 | .map(content -> new BlogPost(id, content.getTitle(), content.getAuthor(), content.getBody()))
42 | .publishOn(Schedulers.parallel())
43 | .doOnNext(repository::save)
44 | .then();
45 | }
46 | }
47 |
--------------------------------------------------------------------------------
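The pattern worth noting in getPost is Mono.defer combined with subscribeOn(Schedulers.elastic()): defer postpones the blocking repository.findOne call until a subscriber arrives, and the elastic scheduler keeps that blocking work off the caller's thread. A self-contained sketch of the same pattern using only reactor-core, where blockingLookup is a hypothetical stand-in for the DAO call:

    import java.util.Optional;

    import reactor.core.publisher.Mono;
    import reactor.core.scheduler.Schedulers;

    public class DeferDemo {

        // Hypothetical stand-in for a blocking DAO call such as repository.findOne(id).
        private static Optional<String> blockingLookup() {
            return Optional.of("hello");
        }

        public static void main(String[] args) {
            Mono<String> mono = Mono.defer(() -> Mono.justOrEmpty(blockingLookup()))
                    // defer: nothing runs until subscribe; elastic: the blocking
                    // supplier executes on a scheduler intended for blocking work
                    .subscribeOn(Schedulers.elastic())
                    .map(String::toUpperCase);

            // block() only to demonstrate the value outside a reactive pipeline.
            System.out.println(mono.block()); // HELLO
        }
    }

By contrast, addPost and updatePost use publishOn(Schedulers.parallel()) before the save side effect, which shifts the downstream operators (rather than the subscription) onto another scheduler.
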
/src/main/java/com/spr/setup/CassandraSetup.java:
--------------------------------------------------------------------------------
1 | package com.spr.setup;
2 |
3 | import java.io.IOException;
4 | import java.net.InetAddress;
5 | import java.nio.file.Files;
6 | import java.nio.file.Path;
7 | import java.util.stream.Stream;
8 |
9 | import com.datastax.driver.core.Cluster;
10 | import com.datastax.driver.core.Session;
11 | import com.spr.ReactorExampleApplication;
12 | import lombok.extern.slf4j.Slf4j;
13 | import org.apache.cassandra.config.DatabaseDescriptor;
14 | import org.apache.cassandra.io.util.FileUtils;
15 | import org.apache.cassandra.service.EmbeddedCassandraService;
16 |
17 | /**
18 |  * This class bootstraps Cassandra by starting an embedded instance and creating the necessary
19 | * keyspace and table for this microservice.
20 | *
21 | * @author Matt Sicker
22 | */
23 | @Slf4j
24 | public class CassandraSetup {
25 |
26 | public static void init() throws IOException {
27 | setupCassandraDirectories();
28 | startEmbeddedCassandra();
29 | setupDatabase();
30 | }
31 |
32 | /**
33 | * Creates the base storage directories required for running a Cassandra instance.
34 | */
35 | private static void setupCassandraDirectories() throws IOException {
36 | final Path root = Files.createTempDirectory("cassandra");
37 | final Path config = root.resolve("cassandra.yaml");
38 | Files.copy(ReactorExampleApplication.class.getResourceAsStream("/cassandra.yaml"), config);
39 | System.setProperty("cassandra.config", "file:" + config.toString());
40 | System.setProperty("cassandra.storagedir", root.toString());
41 | System.setProperty("cassandra-foreground", "true");
42 | Stream.of(DatabaseDescriptor.getAllDataFileLocations())
43 | .map(root::resolve)
44 | .map(Path::toFile)
45 | .forEach(FileUtils::createDirectory);
46 | }
47 |
48 | /**
49 | * Creates and backgrounds an instance of Cassandra.
50 | */
51 | private static void startEmbeddedCassandra() {
52 | final Thread thread = new Thread(new Cassandra());
53 | thread.setDaemon(true);
54 | thread.start();
55 | }
56 |
57 | private static class Cassandra implements Runnable {
58 | private final EmbeddedCassandraService cassandra = new EmbeddedCassandraService();
59 |
60 | @Override
61 | public void run() {
62 | try {
63 | cassandra.start();
64 | } catch (final IOException e) {
65 | log.error("Could not start Cassandra", e);
66 | }
67 | }
68 | }
69 |
70 | /**
71 | * Creates the keyspace and table used in this microservice.
72 | */
73 | private static void setupDatabase() {
74 | // try-with-resources closes both the Cluster and the Session once setup completes
75 | try (final Cluster cluster = new Cluster.Builder().addContactPoints(InetAddress.getLoopbackAddress()).build();
76 |      final Session session = cluster.connect()) {
76 | session.execute("CREATE KEYSPACE blogs WITH REPLICATION = { 'class' : 'SimpleStrategy', 'replication_factor' : 4 };");
77 | session.execute("CREATE TABLE blogs.blogpost (" +
78 | "id uuid PRIMARY KEY," +
79 | "title text," +
80 | "author text," +
81 | "body text" +
82 | ");");
83 | }
84 | }
85 | }
86 |
--------------------------------------------------------------------------------
/src/main/resources/application.properties:
--------------------------------------------------------------------------------
1 | spring.data.cassandra.keyspace-name=blogs
2 | spring.data.cassandra.cluster-name=Spring
--------------------------------------------------------------------------------
/src/main/resources/cassandra.yaml:
--------------------------------------------------------------------------------
1 | # Cassandra storage config YAML
2 |
3 | # NOTE:
4 | # See http://wiki.apache.org/cassandra/StorageConfiguration for
5 | # full explanations of configuration directives
6 | # /NOTE
7 |
8 | # The name of the cluster. This is mainly used to prevent machines in
9 | # one logical cluster from joining another.
10 | cluster_name: 'Spring'
11 |
12 | # This defines the number of tokens randomly assigned to this node on the ring
13 | # The more tokens, relative to other nodes, the larger the proportion of data
14 | # that this node will store. You probably want all nodes to have the same number
15 | # of tokens assuming they have equal hardware capability.
16 | #
17 | # If you leave this unspecified, Cassandra will use the default of 1 token for legacy compatibility,
18 | # and will use the initial_token as described below.
19 | #
20 | # Specifying initial_token will override this setting on the node's initial start,
21 | # on subsequent starts, this setting will apply even if initial token is set.
22 | #
23 | # If you already have a cluster with 1 token per node, and wish to migrate to
24 | # multiple tokens per node, see http://wiki.apache.org/cassandra/Operations
25 | num_tokens: 256
26 |
27 | # Triggers automatic allocation of num_tokens tokens for this node. The allocation
28 | # algorithm attempts to choose tokens in a way that optimizes replicated load over
29 | # the nodes in the datacenter for the replication strategy used by the specified
30 | # keyspace.
31 | #
32 | # The load assigned to each node will be close to proportional to its number of
33 | # vnodes.
34 | #
35 | # Only supported with the Murmur3Partitioner.
36 | # allocate_tokens_for_keyspace: KEYSPACE
37 |
38 | # initial_token allows you to specify tokens manually. While you can use it with
39 | # vnodes (num_tokens > 1, above) -- in which case you should provide a
40 | # comma-separated list -- it's primarily used when adding nodes to legacy clusters
41 | # that do not have vnodes enabled.
42 | # initial_token:
43 |
44 | # See http://wiki.apache.org/cassandra/HintedHandoff
45 | # May either be "true" or "false" to enable globally
46 | hinted_handoff_enabled: true
47 | # When hinted_handoff_enabled is true, a black list of data centers that will not
48 | # perform hinted handoff
49 | #hinted_handoff_disabled_datacenters:
50 | # - DC1
51 | # - DC2
52 | # this defines the maximum amount of time a dead host will have hints
53 | # generated. After it has been dead this long, new hints for it will not be
54 | # created until it has been seen alive and gone down again.
55 | max_hint_window_in_ms: 10800000 # 3 hours
56 |
57 | # Maximum throttle in KBs per second, per delivery thread. This will be
58 | # reduced proportionally to the number of nodes in the cluster. (If there
59 | # are two nodes in the cluster, each delivery thread will use the maximum
60 | # rate; if there are three, each will throttle to half of the maximum,
61 | # since we expect two nodes to be delivering hints simultaneously.)
62 | hinted_handoff_throttle_in_kb: 1024
63 |
64 | # Number of threads with which to deliver hints;
65 | # Consider increasing this number when you have multi-dc deployments, since
66 | # cross-dc handoff tends to be slower
67 | max_hints_delivery_threads: 2
68 |
69 | # Directory where Cassandra should store hints.
70 | # If not set, the default directory is $CASSANDRA_HOME/data/hints.
71 | # hints_directory: /var/lib/cassandra/hints
72 |
73 | # How often hints should be flushed from the internal buffers to disk.
74 | # Will *not* trigger fsync.
75 | hints_flush_period_in_ms: 10000
76 |
77 | # Maximum size for a single hints file, in megabytes.
78 | max_hints_file_size_in_mb: 128
79 |
80 | # Maximum throttle in KBs per second, total. This will be
81 | # reduced proportionally to the number of nodes in the cluster.
82 | batchlog_replay_throttle_in_kb: 1024
83 |
84 | # Authentication backend, implementing IAuthenticator; used to identify users
85 | # Out of the box, Cassandra provides org.apache.cassandra.auth.{AllowAllAuthenticator,
86 | # PasswordAuthenticator}.
87 | #
88 | # - AllowAllAuthenticator performs no checks - set it to disable authentication.
89 | # - PasswordAuthenticator relies on username/password pairs to authenticate
90 | # users. It keeps usernames and hashed passwords in system_auth.credentials table.
91 | # Please increase system_auth keyspace replication factor if you use this authenticator.
92 | # If using PasswordAuthenticator, CassandraRoleManager must also be used (see below)
93 | authenticator: AllowAllAuthenticator
94 |
95 | # Authorization backend, implementing IAuthorizer; used to limit access/provide permissions
96 | # Out of the box, Cassandra provides org.apache.cassandra.auth.{AllowAllAuthorizer,
97 | # CassandraAuthorizer}.
98 | #
99 | # - AllowAllAuthorizer allows any action to any user - set it to disable authorization.
100 | # - CassandraAuthorizer stores permissions in system_auth.permissions table. Please
101 | # increase system_auth keyspace replication factor if you use this authorizer.
102 | authorizer: AllowAllAuthorizer
103 |
104 | # Part of the Authentication & Authorization backend, implementing IRoleManager; used
105 | # to maintain grants and memberships between roles.
106 | # Out of the box, Cassandra provides org.apache.cassandra.auth.CassandraRoleManager,
107 | # which stores role information in the system_auth keyspace. Most functions of the
108 | # IRoleManager require an authenticated login, so unless the configured IAuthenticator
109 | # actually implements authentication, most of this functionality will be unavailable.
110 | #
111 | # - CassandraRoleManager stores role data in the system_auth keyspace. Please
112 | # increase system_auth keyspace replication factor if you use this role manager.
113 | role_manager: CassandraRoleManager
114 |
115 | # Validity period for roles cache (fetching permissions can be an
116 | # expensive operation depending on the authorizer). Granted roles are cached for
117 | # authenticated sessions in AuthenticatedUser and after the period specified
118 | # here, become eligible for (async) reload.
119 | # Defaults to 2000, set to 0 to disable.
120 | # Will be disabled automatically for AllowAllAuthenticator.
121 | roles_validity_in_ms: 2000
122 |
123 | # Refresh interval for roles cache (if enabled).
124 | # After this interval, cache entries become eligible for refresh. Upon next
125 | # access, an async reload is scheduled and the old value returned until it
126 | # completes. If roles_validity_in_ms is non-zero, then this must be
127 | # also.
128 | # Defaults to the same value as roles_validity_in_ms.
129 | # roles_update_interval_in_ms: 1000
130 |
131 | # Validity period for permissions cache (fetching permissions can be an
132 | # expensive operation depending on the authorizer, CassandraAuthorizer is
133 | # one example). Defaults to 2000, set to 0 to disable.
134 | # Will be disabled automatically for AllowAllAuthorizer.
135 | permissions_validity_in_ms: 2000
136 |
137 | # Refresh interval for permissions cache (if enabled).
138 | # After this interval, cache entries become eligible for refresh. Upon next
139 | # access, an async reload is scheduled and the old value returned until it
140 | # completes. If permissions_validity_in_ms is non-zero, then this must be
141 | # also.
142 | # Defaults to the same value as permissions_validity_in_ms.
143 | # permissions_update_interval_in_ms: 1000
144 |
145 | # The partitioner is responsible for distributing groups of rows (by
146 | # partition key) across nodes in the cluster. You should leave this
147 | # alone for new clusters. The partitioner can NOT be changed without
148 | # reloading all data, so when upgrading you should set this to the
149 | # same partitioner you were already using.
150 | #
151 | # Besides Murmur3Partitioner, partitioners included for backwards
152 | # compatibility include RandomPartitioner, ByteOrderedPartitioner, and
153 | # OrderPreservingPartitioner.
154 | #
155 | partitioner: org.apache.cassandra.dht.Murmur3Partitioner
156 |
157 | # Directories where Cassandra should store data on disk. Cassandra
158 | # will spread data evenly across them, subject to the granularity of
159 | # the configured compaction strategy.
160 | # If not set, the default directory is $CASSANDRA_HOME/data/data.
161 | # data_file_directories:
162 | # - /var/lib/cassandra/data
163 |
164 | # Commit log. When running on magnetic HDD, this should be a
165 | # separate spindle from the data directories.
166 | # If not set, the default directory is $CASSANDRA_HOME/data/commitlog.
167 | # commitlog_directory: /var/lib/cassandra/commitlog
168 |
169 | # policy for data disk failures:
170 | # die: shut down gossip and client transports and kill the JVM for any fs errors or
171 | # single-sstable errors, so the node can be replaced.
172 | # stop_paranoid: shut down gossip and client transports even for single-sstable errors,
173 | # kill the JVM for errors during startup.
174 | # stop: shut down gossip and client transports, leaving the node effectively dead, but
175 | # can still be inspected via JMX, kill the JVM for errors during startup.
176 | # best_effort: stop using the failed disk and respond to requests based on
177 | # remaining available sstables. This means you WILL see obsolete
178 | # data at CL.ONE!
179 | # ignore: ignore fatal errors and let requests fail, as in pre-1.2 Cassandra
180 | disk_failure_policy: stop
181 |
182 | # policy for commit disk failures:
183 | # die: shut down gossip and Thrift and kill the JVM, so the node can be replaced.
184 | # stop: shut down gossip and Thrift, leaving the node effectively dead, but
185 | # can still be inspected via JMX.
186 | # stop_commit: shutdown the commit log, letting writes collect but
187 | # continuing to service reads, as in pre-2.0.5 Cassandra
188 | # ignore: ignore fatal errors and let the batches fail
189 | commit_failure_policy: stop
190 |
191 | # Maximum size of the key cache in memory.
192 | #
193 | # Each key cache hit saves 1 seek and each row cache hit saves 2 seeks at the
194 | # minimum, sometimes more. The key cache is fairly tiny for the amount of
195 | # time it saves, so it's worthwhile to use it at large numbers.
196 | # The row cache saves even more time, but must contain the entire row,
197 | # so it is extremely space-intensive. It's best to only use the
198 | # row cache if you have hot rows or static rows.
199 | #
200 | # NOTE: if you reduce the size, you may not get your hottest keys loaded on startup.
201 | #
202 | # Default value is empty to make it "auto" (min(5% of Heap (in MB), 100MB)). Set to 0 to disable key cache.
203 | key_cache_size_in_mb:
204 |
205 | # Duration in seconds after which Cassandra should
206 | # save the key cache. Caches are saved to saved_caches_directory as
207 | # specified in this configuration file.
208 | #
209 | # Saved caches greatly improve cold-start speeds, and are relatively cheap in
210 | # terms of I/O for the key cache. Row cache saving is much more expensive and
211 | # has limited use.
212 | #
213 | # Default is 14400 or 4 hours.
214 | key_cache_save_period: 14400
215 |
216 | # Number of keys from the key cache to save
217 | # Disabled by default, meaning all keys are going to be saved
218 | # key_cache_keys_to_save: 100
219 |
220 | # Row cache implementation class name.
221 | # Available implementations:
222 | # org.apache.cassandra.cache.OHCProvider Fully off-heap row cache implementation (default).
223 | # org.apache.cassandra.cache.SerializingCacheProvider This is the row cache implementation available
224 | # in previous releases of Cassandra.
225 | # row_cache_class_name: org.apache.cassandra.cache.OHCProvider
226 |
227 | # Maximum size of the row cache in memory.
228 | # Please note that OHC cache implementation requires some additional off-heap memory to manage
229 | # the map structures and some in-flight memory during operations before/after cache entries can be
230 | # accounted against the cache capacity. This overhead is usually small compared to the whole capacity.
231 | # Do not specify more memory than the system can afford in the worst usual situation and leave some
232 | # headroom for OS block level cache. Never allow your system to swap.
233 | #
234 | # Default value is 0, to disable row caching.
235 | row_cache_size_in_mb: 0
236 |
237 | # Duration in seconds after which Cassandra should save the row cache.
238 | # Caches are saved to saved_caches_directory as specified in this configuration file.
239 | #
240 | # Saved caches greatly improve cold-start speeds, and are relatively cheap in
241 | # terms of I/O for the key cache. Row cache saving is much more expensive and
242 | # has limited use.
243 | #
244 | # Default is 0 to disable saving the row cache.
245 | row_cache_save_period: 0
246 |
247 | # Number of keys from the row cache to save.
248 | # Specify 0 (which is the default), meaning all keys are going to be saved
249 | # row_cache_keys_to_save: 100
250 |
251 | # Maximum size of the counter cache in memory.
252 | #
253 | # Counter cache helps to reduce counter locks' contention for hot counter cells.
254 | # In case of RF = 1 a counter cache hit will cause Cassandra to skip the read before
255 | # write entirely. With RF > 1 a counter cache hit will still help to reduce the duration
256 | # of the lock hold, helping with hot counter cell updates, but will not allow skipping
257 | # the read entirely. Only the local (clock, count) tuple of a counter cell is kept
258 | # in memory, not the whole counter, so it's relatively cheap.
259 | #
260 | # NOTE: if you reduce the size, you may not get your hottest keys loaded on startup.
261 | #
262 | # Default value is empty to make it "auto" (min(2.5% of Heap (in MB), 50MB)). Set to 0 to disable counter cache.
263 | # NOTE: if you perform counter deletes and rely on low gcgs, you should disable the counter cache.
264 | counter_cache_size_in_mb:
265 |
266 | # Duration in seconds after which Cassandra should
267 | # save the counter cache (keys only). Caches are saved to saved_caches_directory as
268 | # specified in this configuration file.
269 | #
270 | # Default is 7200 or 2 hours.
271 | counter_cache_save_period: 7200
272 |
273 | # Number of keys from the counter cache to save
274 | # Disabled by default, meaning all keys are going to be saved
275 | # counter_cache_keys_to_save: 100
276 |
277 | # saved caches
278 | # If not set, the default directory is $CASSANDRA_HOME/data/saved_caches.
279 | # saved_caches_directory: /var/lib/cassandra/saved_caches
280 |
281 | # commitlog_sync may be either "periodic" or "batch."
282 | #
283 | # When in batch mode, Cassandra won't ack writes until the commit log
284 | # has been fsynced to disk. It will wait
285 | # commitlog_sync_batch_window_in_ms milliseconds between fsyncs.
286 | # This window should be kept short because the writer threads will
287 | # be unable to do extra work while waiting. (You may need to increase
288 | # concurrent_writes for the same reason.)
289 | #
290 | # commitlog_sync: batch
291 | # commitlog_sync_batch_window_in_ms: 2
292 | #
293 | # the other option is "periodic" where writes may be acked immediately
294 | # and the CommitLog is simply synced every commitlog_sync_period_in_ms
295 | # milliseconds.
296 | commitlog_sync: periodic
297 | commitlog_sync_period_in_ms: 10000
298 |
299 | # The size of the individual commitlog file segments. A commitlog
300 | # segment may be archived, deleted, or recycled once all the data
301 | # in it (potentially from each columnfamily in the system) has been
302 | # flushed to sstables.
303 | #
304 | # The default size is 32, which is almost always fine, but if you are
305 | # archiving commitlog segments (see commitlog_archiving.properties),
306 | # then you probably want a finer granularity of archiving; 8 or 16 MB
307 | # is reasonable.
308 | # Max mutation size is also configurable via max_mutation_size_in_kb setting in
309 | # cassandra.yaml. The default is half the size commitlog_segment_size_in_mb * 1024.
310 | #
311 | # NOTE: If max_mutation_size_in_kb is set explicitly then commitlog_segment_size_in_mb must
312 | # be set to at least twice the size of max_mutation_size_in_kb / 1024
313 | #
314 | commitlog_segment_size_in_mb: 32
315 |
316 | # Compression to apply to the commit log. If omitted, the commit log
317 | # will be written uncompressed. LZ4, Snappy, and Deflate compressors
318 | # are supported.
319 | #commitlog_compression:
320 | # - class_name: LZ4Compressor
321 | # parameters:
322 | # -
323 |
324 | # any class that implements the SeedProvider interface and has a
325 | # constructor that takes a Map of parameters will do.
326 | seed_provider:
327 | # Addresses of hosts that are deemed contact points.
328 | # Cassandra nodes use this list of hosts to find each other and learn
329 | # the topology of the ring. You must change this if you are running
330 | # multiple nodes!
331 | - class_name: org.apache.cassandra.locator.SimpleSeedProvider
332 | parameters:
333 | # seeds is actually a comma-delimited list of addresses.
334 | # Ex: ",,"
335 | - seeds: "127.0.0.1"
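336 | # (Illustrative multi-node example, not part of the stock file; the IPs
337 | # below are hypothetical. List two or three stable nodes here:)
338 | # - seeds: "10.0.0.1,10.0.0.2,10.0.0.3"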
336 |
337 | # For workloads with more data than can fit in memory, Cassandra's
338 | # bottleneck will be reads that need to fetch data from
339 | # disk. "concurrent_reads" should be set to (16 * number_of_drives) in
340 | # order to allow the operations to enqueue low enough in the stack
341 | # that the OS and drives can reorder them. Same applies to
342 | # "concurrent_counter_writes", since counter writes read the current
343 | # values before incrementing and writing them back.
344 | #
345 | # On the other hand, since writes are almost never IO bound, the ideal
346 | # number of "concurrent_writes" is dependent on the number of cores in
347 | # your system; (8 * number_of_cores) is a good rule of thumb.
348 | concurrent_reads: 32
349 | concurrent_writes: 32
350 | concurrent_counter_writes: 32
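351 | #
352 | # (Illustrative arithmetic, not part of the stock file: by the rules of
353 | # thumb above, a node with 2 data drives would use concurrent_reads: 32 and
354 | # concurrent_counter_writes: 32, while an 8-core machine would use
355 | # concurrent_writes: 64.)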
351 |
352 | # For materialized view writes, as there is a read involved, this should
353 | # be limited by the lesser of concurrent_reads and concurrent_writes.
354 | concurrent_materialized_view_writes: 32
355 |
356 | # Maximum memory to use for pooling sstable buffers. Defaults to the smaller
357 | # of 1/4 of heap or 512MB. This pool is allocated off-heap, so is in addition
358 | # to the memory allocated for heap. Memory is only allocated as needed.
359 | # file_cache_size_in_mb: 512
360 |
361 | # Flag indicating whether to allocate on or off heap when the sstable buffer
362 | # pool is exhausted, that is, when it has exceeded the maximum memory
363 | # file_cache_size_in_mb; beyond that point it will not cache buffers but allocate on request.
364 |
365 | # buffer_pool_use_heap_if_exhausted: true
366 |
367 | # The strategy for optimizing disk read
368 | # Possible values are:
369 | # ssd (for solid state disks, the default)
370 | # spinning (for spinning disks)
371 | # disk_optimization_strategy: ssd
372 |
373 | # Total permitted memory to use for memtables. Cassandra will stop
374 | # accepting writes when the limit is exceeded until a flush completes,
375 | # and will trigger a flush based on memtable_cleanup_threshold
376 | # If omitted, Cassandra will set both to 1/4 the size of the heap.
377 | # memtable_heap_space_in_mb: 2048
378 | # memtable_offheap_space_in_mb: 2048
379 |
380 | # Ratio of occupied non-flushing memtable size to total permitted size
381 | # that will trigger a flush of the largest memtable. A larger threshold will
382 | # mean larger flushes and hence less compaction, but also less concurrent
383 | # flush activity which can make it difficult to keep your disks fed
384 | # under heavy write load.
385 | #
386 | # memtable_cleanup_threshold defaults to 1 / (memtable_flush_writers + 1)
387 | # memtable_cleanup_threshold: 0.11
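388 | #
389 | # (Illustrative arithmetic, not part of the stock file: with
390 | # memtable_flush_writers: 8, the default threshold is 1 / (8 + 1) = 0.11
391 | # (rounded), i.e. a flush of the largest memtable triggers once non-flushing
392 | # memtables occupy about 11% of the permitted memtable space.)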
388 |
389 | # Specify the way Cassandra allocates and manages memtable memory.
390 | # Options are:
391 | # heap_buffers: on heap nio buffers
392 | # offheap_buffers: off heap (direct) nio buffers
393 | memtable_allocation_type: heap_buffers
394 |
395 | # Total space to use for commit logs on disk.
396 | #
397 | # If space gets above this value, Cassandra will flush every dirty CF
398 | # in the oldest segment and remove it. So a small total commitlog space
399 | # will tend to cause more flush activity on less-active columnfamilies.
400 | #
401 | # The default value is the smaller of 8192, and 1/4 of the total space
402 | # of the commitlog volume.
403 | #
404 | # commitlog_total_space_in_mb: 8192
405 |
406 | # This sets the number of memtable flush writer threads. These will
407 | # be blocked by disk io, and each one will hold a memtable in memory
408 | # while blocked.
409 | #
410 | # memtable_flush_writers defaults to the smaller of (number of disks,
411 | # number of cores), with a minimum of 2 and a maximum of 8.
412 | #
413 | # If your data directories are backed by SSD, you should increase this
414 | # to the number of cores.
415 | #memtable_flush_writers: 8
416 |
417 | # A fixed memory pool size in MB for SSTable index summaries. If left
418 | # empty, this will default to 5% of the heap size. If the memory usage of
419 | # all index summaries exceeds this limit, SSTables with low read rates will
420 | # shrink their index summaries in order to meet this limit. However, this
421 | # is a best-effort process. In extreme conditions Cassandra may need to use
422 | # more than this amount of memory.
423 | index_summary_capacity_in_mb:
424 |
425 | # How frequently index summaries should be resampled. This is done
426 | # periodically to redistribute memory from the fixed-size pool to sstables
427 | # proportional to their recent read rates. Setting to -1 will disable this
428 | # process, leaving existing index summaries at their current sampling level.
429 | index_summary_resize_interval_in_minutes: 60
430 |
431 | # Whether to, when doing sequential writing, fsync() at intervals in
432 | # order to force the operating system to flush the dirty
433 | # buffers. Enable this to avoid sudden dirty buffer flushing from
434 | # impacting read latencies. Almost always a good idea on SSDs; not
435 | # necessarily on platters.
436 | trickle_fsync: false
437 | trickle_fsync_interval_in_kb: 10240
438 |
439 | # TCP port, for commands and data
440 | # For security reasons, you should not expose this port to the internet. Firewall it if needed.
441 | storage_port: 7000
442 |
443 | # SSL port, for encrypted communication. Unused unless enabled in
444 | # encryption_options
445 | # For security reasons, you should not expose this port to the internet. Firewall it if needed.
446 | ssl_storage_port: 7001
447 |
448 | # Address or interface to bind to and tell other Cassandra nodes to connect to.
449 | # You _must_ change this if you want multiple nodes to be able to communicate!
450 | #
451 | # Set listen_address OR listen_interface, not both. Interfaces must correspond
452 | # to a single address, IP aliasing is not supported.
453 | #
454 | # Leaving it blank leaves it up to InetAddress.getLocalHost(). This
455 | # will always do the Right Thing _if_ the node is properly configured
456 | # (hostname, name resolution, etc), and the Right Thing is to use the
457 | # address associated with the hostname (it might not be).
458 | #
459 | # Setting listen_address to 0.0.0.0 is always wrong.
460 | #
461 | # If you choose to specify the interface by name and the interface has an ipv4 and an ipv6 address
462 | # you can specify which should be chosen using listen_interface_prefer_ipv6. If false the first ipv4
463 | # address will be used. If true the first ipv6 address will be used. Defaults to false preferring
464 | # ipv4. If there is only one address it will be selected regardless of ipv4/ipv6.
465 | listen_address: localhost
466 | # listen_interface: eth0
467 | # listen_interface_prefer_ipv6: false
468 |
469 | # Address to broadcast to other Cassandra nodes
470 | # Leaving this blank will set it to the same value as listen_address
471 | # broadcast_address: 1.2.3.4
472 |
473 | # Internode authentication backend, implementing IInternodeAuthenticator;
474 | # used to allow/disallow connections from peer nodes.
475 | # internode_authenticator: org.apache.cassandra.auth.AllowAllInternodeAuthenticator
476 |
477 | # Whether to start the native transport server.
478 | # Please note that the address on which the native transport is bound is the
479 | # same as the rpc_address. The port however is different and specified below.
480 | start_native_transport: true
481 | # port for the CQL native transport to listen for clients on
482 | # For security reasons, you should not expose this port to the internet. Firewall it if needed.
483 | native_transport_port: 9042
484 | # Enabling native transport encryption in client_encryption_options allows you to either use
485 | # encryption for the standard port or to use a dedicated, additional port along with the unencrypted
486 | # standard native_transport_port.
487 | # Enabling client encryption and keeping native_transport_port_ssl disabled will use encryption
488 | # for native_transport_port. Setting native_transport_port_ssl to a different value
489 | # from native_transport_port will use encryption for native_transport_port_ssl while
490 | # keeping native_transport_port unencrypted.
491 | # native_transport_port_ssl: 9142
492 | # The maximum threads for handling requests when the native transport is used.
493 | # This is similar to rpc_max_threads though the default differs slightly (and
494 | # there is no native_transport_min_threads, idle threads will always be stopped
495 | # after 30 seconds).
496 | # native_transport_max_threads: 128
497 | #
498 | # The maximum size of an allowed frame. Frames (requests) larger than this will
499 | # be rejected as invalid. The default is 256MB.
500 | # native_transport_max_frame_size_in_mb: 256
501 |
502 | # The maximum number of concurrent client connections.
503 | # The default is -1, which means unlimited.
504 | # native_transport_max_concurrent_connections: -1
505 |
506 | # The maximum number of concurrent client connections per source ip.
507 | # The default is -1, which means unlimited.
508 | # native_transport_max_concurrent_connections_per_ip: -1
509 |
510 | # Whether to start the thrift rpc server.
511 | start_rpc: false
512 |
513 | # The address or interface to bind the Thrift RPC service and native transport
514 | # server to.
515 | #
516 | # Set rpc_address OR rpc_interface, not both. Interfaces must correspond
517 | # to a single address, IP aliasing is not supported.
518 | #
519 | # Leaving rpc_address blank has the same effect as on listen_address
520 | # (i.e. it will be based on the configured hostname of the node).
521 | #
522 | # Note that unlike listen_address, you can specify 0.0.0.0, but you must also
523 | # set broadcast_rpc_address to a value other than 0.0.0.0.
524 | #
525 | # For security reasons, you should not expose this port to the internet. Firewall it if needed.
526 | #
527 | # If you choose to specify the interface by name and the interface has an ipv4 and an ipv6 address
528 | # you can specify which should be chosen using rpc_interface_prefer_ipv6. If false the first ipv4
529 | # address will be used. If true the first ipv6 address will be used. Defaults to false preferring
530 | # ipv4. If there is only one address it will be selected regardless of ipv4/ipv6.
531 | rpc_address: localhost
532 | # rpc_interface: eth1
533 | # rpc_interface_prefer_ipv6: false
534 |
535 | # port for Thrift to listen for clients on
536 | rpc_port: 9160
537 |
538 | # RPC address to broadcast to drivers and other Cassandra nodes. This cannot
539 | # be set to 0.0.0.0. If left blank, this will be set to the value of
540 | # rpc_address. If rpc_address is set to 0.0.0.0, broadcast_rpc_address must
541 | # be set.
542 | # broadcast_rpc_address: 1.2.3.4
543 |
544 | # enable or disable keepalive on rpc/native connections
545 | rpc_keepalive: true
546 |
547 | # Cassandra provides two out-of-the-box options for the RPC Server:
548 | #
549 | # sync -> One thread per thrift connection. For a very large number of clients, memory
550 | # will be your limiting factor. On a 64 bit JVM, 180KB is the minimum stack size
551 | # per thread, and that will correspond to your use of virtual memory (but physical memory
552 | # may be limited depending on use of stack space).
553 | #
554 | # hsha -> Stands for "half synchronous, half asynchronous." All thrift clients are handled
555 | # asynchronously using a small number of threads that does not vary with the number
556 | # of thrift clients (and thus scales well to many clients). The rpc requests are still
557 | # synchronous (one thread per active request). If hsha is selected then it is essential
558 | # that rpc_max_threads is changed from the default value of unlimited.
559 | #
560 | # The default is sync because on Windows hsha is about 30% slower. On Linux,
561 | # sync/hsha performance is about the same, with hsha of course using less memory.
562 | #
563 | # Alternatively, can provide your own RPC server by providing the fully-qualified class name
564 | # of an o.a.c.t.TServerFactory that can create an instance of it.
565 | rpc_server_type: sync
566 |
567 | # Uncomment rpc_min|max_thread to set request pool size limits.
568 | #
569 | # Regardless of your choice of RPC server (see above), the number of maximum requests in the
570 | # RPC thread pool dictates how many concurrent requests are possible (but if you are using the sync
571 | # RPC server, it also dictates the number of clients that can be connected at all).
572 | #
573 | # The default is unlimited and thus provides no protection against clients overwhelming the server. You are
574 | # encouraged to set a maximum that makes sense for you in production, but do keep in mind that
575 | # rpc_max_threads represents the maximum number of client requests this server may execute concurrently.
576 | #
577 | # rpc_min_threads: 16
578 | # rpc_max_threads: 2048
579 |
580 | # uncomment to set socket buffer sizes on rpc connections
581 | # rpc_send_buff_size_in_bytes:
582 | # rpc_recv_buff_size_in_bytes:
583 |
584 | # Uncomment to set socket buffer size for internode communication
585 | # Note that when setting this, the buffer size is limited by net.core.wmem_max
586 | # and when not setting it it is defined by net.ipv4.tcp_wmem
587 | # See:
588 | # /proc/sys/net/core/wmem_max
589 | # /proc/sys/net/core/rmem_max
590 | # /proc/sys/net/ipv4/tcp_wmem
591 | # /proc/sys/net/ipv4/tcp_rmem
592 | # and: man tcp
593 | # internode_send_buff_size_in_bytes:
594 | # internode_recv_buff_size_in_bytes:
595 |
596 | # Frame size for thrift (maximum message length).
597 | thrift_framed_transport_size_in_mb: 15
598 |
599 | # Set to true to have Cassandra create a hard link to each sstable
600 | # flushed or streamed locally in a backups/ subdirectory of the
601 | # keyspace data. Removing these links is the operator's
602 | # responsibility.
603 | incremental_backups: false
604 |
605 | # Whether or not to take a snapshot before each compaction. Be
606 | # careful using this option, since Cassandra won't clean up the
607 | # snapshots for you. Mostly useful if you're paranoid when there
608 | # is a data format change.
609 | snapshot_before_compaction: false
610 |
611 | # Whether or not a snapshot is taken of the data before keyspace truncation
612 | # or dropping of column families. The STRONGLY advised default of true
613 | # should be used to provide data safety. If you set this flag to false, you will
614 | # lose data on truncation or drop.
615 | auto_snapshot: true
616 |
617 | # When executing a scan, within or across a partition, we need to keep the
618 | # tombstones seen in memory so we can return them to the coordinator, which
619 | # will use them to make sure other replicas also know about the deleted rows.
620 | # With workloads that generate a lot of tombstones, this can cause performance
621 | # problems and even exhaust the server heap.
622 | # (http://www.datastax.com/dev/blog/cassandra-anti-patterns-queues-and-queue-like-datasets)
623 | # Adjust the thresholds here if you understand the dangers and want to
624 | # scan more tombstones anyway. These thresholds may also be adjusted at runtime
625 | # using the StorageService mbean.
626 | tombstone_warn_threshold: 1000
627 | tombstone_failure_threshold: 100000
628 |
629 | # Granularity of the collation index of rows within a partition.
630 | # Increase if your rows are large, or if you have a very large
631 | # number of rows per partition. The competing goals are these:
632 | # 1) a smaller granularity means more index entries are generated
633 | # and looking up rows within the partition by collation column
634 | # is faster
635 | # 2) but, Cassandra will keep the collation index in memory for hot
636 | # rows (as part of the key cache), so a larger granularity means
637 | # you can cache more hot rows
638 | column_index_size_in_kb: 64
639 |
640 |
641 | # Log WARN on any batch size exceeding this value. 5kb per batch by default.
642 | # Caution should be taken on increasing the size of this threshold as it can lead to node instability.
643 | batch_size_warn_threshold_in_kb: 5
644 |
645 | # Fail any batch exceeding this value. 50kb (10x warn threshold) by default.
646 | batch_size_fail_threshold_in_kb: 50
647 |
648 | # Number of simultaneous compactions to allow, NOT including
649 | # validation "compactions" for anti-entropy repair. Simultaneous
650 | # compactions can help preserve read performance in a mixed read/write
651 | # workload, by mitigating the tendency of small sstables to accumulate
652 | # during a single long-running compaction. The default is usually
653 | # fine and if you experience problems with compaction running too
654 | # slowly or too fast, you should look at
655 | # compaction_throughput_mb_per_sec first.
656 | #
657 | # concurrent_compactors defaults to the smaller of (number of disks,
658 | # number of cores), with a minimum of 2 and a maximum of 8.
659 | #
660 | # If your data directories are backed by SSD, you should increase this
661 | # to the number of cores.
662 | #concurrent_compactors: 1
663 |
664 | # Throttles compaction to the given total throughput across the entire
665 | # system. The faster you insert data, the faster you need to compact in
666 | # order to keep the sstable count down, but in general, setting this to
667 | # 16 to 32 times the rate you are inserting data is more than sufficient.
668 | # Setting this to 0 disables throttling. Note that this accounts for all types
669 | # of compaction, including validation compaction.
670 | compaction_throughput_mb_per_sec: 16
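671 | #
672 | # (Illustrative arithmetic, not part of the stock file: a node ingesting
673 | # about 1 MB/s would want 16 to 32 MB/s here, which the default of 16
674 | # already covers; heavier ingest rates would scale this up accordingly.)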
671 |
672 | # Log a warning when compacting partitions larger than this value
673 | compaction_large_partition_warning_threshold_mb: 100
674 |
675 | # When compacting, the replacement sstable(s) can be opened before they
676 | # are completely written, and used in place of the prior sstables for
677 | # any range that has been written. This helps to smoothly transfer reads
678 | # between the sstables, reducing page cache churn and keeping hot rows hot
679 | sstable_preemptive_open_interval_in_mb: 50
680 |
681 | # Throttles all outbound streaming file transfers on this node to the
682 | # given total throughput in Mbps. This is necessary because Cassandra does
683 | # mostly sequential IO when streaming data during bootstrap or repair, which
684 | # can lead to saturating the network connection and degrading rpc performance.
685 | # When unset, the default is 200 Mbps or 25 MB/s.
686 | # stream_throughput_outbound_megabits_per_sec: 200
687 |
688 | # Throttles all streaming file transfers between datacenters;
689 | # this setting allows users to throttle inter-dc stream throughput in addition
690 | # to throttling all network stream traffic as configured with
691 | # stream_throughput_outbound_megabits_per_sec
692 | # inter_dc_stream_throughput_outbound_megabits_per_sec:
693 |
694 | # How long the coordinator should wait for read operations to complete
695 | read_request_timeout_in_ms: 5000
696 | # How long the coordinator should wait for seq or index scans to complete
697 | range_request_timeout_in_ms: 10000
698 | # How long the coordinator should wait for writes to complete
699 | write_request_timeout_in_ms: 2000
700 | # How long the coordinator should wait for counter writes to complete
701 | counter_write_request_timeout_in_ms: 5000
702 | # How long a coordinator should continue to retry a CAS operation
703 | # that contends with other proposals for the same row
704 | cas_contention_timeout_in_ms: 1000
705 | # How long the coordinator should wait for truncates to complete
706 | # (This can be much longer, because unless auto_snapshot is disabled
707 | # we need to flush first so we can snapshot before removing the data.)
708 | truncate_request_timeout_in_ms: 60000
709 | # The default timeout for other, miscellaneous operations
710 | request_timeout_in_ms: 10000
711 |
712 | # Enable operation timeout information exchange between nodes to accurately
713 | # measure request timeouts. If disabled, replicas will assume that requests
714 | # were forwarded to them instantly by the coordinator, which means that
715 | # under overload conditions we will waste that much extra time processing
716 | # already-timed-out requests.
717 | #
718 | # Warning: before enabling this property make sure that NTP is installed
719 | # and the times are synchronized between the nodes.
720 | cross_node_timeout: false
721 |
722 | # Enable socket timeout for streaming operation.
723 | # When a timeout occurs during streaming, streaming is retried from the start
724 | # of the current file. This _can_ involve re-streaming an important amount of
725 | # data, so you should avoid setting the value too low.
726 | # Default value is 3600000, which means streams timeout after an hour.
727 | # streaming_socket_timeout_in_ms: 3600000
728 |
729 | # phi value that must be reached for a host to be marked down.
730 | # most users should never need to adjust this.
731 | # phi_convict_threshold: 8
732 |
733 | # endpoint_snitch -- Set this to a class that implements
734 | # IEndpointSnitch. The snitch has two functions:
735 | # - it teaches Cassandra enough about your network topology to route
736 | # requests efficiently
737 | # - it allows Cassandra to spread replicas around your cluster to avoid
738 | # correlated failures. It does this by grouping machines into
739 | # "datacenters" and "racks." Cassandra will do its best not to have
740 | # more than one replica on the same "rack" (which may not actually
741 | # be a physical location)
742 | #
743 | # IF YOU CHANGE THE SNITCH AFTER DATA IS INSERTED INTO THE CLUSTER,
744 | # YOU MUST RUN A FULL REPAIR, SINCE THE SNITCH AFFECTS WHERE REPLICAS
745 | # ARE PLACED.
746 | #
747 | # IF THE RACK A REPLICA IS PLACED IN CHANGES AFTER THE REPLICA HAS BEEN
748 | # ADDED TO A RING, THE NODE MUST BE DECOMMISSIONED AND REBOOTSTRAPPED.
749 | #
750 | # Out of the box, Cassandra provides
751 | # - SimpleSnitch:
752 | # Treats Strategy order as proximity. This can improve cache
753 | # locality when disabling read repair. Only appropriate for
754 | # single-datacenter deployments.
755 | # - GossipingPropertyFileSnitch
756 | # This should be your go-to snitch for production use. The rack
757 | # and datacenter for the local node are defined in
758 | # cassandra-rackdc.properties and propagated to other nodes via
759 | # gossip. If cassandra-topology.properties exists, it is used as a
760 | # fallback, allowing migration from the PropertyFileSnitch.
761 | # - PropertyFileSnitch:
762 | # Proximity is determined by rack and data center, which are
763 | # explicitly configured in cassandra-topology.properties.
764 | # - Ec2Snitch:
765 | # Appropriate for EC2 deployments in a single Region. Loads Region
766 | # and Availability Zone information from the EC2 API. The Region is
767 | # treated as the datacenter, and the Availability Zone as the rack.
768 | # Only private IPs are used, so this will not work across multiple
769 | # Regions.
770 | # - Ec2MultiRegionSnitch:
771 | # Uses public IPs as broadcast_address to allow cross-region
772 | # connectivity. (Thus, you should set seed addresses to the public
773 | # IP as well.) You will need to open the storage_port or
774 | # ssl_storage_port on the public IP firewall. (For intra-Region
775 | # traffic, Cassandra will switch to the private IP after
776 | # establishing a connection.)
777 | # - RackInferringSnitch:
778 | # Proximity is determined by rack and data center, which are
779 | # assumed to correspond to the 3rd and 2nd octet of each node's IP
780 | # address, respectively. Unless this happens to match your
781 | # deployment conventions, this is best used as an example of
782 | # writing a custom Snitch class and is provided in that spirit.
783 | #
784 | # You can use a custom Snitch by setting this to the full class name
785 | # of the snitch, which will be assumed to be on your classpath.
786 | endpoint_snitch: SimpleSnitch
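787 | #
788 | # (Illustrative alternative, not part of the stock file: for production,
789 | # the comments above recommend the gossip-based snitch, with each node's
790 | # datacenter and rack defined in cassandra-rackdc.properties, e.g. dc=dc1
791 | # and rack=rack1.)
792 | # endpoint_snitch: GossipingPropertyFileSnitch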
787 |
788 | # controls how often to perform the more expensive part of host score
789 | # calculation
790 | dynamic_snitch_update_interval_in_ms: 100
791 | # controls how often to reset all host scores, allowing a bad host to
792 | # possibly recover
793 | dynamic_snitch_reset_interval_in_ms: 600000
794 | # if set greater than zero and read_repair_chance is < 1.0, this will allow
795 | # 'pinning' of replicas to hosts in order to increase cache capacity.
796 | # The badness threshold will control how much worse the pinned host has to be
797 | # before the dynamic snitch will prefer other replicas over it. This is
798 | # expressed as a double which represents a percentage. Thus, a value of
799 | # 0.2 means Cassandra would continue to prefer the static snitch values
800 | # until the pinned host was 20% worse than the fastest.
801 | dynamic_snitch_badness_threshold: 0.1
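802 | #
803 | # (Illustrative reading, not part of the stock file: with the 0.1 above, a
804 | # 'pinned' replica keeps receiving reads until its score is 10% worse than
805 | # the fastest replica, after which the dynamic snitch prefers other replicas.)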
802 |
803 | # request_scheduler -- Set this to a class that implements
804 | # RequestScheduler, which will schedule incoming client requests
805 | # according to the specific policy. This is useful for multi-tenancy
806 | # with a single Cassandra cluster.
807 | # NOTE: This is specifically for requests from the client and does
808 | # not affect inter node communication.
809 | # org.apache.cassandra.scheduler.NoScheduler - No scheduling takes place
810 | # org.apache.cassandra.scheduler.RoundRobinScheduler - Round robin of
811 | # client requests to a node with a separate queue for each
812 | # request_scheduler_id. The scheduler is further customized by
813 | # request_scheduler_options as described below.
814 | request_scheduler: org.apache.cassandra.scheduler.NoScheduler
815 |
816 | # Scheduler Options vary based on the type of scheduler
817 | # NoScheduler - Has no options
818 | # RoundRobin
819 | # - throttle_limit -- The throttle_limit is the number of in-flight
820 | # requests per client. Requests beyond
821 | # that limit are queued up until
822 | # running requests can complete.
823 | # The value of 80 here is twice the number of
824 | # concurrent_reads + concurrent_writes.
825 | # - default_weight -- default_weight is optional and allows for
826 | # overriding the default which is 1.
827 | # - weights -- Weights are optional and will default to 1 or the
828 | # overridden default_weight. The weight translates into how
829 | # many requests are handled during each turn of the
830 | # RoundRobin, based on the scheduler id.
831 | #
832 | # request_scheduler_options:
833 | # throttle_limit: 80
834 | # default_weight: 5
835 | # weights:
836 | # Keyspace1: 1
837 | # Keyspace2: 5
838 |
839 | # request_scheduler_id -- An identifier based on which to perform
840 | # the request scheduling. Currently the only valid option is keyspace.
841 | # request_scheduler_id: keyspace
842 |
843 | # Enable or disable inter-node encryption
844 | # Default settings are TLS v1, RSA 1024-bit keys (it is imperative that
845 | # users generate their own keys), and TLS_RSA_WITH_AES_128_CBC_SHA as the cipher
846 | # suite for authentication, key exchange and encryption of the actual data transfers.
847 | # Use the DHE/ECDHE ciphers if running in FIPS 140 compliant mode.
848 | # NOTE: No custom encryption options are enabled at the moment
849 | # The available internode options are : all, none, dc, rack
850 | #
851 | # If set to dc cassandra will encrypt the traffic between the DCs
852 | # If set to rack cassandra will encrypt the traffic between the racks
853 | #
854 | # The passwords used in these options must match the passwords used when generating
855 | # the keystore and truststore. For instructions on generating these files, see:
856 | # http://download.oracle.com/javase/6/docs/technotes/guides/security/jsse/JSSERefGuide.html#CreateKeystore
857 | #
858 | server_encryption_options:
859 | internode_encryption: none
860 | keystore: conf/.keystore
861 | keystore_password: cassandra
862 | truststore: conf/.truststore
863 | truststore_password: cassandra
864 | # More advanced defaults below:
865 | # protocol: TLS
866 | # algorithm: SunX509
867 | # store_type: JKS
868 | # cipher_suites: [TLS_RSA_WITH_AES_128_CBC_SHA,TLS_RSA_WITH_AES_256_CBC_SHA,TLS_DHE_RSA_WITH_AES_128_CBC_SHA,TLS_DHE_RSA_WITH_AES_256_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA]
869 | # require_client_auth: false
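870 | #
871 | # (Illustrative example, not part of the stock file: to encrypt only
872 | # cross-datacenter traffic as described above, set internode_encryption: dc
873 | # and point keystore/truststore at files generated per the JSSE guide
874 | # linked above.)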
870 |
871 | # enable or disable client/server encryption.
872 | client_encryption_options:
873 | enabled: false
874 | # If enabled and optional is set to true encrypted and unencrypted connections are handled.
875 | optional: false
876 | keystore: conf/.keystore
877 | keystore_password: cassandra
878 | # require_client_auth: false
879 | # Set truststore and truststore_password if require_client_auth is true
880 | # truststore: conf/.truststore
881 | # truststore_password: cassandra
882 | # More advanced defaults below:
883 | # protocol: TLS
884 | # algorithm: SunX509
885 | # store_type: JKS
886 | # cipher_suites: [TLS_RSA_WITH_AES_128_CBC_SHA,TLS_RSA_WITH_AES_256_CBC_SHA,TLS_DHE_RSA_WITH_AES_128_CBC_SHA,TLS_DHE_RSA_WITH_AES_256_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA]
887 |
888 | # internode_compression controls whether traffic between nodes is
889 | # compressed.
890 | # can be: all - all traffic is compressed
891 | # dc - traffic between different datacenters is compressed
892 | # none - nothing is compressed.
893 | internode_compression: all
894 |
895 | # Enable or disable tcp_nodelay for inter-dc communication.
896 | # Disabling it will result in larger (but fewer) network packets being sent,
897 | # reducing overhead from the TCP protocol itself, at the cost of increasing
898 | # latency if you block for cross-datacenter responses.
899 | inter_dc_tcp_nodelay: false
900 |
901 | # TTL for different trace types used during logging of the repair process.
902 | tracetype_query_ttl: 86400
903 | tracetype_repair_ttl: 604800
904 |
905 | # GC Pauses greater than gc_warn_threshold_in_ms will be logged at WARN level
906 | # Adjust the threshold based on your application throughput requirement
907 | # By default, Cassandra logs GC Pauses greater than 200 ms at INFO level
908 | gc_warn_threshold_in_ms: 1000
909 |
910 | # UDFs (user defined functions) are disabled by default.
911 | # As of Cassandra 3.0 there is a sandbox in place that should prevent execution of evil code.
912 | enable_user_defined_functions: false
913 |
914 | # Enables scripted UDFs (JavaScript UDFs).
915 | # Java UDFs are always enabled, if enable_user_defined_functions is true.
916 | # Enable this option to be able to use UDFs with "language javascript" or any custom JSR-223 provider.
917 | # This option has no effect, if enable_user_defined_functions is false.
918 | enable_scripted_user_defined_functions: false
919 |
920 | # The default Windows kernel timer and scheduling resolution is 15.6ms for power conservation.
921 | # Lowering this value on Windows can provide much tighter latency and better throughput, however
922 | # some virtualized environments may see a negative performance impact from changing this setting
923 | # below their system default. The sysinternals 'clockres' tool can confirm your system's default
924 | # setting.
925 | windows_timer_interval: 1
926 |
--------------------------------------------------------------------------------
/src/test/java/com/spr/ReactorExampleApplicationTests.java:
--------------------------------------------------------------------------------
1 | package com.spr;
2 |
3 | import java.io.IOException;
4 | import java.util.UUID;
5 |
6 | import com.spr.controller.BlogController;
7 | import com.spr.model.PostContent;
8 | import com.spr.setup.CassandraSetup;
9 | import org.junit.BeforeClass;
10 | import org.junit.Test;
11 | import org.junit.runner.RunWith;
12 | import org.springframework.beans.factory.annotation.Autowired;
13 | import org.springframework.boot.test.context.SpringBootTest;
14 | import org.springframework.test.context.junit4.SpringRunner;
15 | import reactor.core.publisher.Mono;
16 |
17 | import static org.assertj.core.api.Assertions.assertThat;
18 |
19 | @RunWith(SpringRunner.class)
20 | @SpringBootTest(webEnvironment = SpringBootTest.WebEnvironment.RANDOM_PORT)
21 | public class ReactorExampleApplicationTests {
22 |
23 | @Autowired
24 | private BlogController controller;
25 |
26 | @BeforeClass
27 | public static void setUpClass() throws IOException {
28 | CassandraSetup.init();
29 | }
30 |
31 | @Test
32 | public void sanityTests() throws Exception {
33 | final Mono<PostContent> nonexistent = controller.getPost(UUID.randomUUID());
34 | assertThat(nonexistent.hasElement().block()).isFalse();
35 | final UUID id = controller.addPost(
36 | Mono.just(newPostContent())).block();
37 | final Mono<PostContent> contentMono = controller.getPost(id);
38 | assertThat(contentMono.block()).isEqualTo(newPostContent());
39 | controller.updatePost(id, Mono.just(newPostContent().withBody("Other body"))).block();
40 | final Mono<PostContent> updatedMono = controller.getPost(id);
41 | assertThat(updatedMono.block()).isEqualTo(newPostContent().withBody("Other body"));
42 | }
43 |
44 | private static PostContent newPostContent() {
45 | return PostContent.builder().title("Title").author("Author").body("Body").build();
46 | }
47 | }
48 |
--------------------------------------------------------------------------------