├── .gitignore
├── LICENSE
├── README.md
├── build.sbt
├── project
│   ├── Dependencies.scala
│   ├── HazelcastBuild.scala
│   ├── Settings.scala
│   ├── ShellPrompt.scala
│   ├── Versions.scala
│   ├── build.properties
│   └── plugins.sbt
├── sbt
└── src
    ├── main
    │   ├── java
    │   │   └── com
    │   │       └── hazelcast
    │   │           └── spark
    │   │               └── connector
    │   │                   ├── HazelcastJavaPairRDDFunctions.java
    │   │                   └── HazelcastSparkContext.java
    │   ├── resources
    │   │   └── log4j.properties
    │   └── scala
    │       └── com
    │           └── hazelcast
    │               └── spark
    │                   └── connector
    │                       ├── HazelcastSparkContextFunctions.scala
    │                       ├── conf
    │                       │   ├── ConfigurationProperties.scala
    │                       │   └── SerializableConf.scala
    │                       ├── iterator
    │                       │   ├── CacheIterator.scala
    │                       │   └── MapIterator.scala
    │                       ├── package.scala
    │                       ├── rdd
    │                       │   ├── HazelcastJavaRDD.scala
    │                       │   ├── HazelcastRDD.scala
    │                       │   ├── HazelcastRDDFunctions.scala
    │                       │   └── PartitionLocationInfo.scala
    │                       └── util
    │                           ├── CleanupUtil.scala
    │                           ├── ConnectionUtil.scala
    │                           └── HazelcastUtil.scala
    └── test
        ├── java
        │   ├── ReadFromHazelcastJavaTest.java
        │   └── WriteToHazelcastJavaTest.java
        └── scala
            └── com
                └── hazelcast
                    └── spark
                        └── connector
                            ├── ReadFromHazelcastTest.scala
                            ├── ReadPerformanceTest.scala
                            ├── WritePerformanceTest.scala
                            └── WriteToHazelcastTest.scala
/.gitignore:
--------------------------------------------------------------------------------
1 | .clover/
2 | .idea/*
3 | target/
4 | .project
5 | .classpath
6 | .settings/
7 | .idea/
8 | .patch
9 | .surefire-*
10 | *.iml
11 | *.ipr
12 | *.iws
13 | .DS_Store
14 | reports/
15 | .directory
16 | atlassian-ide-plugin.xml
17 | performance*.log
18 |
19 |
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | Apache License
2 | Version 2.0, January 2004
3 | http://www.apache.org/licenses/
4 |
5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
6 |
7 | 1. Definitions.
8 |
9 | "License" shall mean the terms and conditions for use, reproduction,
10 | and distribution as defined by Sections 1 through 9 of this document.
11 |
12 | "Licensor" shall mean the copyright owner or entity authorized by
13 | the copyright owner that is granting the License.
14 |
15 | "Legal Entity" shall mean the union of the acting entity and all
16 | other entities that control, are controlled by, or are under common
17 | control with that entity. For the purposes of this definition,
18 | "control" means (i) the power, direct or indirect, to cause the
19 | direction or management of such entity, whether by contract or
20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the
21 | outstanding shares, or (iii) beneficial ownership of such entity.
22 |
23 | "You" (or "Your") shall mean an individual or Legal Entity
24 | exercising permissions granted by this License.
25 |
26 | "Source" form shall mean the preferred form for making modifications,
27 | including but not limited to software source code, documentation
28 | source, and configuration files.
29 |
30 | "Object" form shall mean any form resulting from mechanical
31 | transformation or translation of a Source form, including but
32 | not limited to compiled object code, generated documentation,
33 | and conversions to other media types.
34 |
35 | "Work" shall mean the work of authorship, whether in Source or
36 | Object form, made available under the License, as indicated by a
37 | copyright notice that is included in or attached to the work
38 | (an example is provided in the Appendix below).
39 |
40 | "Derivative Works" shall mean any work, whether in Source or Object
41 | form, that is based on (or derived from) the Work and for which the
42 | editorial revisions, annotations, elaborations, or other modifications
43 | represent, as a whole, an original work of authorship. For the purposes
44 | of this License, Derivative Works shall not include works that remain
45 | separable from, or merely link (or bind by name) to the interfaces of,
46 | the Work and Derivative Works thereof.
47 |
48 | "Contribution" shall mean any work of authorship, including
49 | the original version of the Work and any modifications or additions
50 | to that Work or Derivative Works thereof, that is intentionally
51 | submitted to Licensor for inclusion in the Work by the copyright owner
52 | or by an individual or Legal Entity authorized to submit on behalf of
53 | the copyright owner. For the purposes of this definition, "submitted"
54 | means any form of electronic, verbal, or written communication sent
55 | to the Licensor or its representatives, including but not limited to
56 | communication on electronic mailing lists, source code control systems,
57 | and issue tracking systems that are managed by, or on behalf of, the
58 | Licensor for the purpose of discussing and improving the Work, but
59 | excluding communication that is conspicuously marked or otherwise
60 | designated in writing by the copyright owner as "Not a Contribution."
61 |
62 | "Contributor" shall mean Licensor and any individual or Legal Entity
63 | on behalf of whom a Contribution has been received by Licensor and
64 | subsequently incorporated within the Work.
65 |
66 | 2. Grant of Copyright License. Subject to the terms and conditions of
67 | this License, each Contributor hereby grants to You a perpetual,
68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable
69 | copyright license to reproduce, prepare Derivative Works of,
70 | publicly display, publicly perform, sublicense, and distribute the
71 | Work and such Derivative Works in Source or Object form.
72 |
73 | 3. Grant of Patent License. Subject to the terms and conditions of
74 | this License, each Contributor hereby grants to You a perpetual,
75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable
76 | (except as stated in this section) patent license to make, have made,
77 | use, offer to sell, sell, import, and otherwise transfer the Work,
78 | where such license applies only to those patent claims licensable
79 | by such Contributor that are necessarily infringed by their
80 | Contribution(s) alone or by combination of their Contribution(s)
81 | with the Work to which such Contribution(s) was submitted. If You
82 | institute patent litigation against any entity (including a
83 | cross-claim or counterclaim in a lawsuit) alleging that the Work
84 | or a Contribution incorporated within the Work constitutes direct
85 | or contributory patent infringement, then any patent licenses
86 | granted to You under this License for that Work shall terminate
87 | as of the date such litigation is filed.
88 |
89 | 4. Redistribution. You may reproduce and distribute copies of the
90 | Work or Derivative Works thereof in any medium, with or without
91 | modifications, and in Source or Object form, provided that You
92 | meet the following conditions:
93 |
94 | (a) You must give any other recipients of the Work or
95 | Derivative Works a copy of this License; and
96 |
97 | (b) You must cause any modified files to carry prominent notices
98 | stating that You changed the files; and
99 |
100 | (c) You must retain, in the Source form of any Derivative Works
101 | that You distribute, all copyright, patent, trademark, and
102 | attribution notices from the Source form of the Work,
103 | excluding those notices that do not pertain to any part of
104 | the Derivative Works; and
105 |
106 | (d) If the Work includes a "NOTICE" text file as part of its
107 | distribution, then any Derivative Works that You distribute must
108 | include a readable copy of the attribution notices contained
109 | within such NOTICE file, excluding those notices that do not
110 | pertain to any part of the Derivative Works, in at least one
111 | of the following places: within a NOTICE text file distributed
112 | as part of the Derivative Works; within the Source form or
113 | documentation, if provided along with the Derivative Works; or,
114 | within a display generated by the Derivative Works, if and
115 | wherever such third-party notices normally appear. The contents
116 | of the NOTICE file are for informational purposes only and
117 | do not modify the License. You may add Your own attribution
118 | notices within Derivative Works that You distribute, alongside
119 | or as an addendum to the NOTICE text from the Work, provided
120 | that such additional attribution notices cannot be construed
121 | as modifying the License.
122 |
123 | You may add Your own copyright statement to Your modifications and
124 | may provide additional or different license terms and conditions
125 | for use, reproduction, or distribution of Your modifications, or
126 | for any such Derivative Works as a whole, provided Your use,
127 | reproduction, and distribution of the Work otherwise complies with
128 | the conditions stated in this License.
129 |
130 | 5. Submission of Contributions. Unless You explicitly state otherwise,
131 | any Contribution intentionally submitted for inclusion in the Work
132 | by You to the Licensor shall be under the terms and conditions of
133 | this License, without any additional terms or conditions.
134 | Notwithstanding the above, nothing herein shall supersede or modify
135 | the terms of any separate license agreement you may have executed
136 | with Licensor regarding such Contributions.
137 |
138 | 6. Trademarks. This License does not grant permission to use the trade
139 | names, trademarks, service marks, or product names of the Licensor,
140 | except as required for reasonable and customary use in describing the
141 | origin of the Work and reproducing the content of the NOTICE file.
142 |
143 | 7. Disclaimer of Warranty. Unless required by applicable law or
144 | agreed to in writing, Licensor provides the Work (and each
145 | Contributor provides its Contributions) on an "AS IS" BASIS,
146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
147 | implied, including, without limitation, any warranties or conditions
148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
149 | PARTICULAR PURPOSE. You are solely responsible for determining the
150 | appropriateness of using or redistributing the Work and assume any
151 | risks associated with Your exercise of permissions under this License.
152 |
153 | 8. Limitation of Liability. In no event and under no legal theory,
154 | whether in tort (including negligence), contract, or otherwise,
155 | unless required by applicable law (such as deliberate and grossly
156 | negligent acts) or agreed to in writing, shall any Contributor be
157 | liable to You for damages, including any direct, indirect, special,
158 | incidental, or consequential damages of any character arising as a
159 | result of this License or out of the use or inability to use the
160 | Work (including but not limited to damages for loss of goodwill,
161 | work stoppage, computer failure or malfunction, or any and all
162 | other commercial damages or losses), even if such Contributor
163 | has been advised of the possibility of such damages.
164 |
165 | 9. Accepting Warranty or Additional Liability. While redistributing
166 | the Work or Derivative Works thereof, You may choose to offer,
167 | and charge a fee for, acceptance of support, warranty, indemnity,
168 | or other liability obligations and/or rights consistent with this
169 | License. However, in accepting such obligations, You may act only
170 | on Your own behalf and on Your sole responsibility, not on behalf
171 | of any other Contributor, and only if You agree to indemnify,
172 | defend, and hold each Contributor harmless for any liability
173 | incurred by, or claims asserted against, such Contributor by reason
174 | of your accepting any such warranty or additional liability.
175 |
176 | END OF TERMS AND CONDITIONS
177 |
178 | APPENDIX: How to apply the Apache License to your work.
179 |
180 | To apply the Apache License to your work, attach the following
181 | boilerplate notice, with the fields enclosed by brackets "{}"
182 | replaced with your own identifying information. (Don't include
183 | the brackets!) The text should be enclosed in the appropriate
184 | comment syntax for the file format. We also recommend that a
185 | file or class name and description of purpose be included on the
186 | same "printed page" as the copyright notice for easier
187 | identification within third-party archives.
188 |
189 | Copyright {yyyy} {name of copyright owner}
190 |
191 | Licensed under the Apache License, Version 2.0 (the "License");
192 | you may not use this file except in compliance with the License.
193 | You may obtain a copy of the License at
194 |
195 | http://www.apache.org/licenses/LICENSE-2.0
196 |
197 | Unless required by applicable law or agreed to in writing, software
198 | distributed under the License is distributed on an "AS IS" BASIS,
199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
200 | See the License for the specific language governing permissions and
201 | limitations under the License.
202 |
203 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | ***
2 | > :warning: **REMARK:**
3 | >
4 | > Dear community members,
5 | >
6 | > Thanks for your interest in **hazelcast-spark**! This project has become a Hazelcast Community project.
7 | >
 8 | > Hazelcast Inc. has handed this project over to the developer community in the hope that you can benefit from it. It comes with no maintenance guarantee from the original developers, only their goodwill (and time!). We encourage you to use this project however you see fit, including contributing in the form of pull requests or issues.
9 | >
10 | > Feel free to visit our Slack Community for any help and feedback.
11 | ***
12 |
13 | # Table of Contents
14 |
 15 | * [Spark Connector for Hazelcast](#spark-connector-for-hazelcast)
16 | * [Features](#features)
17 | * [Requirements](#requirements)
18 | * [Releases](#releases)
19 | * [Stable](#stable)
20 | * [Snapshots](#snapshots)
21 | * [Configuration](#configuration)
22 | * [Properties](#properties)
23 | * [Creating the SparkContext](#creating-the-sparkcontext)
24 | * [Reading Data from Hazelcast](#reading-data-from-hazelcast)
25 | * [Writing Data to Hazelcast](#writing-data-to-hazelcast)
26 | * [Code Samples](#code-samples)
27 | * [Testing](#testing)
28 | * [Known Limitations](#known-limitations)
29 |
30 |
31 | # Spark Connector for Hazelcast
32 |
 33 | Spark Connector for Hazelcast allows your Spark applications to read data from and write data to a Hazelcast cluster through the Spark RDD API.
34 |
35 | # Related Project - Hazelcast Jet
 36 | Spark integration is one of several Hazelcast big data projects. We also offer a high-performance stream processing engine, [Hazelcast Jet](https://github.com/hazelcast/hazelcast-jet).
37 |
38 | # Features
39 |
40 | - Read/Write support for Hazelcast Maps
41 | - Read/Write support for Hazelcast Caches
42 |
43 | # Requirements
44 |
45 | - Hazelcast 3.7.x or higher
46 | - Apache Spark 1.6.1
47 | - Apache Spark 2.1.0 or higher
48 |
49 | # Releases
50 |
51 | SBT (Scala Build Tool) and Maven dependencies for Spark Connector's stable and snapshot releases are shown in the following sections.
52 |
53 | ## Stable
54 |
55 | **SBT:**
56 |
57 | ```scala
58 | libraryDependencies += "com.hazelcast" % "hazelcast-spark" % "0.1"
59 | ```
60 |
61 |
62 | **Maven:**
63 |
64 | ```xml
 65 | <dependency>
 66 |     <groupId>com.hazelcast</groupId>
 67 |     <artifactId>hazelcast-spark</artifactId>
 68 |     <version>0.1</version>
 69 | </dependency>
70 | ```
71 |
72 | ## Snapshots
73 |
74 | **SBT:**
75 |
 76 | Add the Sonatype snapshot resolver to your SBT build as shown below:
77 |
78 | ```scala
79 | resolvers += "Sonatype OSS Snapshots" at "https://oss.sonatype.org/content/repositories/snapshots"
80 | ```
81 |
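With the resolver in place, you can depend on a snapshot build. The version string below mirrors `buildVersion` in `project/Settings.scala` and is only illustrative; check the snapshot repository for the latest published version:

```scala
// version taken from project/Settings.scala (buildVersion); adjust to the snapshot you need
libraryDependencies += "com.hazelcast" % "hazelcast-spark" % "0.3-SNAPSHOT"
```
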
82 | **Maven:**
83 |
 84 | Add the Sonatype snapshot repository to your `pom.xml` as shown below:
85 |
86 | ```xml
 87 | <repository>
 88 |     <id>sonatype-snapshots</id>
 89 |     <name>Sonatype Snapshot Repository</name>
 90 |     <url>https://oss.sonatype.org/content/repositories/snapshots</url>
 91 |     <releases>
 92 |         <enabled>false</enabled>
 93 |     </releases>
 94 |     <snapshots>
 95 |         <enabled>true</enabled>
 96 |     </snapshots>
 97 | </repository>
98 | ```
99 |
100 |
101 | # Configuration
102 |
 103 | Spark Connector uses the Hazelcast Client to talk to a Hazelcast cluster. You provide the client's connection details through Spark properties. If you have a more complex setup, you can instead supply a fully configured Hazelcast Client XML file to configure the client.
104 |
105 | ## Properties
106 |
107 | You can set the options below for the `SparkConf` object:
108 |
109 | Property name | Description | Default value
110 | -----------------------------------------------|---------------------------------------------------|--------------------
 111 | hazelcast.server.addresses                      | Comma-separated list of Hazelcast server addresses. | 127.0.0.1:5701
 112 | hazelcast.server.groupName                      | Group name of the Hazelcast cluster. | dev
 113 | hazelcast.server.groupPass                      | Group password of the Hazelcast cluster. | dev-pass
 114 | hazelcast.spark.valueBatchingEnabled            | If enabled, retrieves values from Hazelcast in batches for better performance. If disabled, the connector makes a separate call to the cluster for each key to retrieve its most recent value. | true
 115 | hazelcast.spark.readBatchSize                   | Number of entries to read in each batch. | 1000
 116 | hazelcast.spark.writeBatchSize                  | Number of entries to write in each batch. | 1000
 117 | hazelcast.spark.clientXmlPath                   | Location of the Hazelcast Client XML configuration file. | N/A
118 |
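For more complex client setups, you can skip the individual connection properties and point the connector at a full Hazelcast Client XML configuration instead. A minimal sketch (the file path below is hypothetical):

```scala
val conf = new SparkConf()
  .set("hazelcast.spark.clientXmlPath", "/path/to/hazelcast-client.xml") // hypothetical path to your client XML

val sc = new SparkContext("spark://127.0.0.1:7077", "appname", conf)
```
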
119 | ## Creating the SparkContext
120 |
121 | **Scala:**
122 |
123 | ```scala
124 | val conf = new SparkConf()
125 | .set("hazelcast.server.addresses", "127.0.0.1:5701")
126 | .set("hazelcast.server.groupName", "dev")
127 | .set("hazelcast.server.groupPass", "dev-pass")
128 | .set("hazelcast.spark.valueBatchingEnabled", "true")
129 | .set("hazelcast.spark.readBatchSize", "5000")
130 | .set("hazelcast.spark.writeBatchSize", "5000")
131 |
132 | val sc = new SparkContext("spark://127.0.0.1:7077", "appname", conf)
133 | ```
134 |
135 | **Java:**
136 |
137 | ```java
138 |
139 | SparkConf conf = new SparkConf()
140 | .set("hazelcast.server.addresses", "127.0.0.1:5701")
141 | .set("hazelcast.server.groupName", "dev")
142 | .set("hazelcast.server.groupPass", "dev-pass")
143 | .set("hazelcast.spark.valueBatchingEnabled", "true")
144 | .set("hazelcast.spark.readBatchSize", "5000")
145 | .set("hazelcast.spark.writeBatchSize", "5000")
146 |
147 | JavaSparkContext jsc = new JavaSparkContext("spark://127.0.0.1:7077", "appname", conf);
148 | // wrapper to provide Hazelcast related functions to the Spark Context.
149 | HazelcastSparkContext hsc = new HazelcastSparkContext(jsc);
150 | ```
151 |
152 |
153 |
154 | # Reading Data from Hazelcast
155 |
 156 | After the `SparkContext` is created, you can load data stored in Hazelcast Maps and Caches into Spark as RDDs, as shown below:
157 |
158 | **Scala:**
159 |
160 | ```scala
161 | import com.hazelcast.spark.connector.{toSparkContextFunctions}
162 |
163 | // read from map
164 | val rddFromMap = sc.fromHazelcastMap("map-name-to-be-loaded")
165 |
166 | // read from cache
167 | val rddFromCache = sc.fromHazelcastCache("cache-name-to-be-loaded")
168 | ```
169 |
170 | **Java:**
171 |
172 | ```java
173 | // read from map
174 | HazelcastJavaRDD rddFromMap = hsc.fromHazelcastMap("map-name-to-be-loaded")
175 |
176 | // read from cache
177 | HazelcastJavaRDD rddFromCache = hsc.fromHazelcastCache("cache-name-to-be-loaded")
178 | ```
179 |
180 | # Writing Data to Hazelcast
181 |
 182 | After any computation, you can save your `PairRDD`s to the Hazelcast cluster as Maps or Caches as shown below:
183 |
184 |
185 | **Scala:**
186 |
187 | ```scala
188 | import com.hazelcast.spark.connector.{toHazelcastRDDFunctions}
189 | val rdd: RDD[(Int, Long)] = sc.parallelize(1 to 1000).zipWithIndex()
190 |
191 | // write to map
192 | rdd.saveToHazelcastMap(name);
193 |
194 | // write to cache
195 | rdd.saveToHazelcastCache(name);
196 | ```
197 |
198 | **Java:**
199 |
200 | ```java
201 | import static com.hazelcast.spark.connector.HazelcastJavaPairRDDFunctions.javaPairRddFunctions;
202 |
 203 | JavaPairRDD<Integer, Long> rdd = jsc.parallelize(new ArrayList<Integer>() {{
 204 |     add(1);
 205 |     add(2);
 206 |     add(3);
 207 | }}).zipWithIndex();
208 |
209 | // write to map
210 | javaPairRddFunctions(rdd).saveToHazelcastMap(name);
211 |
212 | // write to cache
213 | javaPairRddFunctions(rdd).saveToHazelcastCache(name);
214 | ```
215 |
216 | # Code Samples
217 |
218 | You can find the code samples for Hazelcast Spark Connector at https://github.com/hazelcast/hazelcast-code-samples/tree/master/hazelcast-integration/spark.
219 |
220 |
221 | # Testing
222 |
 223 | Run the `./sbt clean test` command to execute the test suite.
224 |
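To run a single suite, you can use sbt's standard `testOnly` task, for example `./sbt "testOnly com.hazelcast.spark.connector.ReadFromHazelcastTest"` (illustrative; any suite under `src/test` can be selected the same way).
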
225 | # Known Limitations
226 |
 227 | If the underlying Hazelcast data structure is modified (keys inserted or deleted) while Apache Spark is iterating over it, the RDD may return the same entry more than once and may miss other entries, even if they were present when the RDD was constructed and untouched during the iteration. It is therefore recommended to keep the dataset stable while it is being read by Spark.
228 |
--------------------------------------------------------------------------------
/build.sbt:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
--------------------------------------------------------------------------------
/project/Dependencies.scala:
--------------------------------------------------------------------------------
1 | import sbt._
2 | import Versions._
3 |
4 | object Dependencies {
5 | val hazelcast = "com.hazelcast" % "hazelcast" % hazelcastVersion
6 | val hazelcastClient = "com.hazelcast" % "hazelcast-client" % hazelcastVersion
7 | val spark = "org.apache.spark" %% "spark-core" % sparkVersion % "provided"
8 | val jcache = "javax.cache" % "cache-api" % jcacheVersion
9 |
10 | // Test
11 | val junit = "junit" % "junit" % junitVersion % "test"
12 | val hazelcastTest = "com.hazelcast" % "hazelcast" % hazelcastVersion % "provided" classifier "tests"
13 | val junitInterface = "com.novocode" % "junit-interface" % junitInterfaceVersion % "test->default"
14 |
15 | }
16 |
--------------------------------------------------------------------------------
/project/HazelcastBuild.scala:
--------------------------------------------------------------------------------
1 | import sbt.Keys._
2 | import sbt._
3 |
4 | import scala.tools.nsc.Properties
5 |
6 | object HazelcastBuild extends Build {
7 |
8 | import Dependencies._
9 | import Settings._
10 | import Versions._
11 |
12 | val commonDeps = Seq(
13 | hazelcastClient,
14 | spark,
15 | jcache,
16 | junit,
17 | junitInterface
18 | )
19 | lazy val hazelcastSpark = Project(
20 | buildName,
21 | file("."),
22 | settings = buildSettings ++ Seq(
23 | libraryDependencies ++= commonDeps,
24 | publishArtifact in Test := false,
25 | crossPaths := false,
26 | publishMavenStyle := true,
27 | publishTo := {
28 | val nexus = "https://oss.sonatype.org/"
29 | if (isSnapshot.value)
30 | Some("snapshots" at nexus + "content/repositories/snapshots")
31 | else
32 | Some("releases" at nexus + "service/local/staging/deploy/maven2")
33 | },
34 | credentials += Credentials(file(sys.env.getOrElse("deployCredentials", Properties.userHome + "/.ivy2/.credentials"))),
35 | ivyXML :=
36 |
37 |
38 |
39 |
40 |
41 | )
42 | )
43 | }
--------------------------------------------------------------------------------
/project/Settings.scala:
--------------------------------------------------------------------------------
1 | import sbt.Keys._
2 | import sbt._
3 | import sbtassembly.AssemblyKeys._
4 |
5 | object Settings {
6 | val buildName = "hazelcast-spark"
7 | val buildVersion = "0.3-SNAPSHOT"
8 | val buildScalaVersion = "2.11.8"
9 |
10 | val buildSettings = Defaults.coreDefaultSettings ++ Seq(
11 | name := buildName,
12 | version := buildVersion,
13 | organization := "com.hazelcast",
14 | organizationName := "Hazelcast, Inc.",
15 | organizationHomepage := Some(new URL("http://www.hazelcast.com/")),
16 | scalaVersion := buildScalaVersion,
17 | shellPrompt := ShellPrompt.buildShellPrompt,
18 | resolvers += "Sonatype OSS Snapshots" at "https://oss.sonatype.org/content/repositories/snapshots",
19 | resolvers += Resolver.mavenLocal,
20 | parallelExecution in Test := false,
21 | test in assembly := {},
 22 |     pomExtra := <url>https://github.com/hazelcast/hazelcast-spark/</url>
 23 |       <licenses>
 24 |         <license>
 25 |           <name>The Apache Software License, Version 2.0</name>
 26 |           <url>http://www.apache.org/licenses/LICENSE-2.0.txt</url>
 27 |           <distribution>repo</distribution>
 28 |         </license>
 29 |       </licenses>
 30 |       <scm>
 31 |         <connection>scm:git:git://github.com/hazelcast/hazelcast-spark.git</connection>
 32 |         <developerConnection>scm:git:git@github.com:hazelcast/hazelcast-spark.git</developerConnection>
 33 |         <url>https://github.com/hazelcast/hazelcast-spark/</url>
 34 |       </scm>
 35 |       <developers>
 36 |         <developer>
 37 |           <id>eminn</id>
 38 |           <name>emin demirci</name>
 39 |           <email>emin@hazelcast.com</email>
 40 |         </developer>
 41 |       </developers>
 42 |       <issueManagement>
 43 |         <system>Github</system>
 44 |         <url>https://github.com/hazelcast/hazelcast-spark/issues</url>
 45 |       </issueManagement>
46 | )
47 |
48 | }
49 |
--------------------------------------------------------------------------------
/project/ShellPrompt.scala:
--------------------------------------------------------------------------------
1 | import sbt._
2 |
3 | object ShellPrompt {
4 |
5 | object devnull extends ProcessLogger {
6 | def info(s: => String) {}
7 |
8 | def error(s: => String) {}
9 |
10 | def buffer[T](f: => T): T = f
11 | }
12 |
13 | def currBranch = (
14 | ("git status -sb" lines_! devnull headOption)
15 | getOrElse "-" stripPrefix "## "
16 | )
17 |
18 | val buildShellPrompt = {
19 | (state: State) => {
20 | val currProject = Project.extract(state).currentProject.id
21 | "%s:%s:%s> ".format(
22 | currProject, currBranch, Settings.buildVersion
23 | )
24 | }
25 | }
26 | }
27 |
--------------------------------------------------------------------------------
/project/Versions.scala:
--------------------------------------------------------------------------------
1 | object Versions {
2 | val hazelcastVersion = "3.7"
3 | val sparkVersion = "2.1.0"
4 | val jcacheVersion = "1.0.0"
5 | val junitVersion = "4.12"
6 | val junitInterfaceVersion = "0.11"
7 | }
--------------------------------------------------------------------------------
/project/build.properties:
--------------------------------------------------------------------------------
1 | sbt.version = 0.13.13
2 |
--------------------------------------------------------------------------------
/project/plugins.sbt:
--------------------------------------------------------------------------------
1 | logLevel := Level.Warn
2 | addSbtPlugin("com.eed3si9n" % "sbt-assembly" % "0.14.3")
3 | addSbtPlugin("com.jsuereth" % "sbt-pgp" % "1.0.1")
4 | testOptions += Tests.Argument(TestFrameworks.JUnit, "-q", "-v")
5 |
6 |
--------------------------------------------------------------------------------
/sbt:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 | #
3 | # A more capable sbt runner, coincidentally also called sbt.
4 | # Author: Paul Phillips
5 |
6 | set -o pipefail
7 |
8 | declare -r sbt_release_version="0.13.13"
9 | declare -r sbt_unreleased_version="0.13.13"
10 |
11 | declare -r latest_212="2.12.1"
12 | declare -r latest_211="2.11.8"
13 | declare -r latest_210="2.10.6"
14 | declare -r latest_29="2.9.3"
15 | declare -r latest_28="2.8.2"
16 |
17 | declare -r buildProps="project/build.properties"
18 |
19 | declare -r sbt_launch_ivy_release_repo="http://repo.typesafe.com/typesafe/ivy-releases"
20 | declare -r sbt_launch_ivy_snapshot_repo="https://repo.scala-sbt.org/scalasbt/ivy-snapshots"
21 | declare -r sbt_launch_mvn_release_repo="http://repo.scala-sbt.org/scalasbt/maven-releases"
22 | declare -r sbt_launch_mvn_snapshot_repo="http://repo.scala-sbt.org/scalasbt/maven-snapshots"
23 |
24 | declare -r default_jvm_opts_common="-Xms512m -Xmx1536m -Xss2m"
25 | declare -r noshare_opts="-Dsbt.global.base=project/.sbtboot -Dsbt.boot.directory=project/.boot -Dsbt.ivy.home=project/.ivy"
26 |
27 | declare sbt_jar sbt_dir sbt_create sbt_version sbt_script sbt_new
28 | declare sbt_explicit_version
29 | declare verbose noshare batch trace_level
30 | declare sbt_saved_stty debugUs
31 |
32 | declare java_cmd="java"
33 | declare sbt_launch_dir="$HOME/.sbt/launchers"
34 | declare sbt_launch_repo
35 |
36 | # pull -J and -D options to give to java.
37 | declare -a java_args scalac_args sbt_commands residual_args
38 |
39 | # args to jvm/sbt via files or environment variables
40 | declare -a extra_jvm_opts extra_sbt_opts
41 |
42 | echoerr () { echo >&2 "$@"; }
43 | vlog () { [[ -n "$verbose" ]] && echoerr "$@"; }
44 | die () { echo "Aborting: $@" ; exit 1; }
45 |
46 | # restore stty settings (echo in particular)
47 | onSbtRunnerExit() {
48 | [[ -n "$sbt_saved_stty" ]] || return
49 | vlog ""
50 | vlog "restoring stty: $sbt_saved_stty"
51 | stty "$sbt_saved_stty"
52 | unset sbt_saved_stty
53 | }
54 |
55 | # save stty and trap exit, to ensure echo is re-enabled if we are interrupted.
56 | trap onSbtRunnerExit EXIT
57 | sbt_saved_stty="$(stty -g 2>/dev/null)"
58 | vlog "Saved stty: $sbt_saved_stty"
59 |
60 | # this seems to cover the bases on OSX, and someone will
61 | # have to tell me about the others.
62 | get_script_path () {
63 | local path="$1"
64 | [[ -L "$path" ]] || { echo "$path" ; return; }
65 |
66 | local target="$(readlink "$path")"
67 | if [[ "${target:0:1}" == "/" ]]; then
68 | echo "$target"
69 | else
70 | echo "${path%/*}/$target"
71 | fi
72 | }
73 |
74 | declare -r script_path="$(get_script_path "$BASH_SOURCE")"
75 | declare -r script_name="${script_path##*/}"
76 |
77 | init_default_option_file () {
78 | local overriding_var="${!1}"
79 | local default_file="$2"
80 | if [[ ! -r "$default_file" && "$overriding_var" =~ ^@(.*)$ ]]; then
81 | local envvar_file="${BASH_REMATCH[1]}"
82 | if [[ -r "$envvar_file" ]]; then
83 | default_file="$envvar_file"
84 | fi
85 | fi
86 | echo "$default_file"
87 | }
88 |
89 | declare sbt_opts_file="$(init_default_option_file SBT_OPTS .sbtopts)"
90 | declare jvm_opts_file="$(init_default_option_file JVM_OPTS .jvmopts)"
91 |
92 | build_props_sbt () {
93 | [[ -r "$buildProps" ]] && \
94 | grep '^sbt\.version' "$buildProps" | tr '=\r' ' ' | awk '{ print $2; }'
95 | }
96 |
97 | update_build_props_sbt () {
98 | local ver="$1"
99 | local old="$(build_props_sbt)"
100 |
101 | [[ -r "$buildProps" ]] && [[ "$ver" != "$old" ]] && {
102 | perl -pi -e "s/^sbt\.version\b.*\$/sbt.version=${ver}/" "$buildProps"
103 | grep -q '^sbt.version[ =]' "$buildProps" || printf "\nsbt.version=%s\n" "$ver" >> "$buildProps"
104 |
105 | vlog "!!!"
106 | vlog "!!! Updated file $buildProps setting sbt.version to: $ver"
107 | vlog "!!! Previous value was: $old"
108 | vlog "!!!"
109 | }
110 | }
111 |
112 | set_sbt_version () {
113 | sbt_version="${sbt_explicit_version:-$(build_props_sbt)}"
114 | [[ -n "$sbt_version" ]] || sbt_version=$sbt_release_version
115 | export sbt_version
116 | }
117 |
118 | url_base () {
119 | local version="$1"
120 |
121 | case "$version" in
122 | 0.7.*) echo "http://simple-build-tool.googlecode.com" ;;
123 | 0.10.* ) echo "$sbt_launch_ivy_release_repo" ;;
124 | 0.11.[12]) echo "$sbt_launch_ivy_release_repo" ;;
125 | 0.*-[0-9][0-9][0-9][0-9][0-9][0-9][0-9][0-9]-[0-9][0-9][0-9][0-9][0-9][0-9]) # ie "*-yyyymmdd-hhMMss"
126 | echo "$sbt_launch_ivy_snapshot_repo" ;;
127 | 0.*) echo "$sbt_launch_ivy_release_repo" ;;
128 | *-[0-9][0-9][0-9][0-9][0-9][0-9][0-9][0-9]-[0-9][0-9][0-9][0-9][0-9][0-9]) # ie "*-yyyymmdd-hhMMss"
129 | echo "$sbt_launch_mvn_snapshot_repo" ;;
130 | *) echo "$sbt_launch_mvn_release_repo" ;;
131 | esac
132 | }
133 |
134 | make_url () {
135 | local version="$1"
136 |
137 | local base="${sbt_launch_repo:-$(url_base "$version")}"
138 |
139 | case "$version" in
140 | 0.7.*) echo "$base/files/sbt-launch-0.7.7.jar" ;;
141 | 0.10.* ) echo "$base/org.scala-tools.sbt/sbt-launch/$version/sbt-launch.jar" ;;
142 | 0.11.[12]) echo "$base/org.scala-tools.sbt/sbt-launch/$version/sbt-launch.jar" ;;
143 | 0.*) echo "$base/org.scala-sbt/sbt-launch/$version/sbt-launch.jar" ;;
144 | *) echo "$base/org/scala-sbt/sbt-launch/$version/sbt-launch.jar" ;;
145 | esac
146 | }
147 |
148 | addJava () { vlog "[addJava] arg = '$1'" ; java_args+=("$1"); }
149 | addSbt () { vlog "[addSbt] arg = '$1'" ; sbt_commands+=("$1"); }
150 | addScalac () { vlog "[addScalac] arg = '$1'" ; scalac_args+=("$1"); }
151 | addResidual () { vlog "[residual] arg = '$1'" ; residual_args+=("$1"); }
152 |
153 | addResolver () { addSbt "set resolvers += $1"; }
154 | addDebugger () { addJava "-Xdebug" ; addJava "-Xrunjdwp:transport=dt_socket,server=y,suspend=n,address=$1"; }
155 | setThisBuild () {
156 | vlog "[addBuild] args = '$@'"
157 | local key="$1" && shift
158 | addSbt "set $key in ThisBuild := $@"
159 | }
160 | setScalaVersion () {
161 | [[ "$1" == *"-SNAPSHOT" ]] && addResolver 'Resolver.sonatypeRepo("snapshots")'
162 | addSbt "++ $1"
163 | }
164 | setJavaHome () {
165 | java_cmd="$1/bin/java"
166 | setThisBuild javaHome "_root_.scala.Some(file(\"$1\"))"
167 | export JAVA_HOME="$1"
168 | export JDK_HOME="$1"
169 | export PATH="$JAVA_HOME/bin:$PATH"
170 | }
171 |
172 | getJavaVersion() { "$1" -version 2>&1 | grep -E -e '(java|openjdk) version' | awk '{ print $3 }' | tr -d \"; }
173 |
174 | checkJava() {
175 | # Warn if there is a Java version mismatch between PATH and JAVA_HOME/JDK_HOME
176 |
177 | [[ -n "$JAVA_HOME" && -e "$JAVA_HOME/bin/java" ]] && java="$JAVA_HOME/bin/java"
178 | [[ -n "$JDK_HOME" && -e "$JDK_HOME/lib/tools.jar" ]] && java="$JDK_HOME/bin/java"
179 |
180 | if [[ -n "$java" ]]; then
181 | pathJavaVersion=$(getJavaVersion java)
182 | homeJavaVersion=$(getJavaVersion "$java")
183 | if [[ "$pathJavaVersion" != "$homeJavaVersion" ]]; then
184 | echoerr "Warning: Java version mismatch between PATH and JAVA_HOME/JDK_HOME, sbt will use the one in PATH"
185 | echoerr " Either: fix your PATH, remove JAVA_HOME/JDK_HOME or use -java-home"
186 | echoerr " java version from PATH: $pathJavaVersion"
187 | echoerr " java version from JAVA_HOME/JDK_HOME: $homeJavaVersion"
188 | fi
189 | fi
190 | }
191 |
192 | java_version () {
193 | local version=$(getJavaVersion "$java_cmd")
194 | vlog "Detected Java version: $version"
195 | echo "${version:2:1}"
196 | }
197 |
198 | # MaxPermSize critical on pre-8 JVMs but incurs noisy warning on 8+
199 | default_jvm_opts () {
200 | local v="$(java_version)"
201 | if [[ $v -ge 8 ]]; then
202 | echo "$default_jvm_opts_common"
203 | else
204 | echo "-XX:MaxPermSize=384m $default_jvm_opts_common"
205 | fi
206 | }
207 |
208 | build_props_scala () {
209 | if [[ -r "$buildProps" ]]; then
210 | versionLine="$(grep '^build.scala.versions' "$buildProps")"
211 | versionString="${versionLine##build.scala.versions=}"
212 | echo "${versionString%% .*}"
213 | fi
214 | }
215 |
216 | execRunner () {
217 | # print the arguments one to a line, quoting any containing spaces
218 | vlog "# Executing command line:" && {
219 | for arg; do
220 | if [[ -n "$arg" ]]; then
221 | if printf "%s\n" "$arg" | grep -q ' '; then
222 | printf >&2 "\"%s\"\n" "$arg"
223 | else
224 | printf >&2 "%s\n" "$arg"
225 | fi
226 | fi
227 | done
228 | vlog ""
229 | }
230 |
 231 |   [[ -n "$batch" ]] && exec </dev/null
 254 |     if which curl >/dev/null; then
255 | curl --fail --silent --location "$url" --output "$jar"
256 | elif which wget >/dev/null; then
257 | wget -q -O "$jar" "$url"
258 | fi
259 | } && [[ -r "$jar" ]]
260 | }
261 |
262 | acquire_sbt_jar () {
263 | {
264 | sbt_jar="$(jar_file "$sbt_version")"
265 | [[ -r "$sbt_jar" ]]
266 | } || {
267 | sbt_jar="$HOME/.ivy2/local/org.scala-sbt/sbt-launch/$sbt_version/jars/sbt-launch.jar"
268 | [[ -r "$sbt_jar" ]]
269 | } || {
270 | sbt_jar="$(jar_file "$sbt_version")"
271 | download_url "$(make_url "$sbt_version")" "$sbt_jar"
272 | }
273 | }
274 |
275 | usage () {
276 | set_sbt_version
 277 |   cat <<EOM
 295 |   -trace <level>     display stack traces with a max of <level> frames (default: -1, traces suppressed)
 296 |   -debug-inc         enable debugging log for the incremental compiler
 297 |   -no-colors         disable ANSI color codes
 298 |   -sbt-create        start sbt even if current directory contains no sbt project
 299 |   -sbt-dir  <path>   path to global settings/plugins directory (default: ~/.sbt/<version>)
 300 |   -sbt-boot <path>   path to shared boot directory (default: ~/.sbt/boot in 0.11+)
 301 |   -ivy      <path>   path to local Ivy repository (default: ~/.ivy2)
 302 |   -no-share          use all local caches; no sharing
 303 |   -offline           put sbt in offline mode
 304 |   -jvm-debug <port>  Turn on JVM debugging, open at the given port.
 305 |   -batch             Disable interactive mode
 306 |   -prompt <expr>     Set the sbt prompt; in expr, 's' is the State and 'e' is Extracted
 307 |   -script <file>     Run the specified file as a scala script
308 |
309 | # sbt version (default: sbt.version from $buildProps if present, otherwise $sbt_release_version)
 310 |   -sbt-force-latest         force the use of the latest release of sbt: $sbt_release_version
 311 |   -sbt-version <version>    use the specified version of sbt (default: $sbt_release_version)
 312 |   -sbt-dev                  use the latest pre-release version of sbt: $sbt_unreleased_version
 313 |   -sbt-jar <path>           use the specified jar as the sbt launcher
 314 |   -sbt-launch-dir <path>    directory to hold sbt launchers (default: $sbt_launch_dir)
 315 |   -sbt-launch-repo <url>    repo url for downloading sbt launcher jar (default: $(url_base "$sbt_version"))
316 |
317 | # scala version (default: as chosen by sbt)
318 | -28 use $latest_28
319 | -29 use $latest_29
320 | -210 use $latest_210
321 | -211 use $latest_211
322 | -212 use $latest_212
 323 |   -scala-home <path>        use the scala build at the specified directory
 324 |   -scala-version <version>  use the specified version of scala
 325 |   -binary-version <version> use the specified scala version when searching for dependencies
326 |
327 | # java version (default: java from PATH, currently $(java -version 2>&1 | grep version))
 328 |   -java-home <path>         alternate JAVA_HOME
329 |
330 | # passing options to the jvm - note it does NOT use JAVA_OPTS due to pollution
331 | # The default set is used if JVM_OPTS is unset and no -jvm-opts file is found
 332 |   <default>         $(default_jvm_opts)
 333 |   JVM_OPTS          environment variable holding either the jvm args directly, or
 334 |                     the reference to a file containing jvm args if given path is prepended by '@' (e.g. '@/etc/jvmopts')
 335 |                     Note: "@"-file is overridden by local '.jvmopts' or '-jvm-opts' argument.
 336 |   -jvm-opts <path>  file containing jvm args (if not given, .jvmopts in project root is used if present)
337 | -Dkey=val pass -Dkey=val directly to the jvm
338 | -J-X pass option -X directly to the jvm (-J is stripped)
339 |
340 | # passing options to sbt, OR to this runner
341 | SBT_OPTS environment variable holding either the sbt args directly, or
342 | the reference to a file containing sbt args if given path is prepended by '@' (e.g. '@/etc/sbtopts')
343 | Note: "@"-file is overridden by local '.sbtopts' or '-sbt-opts' argument.
 344 |   -sbt-opts <path>  file containing sbt args (if not given, .sbtopts in project root is used if present)
345 | -S-X add -X to sbt's scalacOptions (-S is stripped)
346 | EOM
347 | }
348 |
349 | process_args () {
350 | require_arg () {
351 | local type="$1"
352 | local opt="$2"
353 | local arg="$3"
354 |
355 | if [[ -z "$arg" ]] || [[ "${arg:0:1}" == "-" ]]; then
356 | die "$opt requires <$type> argument"
357 | fi
358 | }
359 | while [[ $# -gt 0 ]]; do
360 | case "$1" in
361 | -h|-help) usage; exit 1 ;;
362 | -v) verbose=true && shift ;;
363 | -d) addSbt "--debug" && shift ;;
364 | -w) addSbt "--warn" && shift ;;
365 | -q) addSbt "--error" && shift ;;
366 | -x) debugUs=true && shift ;;
367 | -trace) require_arg integer "$1" "$2" && trace_level="$2" && shift 2 ;;
368 | -ivy) require_arg path "$1" "$2" && addJava "-Dsbt.ivy.home=$2" && shift 2 ;;
369 | -no-colors) addJava "-Dsbt.log.noformat=true" && shift ;;
370 | -no-share) noshare=true && shift ;;
371 | -sbt-boot) require_arg path "$1" "$2" && addJava "-Dsbt.boot.directory=$2" && shift 2 ;;
372 | -sbt-dir) require_arg path "$1" "$2" && sbt_dir="$2" && shift 2 ;;
373 | -debug-inc) addJava "-Dxsbt.inc.debug=true" && shift ;;
374 | -offline) addSbt "set offline in Global := true" && shift ;;
375 | -jvm-debug) require_arg port "$1" "$2" && addDebugger "$2" && shift 2 ;;
376 | -batch) batch=true && shift ;;
377 | -prompt) require_arg "expr" "$1" "$2" && setThisBuild shellPrompt "(s => { val e = Project.extract(s) ; $2 })" && shift 2 ;;
378 | -script) require_arg file "$1" "$2" && sbt_script="$2" && addJava "-Dsbt.main.class=sbt.ScriptMain" && shift 2 ;;
379 |
380 | -sbt-create) sbt_create=true && shift ;;
381 | -sbt-jar) require_arg path "$1" "$2" && sbt_jar="$2" && shift 2 ;;
382 | -sbt-version) require_arg version "$1" "$2" && sbt_explicit_version="$2" && shift 2 ;;
383 | -sbt-force-latest) sbt_explicit_version="$sbt_release_version" && shift ;;
384 | -sbt-dev) sbt_explicit_version="$sbt_unreleased_version" && shift ;;
385 | -sbt-launch-dir) require_arg path "$1" "$2" && sbt_launch_dir="$2" && shift 2 ;;
386 | -sbt-launch-repo) require_arg path "$1" "$2" && sbt_launch_repo="$2" && shift 2 ;;
387 | -scala-version) require_arg version "$1" "$2" && setScalaVersion "$2" && shift 2 ;;
388 | -binary-version) require_arg version "$1" "$2" && setThisBuild scalaBinaryVersion "\"$2\"" && shift 2 ;;
389 | -scala-home) require_arg path "$1" "$2" && setThisBuild scalaHome "_root_.scala.Some(file(\"$2\"))" && shift 2 ;;
390 | -java-home) require_arg path "$1" "$2" && setJavaHome "$2" && shift 2 ;;
391 | -sbt-opts) require_arg path "$1" "$2" && sbt_opts_file="$2" && shift 2 ;;
392 | -jvm-opts) require_arg path "$1" "$2" && jvm_opts_file="$2" && shift 2 ;;
393 |
394 | -D*) addJava "$1" && shift ;;
395 | -J*) addJava "${1:2}" && shift ;;
396 | -S*) addScalac "${1:2}" && shift ;;
397 | -28) setScalaVersion "$latest_28" && shift ;;
398 | -29) setScalaVersion "$latest_29" && shift ;;
399 | -210) setScalaVersion "$latest_210" && shift ;;
400 | -211) setScalaVersion "$latest_211" && shift ;;
401 | -212) setScalaVersion "$latest_212" && shift ;;
402 | new) sbt_new=true && sbt_explicit_version="$sbt_release_version" && addResidual "$1" && shift ;;
403 | *) addResidual "$1" && shift ;;
404 | esac
405 | done
406 | }
407 |
408 | # process the direct command line arguments
409 | process_args "$@"
410 |
411 | # skip #-styled comments and blank lines
412 | readConfigFile() {
413 | local end=false
414 | until $end; do
415 | read || end=true
416 | [[ $REPLY =~ ^# ]] || [[ -z $REPLY ]] || echo "$REPLY"
417 | done < "$1"
418 | }
419 |
420 | # if there are file/environment sbt_opts, process again so we
421 | # can supply args to this runner
422 | if [[ -r "$sbt_opts_file" ]]; then
423 | vlog "Using sbt options defined in file $sbt_opts_file"
424 | while read opt; do extra_sbt_opts+=("$opt"); done < <(readConfigFile "$sbt_opts_file")
425 | elif [[ -n "$SBT_OPTS" && ! ("$SBT_OPTS" =~ ^@.*) ]]; then
426 | vlog "Using sbt options defined in variable \$SBT_OPTS"
427 | extra_sbt_opts=( $SBT_OPTS )
428 | else
429 | vlog "No extra sbt options have been defined"
430 | fi
431 |
432 | [[ -n "${extra_sbt_opts[*]}" ]] && process_args "${extra_sbt_opts[@]}"
433 |
434 | # reset "$@" to the residual args
435 | set -- "${residual_args[@]}"
436 | argumentCount=$#
437 |
438 | # set sbt version
439 | set_sbt_version
440 |
441 | checkJava
442 |
443 | # only exists in 0.12+
444 | setTraceLevel() {
445 | case "$sbt_version" in
446 | "0.7."* | "0.10."* | "0.11."* ) echoerr "Cannot set trace level in sbt version $sbt_version" ;;
447 | *) setThisBuild traceLevel $trace_level ;;
448 | esac
449 | }
450 |
451 | # set scalacOptions if we were given any -S opts
452 | [[ ${#scalac_args[@]} -eq 0 ]] || addSbt "set scalacOptions in ThisBuild += \"${scalac_args[@]}\""
453 |
454 | # Update build.properties on disk to set explicit version - sbt gives us no choice
455 | [[ -n "$sbt_explicit_version" && -z "$sbt_new" ]] && update_build_props_sbt "$sbt_explicit_version"
456 | vlog "Detected sbt version $sbt_version"
457 |
458 | if [[ -n "$sbt_script" ]]; then
459 | residual_args=( $sbt_script ${residual_args[@]} )
460 | else
461 | # no args - alert them there's stuff in here
462 | (( argumentCount > 0 )) || {
463 | vlog "Starting $script_name: invoke with -help for other options"
464 | residual_args=( shell )
465 | }
466 | fi
467 |
468 | # verify this is an sbt dir, -create was given or user attempts to run a scala script
 469 | [[ -r ./build.sbt || -d ./project || -n "$sbt_create" || -n "$sbt_script" || -n "$sbt_new" ]] || {
 470 |   cat <<EOM
--------------------------------------------------------------------------------
/src/main/java/com/hazelcast/spark/connector/HazelcastJavaPairRDDFunctions.java:
--------------------------------------------------------------------------------
 14 |     public static <K, V> HazelcastRDDFunctions<K, V> javaPairRddFunctions(JavaPairRDD<K, V> rdd) {
470 | cat < HazelcastRDDFunctions javaPairRddFunctions(JavaPairRDD rdd) {
15 | return new HazelcastRDDFunctions<>(rdd.rdd());
16 | }
17 |
18 | }
19 |
--------------------------------------------------------------------------------
/src/main/java/com/hazelcast/spark/connector/HazelcastSparkContext.java:
--------------------------------------------------------------------------------
1 | package com.hazelcast.spark.connector;
2 |
3 | import com.hazelcast.spark.connector.rdd.HazelcastJavaRDD;
4 | import com.hazelcast.spark.connector.rdd.HazelcastRDD;
5 | import com.hazelcast.spark.connector.util.HazelcastUtil;
6 | import org.apache.spark.api.java.JavaSparkContext;
7 | import scala.reflect.ClassTag;
8 |
9 | /**
10 | * Wrapper over {@link JavaSparkContext} to bring Hazelcast related functionality to the Spark Context.
11 | */
12 | public class HazelcastSparkContext {
13 |
14 | private final JavaSparkContext jsc;
15 | private final HazelcastSparkContextFunctions hazelcastSparkContextFunctions;
16 |
17 | public HazelcastSparkContext(JavaSparkContext jsc) {
18 | this.jsc = jsc;
19 | this.hazelcastSparkContextFunctions = new HazelcastSparkContextFunctions(jsc.sc());
20 | }
21 |
 22 |     public <K, V> HazelcastJavaRDD<K, V> fromHazelcastCache(String cacheName) {
 23 |         HazelcastRDD<K, V> hazelcastRDD = hazelcastSparkContextFunctions.fromHazelcastCache(cacheName);
 24 |         ClassTag<K> kt = HazelcastUtil.getClassTag();
 25 |         ClassTag<V> vt = HazelcastUtil.getClassTag();
 26 |         return new HazelcastJavaRDD<>(hazelcastRDD, kt, vt);
 27 |     }
 28 |
 29 |     public <K, V> HazelcastJavaRDD<K, V> fromHazelcastMap(String mapName) {
 30 |         HazelcastRDD<K, V> hazelcastRDD = hazelcastSparkContextFunctions.fromHazelcastMap(mapName);
 31 |         ClassTag<K> kt = HazelcastUtil.getClassTag();
 32 |         ClassTag<V> vt = HazelcastUtil.getClassTag();
 33 |         return new HazelcastJavaRDD<>(hazelcastRDD, kt, vt);
34 | }
35 | }
36 |
--------------------------------------------------------------------------------
/src/main/resources/log4j.properties:
--------------------------------------------------------------------------------
1 | # Set everything to be logged to the console
2 | log4j.rootCategory=INFO, console
3 | log4j.appender.console=org.apache.log4j.ConsoleAppender
4 | log4j.appender.console.target=System.err
5 | log4j.appender.console.layout=org.apache.log4j.PatternLayout
6 | log4j.appender.console.layout.ConversionPattern=%d{yy/MM/dd HH:mm:ss} %p %c{1}: %m%n
7 | # Set the default spark-shell log level to WARN. When running the spark-shell, the
8 | # log level for this class is used to overwrite the root logger's log level, so that
9 | # the user can have different defaults for the shell and regular Spark apps.
10 | log4j.logger.org.apache.spark.repl.Main=WARN
11 | # Settings to quiet third party logs that are too verbose
12 | log4j.logger.org.spark_project.jetty=WARN
13 | log4j.logger.org.spark_project.jetty.util.component.AbstractLifeCycle=ERROR
14 | log4j.logger.org.apache.spark.repl.SparkIMain$exprTyper=INFO
15 | log4j.logger.org.apache.spark.repl.SparkILoop$SparkILoopInterpreter=INFO
16 | # SPARK-9183: Settings to avoid annoying messages when looking up nonexistent UDFs in SparkSQL with Hive support
17 | log4j.logger.org.apache.hadoop.hive.metastore.RetryingHMSHandler=FATAL
18 | log4j.logger.org.apache.hadoop.hive.ql.exec.FunctionRegistry=ERROR
19 |
--------------------------------------------------------------------------------
/src/main/scala/com/hazelcast/spark/connector/HazelcastSparkContextFunctions.scala:
--------------------------------------------------------------------------------
1 | package com.hazelcast.spark.connector
2 |
3 | import com.hazelcast.spark.connector.conf.SerializableConf
4 | import com.hazelcast.spark.connector.rdd.HazelcastRDD
5 | import com.hazelcast.spark.connector.util.CleanupUtil.addCleanupListener
6 | import org.apache.spark.SparkContext
7 |
8 | class HazelcastSparkContextFunctions(@transient val sc: SparkContext) extends Serializable {
9 |
10 | def fromHazelcastCache[K, V](cacheName: String): HazelcastRDD[K, V] = {
11 | addCleanupListener(sc)
12 | new HazelcastRDD[K, V](sc, cacheName, true, new SerializableConf(sc))
13 | }
14 |
15 | def fromHazelcastMap[K, V](mapName: String): HazelcastRDD[K, V] = {
16 | addCleanupListener(sc)
17 | new HazelcastRDD[K, V](sc, mapName, false, new SerializableConf(sc))
18 | }
19 |
20 | }
21 |
--------------------------------------------------------------------------------
/src/main/scala/com/hazelcast/spark/connector/conf/ConfigurationProperties.scala:
--------------------------------------------------------------------------------
1 | package com.hazelcast.spark.connector.conf
2 |
3 | import org.apache.spark.SparkContext
4 |
5 | import scala.util.Try
6 |
7 | object ConfigurationProperties {
8 |
9 | val READ_BATCH_SIZE_PROP: String = "hazelcast.spark.readBatchSize"
10 | val WRITE_BATCH_SIZE_PROP: String = "hazelcast.spark.writeBatchSize"
11 | val BATCH_VALUES_PROP: String = "hazelcast.spark.valueBatchingEnabled"
12 | val SERVER_ADDRESS_PROP: String = "hazelcast.server.addresses"
13 | val SERVER_GROUP_NAME_PROP: String = "hazelcast.server.groupName"
14 | val SERVER_GROUP_PASS_PROP: String = "hazelcast.server.groupPass"
15 | val CLIENT_XML_PATH_PROP: String = "hazelcast.spark.clientXmlPath"
16 |
17 | def getReadBatchSize(sc: SparkContext): Int = {
18 | Try(sc.getConf.get(READ_BATCH_SIZE_PROP).toInt).getOrElse(1000)
19 | }
20 |
21 | def getWriteBatchSize(sc: SparkContext): Int = {
22 | Try(sc.getConf.get(WRITE_BATCH_SIZE_PROP).toInt).getOrElse(1000)
23 | }
24 |
25 | def isValueBatchingEnabled(sc: SparkContext): Boolean = {
26 | Try(sc.getConf.get(BATCH_VALUES_PROP).toBoolean).getOrElse(true)
27 | }
28 |
29 | def getServerAddress(sc: SparkContext): String = {
30 | sc.getConf.get(SERVER_ADDRESS_PROP)
31 | }
32 |
33 | def getServerGroupName(sc: SparkContext): String = {
34 | Try(sc.getConf.get(SERVER_GROUP_NAME_PROP)).getOrElse("dev")
35 | }
36 |
37 | def getServerGroupPass(sc: SparkContext): String = {
38 | Try(sc.getConf.get(SERVER_GROUP_PASS_PROP)).getOrElse("dev-pass")
39 | }
40 |
41 | def getClientXmlPath(sc: SparkContext): String = {
42 | Try(sc.getConf.get(CLIENT_XML_PATH_PROP)).getOrElse(null)
43 | }
44 |
45 | }
46 |
--------------------------------------------------------------------------------
/src/main/scala/com/hazelcast/spark/connector/conf/SerializableConf.scala:
--------------------------------------------------------------------------------
1 | package com.hazelcast.spark.connector.conf
2 |
3 | import com.hazelcast.spark.connector.conf.ConfigurationProperties._
4 | import org.apache.spark.SparkContext
5 |
6 | class SerializableConf(sc: SparkContext) extends Serializable {
7 |
8 | val serverAddresses: String = getServerAddress(sc)
9 | val xmlPath: String = getClientXmlPath(sc)
10 | val groupName: String = getServerGroupName(sc)
11 | val groupPass: String = getServerGroupPass(sc)
12 | val readBatchSize: Int = getReadBatchSize(sc)
13 | val writeBatchSize: Int = getWriteBatchSize(sc)
14 | val valueBatchingEnabled: Boolean = isValueBatchingEnabled(sc)
15 |
16 |
17 | override def toString = s"SerializableConf(serverAddresses=$serverAddresses, xmlPath=$xmlPath, groupName=$groupName, groupPass=$groupPass, readBatchSize=$readBatchSize, writeBatchSize=$writeBatchSize, valueBatchingEnabled=$valueBatchingEnabled)"
18 | }
19 |
--------------------------------------------------------------------------------
/src/main/scala/com/hazelcast/spark/connector/iterator/CacheIterator.scala:
--------------------------------------------------------------------------------
1 | package com.hazelcast.spark.connector.iterator
2 |
3 | import java.util
4 | import javax.cache.Cache.Entry
5 |
6 | class CacheIterator[K, V](val iterator: util.Iterator[Entry[K, V]]) extends Iterator[(K, V)] {
7 | override def hasNext: Boolean = iterator.hasNext
8 |
9 | override def next(): (K, V) = {
10 | val entry: Entry[K, V] = iterator.next()
11 | (entry.getKey, entry.getValue)
12 | }
13 | }
14 |
--------------------------------------------------------------------------------
/src/main/scala/com/hazelcast/spark/connector/iterator/MapIterator.scala:
--------------------------------------------------------------------------------
1 | package com.hazelcast.spark.connector.iterator
2 |
3 | import java.util
4 | import java.util.Map.Entry
5 |
6 | class MapIterator[K, V](val iterator: util.Iterator[Entry[K, V]]) extends Iterator[(K, V)] {
7 | override def hasNext: Boolean = iterator.hasNext
8 |
9 | override def next(): (K, V) = {
10 | val entry: Entry[K, V] = iterator.next()
11 | (entry.getKey, entry.getValue)
12 | }
13 | }
14 |
--------------------------------------------------------------------------------
/src/main/scala/com/hazelcast/spark/connector/package.scala:
--------------------------------------------------------------------------------
1 | package com.hazelcast.spark
2 |
3 | import com.hazelcast.spark.connector.rdd.HazelcastRDDFunctions
4 | import org.apache.spark.SparkContext
5 | import org.apache.spark.rdd.RDD
6 |
7 | import scala.reflect.ClassTag
8 |
9 | package object connector {
10 |
11 | implicit def toSparkContextFunctions(sc: SparkContext): HazelcastSparkContextFunctions =
12 | new HazelcastSparkContextFunctions(sc)
13 |
14 | implicit def toHazelcastRDDFunctions[K: ClassTag, V: ClassTag]
15 | (self: RDD[(K, V)]): HazelcastRDDFunctions[K, V] = new HazelcastRDDFunctions(self)
16 |
17 |
18 | }
--------------------------------------------------------------------------------
/src/main/scala/com/hazelcast/spark/connector/rdd/HazelcastJavaRDD.scala:
--------------------------------------------------------------------------------
1 | package com.hazelcast.spark.connector.rdd
2 |
3 | import org.apache.spark.api.java.JavaPairRDD
4 |
5 | import scala.reflect.ClassTag
6 |
7 | class HazelcastJavaRDD[K, V](rdd: HazelcastRDD[K, V])(
8 | implicit override val kClassTag: ClassTag[K],
9 | implicit override val vClassTag: ClassTag[V])
10 | extends JavaPairRDD[K, V](rdd) {
11 |
12 | }
13 |
--------------------------------------------------------------------------------
/src/main/scala/com/hazelcast/spark/connector/rdd/HazelcastRDD.scala:
--------------------------------------------------------------------------------
1 | package com.hazelcast.spark.connector.rdd
2 |
3 | import com.hazelcast.client.HazelcastClientNotActiveException
4 | import com.hazelcast.client.cache.impl.ClientCacheProxy
5 | import com.hazelcast.client.proxy.ClientMapProxy
6 | import com.hazelcast.core.{HazelcastInstance, Partition => HazelcastPartition}
7 | import com.hazelcast.spark.connector.conf.SerializableConf
8 | import com.hazelcast.spark.connector.iterator.{CacheIterator, MapIterator}
9 | import com.hazelcast.spark.connector.util.ConnectionUtil.{closeHazelcastConnection, getHazelcastConnection}
10 | import com.hazelcast.spark.connector.util.HazelcastUtil._
11 | import org.apache.spark.annotation.DeveloperApi
12 | import org.apache.spark.rdd.RDD
13 | import org.apache.spark.{Partition, SparkContext, TaskContext}
14 |
15 | import scala.collection.JavaConversions._
16 | import scala.util.Try
17 |
18 |
19 | class HazelcastRDD[K, V](@transient val sc: SparkContext, val hzName: String,
20 | val isCache: Boolean, val config: SerializableConf) extends RDD[(K, V)](sc, Seq.empty) {
21 |
22 | @transient lazy val hazelcastPartitions: scala.collection.mutable.Map[Int, String] = {
23 | val client: HazelcastInstance = getHazelcastConnection(config.serverAddresses, id, config)
24 | val partitions: scala.collection.mutable.Map[Int, String] = scala.collection.mutable.Map[Int, String]()
25 | client.getPartitionService.getPartitions.foreach { p =>
26 | partitions.put(p.getPartitionId, p.getOwner.getAddress.getHost + ":" + p.getOwner.getAddress.getPort)
27 | }
28 | closeHazelcastConnection(config.serverAddresses, id)
29 | partitions
30 | }
31 |
32 | @DeveloperApi
33 | override def compute(split: Partition, context: TaskContext): Iterator[(K, V)] = {
34 | Try(computeInternal(split)).recover[Iterator[(K, V)]]({
35 | case e: HazelcastClientNotActiveException ⇒ computeInternal(split)
36 | }).get
37 | }
38 |
39 | def computeInternal(split: Partition): Iterator[(K, V)] = {
40 | val partitionLocationInfo = split.asInstanceOf[PartitionLocationInfo]
41 | val client: HazelcastInstance = getHazelcastConnection(partitionLocationInfo.location, id, config)
42 | if (isCache) {
43 | val cache: ClientCacheProxy[K, V] = getClientCacheProxy(hzName, client)
44 | new CacheIterator[K, V](cache.iterator(config.readBatchSize, split.index, config.valueBatchingEnabled))
45 | } else {
46 | val map: ClientMapProxy[K, V] = getClientMapProxy(hzName, client)
47 | new MapIterator[K, V](map.iterator(config.readBatchSize, split.index, config.valueBatchingEnabled))
48 | }
49 | }
50 |
51 | override protected def getPartitions: Array[Partition] = {
52 | var array: Array[Partition] = Array[Partition]()
53 | for (i <- 0 until hazelcastPartitions.size) {
54 | array = array :+ new PartitionLocationInfo(i, hazelcastPartitions.get(i).get)
55 | }
56 | array
57 | }
58 |
59 | override protected def getPreferredLocations(split: Partition): Seq[String] = {
60 | Seq(hazelcastPartitions.get(split.index).get)
61 | }
62 |
63 | }
64 |
65 |
--------------------------------------------------------------------------------
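Locality note: getPartitions creates one Spark partition per Hazelcast partition (271 by default), and getPreferredLocations reports the owning member's host:port so Spark can schedule each task next to the data it reads. A small sketch that makes this mapping visible, assuming a SparkContext `sc` configured with the connector properties shown above and an existing map named "source-map":

    import com.hazelcast.spark.connector._
    import com.hazelcast.spark.connector.rdd.HazelcastRDD

    val hzRdd: HazelcastRDD[Int, Int] = sc.fromHazelcastMap("source-map")
    println(s"Spark partitions: ${hzRdd.partitions.length}")   // one per Hazelcast partition
    hzRdd.partitions.take(5).foreach { p =>
      // preferredLocations surfaces the "host:port" of the member that owns this partition
      println(s"partition ${p.index} -> ${hzRdd.preferredLocations(p).mkString(", ")}")
    }
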
/src/main/scala/com/hazelcast/spark/connector/rdd/HazelcastRDDFunctions.scala:
--------------------------------------------------------------------------------
1 | package com.hazelcast.spark.connector.rdd
2 |
3 | import com.hazelcast.client.HazelcastClientNotActiveException
4 | import com.hazelcast.client.cache.impl.ClientCacheProxy
5 | import com.hazelcast.client.proxy.ClientMapProxy
6 | import com.hazelcast.core.HazelcastInstance
7 | import com.hazelcast.spark.connector.conf.SerializableConf
8 | import com.hazelcast.spark.connector.util.CleanupUtil.addCleanupListener
9 | import com.hazelcast.spark.connector.util.ConnectionUtil._
10 | import com.hazelcast.spark.connector.util.HazelcastUtil.{getClientCacheProxy, getClientMapProxy}
11 | import org.apache.spark.TaskContext
12 | import org.apache.spark.rdd.RDD
13 |
14 | import scala.collection.JavaConversions._
15 | import scala.util.Try
16 |
17 | class HazelcastRDDFunctions[K, V](val rdd: RDD[(K, V)]) extends Serializable {
18 | val conf: SerializableConf = new SerializableConf(rdd.context)
19 |
20 | def saveToHazelcastCache(cacheName: String): Unit = {
21 | val job = (ctx: TaskContext, iterator: Iterator[(K, V)]) => {
22 | new HazelcastWriteToCacheJob().runJob(ctx, iterator, cacheName)
23 | }
24 | addCleanupListener(rdd.context)
25 | rdd.sparkContext.runJob(rdd, job)
26 |
27 | }
28 |
29 | def saveToHazelcastMap(mapName: String): Unit = {
30 | val job = (ctx: TaskContext, iterator: Iterator[(K, V)]) => {
31 | new HazelcastWriteToMapJob().runJob(ctx, iterator, mapName)
32 | }
33 | addCleanupListener(rdd.context)
34 | rdd.sparkContext.runJob(rdd, job)
35 | }
36 |
37 | private class HazelcastWriteToCacheJob extends Serializable {
38 | def runJob(ctx: TaskContext, iterator: Iterator[(K, V)], cacheName: String): Unit = {
39 | Try(writeInternal(iterator, cacheName)).recover({
40 | case e: HazelcastClientNotActiveException ⇒ writeInternal(iterator, cacheName)
41 | case e: Exception => throw e
42 | })
43 | }
44 |
45 | def writeInternal(iterator: Iterator[(K, V)], cacheName: String): Unit = {
46 | val client: HazelcastInstance = getHazelcastConnection(conf.serverAddresses, rdd.id, conf)
47 | val cache: ClientCacheProxy[K, V] = getClientCacheProxy(cacheName, client)
48 | iterator.grouped(conf.writeBatchSize).foreach((kv) => Try(cache.putAll(mapAsJavaMap(kv.toMap))).recover({
49 | case e: Exception => e.printStackTrace()
50 | }))
51 | }
52 | }
53 |
54 | private class HazelcastWriteToMapJob extends Serializable {
55 | def runJob(ctx: TaskContext, iterator: Iterator[(K, V)], mapName: String): Unit = {
56 | Try(writeInternal(iterator, mapName)).recover({
57 | case e: HazelcastClientNotActiveException ⇒ writeInternal(iterator, mapName)
58 | case e: Exception => throw e
59 | })
60 | }
61 |
62 | def writeInternal(iterator: Iterator[(K, V)], mapName: String): Unit = {
63 | val client: HazelcastInstance = getHazelcastConnection(conf.serverAddresses, rdd.id, conf)
64 | val map: ClientMapProxy[K, V] = getClientMapProxy(mapName, client)
65 | iterator.grouped(conf.writeBatchSize).foreach((kv) => Try(map.putAll(mapAsJavaMap(kv.toMap))).recover({
66 | case e: Exception => e.printStackTrace()
67 | }))
68 | }
69 | }
70 |
71 | }
72 |
--------------------------------------------------------------------------------
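Write-path note: saveToHazelcastMap/saveToHazelcastCache run as a regular Spark job; each partition obtains (or reuses) a client to the configured members, groups its tuples into batches of the configured write batch size, and writes them with putAll. A HazelcastClientNotActiveException triggers one retry of that partition's write, and addCleanupListener arranges for the pooled clients to be closed when the job ends. A short sketch, assuming the same `sc` as in the earlier example:

    import com.hazelcast.spark.connector._

    val pairs = sc.parallelize(1 to 10000).map(i => (i, i.toString))
    pairs.saveToHazelcastMap("target-map")   // or pairs.saveToHazelcastCache("target-cache")
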
/src/main/scala/com/hazelcast/spark/connector/rdd/PartitionLocationInfo.scala:
--------------------------------------------------------------------------------
1 | package com.hazelcast.spark.connector.rdd
2 |
3 | import org.apache.spark.Partition
4 |
5 | class PartitionLocationInfo(val partitionId: Int, val location: String) extends Partition {
6 | override def index: Int = partitionId
7 | }
8 |
--------------------------------------------------------------------------------
/src/main/scala/com/hazelcast/spark/connector/util/CleanupUtil.scala:
--------------------------------------------------------------------------------
1 | package com.hazelcast.spark.connector.util
2 |
3 | import com.hazelcast.spark.connector.util.ConnectionUtil.closeAll
4 | import org.apache.spark.SparkContext
5 | import org.apache.spark.scheduler.{SparkListener, SparkListenerJobEnd, SparkListenerJobStart}
6 |
7 | object CleanupUtil {
8 |
9 | val jobIds: collection.mutable.Map[Int, Seq[Int]] = collection.mutable.Map[Int, Seq[Int]]()
10 | val cleanupJobRddName: String = "HazelcastResourceCleanupJob"
11 |
12 | def addCleanupListener(sc: SparkContext): Unit = {
13 | sc.addSparkListener(new SparkListener {
14 | override def onJobStart(jobStart: SparkListenerJobStart): Unit = {
15 | this.synchronized {
16 | jobStart.stageInfos.foreach(info => {
17 | info.rddInfos.foreach(rdd => {
18 | if (!cleanupJobRddName.equals(rdd.name)) {
19 | val ids: Seq[Int] = info.rddInfos.map(_.id)
20 | val maybeIds: Option[Seq[Int]] = jobIds.get(jobStart.jobId)
21 | if (maybeIds.isDefined) {
22 | jobIds.put(jobStart.jobId, ids ++ maybeIds.get)
23 | } else {
24 | jobIds.put(jobStart.jobId, ids)
25 | }
26 | }
27 | })
28 | })
29 | }
30 | }
31 |
32 | override def onJobEnd(jobEnd: SparkListenerJobEnd): Unit = {
33 | this.synchronized {
34 | if (jobIds.contains(jobEnd.jobId)) {
35 | try {
36 | val workers = sc.getConf.getInt("spark.executor.instances", sc.getExecutorStorageStatus.length)
37 | val rddId: Option[Seq[Int]] = jobIds.get(jobEnd.jobId)
38 | if (rddId.isDefined) {
39 | sc.parallelize(1 to workers, workers).setName(cleanupJobRddName).foreachPartition(it ⇒ closeAll(rddId.get))
40 | }
41 | jobIds -= jobEnd.jobId
42 | } catch {
43 | case e: Exception =>
44 | }
45 | }
46 | }
47 | }
48 | })
49 | }
50 |
51 |
52 | }
53 |
--------------------------------------------------------------------------------
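Cleanup note: the read and write paths register this listener themselves; it is shown here only to illustrate the mechanism. After each job finishes, the listener schedules a small job named "HazelcastResourceCleanupJob" with one partition per executor and closes the pooled clients that belong to the finished job's RDD ids. Registering it manually on an existing SparkContext is a single call:

    import com.hazelcast.spark.connector.util.CleanupUtil

    CleanupUtil.addCleanupListener(sc)   // sc: an already-created SparkContext
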
/src/main/scala/com/hazelcast/spark/connector/util/ConnectionUtil.scala:
--------------------------------------------------------------------------------
1 | package com.hazelcast.spark.connector.util
2 |
3 | import com.hazelcast.client.HazelcastClient
4 | import com.hazelcast.client.config.{ClientConfig, XmlClientConfigBuilder}
5 | import com.hazelcast.core.HazelcastInstance
6 | import com.hazelcast.spark.connector.conf.SerializableConf
7 |
8 | import scala.collection.{JavaConversions, mutable}
9 |
10 | object ConnectionUtil {
11 |
12 | private[connector] val instances = mutable.Map[String, HazelcastInstance]()
13 |
14 | def getHazelcastConnection(member: String, rddId: Int, conf: SerializableConf): HazelcastInstance = {
15 | def createClientInstance: HazelcastInstance = {
16 | val client: HazelcastInstance = HazelcastClient.newHazelcastClient(createClientConfig(conf, member))
17 | instances.put(member + "#" + rddId, client)
18 | client
19 | }
20 | this.synchronized {
21 | val maybeInstance: Option[HazelcastInstance] = instances.get(member + "#" + rddId)
22 | if (maybeInstance.isEmpty) {
23 | createClientInstance
24 | } else {
25 | val instance: HazelcastInstance = maybeInstance.get
26 | if (instance.getLifecycleService.isRunning) {
27 | instance
28 | } else {
29 | createClientInstance
30 | }
31 | }
32 | }
33 | }
34 |
35 | def closeHazelcastConnection(member: String, rddId: Int): Unit = {
36 | this.synchronized {
37 | val maybeInstance: Option[HazelcastInstance] = instances.get(member + "#" + rddId)
38 | if (maybeInstance.isDefined) {
39 | val instance: HazelcastInstance = maybeInstance.get
40 | if (instance.getLifecycleService.isRunning) {
41 | instance.getLifecycleService.shutdown()
42 | }
43 | instances.remove(member + "#" + rddId)
44 | }
45 | }
46 | }
47 |
48 | def closeAll(rddIds: Seq[Int]): Unit = {
49 | this.synchronized {
50 | instances.keys.foreach({
51 | key => {
52 | val instanceRddId: String = key.split("#")(1)
53 | if (rddIds.contains(instanceRddId.toInt)) {
54 | val instance: HazelcastInstance = instances.get(key).get
55 | if (instance.getLifecycleService.isRunning) {
56 | instance.shutdown()
57 | }
58 | instances.remove(key)
59 | }
60 | }
61 | })
62 | }
63 | }
64 |
65 | private def createClientConfig(conf: SerializableConf, member: String): ClientConfig = {
66 | var config: ClientConfig = null
67 | if (conf.xmlPath != null) {
68 | config = new XmlClientConfigBuilder(conf.xmlPath).build()
69 | } else {
70 | config = new ClientConfig
71 | config.getGroupConfig.setName(conf.groupName)
72 | config.getGroupConfig.setPassword(conf.groupPass)
73 | config.getNetworkConfig.setAddresses(JavaConversions.seqAsJavaList(member.split(",")))
74 | }
75 | config
76 | }
77 |
78 | }
79 |
--------------------------------------------------------------------------------
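Connection-pooling note: clients are cached under a "member#rddId" key, reused while their lifecycle service is running, and rebuilt otherwise; configuration comes either from a client XML file or from the group name/password plus the member addresses. The connector calls these methods internally; the manual sketch below is only illustrative and assumes the same `sc` as in the earlier example and a member on 127.0.0.1:5701:

    import com.hazelcast.spark.connector.conf.SerializableConf
    import com.hazelcast.spark.connector.util.ConnectionUtil

    val hzConf = new SerializableConf(sc)   // captures the hazelcast.* settings from the SparkConf
    val client = ConnectionUtil.getHazelcastConnection("127.0.0.1:5701", 0, hzConf)   // 0 stands in for an RDD id
    println(client.getCluster.getMembers)
    ConnectionUtil.closeHazelcastConnection("127.0.0.1:5701", 0)
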
/src/main/scala/com/hazelcast/spark/connector/util/HazelcastUtil.scala:
--------------------------------------------------------------------------------
1 | package com.hazelcast.spark.connector.util
2 |
3 | import javax.cache.{Cache, CacheManager}
4 |
5 | import com.hazelcast.cache.impl.{CacheProxy, HazelcastServerCachingProvider}
6 | import com.hazelcast.client.cache.impl.{ClientCacheProxy, HazelcastClientCachingProvider}
7 | import com.hazelcast.client.proxy.ClientMapProxy
8 | import com.hazelcast.config.CacheConfig
9 | import com.hazelcast.core.{HazelcastInstance, IMap}
10 | import com.hazelcast.map.impl.proxy.MapProxyImpl
11 |
12 | import scala.reflect.ClassTag
13 |
14 | object HazelcastUtil {
15 |
16 | def getClientMapProxy[K, V](name: String, instance: HazelcastInstance): ClientMapProxy[K, V] = {
17 | val map: IMap[K, V] = instance.getMap(name)
18 | map.asInstanceOf[ClientMapProxy[K, V]]
19 | }
20 |
21 | def getServerMapProxy[K, V](name: String, instance: HazelcastInstance): MapProxyImpl[K, V] = {
22 | val map: IMap[K, V] = instance.getMap(name)
23 | map.asInstanceOf[MapProxyImpl[K, V]]
24 | }
25 |
26 | def getClientCacheProxy[K, V](name: String, instance: HazelcastInstance): ClientCacheProxy[K, V] = {
27 | val cachingProvider: HazelcastClientCachingProvider = HazelcastClientCachingProvider.createCachingProvider(instance)
28 | val cacheManager: CacheManager = cachingProvider.getCacheManager()
29 | val cacheConfig: CacheConfig[K, V] = new CacheConfig[K, V](name)
30 | val cache: Cache[K, V] = cacheManager.createCache(name, cacheConfig)
31 | cache.asInstanceOf[ClientCacheProxy[K, V]]
32 | }
33 |
34 | def getServerCacheProxy[K, V](name: String, instance: HazelcastInstance): CacheProxy[K, V] = {
35 | val cachingProvider: HazelcastServerCachingProvider = HazelcastServerCachingProvider.createCachingProvider(instance)
36 | val cacheManager: CacheManager = cachingProvider.getCacheManager()
37 | val cacheConfig: CacheConfig[K, V] = new CacheConfig[K, V](name)
38 | val cache: Cache[K, V] = cacheManager.createCache(name, cacheConfig)
39 | cache.asInstanceOf[CacheProxy[K, V]]
40 | }
41 |
42 | def getClassTag[T]: ClassTag[T] = ClassTag.AnyRef.asInstanceOf[ClassTag[T]]
43 |
44 | }
45 |
--------------------------------------------------------------------------------
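Proxy note: these helpers cast the public IMap/JCache handles to the client- and member-side proxy classes the connector relies on (the client proxies expose the partition-aware batch iterators used by HazelcastRDD). The tests below use the server-side variants to seed data on an embedded member; a self-contained sketch of that pattern, with Hazelcast.newHazelcastInstance standing in for the test support's embedded member:

    import com.hazelcast.core.Hazelcast
    import com.hazelcast.map.impl.proxy.MapProxyImpl
    import com.hazelcast.spark.connector.util.HazelcastUtil

    val member = Hazelcast.newHazelcastInstance()   // embedded member, as the tests create via HazelcastTestSupport
    val serverMap: MapProxyImpl[Int, Int] = HazelcastUtil.getServerMapProxy("source-map", member)
    (1 to 100).foreach(i => serverMap.put(i, i))
    println(serverMap.size())
    member.shutdown()
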
/src/test/java/ReadFromHazelcastJavaTest.java:
--------------------------------------------------------------------------------
1 | import com.hazelcast.cache.impl.CacheProxy;
2 | import com.hazelcast.client.HazelcastClientManager;
3 | import com.hazelcast.config.Config;
4 | import com.hazelcast.core.HazelcastInstance;
5 | import com.hazelcast.core.IMap;
6 | import com.hazelcast.spark.connector.HazelcastSparkContext;
7 | import com.hazelcast.spark.connector.rdd.HazelcastJavaRDD;
8 | import com.hazelcast.spark.connector.util.HazelcastUtil;
9 | import com.hazelcast.test.HazelcastTestSupport;
10 | import org.apache.spark.SparkConf;
11 | import org.apache.spark.api.java.JavaPairRDD;
12 | import org.apache.spark.api.java.JavaSparkContext;
13 | import org.apache.spark.api.java.function.FlatMapFunction;
14 | import org.apache.spark.api.java.function.Function;
15 | import org.junit.After;
16 | import org.junit.Before;
17 | import org.junit.Test;
18 | import org.junit.runner.RunWith;
19 | import org.junit.runners.Parameterized;
20 | import scala.Tuple2;
21 |
22 | import java.io.Serializable;
23 | import java.util.Arrays;
24 | import java.util.Collections;
25 | import java.util.Comparator;
26 | import java.util.Iterator;
27 | import java.util.List;
28 | import java.util.Map;
29 | import java.util.Objects;
30 |
31 | import static org.junit.Assert.assertEquals;
32 | import static org.junit.Assert.assertFalse;
33 | import static org.junit.Assert.assertTrue;
34 |
35 | @RunWith(Parameterized.class)
36 | public class ReadFromHazelcastJavaTest extends HazelcastTestSupport {
37 |
38 | public static final String GROUP_NAME = randomName();
39 |
40 | @Parameterized.Parameter
41 | public boolean fromCache;
42 |
43 | @Parameterized.Parameters(name = "fromCache:{0}")
44 | public static Iterable<Object[]> parameters() {
45 | return Arrays.asList(new Object[]{Boolean.TRUE}, new Object[]{Boolean.FALSE});
46 | }
47 |
48 | HazelcastInstance server;
49 | JavaSparkContext sparkContext;
50 |
51 | @Before
52 | public void setUp() throws Exception {
53 | System.setProperty("hazelcast.test.use.network", "true");
54 | System.setProperty("hazelcast.local.localAddress", "127.0.0.1");
55 | Config config = getConfig();
56 | config.getGroupConfig().setName(GROUP_NAME);
57 | server = createHazelcastInstance(config);
58 |
59 | SparkConf conf = new SparkConf().setMaster("local[8]").setAppName(this.getClass().getName())
60 | .set("hazelcast.server.addresses", "127.0.0.1:5701")
61 | .set("hazelcast.server.groupName", GROUP_NAME)
62 | .set("spark.driver.host", "127.0.0.1");
63 | sparkContext = new JavaSparkContext(conf);
64 | }
65 |
66 | @After
67 | public void tearDown() throws Exception {
68 | while (HazelcastClientManager.getAllHazelcastClients().size() > 0) {
69 | sleepMillis(50);
70 | }
71 | server.getLifecycleService().terminate();
72 | sparkContext.stop();
73 | }
74 |
75 | @Test
76 | public void count() {
77 | HazelcastJavaRDD<Integer, Integer> hazelcastRDD = getPrepopulatedRDD();
78 | assertEquals(100, hazelcastRDD.count());
79 | }
80 |
81 | @Test
82 | public void isEmpty() {
83 | HazelcastJavaRDD<Integer, Integer> hazelcastRDD = getPrepopulatedRDD();
84 | assertFalse(hazelcastRDD.isEmpty());
85 | }
86 |
87 | @Test
88 | public void sortByKey() {
89 | HazelcastJavaRDD<Integer, Integer> hazelcastRDD = getPrepopulatedRDD();
90 | Tuple2<Integer, Integer> first = hazelcastRDD.sortByKey().first();
91 | assertEquals(0, first._1().intValue());
92 | }
93 |
94 | @Test
95 | public void countByKey() {
96 | HazelcastJavaRDD<Integer, Integer> hazelcastRDD = getPrepopulatedRDD();
97 | Map<Integer, Long> map = hazelcastRDD.countByKey();
98 | for (Object count : map.values()) {
99 | assertEquals(1L, count);
100 | }
101 | }
102 |
103 | @Test
104 | public void countByValue() {
105 | HazelcastJavaRDD<Integer, Integer> hazelcastRDD = getPrepopulatedRDD();
106 | Map<Tuple2<Integer, Integer>, Long> map = hazelcastRDD.countByValue();
107 | for (Long count : map.values()) {
108 | assertEquals(1, count.longValue());
109 | }
110 | }
111 |
112 | @Test
113 | public void filter() {
114 | HazelcastJavaRDD<Integer, Integer> hazelcastRDD = getPrepopulatedRDD();
115 | JavaPairRDD<Integer, Integer> filter = hazelcastRDD.filter(new LessThan10Filter());
116 | for (Integer value : filter.values().collect()) {
117 | assertTrue(value < 10);
118 | }
119 | }
120 |
121 | @Test
122 | public void min() {
123 | HazelcastJavaRDD<Integer, Integer> hazelcastRDD = getPrepopulatedRDD();
124 | Tuple2<Integer, Integer> min = hazelcastRDD.min(new ValueComparator());
125 | assertEquals(0, min._1().intValue());
126 | assertEquals(0, min._2().intValue());
127 | }
128 |
129 | @Test
130 | public void max() {
131 | HazelcastJavaRDD<Integer, Integer> hazelcastRDD = getPrepopulatedRDD();
132 | Tuple2<Integer, Integer> max = hazelcastRDD.max(new ValueComparator());
133 | assertEquals(99, max._1().intValue());
134 | assertEquals(99, max._2().intValue());
135 | }
136 |
137 | @Test
138 | public void flatMap() {
139 | HazelcastJavaRDD<Integer, Integer> hazelcastRDD = getPrepopulatedRDD();
140 | List<Integer> values = hazelcastRDD.flatMap(new FlatMapValues()).collect();
141 | assertEquals(100, values.size());
142 | }
143 |
144 | private HazelcastJavaRDD<Integer, Integer> getPrepopulatedRDD() {
145 | HazelcastSparkContext hazelcastSparkContext = new HazelcastSparkContext(sparkContext);
146 | String name = randomName();
147 | if (fromCache) {
148 | CacheProxy<Integer, Integer> cacheProxy = HazelcastUtil.getServerCacheProxy(name, server);
149 | for (int i = 0; i < 100; i++) {
150 | cacheProxy.put(i, i);
151 | }
152 | return hazelcastSparkContext.fromHazelcastCache(name);
153 | } else {
154 | IMap<Integer, Integer> map = server.getMap(name);
155 | for (int i = 0; i < 100; i++) {
156 | map.put(i, i);
157 | }
158 | return hazelcastSparkContext.fromHazelcastMap(name);
159 | }
160 | }
161 |
162 | private static class FlatMapValues implements FlatMapFunction<Tuple2<Integer, Integer>, Integer>, Serializable {
163 | @Override
164 | public Iterator<Integer> call(Tuple2<Integer, Integer> integerIntegerTuple2) throws Exception {
165 | return Collections.singletonList(integerIntegerTuple2._2()).iterator();
166 | }
167 | }
168 |
169 | private static class LessThan10Filter implements Function<Tuple2<Integer, Integer>, Boolean>, Serializable {
170 | @Override
171 | public Boolean call(Tuple2<Integer, Integer> v1) throws Exception {
172 | return v1._2() < 10;
173 | }
174 | }
175 |
176 | private static class ValueComparator implements Comparator<Tuple2<Integer, Integer>>, Serializable {
177 | @Override
178 | public int compare(Tuple2<Integer, Integer> o1, Tuple2<Integer, Integer> o2) {
179 | if (o1._2() < o2._2()) {
180 | return -1;
181 | } else if (Objects.equals(o1._2(), o2._2())) {
182 | return 0;
183 | } else {
184 | return 1;
185 | }
186 | }
187 | }
188 | }
189 |
--------------------------------------------------------------------------------
/src/test/java/WriteToHazelcastJavaTest.java:
--------------------------------------------------------------------------------
1 | import com.hazelcast.cache.impl.CacheProxy;
2 | import com.hazelcast.client.HazelcastClientManager;
3 | import com.hazelcast.config.Config;
4 | import com.hazelcast.core.HazelcastInstance;
5 | import com.hazelcast.map.impl.proxy.MapProxyImpl;
6 | import com.hazelcast.spark.connector.util.HazelcastUtil;
7 | import com.hazelcast.test.HazelcastTestSupport;
8 | import java.util.ArrayList;
9 | import java.util.Arrays;
10 | import org.apache.spark.SparkConf;
11 | import org.apache.spark.api.java.JavaPairRDD;
12 | import org.apache.spark.api.java.JavaSparkContext;
13 | import org.apache.spark.api.java.function.Function;
14 | import org.junit.After;
15 | import org.junit.Before;
16 | import org.junit.Test;
17 | import org.junit.runner.RunWith;
18 | import org.junit.runners.Parameterized;
19 |
20 | import static com.hazelcast.spark.connector.HazelcastJavaPairRDDFunctions.javaPairRddFunctions;
21 | import static org.junit.Assert.assertEquals;
22 |
23 | @RunWith(Parameterized.class)
24 | public class WriteToHazelcastJavaTest extends HazelcastTestSupport {
25 |
26 | public static final String GROUP_NAME = randomName();
27 |
28 | @Parameterized.Parameter
29 | public boolean toCache;
30 |
31 | @Parameterized.Parameters(name = "toCache:{0}")
32 | public static Iterable<Object[]> parameters() {
33 | return Arrays.asList(new Object[]{Boolean.TRUE}, new Object[]{Boolean.FALSE});
34 | }
35 |
36 | HazelcastInstance server;
37 | JavaSparkContext sparkContext;
38 |
39 | @Before
40 | public void setUp() throws Exception {
41 | System.setProperty("hazelcast.test.use.network", "true");
42 | System.setProperty("hazelcast.local.localAddress", "127.0.0.1");
43 | Config config = getConfig();
44 | config.getGroupConfig().setName(GROUP_NAME);
45 | server = createHazelcastInstance(config);
46 |
47 | SparkConf conf = new SparkConf().setMaster("local[8]").setAppName(this.getClass().getName())
48 | .set("hazelcast.server.addresses", "127.0.0.1:5701")
49 | .set("hazelcast.server.groupName", GROUP_NAME)
50 | .set("spark.driver.host", "127.0.0.1");
51 | sparkContext = new JavaSparkContext(conf);
52 | }
53 |
54 | @After
55 | public void tearDown() throws Exception {
56 | while (HazelcastClientManager.getAllHazelcastClients().size() > 0) {
57 | sleepMillis(50);
58 | }
59 | server.getLifecycleService().terminate();
60 | sparkContext.stop();
61 | }
62 |
63 | @Test
64 | public void zipWithIndex() throws Exception {
65 | JavaPairRDD<Integer, Long> pairRDD = sparkContext.parallelize(new ArrayList<Integer>() {{
66 | add(1);
67 | add(2);
68 | add(3);
69 | add(4);
70 | add(5);
71 | }}).zipWithIndex();
72 | String name = randomMapName();
73 | saveToHazelcast(pairRDD, name);
74 |
75 | assertSize(name, 5);
76 | }
77 |
78 |
79 | @Test
80 | public void mapValues() throws Exception {
81 | JavaPairRDD<Integer, Long> pairRDD = sparkContext.parallelize(new ArrayList<Integer>() {{
82 | add(1);
83 | add(2);
84 | add(3);
85 | add(4);
86 | add(5);
87 | }}).zipWithIndex();
88 | String name = randomMapName();
89 | JavaPairRDD<Integer, Object> mapValues = pairRDD.mapValues(new MapValueTo5());
90 | saveToHazelcast(mapValues, name);
91 |
92 | assertSize(name, 5);
93 | assertValue(name, 5, 5);
94 | }
95 |
96 | private void assertSize(String name, int size) {
97 | if (toCache) {
98 | CacheProxy cache = HazelcastUtil.getServerCacheProxy(name, server);
99 | assertEquals("Cache size should be " + size, size, cache.size());
100 | } else {
101 | MapProxyImpl map = HazelcastUtil.getServerMapProxy(name, server);
102 | assertEquals("Map size should be " + size, size, map.size());
103 | }
104 | }
105 |
106 | private void assertValue(String name, int size, int value) {
107 | if (toCache) {
108 | CacheProxy cache = HazelcastUtil.getServerCacheProxy(name, server);
109 | for (int i = 1; i <= size; i++) {
110 | assertEquals(value, cache.get(i));
111 | }
112 | } else {
113 | MapProxyImpl map = HazelcastUtil.getServerMapProxy(name, server);
114 | for (int i = 1; i <= size; i++) {
115 | assertEquals(value, map.get(i));
116 | }
117 | }
118 | }
119 |
120 | private void saveToHazelcast(JavaPairRDD rdd, String name) {
121 | if (toCache) {
122 | javaPairRddFunctions(rdd).saveToHazelcastCache(name);
123 | } else {
124 | javaPairRddFunctions(rdd).saveToHazelcastMap(name);
125 | }
126 | }
127 |
128 |
129 | static class MapValueTo5 implements Function<Long, Object> {
130 | @Override
131 | public Object call(Long v1) throws Exception {
132 | return 5;
133 | }
134 | }
135 |
136 |
137 | }
138 |
--------------------------------------------------------------------------------
/src/test/scala/com/hazelcast/spark/connector/ReadFromHazelcastTest.scala:
--------------------------------------------------------------------------------
1 | package com.hazelcast.spark.connector
2 |
3 |
4 | import com.hazelcast.cache.impl.CacheProxy
5 | import com.hazelcast.client.HazelcastClientManager
6 | import com.hazelcast.config.Config
7 | import com.hazelcast.core.HazelcastInstance
8 | import com.hazelcast.map.impl.proxy.MapProxyImpl
9 | import com.hazelcast.spark.connector.rdd.HazelcastRDD
10 | import com.hazelcast.spark.connector.util.HazelcastUtil
11 | import com.hazelcast.test.HazelcastTestSupport
12 | import com.hazelcast.test.HazelcastTestSupport._
13 | import org.apache.spark.{SparkConf, SparkContext}
14 | import org.junit.Assert._
15 | import org.junit._
16 | import org.junit.runner.RunWith
17 | import org.junit.runners.Parameterized
18 |
19 | @RunWith(value = classOf[Parameterized])
20 | class ReadFromHazelcastTest(fromCache: Boolean) extends HazelcastTestSupport {
21 |
22 | var sparkContext: SparkContext = null
23 | var hazelcastInstance: HazelcastInstance = null
24 | val groupName: String = randomName()
25 |
26 | @Before
27 | def before(): Unit = {
28 | System.setProperty("hazelcast.test.use.network", "true")
29 | System.setProperty("hazelcast.local.localAddress", "127.0.0.1")
30 | val config: Config = getConfig
31 | config.getGroupConfig.setName(groupName)
32 | hazelcastInstance = createHazelcastInstance(config)
33 | sparkContext = getSparkContext
34 | }
35 |
36 | @After
37 | def after(): Unit = {
38 | System.clearProperty("hazelcast.test.use.network")
39 | System.clearProperty("hazelcast.local.localAddress")
40 | while (HazelcastClientManager.getAllHazelcastClients.size > 0) {
41 | sleepMillis(50)
42 | }
43 | hazelcastInstance.getLifecycleService.terminate()
44 | sparkContext.stop()
45 | }
46 |
47 | @Test
48 | def count(): Unit = {
49 | val hazelcastRDD: HazelcastRDD[Int, Int] = getPrepopulatedRDD()
50 | val tuples: Array[(Int, Int)] = hazelcastRDD.collect()
51 |
52 | assertEquals("Count should be ", 100, tuples.length)
53 | }
54 |
55 | @Test
56 | def isEmpty(): Unit = {
57 | val hazelcastRDD: HazelcastRDD[Int, Int] = getPrepopulatedRDD()
58 | assertFalse(hazelcastRDD.isEmpty())
59 | }
60 |
61 |
62 | @Test
63 | def sortByKey(): Unit = {
64 | val hazelcastRDD: HazelcastRDD[Int, Int] = getPrepopulatedRDD()
65 | val first = hazelcastRDD.sortByKey().first()
66 |
67 | assertEquals("First item should be", 1, first._1)
68 | }
69 |
70 | @Test
71 | def countByKey(): Unit = {
72 | val hazelcastRDD: HazelcastRDD[Int, Int] = getPrepopulatedRDD()
73 | val map = hazelcastRDD.countByKey()
74 |
75 | assertTrue("All keys should have one value", map.forall({ case (k, v) => v == 1 }))
76 | }
77 |
78 | @Test
79 | def countByValue(): Unit = {
80 | val hazelcastRDD: HazelcastRDD[Int, Int] = getPrepopulatedRDD()
81 | val map = hazelcastRDD.countByValue()
82 |
83 | assertTrue("All values should appear once", map.forall({ case (k, v) => v == 1 }))
84 | }
85 |
86 | @Test
87 | def filter(): Unit = {
88 | val hazelcastRDD: HazelcastRDD[Int, Int] = getPrepopulatedRDD()
89 | val filteredRDD = hazelcastRDD.filter { case (_, v) => v < 10 }
90 |
91 | assertTrue("All values should be less than 10", filteredRDD.values.collect().forall(_ < 10))
92 | }
93 |
94 | @Test
95 | def min(): Unit = {
96 | val hazelcastRDD: HazelcastRDD[Int, Int] = getPrepopulatedRDD()
97 | val min = hazelcastRDD.min()
98 |
99 | assertEquals("min key should be one", 1, min._1)
100 | assertEquals("min value should be one", 1, min._2)
101 | }
102 |
103 | @Test
104 | def max(): Unit = {
105 | val hazelcastRDD: HazelcastRDD[Int, Int] = getPrepopulatedRDD()
106 | val max = hazelcastRDD.max()
107 |
108 | assertEquals("max key should be 100", 100, max._1)
109 | assertEquals("max value should be 100", 100, max._2)
110 | }
111 |
112 | @Test
113 | def flatMap(): Unit = {
114 | val hazelcastRDD: HazelcastRDD[Int, Int] = getPrepopulatedRDD()
115 | val values = hazelcastRDD.flatMap(e => List(e._2)).collect()
116 |
117 | assertEquals(100, values.length)
118 | }
119 |
120 | def getPrepopulatedRDD(): HazelcastRDD[Int, Int] = {
121 | val name: String = randomName()
122 | if (fromCache) {
123 | val cache: CacheProxy[Int, Int] = HazelcastUtil.getServerCacheProxy(name, hazelcastInstance)
124 | for (i <- 1 to 100) {
125 | cache.put(i, i)
126 | }
127 | val hazelcastRDD: HazelcastRDD[Int, Int] = sparkContext.fromHazelcastCache(name)
128 | hazelcastRDD
129 |
130 | } else {
131 | val map: MapProxyImpl[Int, Int] = HazelcastUtil.getServerMapProxy(name, hazelcastInstance)
132 | for (i <- 1 to 100) {
133 | map.put(i, i)
134 | }
135 | val hazelcastRDD: HazelcastRDD[Int, Int] = sparkContext.fromHazelcastMap(name)
136 | hazelcastRDD
137 | }
138 | }
139 |
140 |
141 | def getSparkContext: SparkContext = {
142 | val conf: SparkConf = new SparkConf().setMaster("local[4]").setAppName(this.getClass.getName)
143 | .set("spark.driver.host", "127.0.0.1")
144 | .set("hazelcast.server.addresses", "127.0.0.1:5701")
145 | .set("hazelcast.server.groupName", groupName)
146 | new SparkContext(conf)
147 | }
148 |
149 | }
150 |
151 | object ReadFromHazelcastTest {
152 | @Parameterized.Parameters(name = "fromCache = {0}") def parameters: java.util.Collection[Array[AnyRef]] = {
153 | java.util.Arrays.asList(Array(Boolean.box(false)), Array(Boolean.box(true)))
154 | }
155 | }
156 |
157 |
--------------------------------------------------------------------------------
/src/test/scala/com/hazelcast/spark/connector/ReadPerformanceTest.scala:
--------------------------------------------------------------------------------
1 | package com.hazelcast.spark.connector
2 |
3 | import com.hazelcast.client.config.ClientConfig
4 | import com.hazelcast.client.{HazelcastClient, HazelcastClientManager}
5 | import com.hazelcast.config.Config
6 | import com.hazelcast.core.{HazelcastInstance, IMap}
7 | import com.hazelcast.spark.connector.rdd.HazelcastRDD
8 | import com.hazelcast.test.HazelcastTestSupport
9 | import com.hazelcast.test.HazelcastTestSupport._
10 | import org.apache.spark.rdd.RDD
11 | import org.apache.spark.{SparkConf, SparkContext}
12 | import org.junit.{After, Before, Ignore, Test}
13 |
14 | import scala.collection.JavaConversions
15 |
16 | @Ignore // since this requires local tachyon installation
17 | class ReadPerformanceTest extends HazelcastTestSupport {
18 |
19 | var sparkContext: SparkContext = null
20 | var hazelcastInstance: HazelcastInstance = null
21 | val groupName: String = randomName()
22 | val ITEM_COUNT: Int = 5000000
23 |
24 |
25 | @Before
26 | def before(): Unit = {
27 | System.setProperty("hazelcast.test.use.network", "true")
28 | System.setProperty("hazelcast.local.localAddress", "127.0.0.1")
29 | val config: Config = getConfig
30 | config.getGroupConfig.setName(groupName)
31 | hazelcastInstance = createHazelcastInstance(config)
32 | sparkContext = getSparkContext
33 | }
34 |
35 | @After
36 | def after(): Unit = {
37 | System.clearProperty("hazelcast.test.use.network")
38 | System.clearProperty("hazelcast.local.localAddress")
39 | hazelcastInstance.getLifecycleService.terminate()
40 | }
41 |
42 |
43 | @Test
44 | def readFromSpark(): Unit = {
45 | val rdd: RDD[(Int, Long)] = sparkContext.parallelize(1 to ITEM_COUNT).zipWithIndex()
46 | rdd.persist(org.apache.spark.storage.StorageLevel.MEMORY_ONLY)
47 | rdd.count()
48 |
49 | val startSpark = System.currentTimeMillis
50 | rdd.take(ITEM_COUNT)
51 | val endSpark = System.currentTimeMillis
52 | val tookSpark = endSpark - startSpark
53 |
54 | // val writer: BufferedWriter = scala.tools.nsc.io.File("/Users/emindemirci/Desktop/sparkResults").bufferedWriter(true)
55 | // writer.append(tookSpark.toString)
56 | // writer.newLine()
57 | // writer.close()
58 |
59 | println("read via spark took : " + tookSpark)
60 | stopSpark
61 | }
62 |
63 | @Test
64 | def readFromHazelcast(): Unit = {
65 | val name: String = randomName()
66 | val map: java.util.Map[Int, Int] = JavaConversions.mapAsJavaMap((1 to ITEM_COUNT).zipWithIndex.toMap)
67 | val config: ClientConfig = new ClientConfig
68 | config.getGroupConfig.setName(groupName)
69 | val client: HazelcastInstance = HazelcastClient.newHazelcastClient(config)
70 | val hazelcastMap: IMap[Int, Int] = client.getMap(name)
71 | hazelcastMap.putAll(map)
72 | client.getLifecycleService.shutdown()
73 |
74 | val startHz = System.currentTimeMillis
75 | val hazelcastRDD: HazelcastRDD[Nothing, Nothing] = sparkContext.fromHazelcastMap(name)
76 | hazelcastRDD.take(ITEM_COUNT)
77 | val endHz = System.currentTimeMillis
78 | val tookHz = endHz - startHz
79 |
80 | // val writer: BufferedWriter = scala.tools.nsc.io.File("/Users/emindemirci/Desktop/hazelcastResults").bufferedWriter(true)
81 | // writer.append(tookHz.toString)
82 | // writer.newLine()
83 | // writer.close()
84 |
85 | println("read via hazelcast took : " + tookHz)
86 | stopSpark
87 | }
88 |
89 | @Test
90 | def readFromTachyon(): Unit = {
91 | val rdd: RDD[(Int, Long)] = sparkContext.parallelize(1 to ITEM_COUNT).zipWithIndex()
92 | rdd.persist(org.apache.spark.storage.StorageLevel.OFF_HEAP)
93 | rdd.count() // to actually persist to Tachyon
94 |
95 | val startSpark = System.currentTimeMillis
96 | rdd.take(ITEM_COUNT)
97 | val endSpark = System.currentTimeMillis
98 | val tookSpark = endSpark - startSpark
99 |
100 | // val writer: BufferedWriter = scala.tools.nsc.io.File("/Users/emindemirci/Desktop/tachyonResults").bufferedWriter(true)
101 | // writer.append(tookSpark.toString)
102 | // writer.newLine()
103 | // writer.close()
104 |
105 | println("read via tachyon took : " + tookSpark)
106 | stopSpark
107 | }
108 |
109 |
110 | def stopSpark: Unit = {
111 | while (HazelcastClientManager.getAllHazelcastClients.size > 0) {
112 | sleepMillis(50)
113 | }
114 | sparkContext.stop()
115 | }
116 |
117 | def getSparkContext: SparkContext = {
118 | val conf: SparkConf = new SparkConf().setMaster("local[4]").setAppName(this.getClass.getName)
119 | .set("spark.driver.host", "127.0.0.1")
120 | .set("hazelcast.server.addresses", "127.0.0.1:5701")
121 | .set("hazelcast.server.groupName", groupName)
122 | new SparkContext(conf)
123 | }
124 |
125 |
126 | }
127 |
128 |
129 |
130 |
--------------------------------------------------------------------------------
/src/test/scala/com/hazelcast/spark/connector/WritePerformanceTest.scala:
--------------------------------------------------------------------------------
1 | package com.hazelcast.spark.connector
2 |
3 | import com.hazelcast.client.config.ClientConfig
4 | import com.hazelcast.client.{HazelcastClient, HazelcastClientManager}
5 | import com.hazelcast.config.Config
6 | import com.hazelcast.core.{HazelcastInstance, IMap}
7 | import com.hazelcast.test.HazelcastTestSupport
8 | import com.hazelcast.test.HazelcastTestSupport._
9 | import org.apache.spark.rdd.RDD
10 | import org.apache.spark.{SparkConf, SparkContext}
11 | import org.junit.{After, Before, Ignore, Test}
12 |
13 | import scala.collection.{JavaConversions, Map}
14 |
15 | @Ignore // since this requires local tachyon installation
16 | class WritePerformanceTest() extends HazelcastTestSupport {
17 |
18 | var sparkContext: SparkContext = null
19 | var hazelcastInstance: HazelcastInstance = null
20 | val groupName: String = randomName()
21 | val ITEM_COUNT: Int = 1000000
22 |
23 |
24 | @Before
25 | def before(): Unit = {
26 | System.setProperty("hazelcast.test.use.network", "true")
27 | System.setProperty("hazelcast.local.localAddress", "127.0.0.1")
28 | val config: Config = getConfig
29 | config.getGroupConfig.setName(groupName)
30 | hazelcastInstance = createHazelcastInstance(config)
31 | sparkContext = getSparkContext
32 | }
33 |
34 | @After
35 | def after(): Unit = {
36 | System.clearProperty("hazelcast.test.use.network")
37 | System.clearProperty("hazelcast.local.localAddress")
38 | hazelcastInstance.getLifecycleService.terminate()
39 | }
40 |
41 |
42 | @Test
43 | def sparkWrite(): Unit = {
44 | val name: String = randomName()
45 | val rdd: RDD[(Int, Long)] = sparkContext.parallelize(1 to ITEM_COUNT).zipWithIndex()
46 | val map: Map[Int, Long] = rdd.collectAsMap()
47 | val javaMap: java.util.Map[Int, Long] = JavaConversions.mapAsJavaMap(map)
48 |
49 |
50 | val startSpark = System.currentTimeMillis
51 | rdd.saveToHazelcastMap(name)
52 | val endSpark = System.currentTimeMillis
53 | val tookSpark = endSpark - startSpark
54 |
55 | println("write via spark took : " + tookSpark)
56 | stopSpark
57 | }
58 |
59 |
60 | @Test
61 | def hazelcastWrite(): Unit = {
62 | val name: String = randomName()
63 | val map: java.util.Map[Int, Int] = JavaConversions.mapAsJavaMap((1 to ITEM_COUNT).zipWithIndex.toMap)
64 | val config: ClientConfig = new ClientConfig
65 | config.getGroupConfig.setName(groupName)
66 | val client: HazelcastInstance = HazelcastClient.newHazelcastClient(config)
67 |
68 | val hazelcastMap: IMap[Int, Int] = client.getMap(name)
69 | val startHz = System.currentTimeMillis
70 | hazelcastMap.putAll(map)
71 | val endHz = System.currentTimeMillis
72 | val tookHz = endHz - startHz
73 |
74 | println("write via hazelcast took : " + tookHz)
75 | client.getLifecycleService.shutdown()
76 | stopSpark
77 | }
78 |
79 | @Test
80 | def tachyonWrite(): Unit = {
81 | val name: String = randomName()
82 | val rdd: RDD[(Int, Long)] = sparkContext.parallelize(1 to ITEM_COUNT).zipWithIndex()
83 | val map: Map[Int, Long] = rdd.collectAsMap()
84 | val javaMap: java.util.Map[Int, Long] = JavaConversions.mapAsJavaMap(map)
85 |
86 |
87 | val startSpark = System.currentTimeMillis
88 | rdd.saveAsTextFile("tachyon://localhost:19998/result" + System.currentTimeMillis())
89 | val endSpark = System.currentTimeMillis
90 | val tookSpark = endSpark - startSpark
91 |
92 | println("write via tachyon took : " + tookSpark)
93 | stopSpark
94 | }
95 |
96 |
97 | def stopSpark: Unit = {
98 | while (HazelcastClientManager.getAllHazelcastClients.size > 0) {
99 | sleepMillis(50)
100 | }
101 | sparkContext.stop()
102 | }
103 |
104 | def getSparkContext: SparkContext = {
105 | val conf: SparkConf = new SparkConf().setMaster("local[1]").setAppName(this.getClass.getName)
106 | .set("spark.driver.host", "127.0.0.1")
107 | .set("hazelcast.server.addresses", "127.0.0.1:5701")
108 | .set("hazelcast.server.groupName", groupName)
109 | new SparkContext(conf)
110 | }
111 |
112 |
113 | }
114 |
115 |
116 |
117 |
--------------------------------------------------------------------------------
/src/test/scala/com/hazelcast/spark/connector/WriteToHazelcastTest.scala:
--------------------------------------------------------------------------------
1 | package com.hazelcast.spark.connector
2 |
3 | import com.hazelcast.cache.impl.CacheProxy
4 | import com.hazelcast.client.HazelcastClientManager
5 | import com.hazelcast.config.Config
6 | import com.hazelcast.core.HazelcastInstance
7 | import com.hazelcast.map.impl.proxy.MapProxyImpl
8 | import com.hazelcast.spark.connector.util.HazelcastUtil
9 | import com.hazelcast.test.HazelcastTestSupport
10 | import com.hazelcast.test.HazelcastTestSupport._
11 | import org.apache.spark.rdd.RDD
12 | import org.apache.spark.{SparkConf, SparkContext}
13 | import org.junit.Assert._
14 | import org.junit.runner.RunWith
15 | import org.junit.runners.Parameterized
16 | import org.junit.{After, Before, Test}
17 |
18 |
19 | @RunWith(value = classOf[Parameterized])
20 | class WriteToHazelcastTest(toCache: Boolean) extends HazelcastTestSupport {
21 |
22 | var sparkContext: SparkContext = null
23 | var hazelcastInstance: HazelcastInstance = null
24 | val groupName: String = randomName()
25 |
26 | @Before
27 | def before(): Unit = {
28 | System.setProperty("hazelcast.test.use.network", "true")
29 | System.setProperty("hazelcast.local.localAddress", "127.0.0.1")
30 | val config: Config = getConfig
31 | config.getGroupConfig.setName(groupName)
32 | hazelcastInstance = createHazelcastInstance(config)
33 | sparkContext = getSparkContext
34 | }
35 |
36 | @After
37 | def after(): Unit = {
38 | System.clearProperty("hazelcast.test.use.network")
39 | System.clearProperty("hazelcast.local.localAddress")
40 | while (HazelcastClientManager.getAllHazelcastClients.size > 0) {
41 | sleepMillis(50)
42 | }
43 | hazelcastInstance.getLifecycleService.terminate()
44 | sparkContext.stop()
45 | }
46 |
47 |
48 | @Test
49 | def zipWithIndex(): Unit = {
50 | val name: String = randomName()
51 | val rdd: RDD[(Int, Long)] = sparkContext.parallelize(1 to 1000).zipWithIndex()
52 | saveToHazelcast(rdd, name)
53 |
54 | assertSize(name, 1000)
55 | }
56 |
57 |
58 | @Test
59 | def mapValues(): Unit = {
60 | val name: String = randomName()
61 | val rdd: RDD[(Int, Long)] = sparkContext.parallelize(1 to 1000).zipWithIndex()
62 | val mapValues: RDD[(Int, Long)] = rdd.mapValues((value) => 20L)
63 | saveToHazelcast(mapValues, name)
64 |
65 | assertSize(name, 1000)
66 | assertValue(name, 1000, 20)
67 | }
68 |
69 | def assertSize(name: String, size: Int) = {
70 | if (toCache) {
71 | val cache: CacheProxy[Int, Long] = HazelcastUtil.getServerCacheProxy(name, hazelcastInstance)
72 | assertEquals("Cache size should be " + size, size, cache.size())
73 | } else {
74 | val map: MapProxyImpl[Int, Long] = HazelcastUtil.getServerMapProxy(name, hazelcastInstance)
75 | assertEquals("Map size should be " + size, size, map.size())
76 | }
77 | }
78 |
79 | def assertValue(name: String, size: Int, value: Int) = {
80 | if (toCache) {
81 | val cache: CacheProxy[Int, Long] = HazelcastUtil.getServerCacheProxy(name, hazelcastInstance)
82 | for (i <- 1 to size) {
83 | assertEquals(value, cache.get(i))
84 | }
85 | } else {
86 | val map: MapProxyImpl[Int, Long] = HazelcastUtil.getServerMapProxy(name, hazelcastInstance)
87 | for (i <- 1 to size) {
88 | assertEquals(value, map.get(i))
89 | }
90 | }
91 | }
92 |
93 | def saveToHazelcast(rdd: RDD[(Int, Long)], name: String) = {
94 | if (toCache) {
95 | rdd.saveToHazelcastCache(name)
96 | } else {
97 | rdd.saveToHazelcastMap(name)
98 | }
99 | }
100 |
101 | def getSparkContext: SparkContext = {
102 | val conf: SparkConf = new SparkConf().setMaster("local[8]").setAppName(this.getClass.getName)
103 | .set("spark.driver.host", "127.0.0.1")
104 | .set("hazelcast.server.addresses", "127.0.0.1:5701")
105 | .set("hazelcast.server.groupName", groupName)
106 | new SparkContext(conf)
107 | }
108 |
109 | }
110 |
111 | object WriteToHazelcastTest {
112 |
113 | @Parameterized.Parameters(name = "toCache = {0}") def parameters: java.util.Collection[Array[AnyRef]] = {
114 | java.util.Arrays.asList(Array(Boolean.box(false)), Array(Boolean.box(true)))
115 | }
116 | }
117 |
118 |
--------------------------------------------------------------------------------