├── scala-driver-4.x ├── project │ └── build.properties ├── src │ └── main │ │ ├── resources │ │ └── application.conf │ │ └── scala │ │ └── com │ │ └── datastax │ │ └── alexott │ │ └── demos │ │ └── objmapper │ │ ├── ObjeMapperTest.scala │ │ └── Entitites.scala └── build.sbt ├── .gitignore ├── driver-4.x └── src │ └── main │ ├── resources │ └── application.conf │ ├── java │ └── com │ │ └── datastax │ │ └── alexott │ │ └── demos │ │ ├── TestAstra.java │ │ ├── TestPointType.java │ │ ├── CreateKeyspacesInferTopology.java │ │ ├── UdtTest1.java │ │ ├── ConnectWithDCDetection.java │ │ ├── Commons.java │ │ └── DCDetectingLBPolicy.java │ ├── kotlin │ └── com │ │ └── datastax │ │ └── alexott │ │ └── demos │ │ └── KtTestObjMapper.kt │ └── scala │ └── com │ └── datastax │ └── alexott │ └── demos │ └── UdtScalaTest1.scala ├── driver-1.x └── src │ ├── test │ └── java │ │ └── com │ │ └── datastax │ │ └── alexott │ │ └── demos │ │ └── TestUtils.java │ └── main │ ├── java │ └── com │ │ └── datastax │ │ └── alexott │ │ └── demos │ │ ├── Utils.java │ │ ├── objmapper │ │ ├── STestAccessor.java │ │ ├── STest.java │ │ ├── TableObjAccessor.java │ │ ├── UDTTestType.java │ │ ├── ExpEntity.java │ │ ├── TableObjJavaTest.java │ │ ├── STestMain.java │ │ ├── TestData.java │ │ ├── AuditTestType.java │ │ ├── TableObjAccessorTest.java │ │ ├── MapperTest1.java │ │ ├── TableObjJava.java │ │ ├── UDTTestTableFR.java │ │ ├── UDTTestTableNonFR.java │ │ ├── Test4.java │ │ ├── ExpPopularity.java │ │ ├── Test4Data.java │ │ ├── TableObjectClustered.java │ │ ├── AuditTestMain.java │ │ ├── AuditTestTable.java │ │ ├── Test4_2.java │ │ ├── FRvsNonFRUDTMapping.java │ │ └── ExpMaps.java │ │ ├── product │ │ ├── Information.java │ │ ├── App.java │ │ └── Product.java │ │ ├── solr │ │ ├── DTest.java │ │ └── DTestMain.java │ │ ├── misc │ │ ├── Test3.java │ │ └── Test1.java │ │ ├── JMXTest.java │ │ ├── graph │ │ ├── GDTest1.java │ │ └── GraphLoad.java │ │ ├── TestPreparedStatements.java │ │ ├── metrics │ │ └── Metrics1.java │ │ ├── SessionLimiter.java │ │ ├── CassandraHealthCheck.java │ │ ├── WhiteListPolicyExample.java │ │ ├── TestResultSerializer.java │ │ ├── TokenRangesScan.java │ │ ├── DumpClusterConfig.java │ │ ├── QBuilder.java │ │ ├── ResultSetSerializer.java │ │ ├── TestBatches.java │ │ └── AlterTableWithChecks.java │ ├── scala │ └── com │ │ └── datastax │ │ └── alexott │ │ ├── GetDCNames.scala │ │ ├── CodecsTest.scala │ │ └── ObjMapperTest.scala │ └── kotlin │ └── com │ └── datastax │ └── alexott │ └── demos │ └── KtTestObjMapper.kt ├── cassandra-join-spark ├── setup.cql ├── src │ └── main │ │ ├── resources │ │ └── log4j2.xml │ │ ├── java │ │ └── json │ │ │ ├── ticks │ │ │ ├── TickData.java │ │ │ └── TickGenerator.java │ │ │ └── utils │ │ │ └── ExchangeUtils.java │ │ └── scala │ │ └── com │ │ └── datastax │ │ └── alexott │ │ └── demos │ │ └── streaming │ │ ├── StockTickersJoinDataFrames.scala │ │ └── StockTickersJoinRDD.scala └── pom.xml ├── spark-dse ├── src │ └── main │ │ ├── scala │ │ └── com │ │ │ └── datastax │ │ │ └── alexott │ │ │ ├── graphframes │ │ │ └── DGFSubGraph.scala │ │ │ ├── spark │ │ │ ├── JoinTestsScala.scala │ │ │ └── JoinTestsRDDScala.scala │ │ │ ├── dsefs │ │ │ ├── DsefsDownloader.scala │ │ │ ├── DsefsUploader.scala │ │ │ └── DsefsGetMerge.scala │ │ │ └── streaming │ │ │ ├── StructuredStreamingKafkaDSE.scala │ │ │ └── StructuredStreamingDSE.scala │ │ ├── java │ │ └── com │ │ │ └── datastax │ │ │ └── alexott │ │ │ └── demos │ │ │ └── spark │ │ │ ├── SparkTest1.java │ │ │ ├── UUIDData.java │ │ │ ├── JoinTests.java 
│ │ │ ├── UUIDTest.java │ │ │ ├── TableCreate.java │ │ │ └── JoinTestsRDD.java │ │ └── resources │ │ └── tweets-1.json └── pom.xml ├── spark-oss ├── src │ └── main │ │ ├── java │ │ └── com │ │ │ └── datastax │ │ │ └── alexott │ │ │ └── demos │ │ │ └── spark │ │ │ ├── SparkTest1.java │ │ │ ├── UUIDData.java │ │ │ ├── JoinTests.java │ │ │ ├── UUIDTest.java │ │ │ ├── TableCreate.java │ │ │ ├── JoinTestsRDD.java │ │ │ └── streaming │ │ │ └── StructuredStreaming.java │ │ └── scala │ │ └── com │ │ └── datastax │ │ └── alexott │ │ ├── spark │ │ ├── JoinTestsScala.scala │ │ └── JoinTestsRDDScala.scala │ │ └── streaming │ │ ├── StructuredStreamingForEachBatch.scala │ │ └── StructuredStreamingForEach.scala └── pom.xml ├── scc-2.5 ├── src │ └── main │ │ └── scala │ │ └── com │ │ └── datastax │ │ └── alexott │ │ ├── spark │ │ ├── JoinTestsRDDScala.scala │ │ └── JoinTestsScala.scala │ │ └── streaming │ │ ├── StructuredStreamingKafkaDSE.scala │ │ └── StructuredStreamingDSE.scala └── pom.xml ├── prometheus-java-driver ├── README.md ├── src │ └── main │ │ ├── resources │ │ └── application.conf │ │ └── java │ │ └── com │ │ └── datastax │ │ └── alexott │ │ └── demos │ │ └── MetricsWithPrometheus.java └── pom.xml └── README.md /scala-driver-4.x/project/build.properties: -------------------------------------------------------------------------------- 1 | sbt.version=1.3.12 2 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | /.classpath 2 | /.project 3 | /.settings/ 4 | /target/ 5 | *~ 6 | /.idea/ 7 | /jdtest1.iml 8 | /*/target/ 9 | /*/\.idea/ 10 | /*/*.iml 11 | /scala-driver-4.x/project/target/ 12 | -------------------------------------------------------------------------------- /driver-4.x/src/main/resources/application.conf: -------------------------------------------------------------------------------- 1 | datastax-java-driver { 2 | basic.load-balancing-policy { 3 | class = com.datastax.oss.driver.internal.core.loadbalancing.DcInferringLoadBalancingPolicy 4 | } 5 | } -------------------------------------------------------------------------------- /scala-driver-4.x/src/main/resources/application.conf: -------------------------------------------------------------------------------- 1 | datastax-java-driver { 2 | basic.load-balancing-policy { 3 | class = com.datastax.oss.driver.internal.core.loadbalancing.DcInferringLoadBalancingPolicy 4 | } 5 | } -------------------------------------------------------------------------------- /driver-1.x/src/test/java/com/datastax/alexott/demos/TestUtils.java: -------------------------------------------------------------------------------- 1 | package com.datastax.alexott.demos; 2 | 3 | import com.datastax.driver.core.LocalDate; 4 | 5 | import junit.framework.TestCase; 6 | 7 | public class TestUtils extends TestCase { 8 | 9 | public void testDate() { 10 | assertEquals(LocalDate.fromYearMonthDay(2017,11,22), Utils.convertDate("2017-11-22")); 11 | } 12 | 13 | } 14 | -------------------------------------------------------------------------------- /driver-1.x/src/main/java/com/datastax/alexott/demos/Utils.java: -------------------------------------------------------------------------------- 1 | package com.datastax.alexott.demos; 2 | 3 | import com.datastax.driver.core.LocalDate; 4 | 5 | public class Utils { 6 | 7 | public static LocalDate convertDate(final String date) { 8 | String[] arr = date.split("-"); 9 | if (arr.length != 3) 10 | return null; 
11 | return LocalDate.fromYearMonthDay(Integer.parseInt(arr[0]), Integer.parseInt(arr[1]), 12 | Integer.parseInt(arr[2])); 13 | } 14 | } 15 | -------------------------------------------------------------------------------- /cassandra-join-spark/setup.cql: -------------------------------------------------------------------------------- 1 | create keyspace if not exists test WITH replication = {'class': 'SimpleStrategy', 'replication_factor': '1'}; 2 | 3 | use test; 4 | 5 | create table if not exists stock_info ( 6 | symbol text primary key, 7 | exchange text, 8 | industry text, 9 | name text, 10 | base_price double 11 | ); 12 | 13 | truncate stock_info; 14 | COPY stock_info (name, symbol, base_price, exchange, industry) FROM './src/main/resources/json/csv/exchangedata.csv'; 15 | 16 | -------------------------------------------------------------------------------- /cassandra-join-spark/src/main/resources/log4j2.xml: -------------------------------------------------------------------------------- (Log4j 2 configuration file - the XML markup did not survive extraction, so only empty numbered lines remained.) -------------------------------------------------------------------------------- /driver-1.x/src/main/java/com/datastax/alexott/demos/objmapper/STestAccessor.java: -------------------------------------------------------------------------------- 1 | package com.datastax.alexott.demos; 2 | 3 | import com.datastax.driver.mapping.Result; 4 | import com.datastax.driver.mapping.annotations.Accessor; 5 | import com.datastax.driver.mapping.annotations.Param; 6 | import com.datastax.driver.mapping.annotations.Query; 7 | 8 | @Accessor 9 | public interface STestAccessor { 10 | @Query("SELECT * FROM test.stest WHERE solr_query = :solr") 11 | 12 | Result<STest> getViaSolr(@Param("solr") String solr); 13 | } 14 | -------------------------------------------------------------------------------- /spark-dse/src/main/scala/com/datastax/alexott/graphframes/DGFSubGraph.scala: -------------------------------------------------------------------------------- 1 | package com.datastax.alexott.graphframes 2 | 3 | import org.apache.spark.SparkContext 4 | import org.apache.spark.sql.SparkSession 5 | //import com.datastax.bdp.graph.spark.graphframe._ 6 | 7 | object DGFSubGraph { 8 | def main(args: Array[String]): Unit = { 9 | val sc = new SparkContext() 10 | val spark = SparkSession.builder().config(sc.getConf).getOrCreate() 11 | import spark.implicits._ 12 | 13 | // val graphBuilder = spark.dseGraph("GRAPH_NAME") 14 | 15 | 16 | } 17 | 18 | } 19 | -------------------------------------------------------------------------------- /spark-dse/src/main/java/com/datastax/alexott/demos/spark/SparkTest1.java: -------------------------------------------------------------------------------- 1 | package com.datastax.alexott.demos.spark; 2 | 3 | import org.apache.spark.sql.Dataset; 4 | import org.apache.spark.sql.Row; 5 | import org.apache.spark.sql.SparkSession; 6 | 7 | public class SparkTest1 { 8 | 9 | public static void main(String[] args) { 10 | SparkSession spark = SparkSession 11 | .builder() 12 | .appName("CassandraSpark") 13 | .getOrCreate(); 14 | 15 | Dataset<Row> sqlDF = spark.sql("select * from datastax.vehicle limit 1000"); 16 | sqlDF.printSchema(); 17 | sqlDF.show(); 18 | } 19 | 20 | } 21 | -------------------------------------------------------------------------------- /spark-oss/src/main/java/com/datastax/alexott/demos/spark/SparkTest1.java: -------------------------------------------------------------------------------- 1 | package com.datastax.alexott.demos.spark; 2 | 3 | import
org.apache.spark.sql.Dataset; 4 | import org.apache.spark.sql.Row; 5 | import org.apache.spark.sql.SparkSession; 6 | 7 | public class SparkTest1 { 8 | 9 | public static void main(String[] args) { 10 | SparkSession spark = SparkSession 11 | .builder() 12 | .appName("CassandraSpark") 13 | .getOrCreate(); 14 | 15 | Dataset sqlDF = spark.sql("select * from datastax.vehicle limit 1000"); 16 | sqlDF.printSchema(); 17 | sqlDF.show(); 18 | } 19 | 20 | } 21 | -------------------------------------------------------------------------------- /driver-1.x/src/main/java/com/datastax/alexott/demos/objmapper/STest.java: -------------------------------------------------------------------------------- 1 | package com.datastax.alexott.demos; 2 | 3 | import com.datastax.driver.mapping.annotations.PartitionKey; 4 | import com.datastax.driver.mapping.annotations.Table; 5 | 6 | @Table(keyspace = "test",name = "stest") 7 | public class STest { 8 | @PartitionKey 9 | private int id; 10 | private String t; 11 | 12 | public int getId() { 13 | return id; 14 | } 15 | 16 | public void setId(int id) { 17 | this.id = id; 18 | } 19 | 20 | public String getT() { 21 | return t; 22 | } 23 | 24 | public void setT(String t) { 25 | this.t = t; 26 | } 27 | 28 | } 29 | -------------------------------------------------------------------------------- /spark-dse/src/main/java/com/datastax/alexott/demos/spark/UUIDData.java: -------------------------------------------------------------------------------- 1 | package com.datastax.alexott.demos.spark; 2 | 3 | import java.util.UUID; 4 | 5 | public class UUIDData { 6 | private UUID u; 7 | private int id; 8 | 9 | public UUIDData() { 10 | } 11 | public UUIDData(int id, UUID u) { 12 | this.u = u; 13 | this.id = id; 14 | } 15 | 16 | public UUID getU() { 17 | return u; 18 | } 19 | 20 | public void setU(UUID u) { 21 | this.u = u; 22 | } 23 | 24 | public int getId() { 25 | return id; 26 | } 27 | 28 | public void setId(int id) { 29 | this.id = id; 30 | } 31 | } 32 | -------------------------------------------------------------------------------- /spark-oss/src/main/java/com/datastax/alexott/demos/spark/UUIDData.java: -------------------------------------------------------------------------------- 1 | package com.datastax.alexott.demos.spark; 2 | 3 | import java.util.UUID; 4 | 5 | public class UUIDData { 6 | private UUID u; 7 | private int id; 8 | 9 | public UUIDData() { 10 | } 11 | public UUIDData(int id, UUID u) { 12 | this.u = u; 13 | this.id = id; 14 | } 15 | 16 | public UUID getU() { 17 | return u; 18 | } 19 | 20 | public void setU(UUID u) { 21 | this.u = u; 22 | } 23 | 24 | public int getId() { 25 | return id; 26 | } 27 | 28 | public void setId(int id) { 29 | this.id = id; 30 | } 31 | } 32 | -------------------------------------------------------------------------------- /driver-1.x/src/main/java/com/datastax/alexott/demos/product/Information.java: -------------------------------------------------------------------------------- 1 | package com.datastax.alexott.demos.product; 2 | 3 | import com.datastax.driver.mapping.annotations.UDT; 4 | 5 | @UDT(keyspace = "test", name = "information") 6 | public class Information { 7 | String info1; 8 | String info2; 9 | 10 | public String getInfo1() { 11 | return info1; 12 | } 13 | public void setInfo1(String info1) { 14 | this.info1 = info1; 15 | } 16 | public String getInfo2() { 17 | return info2; 18 | } 19 | public void setInfo2(String info2) { 20 | this.info2 = info2; 21 | } 22 | @Override 23 | public String toString() { 24 | return "Information 
[info1=" + info1 + ", info2=" + info2 + "]"; 25 | } 26 | 27 | } 28 | -------------------------------------------------------------------------------- /driver-1.x/src/main/scala/com/datastax/alexott/GetDCNames.scala: -------------------------------------------------------------------------------- 1 | package com.datastax.alexott 2 | 3 | import com.datastax.driver.core.Cluster 4 | 5 | import scala.collection.JavaConverters 6 | 7 | object GetDCNames { 8 | 9 | def main(args: Array[String]): Unit = { 10 | 11 | val cluster = Cluster.builder() 12 | .addContactPoint(System.getProperty("contactPoint", "127.0.0.1")) 13 | .build(); 14 | val session = cluster.connect() 15 | 16 | val metadata = cluster.getMetadata 17 | val hosts = JavaConverters.collectionAsScalaIterableConverter(metadata.getAllHosts).asScala.toSeq 18 | val dcs = hosts.map{host => host.getDatacenter}.toSet 19 | 20 | println("All DCs: " + dcs) 21 | } 22 | } 23 | -------------------------------------------------------------------------------- /driver-1.x/src/main/java/com/datastax/alexott/demos/objmapper/TableObjAccessor.java: -------------------------------------------------------------------------------- 1 | package com.datastax.alexott.demos.objmapper; 2 | 3 | import com.datastax.driver.mapping.Result; 4 | import com.datastax.driver.mapping.annotations.Accessor; 5 | import com.datastax.driver.mapping.annotations.Param; 6 | import com.datastax.driver.mapping.annotations.Query; 7 | 8 | @Accessor 9 | public interface TableObjAccessor { 10 | @Query("SELECT * from test.scala_test_complex where p1 = :p1 and p2 = :p2") 11 | Result getByPartKey(@Param int p1, @Param int p2); 12 | 13 | @Query("DELETE from test.scala_test_complex where p1 = :p1 and p2 = :p2") 14 | void deleteByPartKey(@Param int p1, @Param int p2); 15 | } 16 | -------------------------------------------------------------------------------- /driver-1.x/src/main/java/com/datastax/alexott/demos/objmapper/UDTTestType.java: -------------------------------------------------------------------------------- 1 | package com.datastax.alexott.demos; 2 | 3 | import com.datastax.driver.mapping.annotations.UDT; 4 | 5 | // CREATE TYPE test.tudt (id int, t text); 6 | @UDT(name = "tudt", keyspace = "test") 7 | public class UDTTestType { 8 | int id; 9 | String t; 10 | 11 | public UDTTestType(int id, String t) { 12 | this.id = id; 13 | this.t = t; 14 | } 15 | 16 | public UDTTestType() { 17 | } 18 | 19 | public int getId() { 20 | return id; 21 | } 22 | 23 | public void setId(int id) { 24 | this.id = id; 25 | } 26 | 27 | public String getT() { 28 | return t; 29 | } 30 | 31 | public void setT(String t) { 32 | this.t = t; 33 | } 34 | } 35 | -------------------------------------------------------------------------------- /spark-dse/src/main/scala/com/datastax/alexott/spark/JoinTestsScala.scala: -------------------------------------------------------------------------------- 1 | package com.datastax.alexott.spark 2 | 3 | import org.apache.spark.SparkContext 4 | import org.apache.spark.sql.SparkSession 5 | 6 | object JoinTestsScala { 7 | def main(args: Array[String]): Unit = { 8 | 9 | val sc = new SparkContext() 10 | val spark = SparkSession.builder().config(sc.getConf).getOrCreate() 11 | import spark.implicits._ 12 | 13 | val toJoin = spark.range(1, 1000).map(x => x.intValue).withColumnRenamed("value", "id") 14 | 15 | val dataset = spark.read 16 | .format("org.apache.spark.sql.cassandra") 17 | .options(Map("table" -> "jtest", "keyspace" -> "test")) 18 | .load 19 | val joined = toJoin.join(dataset, 
dataset("id") === toJoin("id")) 20 | joined.explain 21 | joined.show(10) 22 | } 23 | 24 | } 25 | -------------------------------------------------------------------------------- /spark-oss/src/main/scala/com/datastax/alexott/spark/JoinTestsScala.scala: -------------------------------------------------------------------------------- 1 | package com.datastax.alexott.spark 2 | 3 | import org.apache.spark.SparkContext 4 | import org.apache.spark.sql.SparkSession 5 | 6 | object JoinTestsScala { 7 | def main(args: Array[String]): Unit = { 8 | 9 | val sc = new SparkContext() 10 | val spark = SparkSession.builder().config(sc.getConf).getOrCreate() 11 | import spark.implicits._ 12 | 13 | val toJoin = spark.range(1, 1000).map(x => x.intValue).withColumnRenamed("value", "id") 14 | 15 | val dataset = spark.read 16 | .format("org.apache.spark.sql.cassandra") 17 | .options(Map("table" -> "jtest", "keyspace" -> "test")) 18 | .load 19 | val joined = toJoin.join(dataset, dataset("id") === toJoin("id")) 20 | joined.explain 21 | joined.show(10) 22 | } 23 | 24 | } 25 | -------------------------------------------------------------------------------- /driver-1.x/src/main/java/com/datastax/alexott/demos/solr/DTest.java: -------------------------------------------------------------------------------- 1 | package com.datastax.alexott.demos; 2 | 3 | import java.time.Instant; 4 | import java.util.Date; 5 | import org.apache.solr.client.solrj.beans.Field; 6 | import com.fasterxml.jackson.annotation.JsonIgnore; 7 | 8 | public class DTest { 9 | @Field("id") 10 | private int id; 11 | 12 | private Instant t; 13 | 14 | public int getId() { 15 | return id; 16 | } 17 | 18 | public void setId(int id) { 19 | this.id = id; 20 | } 21 | 22 | public Date getT() { 23 | return new Date(t.toEpochMilli()); 24 | } 25 | 26 | @Field("t") 27 | public void setT(Date t) { 28 | this.t = t.toInstant(); 29 | } 30 | 31 | @JsonIgnore 32 | public void setInstant(Instant t) { 33 | this.t = t; 34 | } 35 | 36 | @JsonIgnore 37 | public Instant getInstant() { 38 | return t; 39 | } 40 | } 41 | -------------------------------------------------------------------------------- /scc-2.5/src/main/scala/com/datastax/alexott/spark/JoinTestsRDDScala.scala: -------------------------------------------------------------------------------- 1 | package com.datastax.alexott.spark 2 | 3 | import org.apache.spark.SparkContext 4 | import org.apache.spark.sql.SparkSession 5 | import com.datastax.spark.connector._ 6 | 7 | object JoinTestsRDDScala { 8 | def main(args: Array[String]): Unit = { 9 | 10 | val sc = new SparkContext() 11 | val spark = SparkSession.builder().config(sc.getConf).getOrCreate() 12 | import spark.implicits._ 13 | 14 | val toJoin = spark.range(1, 100).map(x => x.intValue).withColumnRenamed("value", "id").rdd 15 | 16 | val joined = toJoin.joinWithCassandraTable("test","jtest") 17 | println("Plan: " + joined.toDebugString) 18 | joined.cache() 19 | println("Count: " + joined.count()) 20 | print("Data: ") 21 | joined.take(10).foreach(print) 22 | println() 23 | 24 | } 25 | 26 | } 27 | -------------------------------------------------------------------------------- /spark-dse/src/main/scala/com/datastax/alexott/spark/JoinTestsRDDScala.scala: -------------------------------------------------------------------------------- 1 | package com.datastax.alexott.spark 2 | 3 | import org.apache.spark.SparkContext 4 | import org.apache.spark.sql.SparkSession 5 | import com.datastax.spark.connector._ 6 | 7 | object JoinTestsRDDScala { 8 | def main(args: Array[String]): 
Unit = { 9 | 10 | val sc = new SparkContext() 11 | val spark = SparkSession.builder().config(sc.getConf).getOrCreate() 12 | import spark.implicits._ 13 | 14 | val toJoin = spark.range(1, 100).map(x => x.intValue).withColumnRenamed("value", "id").rdd 15 | 16 | val joined = toJoin.joinWithCassandraTable("test","jtest") 17 | println("Plan: " + joined.toDebugString) 18 | joined.cache() 19 | println("Count: " + joined.count()) 20 | print("Data: ") 21 | joined.take(10).foreach(print) 22 | println() 23 | 24 | } 25 | 26 | } 27 | -------------------------------------------------------------------------------- /spark-oss/src/main/scala/com/datastax/alexott/spark/JoinTestsRDDScala.scala: -------------------------------------------------------------------------------- 1 | package com.datastax.alexott.spark 2 | 3 | import org.apache.spark.SparkContext 4 | import org.apache.spark.sql.SparkSession 5 | import com.datastax.spark.connector._ 6 | 7 | object JoinTestsRDDScala { 8 | def main(args: Array[String]): Unit = { 9 | 10 | val sc = new SparkContext() 11 | val spark = SparkSession.builder().config(sc.getConf).getOrCreate() 12 | import spark.implicits._ 13 | 14 | val toJoin = spark.range(1, 100).map(x => x.intValue).withColumnRenamed("value", "id").rdd 15 | 16 | val joined = toJoin.joinWithCassandraTable("test","jtest") 17 | println("Plan: " + joined.toDebugString) 18 | joined.cache() 19 | println("Count: " + joined.count()) 20 | print("Data: ") 21 | joined.take(10).foreach(print) 22 | println() 23 | 24 | } 25 | 26 | } 27 | -------------------------------------------------------------------------------- /driver-1.x/src/main/java/com/datastax/alexott/demos/objmapper/ExpEntity.java: -------------------------------------------------------------------------------- 1 | package com.datastax.alexott.demos; 2 | 3 | import com.datastax.driver.mapping.annotations.PartitionKey; 4 | import com.datastax.driver.mapping.annotations.Table; 5 | 6 | import java.util.Set; 7 | 8 | @Table(name = "entities_udt", keyspace = "srs") 9 | public class ExpEntity { 10 | 11 | @PartitionKey 12 | int hcom_geo_id; 13 | 14 | Set popularity; 15 | 16 | public int getHcom_geo_id() { 17 | return hcom_geo_id; 18 | } 19 | 20 | public void setHcom_geo_id(int hcom_geo_id) { 21 | this.hcom_geo_id = hcom_geo_id; 22 | } 23 | 24 | public Set getPopularity() { 25 | return popularity; 26 | } 27 | 28 | public void setPopularity(Set popularity) { 29 | this.popularity = popularity; 30 | } 31 | } 32 | -------------------------------------------------------------------------------- /driver-4.x/src/main/java/com/datastax/alexott/demos/TestAstra.java: -------------------------------------------------------------------------------- 1 | package com.datastax.alexott.demos; 2 | 3 | import com.datastax.oss.driver.api.core.CqlSession; 4 | import com.datastax.oss.driver.api.core.cql.ResultSet; 5 | import com.datastax.oss.driver.api.core.cql.Row; 6 | 7 | import java.nio.file.Paths; 8 | 9 | public class TestAstra { 10 | public static void main(String[] args) { 11 | try (CqlSession session = CqlSession.builder() 12 | .withCloudSecureConnectBundle(Paths.get("/Users/ott/Downloads/secure-connect-test.zip")) 13 | .withAuthCredentials("test", "...") 14 | .build()) { 15 | ResultSet rs = session.execute("select id,v from test.t1"); 16 | for (Row row: rs) { 17 | System.out.println("id=" + row.getInt("id") + ", v=" + row.getInt("v")); 18 | } 19 | } 20 | } 21 | } 22 | -------------------------------------------------------------------------------- 
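The TestAstra example above issues a single hard-coded query. Shown below is a minimal sketch of the same driver 4.x flow using a prepared statement instead; it is an illustration and not a file from this repository, and the class name, the bundle path placeholder, the credentials, and the test.t1 table are assumptions carried over from the example above.

```java
package com.datastax.alexott.demos;

import java.nio.file.Paths;

import com.datastax.oss.driver.api.core.CqlSession;
import com.datastax.oss.driver.api.core.cql.PreparedStatement;
import com.datastax.oss.driver.api.core.cql.Row;

// Hypothetical sketch: query Astra with a prepared statement (assumes the same test.t1 table as TestAstra).
public class TestAstraPrepared {
    public static void main(String[] args) {
        try (CqlSession session = CqlSession.builder()
                .withCloudSecureConnectBundle(Paths.get("/path/to/secure-connect-test.zip"))
                .withAuthCredentials("test", "...")
                .build()) {
            // Prepared once, then reused with different bound values.
            PreparedStatement ps = session.prepare("select id, v from test.t1 where id = ?");
            Row row = session.execute(ps.bind(1)).one();
            if (row != null) {
                System.out.println("id=" + row.getInt("id") + ", v=" + row.getInt("v"));
            }
        }
    }
}
```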
/driver-1.x/src/main/java/com/datastax/alexott/demos/objmapper/TableObjJavaTest.java: -------------------------------------------------------------------------------- 1 | package com.datastax.alexott.demos.objmapper; 2 | 3 | import com.datastax.driver.core.Cluster; 4 | import com.datastax.driver.core.Session; 5 | import com.datastax.driver.mapping.Mapper; 6 | import com.datastax.driver.mapping.MappingManager; 7 | 8 | public class TableObjJavaTest { 9 | public static void main(String[] args) { 10 | String server = System.getProperty("contactPoint", "127.0.0.1"); 11 | try (Cluster cluster = Cluster.builder().addContactPoint(server).build(); 12 | Session session = cluster.connect()) { 13 | MappingManager manager = new MappingManager(session); 14 | Mapper mapper = manager.mapper(TableObjJava.class); 15 | 16 | TableObjJava obj = mapper.get(1); 17 | System.out.println("Obj(1)=" + obj); 18 | } 19 | } 20 | 21 | } 22 | -------------------------------------------------------------------------------- /driver-4.x/src/main/java/com/datastax/alexott/demos/TestPointType.java: -------------------------------------------------------------------------------- 1 | package com.datastax.alexott.demos; 2 | 3 | import com.datastax.oss.driver.api.core.CqlSession; 4 | import com.datastax.oss.driver.api.core.cql.ResultSet; 5 | import com.datastax.oss.driver.api.core.cql.Row; 6 | import com.datastax.dse.driver.api.core.data.geometry.Point; 7 | 8 | public class TestPointType { 9 | public static void main(String[] args) { 10 | try (CqlSession session = CqlSession.builder() 11 | .addContactPoints(Commons.getContactPoints("10.101.34.176,10.101.34.94")) 12 | .build()) { 13 | ResultSet rs = session.execute("select point from test.gen_events1"); 14 | for (Row row: rs) { 15 | Point point = row.get("point", Point.class); 16 | if (point != null) 17 | System.out.println("point = " + point); 18 | } 19 | } 20 | 21 | } 22 | } 23 | -------------------------------------------------------------------------------- /driver-1.x/src/main/java/com/datastax/alexott/demos/objmapper/STestMain.java: -------------------------------------------------------------------------------- 1 | package com.datastax.alexott.demos; 2 | 3 | import com.datastax.driver.core.Cluster; 4 | import com.datastax.driver.core.Session; 5 | import com.datastax.driver.mapping.MappingManager; 6 | import com.datastax.driver.mapping.Result; 7 | 8 | public class STestMain { 9 | public static void main(String[] args) { 10 | String server = System.getProperty("contactPoint", "127.0.0.1"); 11 | Cluster cluster = Cluster.builder().addContactPoint(server).build(); 12 | Session session = cluster.connect(); 13 | 14 | MappingManager manager = new MappingManager(session); 15 | 16 | STestAccessor sa = manager.createAccessor(STestAccessor.class); 17 | Result rs = sa.getViaSolr("*:*"); 18 | 19 | for (STest sTest : rs) { 20 | System.out.println("id=" + sTest.getId() + ", text=" + sTest.getT()); 21 | } 22 | 23 | session.close(); 24 | cluster.close(); 25 | } 26 | 27 | } 28 | -------------------------------------------------------------------------------- /driver-1.x/src/main/java/com/datastax/alexott/demos/product/App.java: -------------------------------------------------------------------------------- 1 | package com.datastax.alexott.demos.product; 2 | 3 | import com.datastax.driver.core.Cluster; 4 | import com.datastax.driver.core.CodecRegistry; 5 | import com.datastax.driver.core.Session; 6 | import com.datastax.driver.mapping.Mapper; 7 | import 
com.datastax.driver.mapping.MappingManager; 8 | 9 | public class App { 10 | 11 | public static void main(String[] args) { 12 | CodecRegistry codecRegistry = CodecRegistry.DEFAULT_INSTANCE; 13 | String server = System.getProperty("contactPoint", "127.0.0.1"); 14 | Cluster cluster = Cluster.builder().addContactPoint(server).withCodecRegistry(codecRegistry).build(); 15 | Session session = cluster.connect(); 16 | 17 | MappingManager manager = new MappingManager(session); 18 | Mapper mapper = manager.mapper(Product.class); 19 | Product product = mapper.get("test"); 20 | System.out.println("Product: " + product); 21 | 22 | session.close(); 23 | } 24 | 25 | } 26 | -------------------------------------------------------------------------------- /scala-driver-4.x/src/main/scala/com/datastax/alexott/demos/objmapper/ObjeMapperTest.scala: -------------------------------------------------------------------------------- 1 | package com.datastax.alexott.demos.objmapper 2 | 3 | import java.net.InetSocketAddress 4 | import java.util 5 | 6 | import com.datastax.oss.driver.api.core.CqlSession 7 | 8 | import collection.JavaConverters._ 9 | 10 | object ObjeMapperTest { 11 | 12 | val CQL_PORT: Int = System.getProperty("cqlPort", "9042").toInt 13 | 14 | def getContactPoints(contactPoints: String): util.Collection[InetSocketAddress] = { 15 | contactPoints.split(",") 16 | .map(host => InetSocketAddress.createUnresolved(host, CQL_PORT)) 17 | .toSeq.asJava 18 | } 19 | 20 | def main(args: Array[String]): Unit = { 21 | val session = CqlSession.builder.addContactPoints( 22 | getContactPoints("10.101.34.176")).build 23 | 24 | session.execute("select * from system_auth.roles") 25 | .all().asScala.foreach(x => println(x.getFormattedContents)) 26 | 27 | 28 | session.close() 29 | } 30 | 31 | } 32 | -------------------------------------------------------------------------------- /driver-1.x/src/main/java/com/datastax/alexott/demos/objmapper/TestData.java: -------------------------------------------------------------------------------- 1 | package com.datastax.alexott.demos; 2 | 3 | import java.time.LocalDate; 4 | import java.util.UUID; 5 | 6 | import com.datastax.driver.extras.codecs.jdk8.LocalDateCodec; 7 | import com.datastax.driver.mapping.annotations.Column; 8 | import com.datastax.driver.mapping.annotations.PartitionKey; 9 | import com.datastax.driver.mapping.annotations.Table; 10 | 11 | @Table(keyspace = "test", name = "dtest", readConsistency = "ONE", writeConsistency = "ONE") 12 | public class TestData { 13 | @PartitionKey 14 | @Column(name = "id") 15 | private UUID id; 16 | 17 | @Column(name = "ddate", codec = LocalDateCodec.class) 18 | LocalDate ddate; 19 | 20 | public UUID getId() { 21 | return id; 22 | } 23 | 24 | public void setId(UUID id) { 25 | this.id = id; 26 | } 27 | 28 | public LocalDate getDdate() { 29 | return ddate; 30 | } 31 | 32 | public void setDdate(LocalDate ddate) { 33 | this.ddate = ddate; 34 | } 35 | 36 | } 37 | -------------------------------------------------------------------------------- /driver-1.x/src/main/java/com/datastax/alexott/demos/objmapper/AuditTestType.java: -------------------------------------------------------------------------------- 1 | package com.datastax.alexott.demos; 2 | 3 | import com.datastax.driver.mapping.annotations.UDT; 4 | 5 | // create type test.audit_type(id int, t text); 6 | @UDT(keyspace="test", name="audit_type") 7 | public class AuditTestType { 8 | int id; 9 | String t; 10 | 11 | public AuditTestType() { 12 | } 13 | 14 | public AuditTestType(int id, String 
t) { 15 | this.id = id; 16 | this.t = t; 17 | } 18 | 19 | public int getId() { 20 | return id; 21 | } 22 | 23 | public void setId(int id) { 24 | this.id = id; 25 | } 26 | 27 | public String getT() { 28 | return t; 29 | } 30 | 31 | public void setT(String t) { 32 | this.t = t; 33 | } 34 | 35 | @Override 36 | public String toString() { 37 | return "AuditTestType{" + 38 | "id=" + id + 39 | ", t='" + t + '\'' + 40 | '}'; 41 | } 42 | } 43 | -------------------------------------------------------------------------------- /scala-driver-4.x/src/main/scala/com/datastax/alexott/demos/objmapper/Entitites.scala: -------------------------------------------------------------------------------- 1 | package com.datastax.alexott.demos.objmapper 2 | 3 | import com.datastax.oss.driver.api.mapper.annotations.{CqlName, Entity, PartitionKey} 4 | 5 | import scala.annotation.meta.field 6 | 7 | /* 8 | 9 | CREATE TYPE test.udt ( 10 | id int, 11 | t1 int, 12 | t2 int, 13 | a2 int 14 | ); 15 | CREATE TABLE test.u2 ( 16 | id int PRIMARY KEY, 17 | u udt 18 | ); 19 | */ 20 | 21 | 22 | @Entity 23 | case class udt(@(CqlName @field)("id") id: java.lang.Integer, 24 | @(CqlName @field)("t1") t1: java.lang.Integer, 25 | @(CqlName @field)("t2") t2: java.lang.Integer, 26 | @(CqlName @field)("a2") a2: java.lang.Integer) { 27 | def this() { 28 | this(0,0,0,0) 29 | } 30 | } 31 | 32 | @Entity 33 | case class u2(@(PartitionKey @field) id: java.lang.Integer, 34 | @(CqlName @field)(value = "udt") udt: udt) { 35 | def this() { 36 | this(0, new udt) 37 | } 38 | } -------------------------------------------------------------------------------- /driver-1.x/src/main/java/com/datastax/alexott/demos/objmapper/TableObjAccessorTest.java: -------------------------------------------------------------------------------- 1 | package com.datastax.alexott.demos.objmapper; 2 | 3 | import com.datastax.driver.core.Cluster; 4 | import com.datastax.driver.core.Session; 5 | import com.datastax.driver.mapping.MappingManager; 6 | import com.datastax.driver.mapping.Result; 7 | 8 | public class TableObjAccessorTest { 9 | public static void main(String[] args) { 10 | String server = System.getProperty("contactPoint", "127.0.0.1"); 11 | try (Cluster cluster = Cluster.builder().addContactPoint(server).build(); 12 | Session session = cluster.connect()) { 13 | MappingManager manager = new MappingManager(session); 14 | TableObjAccessor accessor = manager.createAccessor(TableObjAccessor.class); 15 | Result objs = accessor.getByPartKey(0, 1); 16 | for (TableObjectClustered obj: objs) { 17 | System.out.println("Obj=" + obj); 18 | } 19 | accessor.deleteByPartKey(0,0); 20 | } 21 | 22 | } 23 | } 24 | -------------------------------------------------------------------------------- /scc-2.5/src/main/scala/com/datastax/alexott/spark/JoinTestsScala.scala: -------------------------------------------------------------------------------- 1 | package com.datastax.alexott.spark 2 | 3 | import org.apache.spark.SparkContext 4 | import org.apache.spark.sql.SparkSession 5 | 6 | /** 7 | * Demonstration of Cassandra direct join in the SCC 2.5+ 8 | * 9 | * spark-submit need to be executed with 10 | * --conf spark.sql.extensions=com.datastax.spark.connector.CassandraSparkExtensions 11 | */ 12 | object JoinTestsScala { 13 | def main(args: Array[String]): Unit = { 14 | 15 | val sc = new SparkContext() 16 | val spark = SparkSession.builder() 17 | .config(sc.getConf) 18 | .getOrCreate() 19 | import spark.implicits._ 20 | 21 | val toJoin = spark.range(1, 1000).map(x => 
x.intValue).withColumnRenamed("value", "id") 22 | 23 | val dataset = spark.read 24 | .format("org.apache.spark.sql.cassandra") 25 | .options(Map("table" -> "jtest", "keyspace" -> "test")) 26 | .load 27 | val joined = toJoin.join(dataset, dataset("id") === toJoin("id")) 28 | joined.explain 29 | joined.show(10) 30 | } 31 | 32 | } 33 | -------------------------------------------------------------------------------- /driver-1.x/src/main/java/com/datastax/alexott/demos/objmapper/MapperTest1.java: -------------------------------------------------------------------------------- 1 | package com.datastax.alexott.demos; 2 | 3 | import java.util.UUID; 4 | 5 | import com.datastax.driver.core.Cluster; 6 | import com.datastax.driver.core.Session; 7 | import com.datastax.driver.mapping.Mapper; 8 | import com.datastax.driver.mapping.MappingManager; 9 | 10 | public class MapperTest1 { 11 | public static void main(String[] args) { 12 | String server = System.getProperty("contactPoint", "127.0.0.1"); 13 | Cluster cluster = Cluster.builder().addContactPoint(server).build(); 14 | Session session = cluster.connect(); 15 | 16 | MappingManager manager = new MappingManager(session); 17 | 18 | Mapper mapper = manager.mapper(TestData.class); 19 | 20 | UUID uuid = UUID.fromString("e7ae5cf3-d358-4d99-b900-85902fda9bb1"); 21 | TestData td = mapper.get(uuid); 22 | 23 | if (td == null) { 24 | System.out.println("Can't find given UUID"); 25 | } else { 26 | System.out.println("UUID: " + td.getId() + ", date: " + td.getDdate()); 27 | } 28 | 29 | session.close(); 30 | cluster.close(); 31 | } 32 | } 33 | -------------------------------------------------------------------------------- /driver-1.x/src/main/java/com/datastax/alexott/demos/objmapper/TableObjJava.java: -------------------------------------------------------------------------------- 1 | package com.datastax.alexott.demos.objmapper; 2 | 3 | import com.datastax.driver.mapping.annotations.PartitionKey; 4 | import com.datastax.driver.mapping.annotations.Table; 5 | 6 | import java.util.Date; 7 | 8 | @Table(name="scala_test", keyspace = "test") 9 | public class TableObjJava { 10 | @PartitionKey 11 | int id = 0; 12 | String t = ""; 13 | Date tm = new Date(); 14 | 15 | public int getId() { 16 | return id; 17 | } 18 | 19 | public void setId(int id) { 20 | this.id = id; 21 | } 22 | 23 | public String getT() { 24 | return t; 25 | } 26 | 27 | public void setT(String t) { 28 | this.t = t; 29 | } 30 | 31 | public Date getTm() { 32 | return tm; 33 | } 34 | 35 | public void setTm(Date tm) { 36 | this.tm = tm; 37 | } 38 | 39 | @Override 40 | public String toString() { 41 | return "TableObjJava{" + 42 | "id=" + id + 43 | ", t='" + t + '\'' + 44 | ", tm=" + tm + 45 | '}'; 46 | } 47 | } 48 | -------------------------------------------------------------------------------- /prometheus-java-driver/README.md: -------------------------------------------------------------------------------- 1 | This directory contains an example of how we can export metrics from DataStax Java driver 2 | 4.x to Prometheus via [Prometheus Java Client](https://github.com/prometheus/client_java). 
3 | 4 | Exporting [Java driver metrics](https://docs.datastax.com/en/developer/java-driver/4.3/manual/core/metrics/) is simple: we just need to add the following lines: 5 | 6 | ```java 7 | MetricRegistry registry = session.getMetrics() 8 | .orElseThrow(() -> new IllegalStateException("Metrics are disabled")) 9 | .getRegistry(); 10 | CollectorRegistry.defaultRegistry.register(new DropwizardExports(registry)); 11 | ``` 12 | 13 | and then expose the metrics to Prometheus via a specific implementation - this example uses 14 | Prometheus's `HTTPServer`, running on port 9095 (overridable via the `prometheusPort` Java 15 | property). 16 | 17 | Run the example with the following command: 18 | 19 | ```sh 20 | mvn clean compile exec:java -Dexec.mainClass="com.datastax.alexott.demos.MetricsWithPrometheus" \ 21 | -DcontactPoint=10.101.34.241 -DdcName=dc_datastax 22 | ``` 23 | 24 | You need to pass the contact point and data center name as Java properties (`contactPoint` and 25 | `dcName` respectively). 26 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | This repository contains various code samples related to the usage of DataStax 2 | Enterprise (DSE). This repository is the successor of the 3 | [dse-java-playground](https://github.com/alexott/dse-java-playground) repository, but it is 4 | restructured to be more modular, so we can have examples for different versions of the Java 5 | driver, a separate artifact for Spark code, etc. 6 | 7 | The code is organized as follows: 8 | 9 | * `driver-1.x` - samples that use DSE Java driver 1.x (mostly compatible with DataStax 10 | Java driver 3.x); 11 | * `driver-4.x` - samples that use DataStax Java driver 4.x (mostly compatible with DSE 12 | Java driver 2.x); 13 | * `spark-dse` - samples that use DSE Analytics (should be mostly compatible with OSS 14 | Spark, but there are some differences, such as support for DSE Direct Join for data 15 | frames); 16 | * `spark-oss` - samples that demonstrate the use of Spark with the OSS Spark Cassandra 17 | Connector, version < 2.5.0; 18 | * `scc-2.5` - samples that demonstrate the use of Spark with the OSS Spark Cassandra 19 | Connector, version = 2.5.x; 20 | * `scc-3.0` - samples that demonstrate the use of Spark with the OSS Spark Cassandra 21 | Connector, version >= 3.0. 22 | 23 | 24 | -------------------------------------------------------------------------------- /driver-1.x/src/main/java/com/datastax/alexott/demos/misc/Test3.java: -------------------------------------------------------------------------------- 1 | package com.datastax.alexott.demos.misc; 2 | import java.util.Collections; 3 | 4 | import com.datastax.driver.core.BoundStatement; 5 | import com.datastax.driver.core.Cluster; 6 | import com.datastax.driver.core.PreparedStatement; 7 | import com.datastax.driver.core.Session; 8 | import com.fasterxml.jackson.core.JsonProcessingException; 9 | 10 | public class Test3 { 11 | public static void main(String[] args) throws JsonProcessingException { 12 | String server = System.getProperty("contactPoint", "127.0.0.1"); 13 | Cluster cluster = Cluster.builder().addContactPoint(server).withCredentials("user.0", "password").build(); 14 | Session session = cluster.connect(); 15 | 16 | PreparedStatement prepared = session.prepare("UPDATE test.st SET cities = cities + ? WHERE zip = ?
and state = ?"); 17 | 18 | BoundStatement bound = prepared.bind(Collections.singleton("t2"), "2", "1"); 19 | session.execute(bound); 20 | 21 | BoundStatement bound2 = prepared.bind(); 22 | bound2.setSet(0, Collections.singleton("t3")); 23 | bound2.setString(1, "2"); 24 | bound2.setString(2, "1"); 25 | session.execute(bound2); 26 | 27 | session.close(); 28 | cluster.close(); 29 | } 30 | 31 | } 32 | -------------------------------------------------------------------------------- /driver-4.x/src/main/java/com/datastax/alexott/demos/CreateKeyspacesInferTopology.java: -------------------------------------------------------------------------------- 1 | package com.datastax.alexott.demos; 2 | 3 | import com.datastax.oss.driver.api.core.CqlSession; 4 | 5 | import java.util.Map; 6 | import java.util.TreeMap; 7 | 8 | import static com.datastax.oss.driver.api.querybuilder.SchemaBuilder.*; 9 | 10 | public class CreateKeyspacesInferTopology { 11 | private static final String KS_NAME = "my_super_ks"; 12 | private static final int MAX_RF = 3; 13 | 14 | public static void main(String[] args) { 15 | try (CqlSession session = CqlSession.builder() 16 | .addContactPoints(Commons.getContactPoints()) 17 | .build()) { 18 | Commons.executeDDL(session, dropKeyspace(KS_NAME).ifExists().build()); 19 | Map rfPerDC = new TreeMap<>(); 20 | for (Map.Entry e: Commons.getDataCenters(session).entrySet()) { 21 | rfPerDC.put(e.getKey(), Math.min(e.getValue(), MAX_RF)); 22 | } 23 | 24 | Commons.executeDDL(session, 25 | createKeyspace(KS_NAME).ifNotExists() 26 | .withNetworkTopologyStrategy(rfPerDC).build()); 27 | 28 | } catch (InterruptedException e) { 29 | e.printStackTrace(); 30 | } 31 | } 32 | 33 | } 34 | -------------------------------------------------------------------------------- /driver-1.x/src/main/java/com/datastax/alexott/demos/product/Product.java: -------------------------------------------------------------------------------- 1 | package com.datastax.alexott.demos.product; 2 | 3 | import java.util.List; 4 | import java.util.Map; 5 | 6 | import com.datastax.driver.mapping.annotations.Frozen; 7 | import com.datastax.driver.mapping.annotations.FrozenValue; 8 | import com.datastax.driver.mapping.annotations.PartitionKey; 9 | import com.datastax.driver.mapping.annotations.Table; 10 | 11 | @Table(keyspace = "test", name = "product") 12 | public class Product { 13 | @PartitionKey 14 | String id; 15 | 16 | @FrozenValue 17 | @Frozen 18 | Map details; 19 | 20 | @Frozen 21 | List moreDetails; 22 | 23 | public String getId() { 24 | return id; 25 | } 26 | 27 | public void setId(String id) { 28 | this.id = id; 29 | } 30 | 31 | public Map getDetails() { 32 | return details; 33 | } 34 | 35 | public void setDetails(Map details) { 36 | this.details = details; 37 | } 38 | 39 | public List getMoreDetails() { 40 | return moreDetails; 41 | } 42 | 43 | public void setMoreDetails(List moreDetails) { 44 | this.moreDetails = moreDetails; 45 | } 46 | 47 | @Override 48 | public String toString() { 49 | return "Product [id=" + id + ", details=" + details + ", moreDetails=" + moreDetails + "]"; 50 | } 51 | } 52 | -------------------------------------------------------------------------------- /spark-dse/src/main/scala/com/datastax/alexott/dsefs/DsefsDownloader.scala: -------------------------------------------------------------------------------- 1 | package com.datastax.alexott.dsefs 2 | 3 | import java.io._ 4 | 5 | import com.datastax.alexott.dsefs.DsefsUploader.getBool 6 | import org.apache.hadoop.fs.{FileSystem, FileUtil, Path} 7 | 
import org.apache.spark.sql.SparkSession 8 | 9 | object DsefsDownloader { 10 | def main(args: Array[String]): Unit = { 11 | if (args.length < 2) { 12 | println("Usage: DsefsDownloader fileOrDirectoryToDownload destination") 13 | System.exit(1) 14 | } 15 | val spark = SparkSession.builder().getOrCreate() 16 | 17 | // import spark.implicits._ 18 | 19 | val remoteFS = FileSystem.get(spark.sparkContext.hadoopConfiguration) 20 | val path = new Path(args(0)) 21 | if (!remoteFS.exists(path)) { 22 | println("The file or directory '" + args(0) + "' doesn't exist!") 23 | System.exit(1) 24 | } 25 | 26 | val outfile = new File(args(1)) 27 | if (outfile.exists()) { 28 | if (getBool("overwriteMode")) { 29 | outfile.delete() 30 | } else { 31 | println("File '" + args(1) + "' exists on disk! Remove it, or pass -DoverwriteMode=true to the job!") 32 | System.exit(1) 33 | } 34 | } 35 | 36 | FileUtil.copy(remoteFS, path, outfile, false, spark.sparkContext.hadoopConfiguration) 37 | 38 | System.exit(0) 39 | } 40 | } 41 | -------------------------------------------------------------------------------- /cassandra-join-spark/src/main/java/json/ticks/TickData.java: -------------------------------------------------------------------------------- 1 | package json.ticks; 2 | 3 | import java.time.LocalDateTime; 4 | import java.time.temporal.ChronoUnit; 5 | import java.util.concurrent.atomic.AtomicLong; 6 | 7 | public class TickData { 8 | private static final LocalDateTime BASE_TIME = LocalDateTime.now(); 9 | private static final AtomicLong TIME_OFFSET = new AtomicLong(); 10 | 11 | private String symbol; 12 | private double value; 13 | private String datetime; 14 | 15 | public TickData(String symbol, double value) { 16 | this.symbol = symbol; 17 | this.value = value; 18 | this.datetime = BASE_TIME.plus(TIME_OFFSET.incrementAndGet(), ChronoUnit.SECONDS).toString(); 19 | } 20 | 21 | public String getSymbol() { 22 | return symbol; 23 | } 24 | 25 | public double getValue() { 26 | return value; 27 | } 28 | 29 | public String getDatetime() { 30 | return datetime; 31 | } 32 | 33 | public void setDatetime(){ 34 | this.datetime = BASE_TIME.plus(TIME_OFFSET.incrementAndGet(), ChronoUnit.SECONDS).toString(); 35 | } 36 | 37 | public void setValue(double v){ 38 | this.value = v; 39 | } 40 | 41 | @Override 42 | public String toString() { 43 | return "TickData [" + 44 | "symbol=" + symbol + ", " + 45 | "value=" + value + "]"; 46 | } 47 | 48 | } 49 | 50 | -------------------------------------------------------------------------------- /driver-1.x/src/main/java/com/datastax/alexott/demos/objmapper/UDTTestTableFR.java: -------------------------------------------------------------------------------- 1 | package com.datastax.alexott.demos; 2 | 3 | import com.datastax.driver.mapping.annotations.ClusteringColumn; 4 | import com.datastax.driver.mapping.annotations.PartitionKey; 5 | import com.datastax.driver.mapping.annotations.Table; 6 | 7 | @Table(name = "udt_test_fr", keyspace = "test") 8 | public class UDTTestTableFR { 9 | @PartitionKey 10 | int id; 11 | @ClusteringColumn 12 | int cid; 13 | UDTTestType udt; 14 | 15 | public UDTTestTableFR(int id, int cid, UDTTestType udt) { 16 | this.id = id; 17 | this.cid = cid; 18 | this.udt = udt; 19 | } 20 | 21 | public UDTTestTableFR() { 22 | } 23 | 24 | @Override 25 | public String toString() { 26 | return "UDTTestTableFR{" + 27 | "id=" + id + 28 | ", cid=" + cid + 29 | ", udt=" + udt + 30 | '}'; 31 | } 32 | 33 | public int getId() { 34 | return id; 35 | } 36 | 37 | public void setId(int id) { 38 | 
this.id = id; 39 | } 40 | 41 | public int getCid() { 42 | return cid; 43 | } 44 | 45 | public void setCid(int cid) { 46 | this.cid = cid; 47 | } 48 | 49 | public UDTTestType getUdt() { 50 | return udt; 51 | } 52 | 53 | public void setUdt(UDTTestType udt) { 54 | this.udt = udt; 55 | } 56 | } 57 | -------------------------------------------------------------------------------- /driver-1.x/src/main/java/com/datastax/alexott/demos/objmapper/UDTTestTableNonFR.java: -------------------------------------------------------------------------------- 1 | package com.datastax.alexott.demos; 2 | 3 | import com.datastax.driver.mapping.annotations.ClusteringColumn; 4 | import com.datastax.driver.mapping.annotations.PartitionKey; 5 | import com.datastax.driver.mapping.annotations.Table; 6 | 7 | @Table(name = "udt_test", keyspace = "test") 8 | public class UDTTestTableNonFR { 9 | @PartitionKey 10 | int id; 11 | @ClusteringColumn 12 | int cid; 13 | UDTTestType udt; 14 | 15 | public UDTTestTableNonFR(int id, int cid, UDTTestType udt) { 16 | this.id = id; 17 | this.cid = cid; 18 | this.udt = udt; 19 | } 20 | 21 | public UDTTestTableNonFR() { 22 | } 23 | 24 | @Override 25 | public String toString() { 26 | return "UDTTestTableNonFR{" + 27 | "id=" + id + 28 | ", cid=" + cid + 29 | ", udt=" + udt + 30 | '}'; 31 | } 32 | 33 | public int getId() { 34 | return id; 35 | } 36 | 37 | public void setId(int id) { 38 | this.id = id; 39 | } 40 | 41 | public int getCid() { 42 | return cid; 43 | } 44 | 45 | public void setCid(int cid) { 46 | this.cid = cid; 47 | } 48 | 49 | public UDTTestType getUdt() { 50 | return udt; 51 | } 52 | 53 | public void setUdt(UDTTestType udt) { 54 | this.udt = udt; 55 | } 56 | } 57 | -------------------------------------------------------------------------------- /spark-dse/src/main/scala/com/datastax/alexott/dsefs/DsefsUploader.scala: -------------------------------------------------------------------------------- 1 | package com.datastax.alexott.dsefs 2 | 3 | import java.io.File 4 | 5 | import org.apache.hadoop.fs.{FileSystem, FileUtil, Path} 6 | import org.apache.spark.sql.SparkSession 7 | 8 | object DsefsUploader { 9 | def getBool(name: String): Boolean = { 10 | java.lang.Boolean.getBoolean(name) 11 | } 12 | 13 | def main(args: Array[String]): Unit = { 14 | if (args.length < 2) { 15 | println("Usage: DsefsUploader fileOrDirectoryToUpload destination") 16 | System.exit(1) 17 | } 18 | val spark = SparkSession.builder().getOrCreate() 19 | 20 | val infile = new File(args(0)) 21 | if (!infile.exists()) { 22 | println("File '" + args(0) + " doesn't exist!") 23 | } 24 | 25 | val fileSystem = FileSystem.get(spark.sparkContext.hadoopConfiguration) 26 | 27 | val name = if ("/".equals(args(1))) { 28 | "/" + infile.getName 29 | } else{ 30 | args(1) 31 | } 32 | val path = new Path(name) 33 | if (fileSystem.exists(path)) { 34 | if (getBool("overwriteMode")) { 35 | fileSystem.delete(path, true) 36 | } else { 37 | println("File or directory '" + args(1) + "' exists on DSEFS! 
Remove it, or pass -DoverwriteMode=true to the job!") 38 | System.exit(1) 39 | } 40 | } 41 | 42 | FileUtil.copy(infile, fileSystem, path, false, spark.sparkContext.hadoopConfiguration) 43 | 44 | System.exit(0) 45 | } 46 | } 47 | -------------------------------------------------------------------------------- /driver-1.x/src/main/java/com/datastax/alexott/demos/misc/Test1.java: -------------------------------------------------------------------------------- 1 | package com.datastax.alexott.demos.misc; 2 | 3 | import com.datastax.driver.core.Cluster; 4 | import com.datastax.driver.core.ColumnDefinitions.Definition; 5 | import com.datastax.driver.core.DataType; 6 | import com.datastax.driver.core.ResultSet; 7 | import com.datastax.driver.core.Row; 8 | import com.datastax.driver.core.Session; 9 | import com.fasterxml.jackson.core.JsonProcessingException; 10 | 11 | import java.util.Set; 12 | 13 | public class Test1 { 14 | public static void main(String[] args) throws JsonProcessingException { 15 | String server = System.getProperty("contactPoint", "127.0.0.1"); 16 | Cluster cluster = Cluster.builder().addContactPoint(server).build(); 17 | Session session = cluster.connect(); 18 | 19 | ResultSet rs = session.execute("select * from test.ftest ;"); 20 | System.out.print("["); 21 | for (Row row : rs) { 22 | for (Definition key : row.getColumnDefinitions()) { 23 | System.out.println(key.getName() + ", type=" + key.getType()); 24 | if (key.getType().equals(DataType.frozenSet(DataType.varchar()))) { 25 | System.out.println("\tbingo!"); 26 | Set ts = row.getSet(key.getName(), String.class); 27 | for (String string : ts) { 28 | System.out.println("\tval=" + string); 29 | } 30 | } 31 | } 32 | } 33 | 34 | session.close(); 35 | cluster.close(); 36 | } 37 | 38 | } 39 | -------------------------------------------------------------------------------- /driver-1.x/src/main/scala/com/datastax/alexott/CodecsTest.scala: -------------------------------------------------------------------------------- 1 | package com.datastax.alexott 2 | 3 | import com.datastax.driver.core.{Cluster, Row, TypeCodec} 4 | import com.datastax.driver.extras.codecs.jdk8.OptionalCodec 5 | 6 | import scala.collection.JavaConverters._ 7 | import scala.compat.java8.OptionConverters._ 8 | 9 | 10 | object Optionals { 11 | private val intCodec = TypeCodec.cint() 12 | private val optionalIntCodec = new OptionalCodec[java.lang.Integer](intCodec) 13 | private val javaIntType = optionalIntCodec.getJavaType() 14 | 15 | def registerCodecs(cluster: Cluster): Unit = { 16 | val codecRegistry = cluster.getConfiguration.getCodecRegistry 17 | 18 | codecRegistry.register(optionalIntCodec) 19 | } 20 | 21 | def getInt(row: Row, col: String): Option[java.lang.Integer] = { 22 | row.get(col, javaIntType).asScala 23 | } 24 | def getInt(row: Row, col: Int): Option[java.lang.Integer] = { 25 | row.get(col, javaIntType).asScala 26 | } 27 | } 28 | 29 | object CodecsTest { 30 | def main(args: Array[String]): Unit = { 31 | 32 | val cluster = Cluster.builder().addContactPoint("10.200.176.39").build() 33 | Optionals.registerCodecs(cluster) 34 | val session = cluster.connect() 35 | 36 | for (row <- session.execute("select id, c1, v1 from test.st1 where id = 2").all().asScala) { 37 | println("id=" + Optionals.getInt(row, "id") 38 | + ", c1=" + Optionals.getInt(row, "c1") 39 | + ", v1=" + Optionals.getInt(row, "v1")) 40 | } 41 | session.close() 42 | cluster.close() 43 | 44 | } 45 | } 46 | -------------------------------------------------------------------------------- 
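CodecsTest.scala above registers an `OptionalCodec` so that possibly-null columns come back as `Option` values in Scala. A minimal Java sketch of the same idea follows; it is an illustration rather than a file from this repository, and the class name and the test.st1 table are assumptions borrowed from the Scala example.

```java
package com.datastax.alexott.demos;

import java.util.Optional;

import com.datastax.driver.core.Cluster;
import com.datastax.driver.core.Row;
import com.datastax.driver.core.Session;
import com.datastax.driver.core.TypeCodec;
import com.datastax.driver.extras.codecs.jdk8.OptionalCodec;

// Hypothetical sketch: read a nullable int column as java.util.Optional via OptionalCodec (driver 1.x/3.x extras).
public class OptionalCodecJavaSketch {
    public static void main(String[] args) {
        String server = System.getProperty("contactPoint", "127.0.0.1");
        try (Cluster cluster = Cluster.builder().addContactPoint(server).build();
             Session session = cluster.connect()) {
            // Wrap the built-in int codec so that null values map to Optional.empty().
            OptionalCodec<Integer> optionalInt = new OptionalCodec<>(TypeCodec.cint());
            cluster.getConfiguration().getCodecRegistry().register(optionalInt);

            Row row = session.execute("select id, c1, v1 from test.st1 where id = 2").one();
            if (row != null) {
                Optional<Integer> v1 = row.get("v1", optionalInt.getJavaType());
                System.out.println("v1=" + v1.map(String::valueOf).orElse("<null>"));
            }
        }
    }
}
```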
/driver-1.x/src/main/java/com/datastax/alexott/demos/solr/DTestMain.java: -------------------------------------------------------------------------------- 1 | package com.datastax.alexott.demos; 2 | 3 | import java.io.IOException; 4 | import java.util.List; 5 | 6 | import org.apache.solr.client.solrj.SolrClient; 7 | import org.apache.solr.client.solrj.SolrQuery; 8 | import org.apache.solr.client.solrj.SolrServerException; 9 | import org.apache.solr.client.solrj.beans.DocumentObjectBinder; 10 | import org.apache.solr.client.solrj.impl.HttpSolrClient; 11 | import org.apache.solr.client.solrj.response.QueryResponse; 12 | import org.apache.solr.common.SolrDocumentList; 13 | 14 | import com.fasterxml.jackson.databind.ObjectMapper; 15 | import com.fasterxml.jackson.datatype.jsr310.JavaTimeModule; 16 | 17 | public class DTestMain { 18 | public static void main(String[] args) throws SolrServerException, IOException { 19 | String url = "http://localhost:8983/solr/test.dtest"; 20 | 21 | ObjectMapper mapper = new ObjectMapper(); 22 | mapper.findAndRegisterModules(); 23 | mapper.registerModule(new JavaTimeModule()); 24 | 25 | SolrClient client = new HttpSolrClient(url); 26 | SolrQuery query = new SolrQuery(); 27 | query.setQuery("*:*"); 28 | query.addFilterQuery("id:1"); 29 | query.setRows(10); 30 | QueryResponse response = client.query(query); 31 | SolrDocumentList list = response.getResults(); 32 | DocumentObjectBinder binder = new DocumentObjectBinder(); 33 | 34 | List lst = binder.getBeans(DTest.class, list); 35 | for (DTest dTest : lst) { 36 | System.out.println("id=" + dTest.getId() + ", t=" + dTest.getT()); 37 | } 38 | 39 | } 40 | 41 | } 42 | -------------------------------------------------------------------------------- /driver-4.x/src/main/java/com/datastax/alexott/demos/UdtTest1.java: -------------------------------------------------------------------------------- 1 | package com.datastax.alexott.demos; 2 | 3 | import com.datastax.oss.driver.api.core.CqlSession; 4 | import com.datastax.oss.driver.api.core.cql.BoundStatement; 5 | import com.datastax.oss.driver.api.core.cql.BoundStatementBuilder; 6 | import com.datastax.oss.driver.api.core.cql.PreparedStatement; 7 | import com.datastax.oss.driver.api.core.type.UserDefinedType; 8 | 9 | public class UdtTest1 { 10 | /* 11 | 12 | CREATE TYPE test.udt ( 13 | id int, 14 | t1 int, 15 | t2 int, 16 | a2 int 17 | ); 18 | CREATE TABLE test.u2 ( 19 | id int PRIMARY KEY, 20 | u udt 21 | ); 22 | */ 23 | 24 | public static void main(String[] args) { 25 | try (CqlSession session = CqlSession.builder() 26 | .addContactPoints(Commons.getContactPoints()) 27 | .build()) { 28 | UserDefinedType udtType = session 29 | .getMetadata() 30 | .getKeyspace("test") 31 | .flatMap(ks -> ks.getUserDefinedType("udt")) 32 | .orElseThrow(IllegalStateException::new); 33 | PreparedStatement preparedStatement = session.prepare( 34 | "insert into test.u2(id, u) values(?, ?)"); 35 | for (int i = 0; i < 5; i++) { 36 | BoundStatement boundStatement = 37 | preparedStatement.bind(i, udtType.newValue(i, i, i, i)); 38 | session.execute(boundStatement); 39 | } 40 | 41 | } 42 | } 43 | } 44 | -------------------------------------------------------------------------------- /driver-1.x/src/main/java/com/datastax/alexott/demos/JMXTest.java: -------------------------------------------------------------------------------- 1 | package com.datastax.alexott.demos; 2 | 3 | import java.util.Set; 4 | 5 | import javax.management.JMX; 6 | import javax.management.MBeanServerConnection; 7 | import 
javax.management.ObjectInstance; 8 | import javax.management.ObjectName; 9 | import javax.management.remote.JMXConnector; 10 | import javax.management.remote.JMXConnectorFactory; 11 | import javax.management.remote.JMXServiceURL; 12 | 13 | import org.apache.cassandra.metrics.CassandraMetricsRegistry; 14 | 15 | public class JMXTest { 16 | 17 | public static void main(String[] args) throws Exception { 18 | JMXServiceURL url = new JMXServiceURL("service:jmx:rmi:///jndi/rmi://[127.0.0.1]:7199/jmxrmi"); 19 | JMXConnector jmxc = JMXConnectorFactory.connect(url, null); 20 | MBeanServerConnection mbsc = jmxc.getMBeanServerConnection(); 21 | 22 | Set objs = mbsc.queryMBeans(ObjectName 23 | .getInstance("org.apache.cassandra.metrics:type=ClientRequest,scope=Read-ALL,name=TotalLatency"), null); 24 | for (ObjectInstance obj : objs) { 25 | Object proxy = JMX.newMBeanProxy(mbsc, obj.getObjectName(), CassandraMetricsRegistry.JmxCounterMBean.class); 26 | if (proxy instanceof CassandraMetricsRegistry.JmxCounterMBean) { 27 | System.out.println("TotalLatency = " + ((CassandraMetricsRegistry.JmxCounterMBean) proxy).getCount()); 28 | } 29 | } 30 | jmxc.close(); 31 | } 32 | 33 | } 34 | 35 | /* 36 | * Set names = mbsc.queryNames(null, null); for (ObjectName name : 37 | * names) { System.out.println("\tObjectName = " + name); } 38 | */ 39 | -------------------------------------------------------------------------------- /driver-1.x/src/main/java/com/datastax/alexott/demos/objmapper/Test4.java: -------------------------------------------------------------------------------- 1 | package com.datastax.alexott.demos; 2 | 3 | import com.datastax.driver.core.BatchStatement; 4 | import com.datastax.driver.core.Cluster; 5 | import com.datastax.driver.core.Session; 6 | import com.datastax.driver.core.Statement; 7 | import com.datastax.driver.mapping.Mapper; 8 | import com.datastax.driver.mapping.MappingManager; 9 | import com.google.common.collect.Lists; 10 | import com.google.common.collect.Maps; 11 | import com.google.common.collect.Sets; 12 | 13 | import java.util.Map; 14 | 15 | public class Test4 { 16 | public static void main(String[] args) { 17 | String server = System.getProperty("contactPoint", "127.0.0.1"); 18 | try(Cluster cluster = Cluster.builder().addContactPoint(server).build(); 19 | Session session = cluster.connect()) { 20 | MappingManager manager = new MappingManager(session); 21 | Mapper mapper = manager.mapper(Test4Data.class); 22 | 23 | for (int i = 0; i < 2; i++) { 24 | BatchStatement batchStatement = new BatchStatement(BatchStatement.Type.UNLOGGED); 25 | for (int j = 0; j < 5; j++) { 26 | Statement statement = mapper.saveQuery(new Test4Data(i, j, "t " + i + "," + j)); 27 | System.out.println(statement.getClass()); 28 | batchStatement.add(statement); 29 | } 30 | session.execute(batchStatement); 31 | } 32 | 33 | Test4Data test4Data = mapper.get(0, 1); 34 | System.out.println(test4Data); 35 | } 36 | } 37 | 38 | } 39 | -------------------------------------------------------------------------------- /driver-4.x/src/main/java/com/datastax/alexott/demos/ConnectWithDCDetection.java: -------------------------------------------------------------------------------- 1 | package com.datastax.alexott.demos; 2 | 3 | import com.datastax.oss.driver.api.core.CqlSession; 4 | import com.datastax.oss.driver.api.core.cql.ResultSet; 5 | import com.datastax.oss.driver.api.core.cql.Row; 6 | 7 | import java.net.InetSocketAddress; 8 | import java.util.Arrays; 9 | import java.util.stream.Collectors; 10 | 11 | public class 
ConnectWithDCDetection { 12 | 13 | public static void main(String[] args) { 14 | String contactPointsStr = System.getProperty("contactPoints", ""); 15 | if (contactPointsStr.isEmpty()) { 16 | System.err.println("Please pass Cassandra contact points as Java system property with name 'contactPoints'"); 17 | System.exit(1); 18 | } 19 | 20 | // this is not really necessary, because of the existence of the DcInferringLoadBalancingPolicy implemented as 21 | // part of JAVA-2459 22 | String[] contactPoints = contactPointsStr.split(","); 23 | String dcName = DCDetectingLBPolicy.detectDcName(contactPoints); 24 | System.out.println("Detected DC Name: '" + dcName + "'"); 25 | 26 | try(CqlSession session = CqlSession.builder() 27 | .addContactPoints(Arrays.stream(contactPoints) 28 | .map(x -> new InetSocketAddress(x, 9042)).collect(Collectors.toList())) 29 | .withLocalDatacenter(dcName) 30 | .build()) { 31 | ResultSet rs = session.execute("select data_center, host_id from system.peers"); 32 | for (Row row: rs) { 33 | System.out.println(String.format("Host ID: %s, DC: %s", row.getUuid("host_id"), row.getString("data_center"))); 34 | } 35 | } 36 | } 37 | } 38 | -------------------------------------------------------------------------------- /driver-1.x/src/main/java/com/datastax/alexott/demos/graph/GDTest1.java: -------------------------------------------------------------------------------- 1 | package com.datastax.alexott.demos.graph; 2 | 3 | import java.util.UUID; 4 | 5 | import com.datastax.driver.dse.DseCluster; 6 | import com.datastax.driver.dse.DseSession; 7 | import com.datastax.driver.dse.graph.GraphOptions; 8 | import com.datastax.driver.dse.graph.SimpleGraphStatement; 9 | 10 | public class GDTest1 { 11 | 12 | public static void main(String[] args) { 13 | String server = System.getProperty("contactPoint", "127.0.0.1"); 14 | try (DseCluster dseCluster = DseCluster.builder().addContactPoints(server) 15 | .withGraphOptions(new GraphOptions().setGraphName("test")).build(); 16 | DseSession session = dseCluster.connect()) { 17 | 18 | long start = System.nanoTime(); 19 | long startL = System.nanoTime(); 20 | for (int i = 1; i <= 1000; i++) { 21 | // String s = String.format("g.addV(label, 'person' ,'id', '%s' ," 22 | // + "'email', 'sample%d@gmail.com')", UUID.randomUUID().toString(), i); 23 | // session.executeGraph(s).one().asVertex(); 24 | 25 | SimpleGraphStatement s = new SimpleGraphStatement( 26 | "g.addV(label, 'person' ,'id', idV , 'email', emailV)").set("idV", UUID.randomUUID().toString()) 27 | .set("emailV", "sample@gmail.com" + Integer.toString(i)); 28 | session.executeGraph(s).one().asVertex(); 29 | 30 | if ((i % 100) == 0) { 31 | long endL = System.nanoTime(); 32 | System.out.printf("%d time = %d ms\n", i, (endL - startL) / 1000000); 33 | startL = System.nanoTime(); 34 | } 35 | } 36 | long end = System.nanoTime(); 37 | System.out.printf("Total time = %d ms\n", (end - start) / 1000000); 38 | } 39 | } 40 | } 41 | -------------------------------------------------------------------------------- /driver-1.x/src/main/java/com/datastax/alexott/demos/objmapper/ExpPopularity.java: -------------------------------------------------------------------------------- 1 | package com.datastax.alexott.demos; 2 | 3 | import com.datastax.driver.mapping.annotations.UDT; 4 | 5 | import java.util.Objects; 6 | 7 | @UDT(keyspace = "src", name = "popularity") 8 | public class ExpPopularity { 9 | 10 | String locale; 11 | double pop_a; 12 | double pop_b; 13 | 14 | public ExpPopularity(String locale, double pop_a, 
double pop_b) { 15 | this.locale = locale; 16 | this.pop_a = pop_a; 17 | this.pop_b = pop_b; 18 | } 19 | public ExpPopularity() { 20 | locale = ""; 21 | pop_a = 0; 22 | pop_b = 0; 23 | } 24 | 25 | public String getLocale() { 26 | return locale; 27 | } 28 | 29 | public void setLocale(String locale) { 30 | this.locale = locale; 31 | } 32 | 33 | public double getPop_a() { 34 | return pop_a; 35 | } 36 | 37 | public void setPop_a(double pop_a) { 38 | this.pop_a = pop_a; 39 | } 40 | 41 | public double getPop_b() { 42 | return pop_b; 43 | } 44 | 45 | public void setPop_b(double pop_b) { 46 | this.pop_b = pop_b; 47 | } 48 | 49 | @Override 50 | public boolean equals(Object o) { 51 | if (this == o) return true; 52 | if (!(o instanceof ExpPopularity)) return false; 53 | ExpPopularity that = (ExpPopularity) o; 54 | return Double.compare(that.pop_a, pop_a) == 0 && 55 | Double.compare(that.pop_b, pop_b) == 0 && 56 | Objects.equals(locale, that.locale); 57 | } 58 | 59 | @Override 60 | public int hashCode() { 61 | return Objects.hash(locale, pop_a, pop_b); 62 | } 63 | } 64 | -------------------------------------------------------------------------------- /driver-1.x/src/main/java/com/datastax/alexott/demos/objmapper/Test4Data.java: -------------------------------------------------------------------------------- 1 | package com.datastax.alexott.demos; 2 | 3 | import com.datastax.driver.mapping.annotations.ClusteringColumn; 4 | import com.datastax.driver.mapping.annotations.Column; 5 | import com.datastax.driver.mapping.annotations.PartitionKey; 6 | import com.datastax.driver.mapping.annotations.Table; 7 | 8 | // create KEYSPACE if not exists test WITH replication = {'class': 'SimpleStrategy', 'replication_factor': 3}; 9 | // create table if not exists test.t4(id int, c int, t text, primary key(id, c)); 10 | @Table(name = "t4", keyspace = "test") 11 | public class Test4Data { 12 | @PartitionKey 13 | int id; 14 | 15 | @ClusteringColumn 16 | @Column(name = "c") 17 | int clCol; 18 | 19 | @Column(name = "t") 20 | String text; 21 | 22 | public int getId() { 23 | return id; 24 | } 25 | 26 | public Test4Data() { 27 | 28 | } 29 | 30 | public Test4Data(int id, int clCol, String text) { 31 | this.id = id; 32 | this.clCol = clCol; 33 | this.text = text; 34 | } 35 | 36 | public void setId(int id) { 37 | this.id = id; 38 | } 39 | 40 | public int getClCol() { 41 | return clCol; 42 | } 43 | 44 | public void setClCol(int clCol) { 45 | this.clCol = clCol; 46 | } 47 | 48 | public String getText() { 49 | return text; 50 | } 51 | 52 | public void setText(String text) { 53 | this.text = text; 54 | } 55 | 56 | @Override 57 | public String toString() { 58 | return "Test4Data{" + 59 | "id=" + id + 60 | ", clCol=" + clCol + 61 | ", text='" + text + '\'' + 62 | '}'; 63 | } 64 | } 65 | -------------------------------------------------------------------------------- /driver-1.x/src/main/java/com/datastax/alexott/demos/objmapper/TableObjectClustered.java: -------------------------------------------------------------------------------- 1 | package com.datastax.alexott.demos.objmapper; 2 | 3 | import com.datastax.driver.mapping.annotations.Table; 4 | 5 | import java.util.Date; 6 | 7 | @Table(name = "scala_test_complex", keyspace = "test") 8 | public class TableObjectClustered { 9 | int p1 = 0; 10 | int p2 = 0; 11 | int c1 = 0; 12 | int c2 = 0; 13 | String t = ""; 14 | Date tm = new Date(); 15 | 16 | TableObjectClustered() { 17 | } 18 | 19 | public int getP1() { 20 | return p1; 21 | } 22 | 23 | public void setP1(int p1) { 24 | this.p1 = 
p1; 25 | } 26 | 27 | public int getP2() { 28 | return p2; 29 | } 30 | 31 | public void setP2(int p2) { 32 | this.p2 = p2; 33 | } 34 | 35 | public int getC1() { 36 | return c1; 37 | } 38 | 39 | public void setC1(int c1) { 40 | this.c1 = c1; 41 | } 42 | 43 | public int getC2() { 44 | return c2; 45 | } 46 | 47 | public void setC2(int c2) { 48 | this.c2 = c2; 49 | } 50 | 51 | public String getT() { 52 | return t; 53 | } 54 | 55 | public void setT(String t) { 56 | this.t = t; 57 | } 58 | 59 | public Date getTm() { 60 | return tm; 61 | } 62 | 63 | public void setTm(Date tm) { 64 | this.tm = tm; 65 | } 66 | 67 | @Override 68 | public String toString() { 69 | return "TableObjectClustered{" + 70 | "p1=" + p1 + 71 | ", p2=" + p2 + 72 | ", c1=" + c1 + 73 | ", c2=" + c2 + 74 | ", t='" + t + '\'' + 75 | ", tm=" + tm + 76 | '}'; 77 | } 78 | } 79 | -------------------------------------------------------------------------------- /driver-1.x/src/main/java/com/datastax/alexott/demos/objmapper/AuditTestMain.java: -------------------------------------------------------------------------------- 1 | package com.datastax.alexott.demos; 2 | 3 | import com.datastax.driver.core.Cluster; 4 | import com.datastax.driver.core.Session; 5 | import com.datastax.driver.core.policies.LoadBalancingPolicy; 6 | import com.datastax.driver.core.policies.RoundRobinPolicy; 7 | import com.datastax.driver.core.policies.WhiteListPolicy; 8 | import com.datastax.driver.mapping.Mapper; 9 | import com.datastax.driver.mapping.MappingManager; 10 | import com.google.common.collect.Lists; 11 | import com.google.common.collect.Maps; 12 | import com.google.common.collect.Sets; 13 | 14 | import java.net.InetSocketAddress; 15 | import java.util.Collections; 16 | import java.util.Map; 17 | 18 | public class AuditTestMain { 19 | public static void main(String[] args) { 20 | LoadBalancingPolicy lbpolicy = new WhiteListPolicy(new RoundRobinPolicy(), 21 | Collections.singletonList(new InetSocketAddress("10.200.180.207", 9042))); 22 | try (Cluster cluster = Cluster.builder().addContactPoint("10.200.180.207") 23 | .withLoadBalancingPolicy(lbpolicy) 24 | .build(); 25 | Session session = cluster.connect()) { 26 | 27 | MappingManager manager = new MappingManager(session); 28 | Mapper mapper = manager.mapper(AuditTestTable.class); 29 | 30 | Map m = Maps.newHashMap(); 31 | m.put(1, "m 1"); 32 | m.put(2, "m 2"); 33 | mapper.save(new AuditTestTable(2, new AuditTestType(2, "test 2"), 34 | Sets.newHashSet("s 1", " s 2"), Lists.newArrayList("l 1", "l 2"), 35 | m)); 36 | 37 | System.out.println(mapper.get(2)); 38 | } 39 | } 40 | 41 | } 42 | -------------------------------------------------------------------------------- /prometheus-java-driver/src/main/resources/application.conf: -------------------------------------------------------------------------------- 1 | datastax-java-driver { 2 | basic { 3 | contact-points = ["127.0.0.1:9042"] 4 | session-name = example_app 5 | load-balancing-policy { 6 | local-datacenter = datacenter1 7 | } 8 | } 9 | 10 | // see https://docs.datastax.com/en/developer/java-driver/4.3/manual/core/configuration/reference/ 11 | advanced.metrics { 12 | session { 13 | enabled = [ bytes-sent, bytes-received, connected-nodes, cql-requests, cql-client-timeouts, 14 | cql-prepared-cache-size, throttling.delay, throttling.queue-size, throttling.errors 15 | ] 16 | cql-requests { 17 | highest-latency = 3 seconds 18 | significant-digits = 3 19 | refresh-interval = 5 minutes 20 | } 21 | throttling.delay { 22 | highest-latency = 3 seconds 23 | 
significant-digits = 3 24 | refresh-interval = 5 minutes 25 | } 26 | } 27 | 28 | node { 29 | enabled = [ pool.open-connections, pool.available-streams, pool.in-flight, pool.orphaned-streams, 30 | bytes-sent, bytes-received, cql-messages, errors.request.unsent, errors.request.aborted, 31 | errors.request.write-timeouts, errors.request.read-timeouts, errors.request.unavailables, 32 | errors.request.others, retries.total, retries.aborted, retries.read-timeout, 33 | retries.write-timeout, retries.unavailable, retries.other, ignores.total, ignores.aborted, 34 | ignores.read-timeout, ignores.write-timeout, ignores.unavailable, ignores.other, speculative-executions, 35 | errors.connection.init, errors.connection.auth 36 | ] 37 | 38 | cql-messages { 39 | highest-latency = 3 seconds 40 | significant-digits = 3 41 | refresh-interval = 5 minutes 42 | } 43 | } 44 | } 45 | } -------------------------------------------------------------------------------- /spark-dse/src/main/java/com/datastax/alexott/demos/spark/JoinTests.java: -------------------------------------------------------------------------------- 1 | package com.datastax.alexott.demos.spark; 2 | 3 | import org.apache.spark.api.java.JavaRDD; 4 | import org.apache.spark.api.java.function.Function; 5 | import org.apache.spark.sql.Dataset; 6 | import org.apache.spark.sql.Row; 7 | import org.apache.spark.sql.RowFactory; 8 | import org.apache.spark.sql.SparkSession; 9 | import org.apache.spark.sql.types.DataTypes; 10 | import org.apache.spark.sql.types.StructField; 11 | import org.apache.spark.sql.types.StructType; 12 | import org.spark_project.guava.collect.ImmutableMap; 13 | import scala.Tuple1; 14 | 15 | import java.util.ArrayList; 16 | import java.util.List; 17 | import java.util.Random; 18 | 19 | // create table if not exists test.jtest (id int primary key, v text); 20 | 21 | public class JoinTests { 22 | public static void main(String[] args) { 23 | SparkSession spark = SparkSession 24 | .builder() 25 | .appName("CassandraSparkWithJoin") 26 | // .config("spark.cassandra.connection.host", "192.168.0.10") 27 | .getOrCreate(); 28 | 29 | // Dataset df = spark.sql("select * from test.jtest"); 30 | // df.show(); 31 | Dataset toJoin = spark.range(1, 1000).selectExpr("cast(id as int) as id"); 32 | 33 | toJoin.printSchema(); 34 | // toJoin.show(); 35 | 36 | Dataset dataset = spark.read() 37 | .format("org.apache.spark.sql.cassandra") 38 | .options(ImmutableMap.of("table", "jtest", "keyspace", "test")) 39 | .load(); 40 | 41 | Dataset joined = toJoin.join(dataset, 42 | dataset.col("id").equalTo(toJoin.col("id"))); 43 | joined.printSchema(); 44 | joined.explain(); 45 | joined.show(10); 46 | } 47 | } 48 | -------------------------------------------------------------------------------- /spark-oss/src/main/java/com/datastax/alexott/demos/spark/JoinTests.java: -------------------------------------------------------------------------------- 1 | package com.datastax.alexott.demos.spark; 2 | 3 | import org.apache.spark.api.java.JavaRDD; 4 | import org.apache.spark.api.java.function.Function; 5 | import org.apache.spark.sql.Dataset; 6 | import org.apache.spark.sql.Row; 7 | import org.apache.spark.sql.RowFactory; 8 | import org.apache.spark.sql.SparkSession; 9 | import org.apache.spark.sql.types.DataTypes; 10 | import org.apache.spark.sql.types.StructField; 11 | import org.apache.spark.sql.types.StructType; 12 | import org.spark_project.guava.collect.ImmutableMap; 13 | import scala.Tuple1; 14 | 15 | import java.util.ArrayList; 16 | import java.util.List; 17 | 
import java.util.Random; 18 | 19 | // create table if not exists test.jtest (id int primary key, v text); 20 | 21 | public class JoinTests { 22 | public static void main(String[] args) { 23 | SparkSession spark = SparkSession 24 | .builder() 25 | .appName("CassandraSparkWithJoin") 26 | // .config("spark.cassandra.connection.host", "192.168.0.10") 27 | .getOrCreate(); 28 | 29 | // Dataset df = spark.sql("select * from test.jtest"); 30 | // df.show(); 31 | Dataset toJoin = spark.range(1, 1000).selectExpr("cast(id as int) as id"); 32 | 33 | toJoin.printSchema(); 34 | // toJoin.show(); 35 | 36 | Dataset dataset = spark.read() 37 | .format("org.apache.spark.sql.cassandra") 38 | .options(ImmutableMap.of("table", "jtest", "keyspace", "test")) 39 | .load(); 40 | 41 | Dataset joined = toJoin.join(dataset, 42 | dataset.col("id").equalTo(toJoin.col("id"))); 43 | joined.printSchema(); 44 | joined.explain(); 45 | joined.show(10); 46 | } 47 | } 48 | -------------------------------------------------------------------------------- /cassandra-join-spark/src/main/java/json/ticks/TickGenerator.java: -------------------------------------------------------------------------------- 1 | package json.ticks; 2 | 3 | import org.slf4j.Logger; 4 | import org.slf4j.LoggerFactory; 5 | 6 | import java.io.Serializable; 7 | import java.util.List; 8 | 9 | public class TickGenerator { 10 | 11 | static final Logger log = LoggerFactory.getLogger("TickGenerator"); 12 | 13 | private final List stocksList; 14 | 15 | public TickGenerator(List stocks) { 16 | this.stocksList = stocks; 17 | } 18 | 19 | public TickValue getTickValueRandom(int i) { 20 | TickData thisStock = stocksList.get(i); 21 | TickValue tickValue = new TickValue(thisStock.getSymbol(), thisStock.getValue()); 22 | tickValue.value = this.createRandomValue(tickValue.value); 23 | return tickValue; 24 | } 25 | 26 | public TickData getStockWithRandomValue(int i) { 27 | TickData thisStock = stocksList.get(i); 28 | thisStock.setValue(this.createRandomValue(thisStock.getValue())); 29 | return thisStock; 30 | } 31 | 32 | public int getStocksCount() { 33 | return stocksList.size(); 34 | } 35 | 36 | class TickValue implements Serializable { 37 | String tickSymbol; 38 | double value; 39 | 40 | public TickValue(String tickSymbol, double value) { 41 | super(); 42 | this.tickSymbol = tickSymbol; 43 | this.value = value; 44 | } 45 | } 46 | 47 | private double createRandomValue(double lastValue) { 48 | 49 | double up = Math.random() * 2; 50 | double percentMove = (Math.random() * 1.0) / 100; 51 | 52 | if (up < 1) { 53 | lastValue -= percentMove*lastValue; 54 | } else { 55 | lastValue += percentMove*lastValue; 56 | } 57 | 58 | return lastValue; 59 | } 60 | 61 | } 62 | 63 | -------------------------------------------------------------------------------- /spark-dse/src/main/java/com/datastax/alexott/demos/spark/UUIDTest.java: -------------------------------------------------------------------------------- 1 | package com.datastax.alexott.demos.spark; 2 | 3 | import com.datastax.spark.connector.japi.CassandraJavaUtil; 4 | import com.datastax.spark.connector.japi.RDDJavaFunctions; 5 | import com.datastax.spark.connector.japi.rdd.CassandraJavaPairRDD; 6 | import com.datastax.spark.connector.japi.rdd.CassandraJavaRDD; 7 | import org.apache.spark.api.java.JavaRDD; 8 | import org.apache.spark.api.java.function.Function; 9 | import org.apache.spark.sql.SparkSession; 10 | import scala.Tuple1; 11 | import scala.Tuple2; 12 | 13 | import static 
com.datastax.spark.connector.japi.CassandraJavaUtil.javaFunctions; 14 | import static com.datastax.spark.connector.japi.CassandraJavaUtil.mapRowTo; 15 | import static com.datastax.spark.connector.japi.CassandraJavaUtil.mapRowToTuple; 16 | import static com.datastax.spark.connector.japi.CassandraJavaUtil.mapToRow; 17 | import static com.datastax.spark.connector.japi.CassandraJavaUtil.mapTupleToRow; 18 | import static com.datastax.spark.connector.japi.CassandraJavaUtil.someColumns; 19 | 20 | // create table if not exists test.utest (id int primary key, u uuid); 21 | public class UUIDTest { 22 | public static void main(String[] args) { 23 | SparkSession spark = SparkSession 24 | .builder() 25 | .appName("UUIDTest") 26 | .getOrCreate(); 27 | 28 | CassandraJavaRDD uuids = javaFunctions(spark.sparkContext()) 29 | .cassandraTable("test", "utest", mapRowTo(UUIDData.class)); 30 | 31 | uuids.collect().forEach(System.out::println); 32 | 33 | JavaRDD uuids2 = uuids.map(x -> new UUIDData(x.getId() + 10, x.getU())); 34 | 35 | CassandraJavaUtil.javaFunctions(uuids2) 36 | .writerBuilder("test", "utest", mapToRow(UUIDData.class)) 37 | .saveToCassandra(); 38 | } 39 | } 40 | -------------------------------------------------------------------------------- /spark-oss/src/main/java/com/datastax/alexott/demos/spark/UUIDTest.java: -------------------------------------------------------------------------------- 1 | package com.datastax.alexott.demos.spark; 2 | 3 | import com.datastax.spark.connector.japi.CassandraJavaUtil; 4 | import com.datastax.spark.connector.japi.RDDJavaFunctions; 5 | import com.datastax.spark.connector.japi.rdd.CassandraJavaPairRDD; 6 | import com.datastax.spark.connector.japi.rdd.CassandraJavaRDD; 7 | import org.apache.spark.api.java.JavaRDD; 8 | import org.apache.spark.api.java.function.Function; 9 | import org.apache.spark.sql.SparkSession; 10 | import scala.Tuple1; 11 | import scala.Tuple2; 12 | 13 | import static com.datastax.spark.connector.japi.CassandraJavaUtil.javaFunctions; 14 | import static com.datastax.spark.connector.japi.CassandraJavaUtil.mapRowTo; 15 | import static com.datastax.spark.connector.japi.CassandraJavaUtil.mapRowToTuple; 16 | import static com.datastax.spark.connector.japi.CassandraJavaUtil.mapToRow; 17 | import static com.datastax.spark.connector.japi.CassandraJavaUtil.mapTupleToRow; 18 | import static com.datastax.spark.connector.japi.CassandraJavaUtil.someColumns; 19 | 20 | // create table if not exists test.utest (id int primary key, u uuid); 21 | public class UUIDTest { 22 | public static void main(String[] args) { 23 | SparkSession spark = SparkSession 24 | .builder() 25 | .appName("UUIDTest") 26 | .getOrCreate(); 27 | 28 | CassandraJavaRDD uuids = javaFunctions(spark.sparkContext()) 29 | .cassandraTable("test", "utest", mapRowTo(UUIDData.class)); 30 | 31 | uuids.collect().forEach(System.out::println); 32 | 33 | JavaRDD uuids2 = uuids.map(x -> new UUIDData(x.getId() + 10, x.getU())); 34 | 35 | CassandraJavaUtil.javaFunctions(uuids2) 36 | .writerBuilder("test", "utest", mapToRow(UUIDData.class)) 37 | .saveToCassandra(); 38 | } 39 | } 40 | -------------------------------------------------------------------------------- /scala-driver-4.x/build.sbt: -------------------------------------------------------------------------------- 1 | scalaVersion := "2.11.12" 2 | 3 | lazy val library = new { 4 | val mapperRuntime = "com.datastax.oss" % "java-driver-mapper-runtime" % "4.7.0" 5 | val mapperProcessor = "com.datastax.oss" % "java-driver-mapper-processor" % "4.7.0" % 
"provided" 6 | val queryBuilder = "com.datastax.oss" % "java-driver-query-builder" % "4.7.0" 7 | } 8 | 9 | lazy val processAnnotations = taskKey[Unit]("Process annotations") 10 | 11 | processAnnotations := { 12 | val log = streams.value.log 13 | log.info("Processing annotations ...") 14 | 15 | val classpath = ((products in Compile).value ++ ((dependencyClasspath in Compile).value.files)) mkString ":" 16 | val destinationDirectory = (classDirectory in Compile).value 17 | 18 | val processor = "com.datastax.oss.driver.internal.mapper.processor.MapperProcessor" 19 | val classesToProcess = Seq("com.datastax.alexott.demos.objmapper.u2", 20 | "com.datastax.alexott.demos.objmapper.udt") mkString " " 21 | 22 | val command = s"javac -cp $classpath -proc:only -processor $processor -XprintRounds -d $destinationDirectory $classesToProcess" 23 | 24 | runCommand(command, "Failed to process annotations.", log) 25 | log.info("Done processing annotations.") 26 | } 27 | 28 | def runCommand(command: String, message: => String, log: Logger) = { 29 | import scala.sys.process._ 30 | 31 | val result = command ! 32 | 33 | if (result != 0) { 34 | log.error(message) 35 | sys.error("Failed running command: " + command) 36 | } 37 | } 38 | 39 | packageBin in Compile := (packageBin in Compile dependsOn (processAnnotations in Compile)).value 40 | 41 | organization := "com.datastax.alexott" 42 | version := "1.0" 43 | name := "demos" 44 | scalacOptions += "-target:jvm-1.8" 45 | javacOptions ++= Seq("-source", "1.8", "-target", "1.8") 46 | libraryDependencies ++= Seq( 47 | library.mapperRuntime, 48 | library.queryBuilder, 49 | library.mapperProcessor 50 | ) 51 | -------------------------------------------------------------------------------- /driver-1.x/src/main/kotlin/com/datastax/alexott/demos/KtTestObjMapper.kt: -------------------------------------------------------------------------------- 1 | package com.datastax.alexott.demos 2 | 3 | import com.datastax.driver.core.Cluster 4 | import com.datastax.driver.mapping.MappingManager 5 | import com.datastax.driver.mapping.annotations.Column 6 | import com.datastax.driver.mapping.annotations.PartitionKey 7 | import com.datastax.driver.mapping.annotations.Table 8 | 9 | @Table(keyspace = "test", name = "app_category_agg") 10 | class AppCategoryAggData { 11 | 12 | @PartitionKey 13 | lateinit var category: String 14 | 15 | @Column(name = "app_count") 16 | var appCount: Int = 0 17 | 18 | @Column(name = "sp_count") 19 | var spCount: Int = 0 20 | 21 | @Column(name = "subscriber_count") 22 | var subscriberCount: Int = 0 23 | 24 | @Column(name = "window_revenue") 25 | var windowRevenue: Long = 0 26 | 27 | @Column(name = "top_apps") 28 | var topApps: List> = emptyList() 29 | 30 | override fun toString(): String { 31 | return "AppCategoryAggData(category='$category', appCount=$appCount, spCount=$spCount, subscriberCount=$subscriberCount, windowRevenue=$windowRevenue, topApps=$topApps)" 32 | } 33 | } 34 | 35 | object KtTestObjMapper { 36 | @JvmStatic 37 | fun main(args: Array) { 38 | val cluster = Cluster.builder() 39 | .addContactPoint("10.101.34.176") 40 | .build() 41 | val session = cluster.connect() 42 | 43 | val manager = MappingManager(session) 44 | val mapper = manager.mapper(AppCategoryAggData::class.java) 45 | 46 | val appObj = AppCategoryAggData() 47 | appObj.category = "kotlin" 48 | appObj.appCount = 5 49 | appObj.spCount = 10 50 | appObj.subscriberCount = 50 51 | appObj.windowRevenue = 10000 52 | appObj.topApps = listOf(mapOf("t2" to 2)) 53 | mapper.save(appObj) 54 | 55 | val 
obj2 = mapper.get("test") 56 | println("Object from =$obj2") 57 | 58 | session.close() 59 | cluster.close() 60 | 61 | } 62 | 63 | } -------------------------------------------------------------------------------- /driver-1.x/src/main/java/com/datastax/alexott/demos/objmapper/AuditTestTable.java: -------------------------------------------------------------------------------- 1 | package com.datastax.alexott.demos; 2 | 3 | import com.datastax.driver.mapping.annotations.PartitionKey; 4 | import com.datastax.driver.mapping.annotations.Table; 5 | 6 | import java.util.List; 7 | import java.util.Map; 8 | import java.util.Set; 9 | 10 | // create table test.audit_test(id int primary key, u test.audit_type, s set, l list, m map); 11 | @Table(keyspace="test", name="audit_test") 12 | public class AuditTestTable { 13 | 14 | @PartitionKey 15 | int id; 16 | AuditTestType u; 17 | Set s; 18 | List l; 19 | Map m; 20 | 21 | public AuditTestTable() { 22 | } 23 | 24 | public AuditTestTable(int id, AuditTestType u, Set s, List l, Map m) { 25 | this.id = id; 26 | this.u = u; 27 | this.s = s; 28 | this.l = l; 29 | this.m = m; 30 | } 31 | 32 | public int getId() { 33 | return id; 34 | } 35 | 36 | public void setId(int id) { 37 | this.id = id; 38 | } 39 | 40 | public AuditTestType getU() { 41 | return u; 42 | } 43 | 44 | public void setU(AuditTestType u) { 45 | this.u = u; 46 | } 47 | 48 | public Set getS() { 49 | return s; 50 | } 51 | 52 | public void setS(Set s) { 53 | this.s = s; 54 | } 55 | 56 | public List getL() { 57 | return l; 58 | } 59 | 60 | public void setL(List l) { 61 | this.l = l; 62 | } 63 | 64 | public Map getM() { 65 | return m; 66 | } 67 | 68 | public void setM(Map m) { 69 | this.m = m; 70 | } 71 | 72 | @Override 73 | public String toString() { 74 | return "AuditTestTable{" + 75 | "id=" + id + 76 | ", u=" + u + 77 | ", s=" + s + 78 | ", l=" + l + 79 | ", m=" + m + 80 | '}'; 81 | } 82 | } 83 | -------------------------------------------------------------------------------- /driver-4.x/src/main/kotlin/com/datastax/alexott/demos/KtTestObjMapper.kt: -------------------------------------------------------------------------------- 1 | package com.datastax.alexott.demos 2 | 3 | import com.datastax.oss.driver.api.core.CqlIdentifier 4 | import com.datastax.oss.driver.api.core.CqlSession 5 | import com.datastax.oss.driver.api.mapper.annotations.* 6 | import java.net.InetSocketAddress 7 | 8 | 9 | @Entity 10 | @CqlName("app_category_agg") 11 | data class AppCategoryAggData( 12 | @PartitionKey var category: String, 13 | @CqlName("app_count") var appCount: Int? = null, 14 | @CqlName("sp_count") var spCount: Int? = null, 15 | @CqlName("subscriber_count") var subscriberCount: Int? = null, 16 | @CqlName("window_revenue") var windowRevenue: Long? = null, 17 | @CqlName("top_apps") var topApps: List>? = null 18 | ) { 19 | constructor() : this("") 20 | } 21 | 22 | @Dao 23 | interface AppCategoryAggDao { 24 | @Insert 25 | fun insert(appCatAgg: AppCategoryAggData) 26 | 27 | @Select 28 | fun findByCategory(appCat: String): AppCategoryAggData? 29 | } 30 | 31 | @Mapper 32 | interface AppCategoryMapper { 33 | @DaoFactory 34 | fun appCategoryDao(@DaoKeyspace keyspace: CqlIdentifier?): AppCategoryAggDao? 
35 | } 36 | 37 | object KtTestObjMapper { 38 | @JvmStatic 39 | fun main(args: Array) { 40 | val session = CqlSession.builder() 41 | .addContactPoint(InetSocketAddress("10.101.34.176", 9042)) 42 | .build() 43 | 44 | // get mapper - please note that we need to use AppCategoryMapperBuilder 45 | // that is generated by annotation processor 46 | val mapper: AppCategoryMapper = AppCategoryMapperBuilder(session).build() 47 | 48 | val dao: AppCategoryAggDao? = mapper.appCategoryDao(CqlIdentifier.fromCql("test")) 49 | 50 | val appObj = AppCategoryAggData("kotlin2", 51 | 10, 11, 12, 34, 52 | listOf(mapOf("t2" to 2))) 53 | dao?.insert(appObj) 54 | 55 | val obj2 = dao?.findByCategory("test") 56 | println("Object from =$obj2") 57 | 58 | session.close() 59 | 60 | } 61 | 62 | } -------------------------------------------------------------------------------- /driver-1.x/src/main/java/com/datastax/alexott/demos/objmapper/Test4_2.java: -------------------------------------------------------------------------------- 1 | package com.datastax.alexott.demos; 2 | 3 | import com.datastax.driver.core.Cluster; 4 | import com.datastax.driver.core.ProtocolVersion; 5 | import com.datastax.driver.core.QueryOptions; 6 | import com.datastax.driver.core.Session; 7 | import com.datastax.driver.mapping.Mapper; 8 | import com.datastax.driver.mapping.MappingManager; 9 | 10 | import java.util.Random; 11 | 12 | public class Test4_2 { 13 | public static void main(String[] args) throws InterruptedException { 14 | String server = System.getProperty("contactPoint", "127.0.0.1"); 15 | QueryOptions queryOptions = new QueryOptions().setDefaultIdempotence(true); 16 | try(Cluster cluster = Cluster.builder().withProtocolVersion(ProtocolVersion.V4) 17 | .addContactPoint(server) 18 | .withQueryOptions(queryOptions) 19 | // for working with Cassandra 2.1-2.2 20 | .withProtocolVersion(ProtocolVersion.V3) 21 | .build(); 22 | Session session = cluster.connect()) { 23 | 24 | session.execute("create KEYSPACE if not exists test WITH replication = {'class': 'SimpleStrategy', 'replication_factor': 3};"); 25 | session.execute("create table if not exists test.t4(id int, c int, t text, primary key(id, c));"); 26 | 27 | MappingManager manager = new MappingManager(session); 28 | Mapper mapper = manager.mapper(Test4Data.class); 29 | 30 | Random rnd = new Random(); 31 | 32 | long c = 0; 33 | while(true) { 34 | int i = rnd.nextInt(); 35 | int j = rnd.nextInt(); 36 | try { 37 | mapper.save(new Test4Data(i, j, "t " + i + "," + j)); 38 | } catch (Exception ex) { 39 | System.out.println("Got exception: " + ex.getMessage()); 40 | } 41 | Thread.sleep(10); 42 | c++; 43 | if ((c % 100) == 0) { 44 | System.out.println("Submitted " + c + " requests"); 45 | } 46 | } 47 | } 48 | } 49 | 50 | } 51 | -------------------------------------------------------------------------------- /spark-oss/src/main/java/com/datastax/alexott/demos/spark/TableCreate.java: -------------------------------------------------------------------------------- 1 | package com.datastax.alexott.demos.spark; 2 | 3 | import com.datastax.spark.connector.DataFrameFunctions; 4 | import com.datastax.spark.connector.cql.CassandraConnector; 5 | import com.datastax.spark.connector.cql.CassandraConnectorConf; 6 | import org.apache.spark.sql.Dataset; 7 | import org.apache.spark.sql.Row; 8 | import org.apache.spark.sql.SparkSession; 9 | import org.spark_project.guava.collect.ImmutableMap; 10 | import scala.Option; 11 | import scala.Some; 12 | import scala.collection.JavaConversions; 13 | import 
scala.collection.Seq; 14 | 15 | import java.util.Arrays; 16 | 17 | public class TableCreate { 18 | public static void main(String[] args) { 19 | SparkSession spark = SparkSession 20 | .builder() 21 | .appName("CassandraTableCreate") 22 | .getOrCreate(); 23 | 24 | /* 25 | CREATE TABLE test.widerows4 ( 26 | part text, 27 | clust text, 28 | col2 text, 29 | data text, 30 | PRIMARY KEY (part, clust)); 31 | */ 32 | 33 | Dataset dataset = spark.read() 34 | .format("org.apache.spark.sql.cassandra") 35 | .options(ImmutableMap.of("table", "widerows4", "keyspace", "test")) 36 | .load(); 37 | 38 | dataset.printSchema(); 39 | 40 | 41 | DataFrameFunctions dfFunctions = new DataFrameFunctions(dataset); 42 | Option> partitionSeqlist = new Some<>(JavaConversions.asScalaBuffer( 43 | Arrays.asList("part")).seq()); 44 | Option> clusteringSeqlist = new Some<>(JavaConversions.asScalaBuffer( 45 | Arrays.asList("clust", "col2")).seq()); 46 | CassandraConnector connector = new CassandraConnector(CassandraConnectorConf.apply(spark.sparkContext().getConf())); 47 | dfFunctions.createCassandraTable("test", "widerows6", 48 | partitionSeqlist, clusteringSeqlist, connector); 49 | dataset.write().format("org.apache.spark.sql.cassandra") 50 | .options(ImmutableMap.of("table", "widerows6", "keyspace", "test")) 51 | .save(); 52 | } 53 | 54 | 55 | } 56 | -------------------------------------------------------------------------------- /spark-dse/src/main/scala/com/datastax/alexott/dsefs/DsefsGetMerge.scala: -------------------------------------------------------------------------------- 1 | package com.datastax.alexott.dsefs 2 | 3 | import java.io._ 4 | 5 | import com.datastax.alexott.dsefs.DsefsUploader.getBool 6 | import org.apache.commons.io.IOUtils 7 | import org.apache.hadoop.fs.{FileSystem, Path} 8 | import org.apache.spark.sql.SparkSession 9 | 10 | object DsefsGetMerge { 11 | def main(args: Array[String]): Unit = { 12 | if (args.length < 2) { 13 | println("Usage: DsefsGetMerge directoryToDownload destination") 14 | System.exit(1) 15 | } 16 | val spark = SparkSession.builder().getOrCreate() 17 | 18 | // import spark.implicits._ 19 | 20 | val outfile = new File(args(1)) 21 | if (outfile.exists()) { 22 | if (getBool("overwriteMode")) { 23 | outfile.delete() 24 | } else { 25 | println("File '" + args(1) + "' exists on disk! Remove it, or pass -DoverwriteMode=true to the job!") 26 | System.exit(1) 27 | } 28 | } 29 | 30 | val appendNewline = getBool("addNewLine") 31 | val skipEmptyFiles = getBool("skipEmptyFiles") 32 | 33 | val fileSystem = FileSystem.get(spark.sparkContext.hadoopConfiguration) 34 | val path = new Path(args(0)) 35 | if (!fileSystem.isDirectory(path)) { 36 | println("The '" + args(0) + "' is not a directory!") 37 | System.exit(1) 38 | } 39 | 40 | val out = new BufferedOutputStream(new FileOutputStream(outfile)) 41 | val fileList = fileSystem.listFiles(path, false) // TODO: sort entries by file name? 42 | while(fileList.hasNext) { 43 | val it = fileList.next() 44 | val fpath = it.getPath() 45 | if (!fpath.getName.startsWith("part-")) { 46 | println("Skipping non-part file... " + fpath) 47 | } else { 48 | if (skipEmptyFiles && it.getLen == 0) { 49 | println("Skipping empty file file... 
" + fpath) 50 | } else { 51 | println("Copying data from " + fpath) 52 | val in = fileSystem.open(fpath) 53 | IOUtils.copy(in, out) 54 | in.close() 55 | if (appendNewline) { 56 | out.write('\n') 57 | } 58 | } 59 | } 60 | } 61 | 62 | out.close() 63 | System.exit(0) 64 | } 65 | } 66 | -------------------------------------------------------------------------------- /driver-1.x/src/main/java/com/datastax/alexott/demos/TestPreparedStatements.java: -------------------------------------------------------------------------------- 1 | package com.datastax.alexott.demos; 2 | 3 | import com.datastax.driver.core.BoundStatement; 4 | import com.datastax.driver.core.Cluster; 5 | import com.datastax.driver.core.ColumnDefinitions; 6 | import com.datastax.driver.core.PreparedStatement; 7 | import com.datastax.driver.core.ResultSet; 8 | import com.datastax.driver.core.Row; 9 | import com.datastax.driver.core.Session; 10 | 11 | public class TestPreparedStatements { 12 | 13 | public static BoundStatement nullsToUnset(BoundStatement statement) { 14 | ColumnDefinitions variables = statement.preparedStatement().getVariables(); 15 | int numPlaceholders = variables.size(); 16 | for (int i = 0; i < numPlaceholders; i++) { 17 | if (statement.isNull(i)) { 18 | System.out.println("Found null in position " + i + ", column: " + variables.getName(i)); 19 | statement.unset(i); 20 | } 21 | } 22 | return statement; 23 | } 24 | 25 | public static void main(String[] args) { 26 | String server = System.getProperty("contactPoint", "127.0.0.1"); 27 | try (Cluster cluster = Cluster.builder().addContactPoint(server).build(); 28 | Session session = cluster.connect()) { 29 | 30 | session.execute("create keyspace if not exists test WITH replication = {'class': 'SimpleStrategy', 'replication_factor': 1};"); 31 | session.execute("create table if not exists test.unsets(id int primary key, t text, i int);"); 32 | session.execute("truncate test.unsets;"); 33 | 34 | PreparedStatement preparedStatement = session.prepare("insert into test.unsets(id, t, i) values(?,?,?);"); 35 | 36 | // this will insert tombstone 37 | BoundStatement b1 = preparedStatement.bind(1, null, 2); 38 | session.execute(b1); 39 | 40 | // this will not 41 | BoundStatement b2 = preparedStatement.bind(2, null, 3); 42 | session.execute(nullsToUnset(b2)); 43 | 44 | ResultSet rs = session.execute("select * from test.unsets;"); 45 | for (Row row: rs) { 46 | System.out.println(row); 47 | } 48 | } 49 | } 50 | } 51 | -------------------------------------------------------------------------------- /spark-dse/src/main/java/com/datastax/alexott/demos/spark/TableCreate.java: -------------------------------------------------------------------------------- 1 | package com.datastax.alexott.demos.spark; 2 | 3 | 4 | import com.datastax.spark.connector.DatasetFunctions; 5 | import com.datastax.spark.connector.cql.CassandraConnector; 6 | import com.datastax.spark.connector.cql.CassandraConnectorConf; 7 | import org.apache.spark.sql.Dataset; 8 | import org.apache.spark.sql.Row; 9 | import org.apache.spark.sql.SparkSession; 10 | import org.apache.spark.sql.catalyst.encoders.RowEncoder; 11 | import org.spark_project.guava.collect.ImmutableMap; 12 | import scala.Option; 13 | import scala.Some; 14 | import scala.collection.JavaConversions; 15 | import scala.collection.Seq; 16 | import java.util.Arrays; 17 | 18 | public class TableCreate { 19 | public static void main(String[] args) { 20 | SparkSession spark = SparkSession 21 | .builder() 22 | .appName("CassandraTableCreate") 23 | .getOrCreate(); 
24 | 25 | /* 26 | CREATE TABLE test.widerows4 ( 27 | part text, 28 | clust text, 29 | col2 text, 30 | data text, 31 | PRIMARY KEY (part, clust)); 32 | */ 33 | 34 | Dataset dataset = spark.read() 35 | .format("org.apache.spark.sql.cassandra") 36 | .options(ImmutableMap.of("table", "widerows4", "keyspace", "test")) 37 | .load(); 38 | dataset.printSchema(); 39 | 40 | DatasetFunctions dfFunctions = new DatasetFunctions<>(dataset, RowEncoder.apply(dataset.schema())); 41 | Option> partitionSeqlist = new Some<>(JavaConversions.asScalaBuffer( 42 | Arrays.asList("part")).seq()); 43 | Option> clusteringSeqlist = new Some<>(JavaConversions.asScalaBuffer( 44 | Arrays.asList("clust", "col2")).seq()); 45 | CassandraConnector connector = new CassandraConnector(CassandraConnectorConf.apply(spark.sparkContext().getConf())); 46 | dfFunctions.createCassandraTable("test", "widerows6", 47 | partitionSeqlist, clusteringSeqlist, connector); 48 | dataset.write().format("org.apache.spark.sql.cassandra") 49 | .options(ImmutableMap.of("table", "widerows6", "keyspace", "test")) 50 | .save(); 51 | } 52 | 53 | 54 | } 55 | -------------------------------------------------------------------------------- /driver-1.x/src/main/java/com/datastax/alexott/demos/metrics/Metrics1.java: -------------------------------------------------------------------------------- 1 | package com.datastax.alexott.demos.metrics; 2 | 3 | import com.codahale.metrics.JmxReporter; 4 | import com.datastax.driver.core.Cluster; 5 | import com.datastax.driver.core.Metrics; 6 | import com.datastax.driver.core.Session; 7 | 8 | public class Metrics1 { 9 | 10 | public static void main(String[] args) throws InterruptedException { 11 | String server = System.getProperty("contactPoint", "127.0.0.1"); 12 | try (Cluster cluster = Cluster.builder().addContactPoint(server).build(); 13 | Session session = cluster.connect()) { 14 | 15 | for(int i = 0; i < 3000; i++) { 16 | for(int j = 0; j < 1000; j++) { 17 | session.executeAsync(String.format("insert into test.abc (id, t1, t2) values (%d, 't1-%d', 't2-%d');", 18 | i+j, i, j)); 19 | } 20 | Metrics metrics = cluster.getMetrics(); 21 | System.out.println("Doing iteration " + (i+1)); 22 | System.out.println("BlockingExecutorQueueDepth: " + metrics.getBlockingExecutorQueueDepth().getValue() ); 23 | System.out.println("BytesReceived: " + metrics.getBytesReceived().getCount() ); 24 | System.out.println("BytesSent: " + metrics.getBytesSent().getCount() ); 25 | System.out.println("ConnectedToHosts: " + metrics.getConnectedToHosts().getValue() ); 26 | // System.out.println("ErrorMetrics: " + metrics.getErrorMetrics(). 
); 27 | System.out.println("ExecutorQueueDepth: " + metrics.getExecutorQueueDepth().getValue() ); 28 | System.out.println("InFlightRequests: " + metrics.getInFlightRequests().getValue() ); 29 | System.out.println("KnownHosts: " + metrics.getKnownHosts().getValue() ); 30 | System.out.println("OpenConnections: " + metrics.getOpenConnections().getValue() ); 31 | System.out.println("ReconnectionSchedulerQueueSize: " + metrics.getReconnectionSchedulerQueueSize().getValue() ); 32 | System.out.println("RequestsTimer: " + metrics.getRequestsTimer().getMeanRate() ); 33 | System.out.println("TrashedConnections: " + metrics.getTrashedConnections().getValue() ); 34 | 35 | Thread.sleep(5000); 36 | } 37 | } 38 | } 39 | 40 | } 41 | -------------------------------------------------------------------------------- /spark-dse/src/main/resources/tweets-1.json: -------------------------------------------------------------------------------- 1 | {"created_at": "Sun Jul 01 07:52:55 +0000 2018", "id": 1013329589983117312, "id_str": "1013329589983117312", "clear_text": "test", "text": "أعوذ بكلمات الله التامات من شر ما خلق\n♻️ https://t.co/tTuXJIaSZq", "source": "تطبيق زاد المسلم", "truncated": false, "in_reply_to_status_id": null, "in_reply_to_status_id_str": null, "in_reply_to_user_id": null, "in_reply_to_user_id_str": null, "in_reply_to_screen_name": null, "user": {"id": 502056236, "id_str": "502056236", "name": "هيثم الصباح", "screen_name": "haithamjuve", "location": null, "url": null, "description": "أتشرف بأني أحد أبناء دولة الإمارات 🇦🇪 ولاعب منتخب الإمارات الوطني لكرة القدم الشاطئية .. الرقم واحد شعاري 🇦🇪", "translator_type": "none", "protected": false, "verified": false, "followers_count": 1678, "friends_count": 2105, "listed_count": 1, "favourites_count": 18533, "statuses_count": 51682, "created_at": "Fri Feb 24 19:24:02 +0000 2012", "utc_offset": null, "time_zone": null, "geo_enabled": false, "lang": "en", "contributors_enabled": false, "is_translator": false, "profile_background_color": "C0DEED", "profile_background_image_url": "http://abs.twimg.com/images/themes/theme1/bg.png", "profile_background_image_url_https": "https://abs.twimg.com/images/themes/theme1/bg.png", "profile_background_tile": false, "profile_link_color": "1DA1F2", "profile_sidebar_border_color": "C0DEED", "profile_sidebar_fill_color": "DDEEF6", "profile_text_color": "333333", "profile_use_background_image": true, "profile_image_url": "http://pbs.twimg.com/profile_images/840215791345664000/xyyoVbA3_normal.jpg", "profile_image_url_https": "https://pbs.twimg.com/profile_images/840215791345664000/xyyoVbA3_normal.jpg", "profile_banner_url": "https://pbs.twimg.com/profile_banners/502056236/1489158032", "default_profile": true, "default_profile_image": false, "following": null, "follow_request_sent": null, "notifications": null}, "geo": null, "coordinates": null, "place": null, "contributors": null, "is_quote_status": false, "quote_count": 0, "reply_count": 0, "retweet_count": 0, "favorite_count": 0, "entities": {"hashtags": [], "urls": [{"url": "https://t.co/tTuXJIaSZq", "expanded_url": "https://du3a.org", "display_url": "du3a.org", "indices": [41, 64]}], "user_mentions": [], "symbols": []}, "favorited": false, "retweeted": false, "possibly_sensitive": false, "filter_level": "low", "lang": "ar", "timestamp_ms": "1530431575662"} 2 | -------------------------------------------------------------------------------- /scc-2.5/src/main/scala/com/datastax/alexott/streaming/StructuredStreamingKafkaDSE.scala: 
-------------------------------------------------------------------------------- 1 | package com.datastax.alexott.streaming 2 | 3 | import org.apache.spark.SparkContext 4 | import org.apache.spark.sql.SparkSession 5 | import org.apache.spark.sql.functions._ 6 | import org.apache.spark.sql.streaming.OutputMode 7 | import org.apache.spark.sql.types._ 8 | import scala.io.Source 9 | 10 | object StructuredStreamingKafkaDSE { 11 | 12 | def main(args: Array[String]): Unit = { 13 | val sc = new SparkContext() 14 | val spark = SparkSession.builder().config(sc.getConf).getOrCreate() 15 | import spark.implicits._ 16 | 17 | val fileStream = StructuredStreamingKafkaDSE.getClass.getResourceAsStream("/tweets-1.json") 18 | val jsonSampleString = Source.fromInputStream(fileStream).getLines().next() 19 | val jsonSampleDS = spark.createDataset(List(jsonSampleString)) 20 | val jsonSample = spark.read.json(jsonSampleDS) 21 | val schema = jsonSample.schema 22 | 23 | val streamingInputDF = spark.readStream 24 | .format("kafka") 25 | .option("kafka.bootstrap.servers", "192.168.0.10:9092") 26 | .option("subscribe", "tweets-txt") 27 | .load() 28 | 29 | val tweetDF = streamingInputDF.selectExpr("CAST(value AS STRING)") 30 | .select(from_json($"value", schema).as("tweet")) 31 | .select(unix_timestamp($"tweet.created_at", "EEE MMM dd HH:mm:ss Z yyyy").as("created_at").cast(TimestampType), 32 | $"tweet.lang".as("lang")) 33 | 34 | val streamingCountsDF = tweetDF 35 | .where(col("lang").isNotNull) 36 | .groupBy($"lang", window($"created_at", "1 minutes")) 37 | .count() 38 | .select($"lang", $"window.start".as("window"), $"count") 39 | 40 | // need to have table created with following CQL: 41 | // create table test.sttest_tweets(lang text, window timestamp, count int, primary key(lang, window)); 42 | 43 | // This works only with Spark 2.2 (if BYOS 6.0.4 is used) 44 | val query = streamingCountsDF.writeStream 45 | .outputMode(OutputMode.Update) 46 | .format("org.apache.spark.sql.cassandra") 47 | .option("checkpointLocation", "webhdfs://192.168.0.10:5598/checkpoint") 48 | .option("keyspace", "test") 49 | .option("table", "sttest_tweets") 50 | .start() 51 | 52 | /*val query = streamingCountsDF.writeStream 53 | .outputMode("complete") 54 | .format("console") 55 | .start()*/ 56 | 57 | query.awaitTermination() 58 | 59 | } 60 | } 61 | -------------------------------------------------------------------------------- /spark-dse/src/main/scala/com/datastax/alexott/streaming/StructuredStreamingKafkaDSE.scala: -------------------------------------------------------------------------------- 1 | package com.datastax.alexott.streaming 2 | 3 | import org.apache.spark.SparkContext 4 | import org.apache.spark.sql.SparkSession 5 | import org.apache.spark.sql.functions._ 6 | import org.apache.spark.sql.streaming.OutputMode 7 | import org.apache.spark.sql.types._ 8 | import scala.io.Source 9 | 10 | object StructuredStreamingKafkaDSE { 11 | 12 | def main(args: Array[String]): Unit = { 13 | val sc = new SparkContext() 14 | val spark = SparkSession.builder().config(sc.getConf).getOrCreate() 15 | import spark.implicits._ 16 | 17 | val fileStream = StructuredStreamingKafkaDSE.getClass.getResourceAsStream("/tweets-1.json") 18 | val jsonSampleString = Source.fromInputStream(fileStream).getLines().next() 19 | val jsonSampleDS = spark.createDataset(List(jsonSampleString)) 20 | val jsonSample = spark.read.json(jsonSampleDS) 21 | val schema = jsonSample.schema 22 | 23 | val streamingInputDF = spark.readStream 24 | .format("kafka") 25 | 
.option("kafka.bootstrap.servers", "192.168.0.10:9092") 26 | .option("subscribe", "tweets-txt") 27 | .load() 28 | 29 | val tweetDF = streamingInputDF.selectExpr("CAST(value AS STRING)") 30 | .select(from_json($"value", schema).as("tweet")) 31 | .select(unix_timestamp($"tweet.created_at", "EEE MMM dd HH:mm:ss Z yyyy").as("created_at").cast(TimestampType), 32 | $"tweet.lang".as("lang")) 33 | 34 | val streamingCountsDF = tweetDF 35 | .where(col("lang").isNotNull) 36 | .groupBy($"lang", window($"created_at", "1 minutes")) 37 | .count() 38 | .select($"lang", $"window.start".as("window"), $"count") 39 | 40 | // need to have table created with following CQL: 41 | // create table test.sttest_tweets(lang text, window timestamp, count int, primary key(lang, window)); 42 | 43 | // This works only with Spark 2.2 (if BYOS 6.0.4 is used) 44 | val query = streamingCountsDF.writeStream 45 | .outputMode(OutputMode.Update) 46 | .format("org.apache.spark.sql.cassandra") 47 | .option("checkpointLocation", "webhdfs://192.168.0.10:5598/checkpoint") 48 | .option("keyspace", "test") 49 | .option("table", "sttest_tweets") 50 | .start() 51 | 52 | /*val query = streamingCountsDF.writeStream 53 | .outputMode("complete") 54 | .format("console") 55 | .start()*/ 56 | 57 | query.awaitTermination() 58 | 59 | } 60 | } 61 | -------------------------------------------------------------------------------- /prometheus-java-driver/src/main/java/com/datastax/alexott/demos/MetricsWithPrometheus.java: -------------------------------------------------------------------------------- 1 | package com.datastax.alexott.demos; 2 | 3 | import com.codahale.metrics.MetricRegistry; 4 | import com.datastax.oss.driver.api.core.CqlSession; 5 | import io.prometheus.client.CollectorRegistry; 6 | import io.prometheus.client.dropwizard.DropwizardExports; 7 | import io.prometheus.client.exporter.HTTPServer; 8 | import io.prometheus.client.hotspot.DefaultExports; 9 | 10 | import java.io.IOException; 11 | import java.net.InetSocketAddress; 12 | import java.util.Optional; 13 | 14 | public class MetricsWithPrometheus { 15 | public static void main(String[] args) throws InterruptedException { 16 | String contactPoint = System.getProperty("contactPoint", "127.0.0.1"); 17 | // init default prometheus stuff 18 | DefaultExports.initialize(); 19 | // setup Prometheus HTTP server 20 | Optional prometheusServer = Optional.empty(); 21 | try { 22 | prometheusServer = Optional.of(new HTTPServer(Integer.getInteger("prometheusPort", 9095))); 23 | } catch (IOException e) { 24 | System.out.println("Exception when creating HTTP server for Prometheus: " + e.getMessage()); 25 | } 26 | 27 | try (CqlSession session = CqlSession.builder() 28 | .addContactPoint(new InetSocketAddress(contactPoint, 9042)) 29 | .withLocalDatacenter(System.getProperty("dcName")) 30 | .build()) { 31 | 32 | MetricRegistry registry = session.getMetrics() 33 | .orElseThrow(() -> new IllegalStateException("Metrics are disabled")) 34 | .getRegistry(); 35 | CollectorRegistry.defaultRegistry.register(new DropwizardExports(registry)); 36 | 37 | session.execute("create keyspace if not exists test with replication = {'class': 'SimpleStrategy', 'replication_factor': 1};"); 38 | session.execute("create table if not exists test.abc (id int, t1 text, t2 text, primary key (id, t1));"); 39 | session.execute("truncate test.abc;"); 40 | 41 | for(int i = 0; i < 3000; i++) { 42 | for(int j = 0; j < 1000; j++) { 43 | session.executeAsync(String.format("insert into test.abc (id, t1, t2) values (%d, 't1-%d', 
't2-%d');", 44 | i+j, i, j)); 45 | } 46 | Thread.sleep(5000); 47 | } 48 | } 49 | prometheusServer.ifPresent(HTTPServer::stop); 50 | } 51 | 52 | } 53 | -------------------------------------------------------------------------------- /driver-1.x/src/main/java/com/datastax/alexott/demos/objmapper/FRvsNonFRUDTMapping.java: -------------------------------------------------------------------------------- 1 | package com.datastax.alexott.demos; 2 | 3 | import com.datastax.driver.core.BatchStatement; 4 | import com.datastax.driver.core.Cluster; 5 | import com.datastax.driver.core.PreparedStatement; 6 | import com.datastax.driver.core.ResultSet; 7 | import com.datastax.driver.core.Row; 8 | import com.datastax.driver.core.Session; 9 | import com.datastax.driver.core.Statement; 10 | import com.datastax.driver.core.UDTValue; 11 | import com.datastax.driver.mapping.Mapper; 12 | import com.datastax.driver.mapping.MappingManager; 13 | 14 | public class FRvsNonFRUDTMapping { 15 | public static void main(String[] args) { 16 | String server = System.getProperty("contactPoint", "127.0.0.1"); 17 | try(Cluster cluster = Cluster.builder().addContactPoint(server).build(); 18 | Session session = cluster.connect("test")) { 19 | MappingManager manager = new MappingManager(session); 20 | Mapper mapperNonFR = manager.mapper(UDTTestTableNonFR.class); 21 | Mapper mapperFR = manager.mapper(UDTTestTableFR.class); 22 | 23 | UDTTestTableFR t1 = mapperFR.get(1, 1); 24 | System.out.print("t1=" + t1); 25 | UDTTestTableNonFR t2 = mapperNonFR.get(1, 1); 26 | System.out.print("t2=" + t2); 27 | 28 | t1.setId(2); 29 | t2.setId(2); 30 | 31 | mapperFR.save(t1); 32 | mapperNonFR.save(t2); 33 | 34 | ResultSet rs = session.execute("select * from test.udt_test where id = 1 and cid = 1"); 35 | Row row = rs.one(); 36 | UDTValue udt = row.getUDTValue("udt"); 37 | System.out.println("udt=" + udt.getInt("id") + ", " + udt.getString("t")); 38 | PreparedStatement updateNonFr = session.prepare("update test.udt_test set udt = ? where id = 1 and cid = 1"); 39 | udt.setInt("id", udt.getInt("id") + 1); 40 | session.execute(updateNonFr.bind(udt)); 41 | 42 | rs = session.execute("select * from test.udt_test_fr where id = 1 and cid = 1"); 43 | row = rs.one(); 44 | udt = row.getUDTValue("udt"); 45 | System.out.println("udt=" + udt.getInt("id") + ", " + udt.getString("t")); 46 | PreparedStatement updateFr = session.prepare("update test.udt_test_fr set udt = ? 
where id = 1 and cid = 1"); 47 | udt.setInt("id", udt.getInt("id") + 1); 48 | session.execute(updateFr.bind(udt)); 49 | } 50 | } 51 | } 52 | -------------------------------------------------------------------------------- /cassandra-join-spark/src/main/scala/com/datastax/alexott/demos/streaming/StockTickersJoinDataFrames.scala: -------------------------------------------------------------------------------- 1 | package com.datastax.alexott.demos.streaming 2 | 3 | import org.apache.spark.SparkContext 4 | import org.apache.spark.sql.SparkSession 5 | import org.apache.spark.sql.functions._ 6 | import org.apache.spark.sql.types._ 7 | 8 | object StockTickersJoinDataFrames { 9 | 10 | /* 11 | * Code need to be executed with 12 | * --conf spark.sql.extensions=com.datastax.spark.connector.CassandraSparkExtensions 13 | * to enable Direct Join optimization 14 | */ 15 | 16 | def main(args: Array[String]): Unit = { 17 | if (args.length < 2) { 18 | println("Usage: StockTickersJoinDataFrames kafka-servers topic-name") 19 | System.exit(1) 20 | } 21 | val kafkaServes = args(0) 22 | val topicName = args(1) 23 | val spark = SparkSession.builder() 24 | .getOrCreate() 25 | import spark.implicits._ 26 | 27 | val schema = StructType(List( 28 | StructField("symbol", StringType), 29 | StructField("value", DoubleType), 30 | StructField("datetime", TimestampType) 31 | )) 32 | 33 | val streamingInputDF = spark.readStream 34 | .format("kafka") 35 | .option("kafka.bootstrap.servers", kafkaServes) 36 | .option("subscribe", topicName) 37 | .load() 38 | 39 | val parsed = streamingInputDF.selectExpr("CAST(value AS STRING)") 40 | .select(from_json($"value", schema).as("stock")) 41 | .select("stock.*") 42 | .withColumnRenamed("symbol", "ticker") 43 | 44 | // get the dataset from Cassandra 45 | // if it's "stable" then we can also cache it to speedup processing 46 | val cassandra = spark.read 47 | .format("org.apache.spark.sql.cassandra") 48 | .options(Map("table" -> "stock_info", "keyspace" -> "test")) 49 | .load 50 | 51 | // we can use left join to detect what data is incorrect - if we don't have some data in the 52 | // Cassandra, then symbol field will be null, so we can detect such entries, and do something with that 53 | // we can omit the joinType parameter, in that case, we'll process only data that are in the Cassandra 54 | val joined = parsed.join(cassandra, cassandra("symbol") === parsed("ticker"), "left") 55 | .drop("ticker") 56 | 57 | joined.explain 58 | joined.printSchema 59 | 60 | val query = joined.writeStream 61 | .outputMode("update") 62 | .format("console") 63 | .start() 64 | 65 | query.awaitTermination() 66 | //Thread.sleep(10000) 67 | //query.stop() 68 | 69 | } 70 | } 71 | -------------------------------------------------------------------------------- /spark-dse/src/main/java/com/datastax/alexott/demos/spark/JoinTestsRDD.java: -------------------------------------------------------------------------------- 1 | package com.datastax.alexott.demos.spark; 2 | 3 | import com.datastax.spark.connector.ColumnSelector; 4 | import com.datastax.spark.connector.japi.RDDJavaFunctions; 5 | import com.datastax.spark.connector.japi.rdd.CassandraJavaPairRDD; 6 | import com.datastax.spark.connector.japi.rdd.CassandraJavaRDD; 7 | import com.datastax.spark.connector.writer.RowWriterFactory; 8 | import org.apache.spark.api.java.JavaRDD; 9 | import org.apache.spark.sql.SparkSession; 10 | import scala.Tuple1; 11 | import scala.Tuple2; 12 | 13 | import static com.datastax.spark.connector.japi.CassandraJavaUtil.javaFunctions; 14 
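// The Direct Join optimization mentioned in StockTickersJoinDataFrames above only kicks in when the
// Cassandra-specific Catalyst extensions are enabled at submit time. A minimal sketch of such a submit
// command; the assembly jar name, Kafka address and topic name are placeholders, not part of this repo:
//
//   spark-submit \
//     --conf spark.sql.extensions=com.datastax.spark.connector.CassandraSparkExtensions \
//     --conf spark.cassandra.connection.host=127.0.0.1 \
//     --class com.datastax.alexott.demos.streaming.StockTickersJoinDataFrames \
//     cassandra-join-spark-assembly.jar kafka-host:9092 stock-ticks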
| import static com.datastax.spark.connector.japi.CassandraJavaUtil.mapRowToTuple; 15 | import static com.datastax.spark.connector.japi.CassandraJavaUtil.mapTupleToRow; 16 | import static com.datastax.spark.connector.japi.CassandraJavaUtil.someColumns; 17 | 18 | 19 | // create table if not exists test.jtest (id int primary key, v text); 20 | 21 | public class JoinTestsRDD { 22 | public static void main(String[] args) { 23 | SparkSession spark = SparkSession 24 | .builder() 25 | .appName("CassandraSparkWithJoin") 26 | // .config("spark.cassandra.connection.host", "192.168.0.10") 27 | .getOrCreate(); 28 | 29 | // Dataset<Row> df = spark.sql("select * from test.jtest"); 30 | // df.show(); 31 | JavaRDD<Tuple1<Integer>> toJoinRDD = spark 32 | .range(1, 100) 33 | .javaRDD() 34 | .map(x -> new Tuple1<>(x.intValue())); 35 | 36 | // CassandraJavaRDD<Tuple2<Integer, String>> rdd = javaFunctions(spark.sparkContext()) 37 | // .cassandraTable("test", "jtest", mapRowToTuple(Integer.class, String.class)) 38 | // .select("id", "v"); 39 | // 40 | // System.out.println(rdd.take(10)); 41 | 42 | 43 | RDDJavaFunctions<Tuple1<Integer>> trdd = new RDDJavaFunctions<>(toJoinRDD.rdd()); 44 | CassandraJavaPairRDD<Tuple1<Integer>, Tuple2<Integer, String>> joinedRDD = 45 | trdd.joinWithCassandraTable("test", "jtest", 46 | someColumns("id", "v"), someColumns("id"), 47 | mapRowToTuple(Integer.class, String.class), mapTupleToRow(Integer.class)); 48 | System.out.println("Plan: " + joinedRDD.toDebugString()); 49 | joinedRDD.cache(); 50 | System.out.print("Count: " + joinedRDD.count()); 51 | System.out.println("Data: " + joinedRDD.take(10)); 52 | 53 | } 54 | } 55 | -------------------------------------------------------------------------------- /spark-oss/src/main/java/com/datastax/alexott/demos/spark/JoinTestsRDD.java: -------------------------------------------------------------------------------- 1 | package com.datastax.alexott.demos.spark; 2 | 3 | import com.datastax.spark.connector.ColumnSelector; 4 | import com.datastax.spark.connector.japi.RDDJavaFunctions; 5 | import com.datastax.spark.connector.japi.rdd.CassandraJavaPairRDD; 6 | import com.datastax.spark.connector.japi.rdd.CassandraJavaRDD; 7 | import com.datastax.spark.connector.writer.RowWriterFactory; 8 | import org.apache.spark.api.java.JavaRDD; 9 | import org.apache.spark.sql.SparkSession; 10 | import scala.Tuple1; 11 | import scala.Tuple2; 12 | 13 | import static com.datastax.spark.connector.japi.CassandraJavaUtil.javaFunctions; 14 | import static com.datastax.spark.connector.japi.CassandraJavaUtil.mapRowToTuple; 15 | import static com.datastax.spark.connector.japi.CassandraJavaUtil.mapTupleToRow; 16 | import static com.datastax.spark.connector.japi.CassandraJavaUtil.someColumns; 17 | 18 | 19 | // create table if not exists test.jtest (id int primary key, v text); 20 | 21 | public class JoinTestsRDD { 22 | public static void main(String[] args) { 23 | SparkSession spark = SparkSession 24 | .builder() 25 | .appName("CassandraSparkWithJoin") 26 | // .config("spark.cassandra.connection.host", "192.168.0.10") 27 | .getOrCreate(); 28 | 29 | // Dataset<Row> df = spark.sql("select * from test.jtest"); 30 | // df.show(); 31 | JavaRDD<Tuple1<Integer>> toJoinRDD = spark 32 | .range(1, 100) 33 | .javaRDD() 34 | .map(x -> new Tuple1<>(x.intValue())); 35 | 36 | // CassandraJavaRDD<Tuple2<Integer, String>> rdd = javaFunctions(spark.sparkContext()) 37 | // .cassandraTable("test", "jtest", mapRowToTuple(Integer.class, String.class)) 38 | // .select("id", "v"); 39 | // 40 | // System.out.println(rdd.take(10)); 41 | 42 | 43 | RDDJavaFunctions<Tuple1<Integer>> trdd = new RDDJavaFunctions<>(toJoinRDD.rdd()); 44 | CassandraJavaPairRDD<Tuple1<Integer>,
Tuple2> joinedRDD = 45 | trdd.joinWithCassandraTable("test", "jtest", 46 | someColumns("id", "v"), someColumns("id"), 47 | mapRowToTuple(Integer.class, String.class), mapTupleToRow(Integer.class)); 48 | System.out.println("Plan: " + joinedRDD.toDebugString()); 49 | joinedRDD.cache(); 50 | System.out.print("Count: " + joinedRDD.count()); 51 | System.out.println("Data: " + joinedRDD.take(10)); 52 | 53 | } 54 | } 55 | -------------------------------------------------------------------------------- /scc-2.5/src/main/scala/com/datastax/alexott/streaming/StructuredStreamingDSE.scala: -------------------------------------------------------------------------------- 1 | package com.datastax.alexott.streaming 2 | 3 | import org.apache.spark.SparkContext 4 | import org.apache.spark.sql.SparkSession 5 | import org.apache.spark.sql.types._ 6 | import org.apache.spark.sql.functions._ 7 | import org.apache.spark.sql.streaming.OutputMode 8 | import org.apache.spark.sql.cassandra._ 9 | 10 | object StructuredStreamingDSE { 11 | 12 | def main(args: Array[String]): Unit = { 13 | 14 | val sc = new SparkContext() 15 | val spark = SparkSession.builder().config(sc.getConf).getOrCreate() 16 | import spark.implicits._ 17 | 18 | 19 | val inputPath = if (args.isEmpty) { 20 | "webhdfs://192.168.0.10:5598/sttest/" 21 | } else { 22 | args(0) 23 | } 24 | 25 | // Input data are from DSE distribution, file: demos/weather_sensors/resources/daily.csv 26 | // stationid,metric,date,location,max,mean,median,min,percentile1,percentile5,percentile95,percentile99,total 27 | // LAE,barometricpressure,2014-01-01 00:00:00+0000,Nadzab,950,944,944,940,940,940,948,950,1360374 28 | 29 | val csvSchema = new StructType().add("stationid", StringType) 30 | .add("metric", StringType).add("date", TimestampType) 31 | .add("location", StringType).add("", IntegerType) 32 | .add("max", IntegerType).add("mean", IntegerType) 33 | .add("median", IntegerType).add("min", IntegerType) 34 | .add("percentile1", IntegerType).add("percentile5", IntegerType) 35 | .add("percentile95", IntegerType).add("percentile99", IntegerType) 36 | .add("total", IntegerType) 37 | 38 | val streamingInputDF = spark.readStream 39 | .schema(csvSchema) 40 | .option("maxFilesPerTrigger", 1) 41 | .option("header", true) 42 | .csv(inputPath) 43 | 44 | val streamingCountsDF = streamingInputDF 45 | .where(col("location").isNotNull) 46 | .groupBy($"location") 47 | .count() 48 | 49 | // need to have table created with following CQL: 50 | // create table test.sttest(location text primary key, count int); 51 | 52 | // This works only with Spark 2.2 (if BYOS 6.0.4 is used) 53 | val query = streamingCountsDF.writeStream 54 | .outputMode(OutputMode.Update) 55 | .option("checkpointLocation", "webhdfs://192.168.0.10:5598/checkpoint") 56 | .cassandraFormat("sttest", "test") 57 | .start() 58 | 59 | /* val query = streamingCountsDF.writeStream 60 | .outputMode("complete") 61 | .format("console") 62 | .start()*/ 63 | 64 | query.awaitTermination() 65 | 66 | } 67 | } 68 | -------------------------------------------------------------------------------- /spark-dse/src/main/scala/com/datastax/alexott/streaming/StructuredStreamingDSE.scala: -------------------------------------------------------------------------------- 1 | package com.datastax.alexott.streaming 2 | 3 | import org.apache.spark.SparkContext 4 | import org.apache.spark.sql.SparkSession 5 | import org.apache.spark.sql.types._ 6 | import org.apache.spark.sql.functions._ 7 | import org.apache.spark.sql.streaming.OutputMode 8 | import 
org.apache.spark.sql.cassandra._ 9 | 10 | object StructuredStreamingDSE { 11 | 12 | def main(args: Array[String]): Unit = { 13 | 14 | val sc = new SparkContext() 15 | val spark = SparkSession.builder().config(sc.getConf).getOrCreate() 16 | import spark.implicits._ 17 | 18 | 19 | val inputPath = if (args.isEmpty) { 20 | "webhdfs://192.168.0.10:5598/sttest/" 21 | } else { 22 | args(0) 23 | } 24 | 25 | // Input data are from DSE distribution, file: demos/weather_sensors/resources/daily.csv 26 | // stationid,metric,date,location,max,mean,median,min,percentile1,percentile5,percentile95,percentile99,total 27 | // LAE,barometricpressure,2014-01-01 00:00:00+0000,Nadzab,950,944,944,940,940,940,948,950,1360374 28 | 29 | val csvSchema = new StructType().add("stationid", StringType) 30 | .add("metric", StringType).add("date", TimestampType) 31 | .add("location", StringType).add("", IntegerType) 32 | .add("max", IntegerType).add("mean", IntegerType) 33 | .add("median", IntegerType).add("min", IntegerType) 34 | .add("percentile1", IntegerType).add("percentile5", IntegerType) 35 | .add("percentile95", IntegerType).add("percentile99", IntegerType) 36 | .add("total", IntegerType) 37 | 38 | val streamingInputDF = spark.readStream 39 | .schema(csvSchema) 40 | .option("maxFilesPerTrigger", 1) 41 | .option("header", true) 42 | .csv(inputPath) 43 | 44 | val streamingCountsDF = streamingInputDF 45 | .where(col("location").isNotNull) 46 | .groupBy($"location") 47 | .count() 48 | 49 | // need to have table created with following CQL: 50 | // create table test.sttest(location text primary key, count int); 51 | 52 | // This works only with Spark 2.2 (if BYOS 6.0.4 is used) 53 | val query = streamingCountsDF.writeStream 54 | .outputMode(OutputMode.Update) 55 | .option("checkpointLocation", "webhdfs://192.168.0.10:5598/checkpoint") 56 | .cassandraFormat("sttest", "test") 57 | .start() 58 | 59 | /* val query = streamingCountsDF.writeStream 60 | .outputMode("complete") 61 | .format("console") 62 | .start()*/ 63 | 64 | query.awaitTermination() 65 | 66 | } 67 | } 68 | -------------------------------------------------------------------------------- /spark-oss/src/main/scala/com/datastax/alexott/streaming/StructuredStreamingForEachBatch.scala: -------------------------------------------------------------------------------- 1 | package com.datastax.alexott.streaming 2 | 3 | import java.util.concurrent.ConcurrentHashMap 4 | 5 | import com.datastax.driver.core.PreparedStatement 6 | import com.datastax.spark.connector.cql.CassandraConnector 7 | import org.apache.spark.SparkContext 8 | import org.apache.spark.sql.functions._ 9 | import org.apache.spark.sql.streaming.OutputMode 10 | import org.apache.spark.sql.types._ 11 | import org.apache.spark.sql.{ForeachWriter, SaveMode, SparkSession} 12 | import org.apache.spark.sql.cassandra._ 13 | 14 | // need to have table created with following CQL: 15 | // create table test.sttest(location text primary key, count int); 16 | 17 | object StructuredStreamingForEachBatch { 18 | 19 | def main(args: Array[String]): Unit = { 20 | 21 | val sc = new SparkContext() 22 | val spark = SparkSession.builder().config(sc.getConf).getOrCreate() 23 | import spark.implicits._ 24 | 25 | 26 | val inputPath = if (args.isEmpty) { 27 | "webhdfs://127.0.0.1:5598/sttest/" 28 | } else { 29 | args(0) 30 | } 31 | 32 | // Input data are from DSE distribution, file: demos/weather_sensors/resources/daily.csv 33 | // 
stationid,metric,date,location,max,mean,median,min,percentile1,percentile5,percentile95,percentile99,total 34 | // LAE,barometricpressure,2014-01-01 00:00:00+0000,Nadzab,950,944,944,940,940,940,948,950,1360374 35 | 36 | val csvSchema = new StructType().add("stationid", StringType) 37 | .add("metric", StringType).add("date", TimestampType) 38 | .add("location", StringType).add("", IntegerType) 39 | .add("max", IntegerType).add("mean", IntegerType) 40 | .add("median", IntegerType).add("min", IntegerType) 41 | .add("percentile1", IntegerType).add("percentile5", IntegerType) 42 | .add("percentile95", IntegerType).add("percentile99", IntegerType) 43 | .add("total", IntegerType) 44 | 45 | val streamingInputDF = spark.readStream 46 | .schema(csvSchema) 47 | .option("maxFilesPerTrigger", 1) 48 | .option("header", true) 49 | .csv(inputPath) 50 | 51 | val streamingCountsDF = streamingInputDF 52 | .where(col("location").isNotNull) 53 | .groupBy($"location") 54 | .count() 55 | 56 | val query = streamingCountsDF.writeStream 57 | .outputMode(OutputMode.Update) 58 | .option("checkpointLocation", "webhdfs://127.0.0.1:5598/checkpoint") 59 | .foreachBatch((df, batchId) => 60 | df.write.cassandraFormat("sttest", "test") 61 | .mode(SaveMode.Append).save() 62 | ) 63 | .start() 64 | 65 | query.awaitTermination() 66 | 67 | } 68 | } 69 | -------------------------------------------------------------------------------- /driver-4.x/src/main/scala/com/datastax/alexott/demos/UdtScalaTest1.scala: -------------------------------------------------------------------------------- 1 | package com.datastax.alexott.demos 2 | 3 | import com.datastax.oss.driver.api.core.CqlSession 4 | import com.datastax.oss.driver.api.core.`type`.UserDefinedType 5 | import com.datastax.oss.driver.api.core.`type`.codec.registry.MutableCodecRegistry 6 | import com.datastax.oss.driver.api.core.`type`.codec.{MappingCodec, TypeCodec} 7 | import com.datastax.oss.driver.api.core.`type`.reflect.GenericType 8 | import com.datastax.oss.driver.api.core.data.UdtValue 9 | 10 | import scala.collection.JavaConverters._ 11 | import scala.compat.java8.OptionConverters._ 12 | 13 | case class UDTTest(id: Integer, t1: Integer, t2: Integer, a2: Integer) 14 | 15 | class UDTTestCodec(innerCodec: TypeCodec[UdtValue]) 16 | extends MappingCodec[UdtValue, UDTTest](innerCodec, GenericType.of(classOf[UDTTest])) { 17 | 18 | override def getCqlType: UserDefinedType = super.getCqlType.asInstanceOf[UserDefinedType] 19 | 20 | override protected def innerToOuter(value: UdtValue): UDTTest = { 21 | if (value == null) 22 | null 23 | else 24 | UDTTest(value.getInt("id"), value.getInt("t1"), 25 | value.getInt("t2"), value.getInt("a2")) 26 | } 27 | 28 | override protected def outerToInner(value: UDTTest): UdtValue = { 29 | if (value == null) 30 | null 31 | else 32 | getCqlType.newValue(value.id, value.t1, value.t2, value.a2) 33 | } 34 | 35 | } 36 | 37 | object UdtScalaTest1 { 38 | 39 | def getInnerCodec(session: CqlSession, 40 | keyspace: String, 41 | typeName: String): TypeCodec[UdtValue] = { 42 | val udtTypeOption = session.getMetadata.getKeyspace(keyspace).asScala 43 | .flatMap(ks => ks.getUserDefinedType(typeName).asScala) 44 | 45 | udtTypeOption match { 46 | case None => throw new RuntimeException(s"No UDT $typeName in keyspace $keyspace") 47 | case Some(udtType) => 48 | session.getContext.getCodecRegistry.codecFor(udtType) 49 | } 50 | } 51 | 52 | 53 | def main(args: Array[String]): Unit = { 54 | val session = CqlSession.builder() 55 | 
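// A sketch of the schema this demo assumes; the DDL itself is not part of the repo dump, so it is
// reconstructed here from the fields accessed by UDTTest and from the query against test.u2:
//
//   create type test.udt (id int, t1 int, t2 int, a2 int);
//   create table test.u2 (id int primary key, u frozen<udt>);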
.addContactPoints(Commons.getContactPoints("10.101.34.176")) 56 | .build(); 57 | 58 | val codecRegistry = session.getContext.getCodecRegistry 59 | val innerCodec = getInnerCodec(session, "test", "udt") 60 | val udtCodec = new UDTTestCodec(innerCodec) 61 | 62 | codecRegistry.asInstanceOf[MutableCodecRegistry].register(udtCodec) 63 | 64 | session.execute("select * from test.u2") 65 | .all().asScala 66 | .foreach(x => println(x.getInt("id"), 67 | x.get("u", GenericType.of(classOf[UDTTest])))) 68 | 69 | 70 | session.close() 71 | 72 | 73 | } 74 | 75 | } 76 | -------------------------------------------------------------------------------- /driver-1.x/src/main/java/com/datastax/alexott/demos/SessionLimiter.java: -------------------------------------------------------------------------------- 1 | package com.datastax.alexott.demo; 2 | 3 | import java.util.Map; 4 | import java.util.concurrent.Executor; 5 | import java.util.concurrent.Semaphore; 6 | 7 | import com.datastax.driver.core.HostDistance; 8 | import com.datastax.driver.core.PoolingOptions; 9 | import com.datastax.driver.core.ResultSetFuture; 10 | import com.datastax.driver.core.Session; 11 | import com.datastax.driver.core.Statement; 12 | import com.google.common.util.concurrent.MoreExecutors; 13 | 14 | public class SessionLimiter { 15 | final Session session; 16 | final Semaphore semaphore; 17 | final int limit; 18 | Executor executor = MoreExecutors.directExecutor(); 19 | 20 | static int getNumberOfRequests(final Session session) { 21 | PoolingOptions poolingOptions = session.getCluster().getConfiguration().getPoolingOptions(); 22 | int requestsPerConnection = poolingOptions.getMaxRequestsPerConnection(HostDistance.LOCAL); 23 | int maxConnections = poolingOptions.getCoreConnectionsPerHost(HostDistance.LOCAL); 24 | 25 | return (int) (requestsPerConnection * maxConnections * 0.95); 26 | } 27 | 28 | public SessionLimiter(final Session session) { 29 | this(session, getNumberOfRequests(session)); 30 | } 31 | 32 | public SessionLimiter(final Session session, int limit) { 33 | System.out.println("Initializing SessionLimiter with limit=" + limit); 34 | this.session = session; 35 | this.limit = limit; 36 | semaphore = new Semaphore(limit); 37 | } 38 | 39 | public ResultSetFuture executeAsync(Statement statement) throws InterruptedException { 40 | semaphore.acquire(); 41 | ResultSetFuture future = session.executeAsync(statement); 42 | future.addListener(() -> semaphore.release(), executor); 43 | return future; 44 | } 45 | 46 | public ResultSetFuture executeAsync(String query) throws InterruptedException { 47 | semaphore.acquire(); 48 | ResultSetFuture future = session.executeAsync(query); 49 | future.addListener(() -> semaphore.release(), executor); 50 | return future; 51 | } 52 | 53 | public ResultSetFuture executeAsync(String query, Object... 
values) throws InterruptedException { 54 | semaphore.acquire(); 55 | ResultSetFuture future = session.executeAsync(query, values); 56 | future.addListener(() -> semaphore.release(), executor); 57 | return future; 58 | } 59 | 60 | public ResultSetFuture executeAsync(String query, Map values) throws InterruptedException { 61 | semaphore.acquire(); 62 | ResultSetFuture future = session.executeAsync(query, values); 63 | future.addListener(() -> semaphore.release(), executor); 64 | return future; 65 | } 66 | 67 | public void waitForFinish() throws InterruptedException { 68 | while (semaphore.availablePermits() != limit) { 69 | Thread.sleep(200); 70 | } 71 | } 72 | } 73 | -------------------------------------------------------------------------------- /driver-4.x/src/main/java/com/datastax/alexott/demos/Commons.java: -------------------------------------------------------------------------------- 1 | package com.datastax.alexott.demos; 2 | 3 | import com.datastax.oss.driver.api.core.CqlSession; 4 | import com.datastax.oss.driver.api.core.cql.ResultSet; 5 | import com.datastax.oss.driver.api.core.cql.SimpleStatement; 6 | import com.datastax.oss.driver.api.core.metadata.Node; 7 | import com.datastax.oss.driver.internal.mapper.processor.util.generation.PropertyType; 8 | 9 | import java.net.InetSocketAddress; 10 | import java.util.Arrays; 11 | import java.util.Collection; 12 | import java.util.Map; 13 | import java.util.TreeMap; 14 | import java.util.function.Function; 15 | import java.util.stream.Collectors; 16 | 17 | public class Commons { 18 | private static final int CQL_PORT = Integer.parseInt(System.getProperty("cqlPort", "9042")); 19 | public static final int WAIT_TIME = 500; 20 | public static final int WAIT_CYCLES = 100; 21 | 22 | public static Collection getContactPoints(final String contactPoints) { 23 | return Arrays.stream(contactPoints.split(",")) 24 | .map(host -> InetSocketAddress.createUnresolved(host, CQL_PORT)) 25 | .collect(Collectors.toList()); 26 | 27 | } 28 | 29 | 30 | public static Collection getContactPoints() { 31 | return getContactPoints(System.getProperty("contactPoints", "localhost")); 32 | 33 | } 34 | 35 | public static void executeDDL(CqlSession session, SimpleStatement statement) throws InterruptedException { 36 | ResultSet rs = session.execute(statement); 37 | if (!rs.getExecutionInfo().isSchemaInAgreement()) { 38 | int cnt = 0; 39 | while(!session.checkSchemaAgreement()) { 40 | Thread.sleep(WAIT_TIME); 41 | cnt++; 42 | if (cnt > WAIT_CYCLES) { 43 | throw new RuntimeException(String.format("Can't reach schema agreement after %d seconds", 44 | WAIT_CYCLES*WAIT_CYCLES/1000)); 45 | } 46 | } 47 | } 48 | 49 | } 50 | 51 | public static void executeDDL(CqlSession session, String query) throws InterruptedException { 52 | executeDDL(session, SimpleStatement.newInstance(query)); 53 | 54 | } 55 | 56 | public static Map getDataCenters(CqlSession session) { 57 | Map m = session.getMetadata().getNodes().values().stream().map(Node::getDatacenter) 58 | .collect(Collectors.groupingBy(Function.identity(), Collectors.counting())); 59 | Map converted = new TreeMap<>(); 60 | for (Map.Entry e: m.entrySet()) { 61 | converted.put(e.getKey(), e.getValue().intValue()); 62 | } 63 | return converted; 64 | } 65 | } 66 | -------------------------------------------------------------------------------- /driver-1.x/src/main/java/com/datastax/alexott/demos/CassandraHealthCheck.java: -------------------------------------------------------------------------------- 1 | package 
com.datastax.alexott.demos; 2 | 3 | import com.datastax.driver.core.Cluster; 4 | import com.datastax.driver.core.Host; 5 | import com.datastax.driver.core.Metrics; 6 | import com.datastax.driver.core.Session; 7 | 8 | import java.util.Set; 9 | import java.util.stream.Collectors; 10 | 11 | public class CassandraHealthCheck { 12 | 13 | static boolean clusterCheckMetrics(Cluster cluster) { 14 | Set<Host> hosts = cluster.getMetadata().getAllHosts(); 15 | Metrics metrics = cluster.getMetrics(); 16 | return metrics.getConnectedToHosts().getValue() > (hosts.size() /2); 17 | } 18 | 19 | static boolean clusterCheckMetrics(Cluster cluster, String dcName) { 20 | Set<Host> hosts = cluster.getMetadata().getAllHosts() 21 | .stream().filter(h -> h.getDatacenter().equals(dcName)) 22 | .collect(Collectors.toSet()); 23 | Metrics metrics = cluster.getMetrics(); 24 | return metrics.getConnectedToHosts().getValue() > (hosts.size() /2); 25 | } 26 | 27 | static boolean clusterCheckMetadata(Cluster cluster, String dcName) { 28 | Set<Host> hosts = cluster.getMetadata().getAllHosts() 29 | .stream().filter(h -> h.getDatacenter().equals(dcName)) 30 | .collect(Collectors.toSet()); 31 | Set<Host> aliveHosts = hosts.stream() 32 | .filter(h -> h.isUp()) 33 | .collect(Collectors.toSet()); 34 | return aliveHosts.size() > (hosts.size() /2); 35 | } 36 | 37 | static boolean clusterCheckMetadata(Cluster cluster) { 38 | Set<Host> hosts = cluster.getMetadata().getAllHosts(); 39 | Set<Host> aliveHosts = hosts.stream() 40 | .filter(h -> h.isUp()) 41 | .collect(Collectors.toSet()); 42 | return aliveHosts.size() > (hosts.size() /2); 43 | } 44 | 45 | public static void main(String[] args) throws InterruptedException { 46 | String server = System.getProperty("contactPoint", "127.0.0.1"); 47 | String dcName = System.getProperty("dcName", ""); 48 | try (Cluster cluster = Cluster.builder().addContactPoint(server).build(); 49 | Session session = cluster.connect()) { 50 | 51 | while(true) { 52 | System.out.println("Metrics, global. Is DSE Alive? " + clusterCheckMetrics(cluster)); 53 | System.out.println("Metadata, global. Is DSE Alive? " + clusterCheckMetadata(cluster)); 54 | if (!dcName.isEmpty()) { 55 | System.out.println("Metrics, with DC. Is DSE Alive? " + clusterCheckMetrics(cluster, dcName)); 56 | System.out.println("Metadata, with DC. Is DSE Alive? 
" + clusterCheckMetadata(cluster, dcName)); 57 | } 58 | Thread.sleep(10000); 59 | } 60 | 61 | } 62 | } 63 | } 64 | -------------------------------------------------------------------------------- /driver-1.x/src/main/java/com/datastax/alexott/demos/WhiteListPolicyExample.java: -------------------------------------------------------------------------------- 1 | package com.datastax.alexott.demos; 2 | 3 | import com.datastax.driver.core.Cluster; 4 | import com.datastax.driver.core.Metadata; 5 | import com.datastax.driver.core.ResultSet; 6 | import com.datastax.driver.core.Row; 7 | import com.datastax.driver.core.Session; 8 | import com.datastax.driver.core.policies.LoadBalancingPolicy; 9 | import com.datastax.driver.core.policies.RoundRobinPolicy; 10 | import com.datastax.driver.core.policies.WhiteListPolicy; 11 | 12 | import javax.xml.transform.Result; 13 | import java.net.InetSocketAddress; 14 | import java.util.Collections; 15 | 16 | public class WhiteListPolicyExample { 17 | static final int DSE_PORT = 9042; 18 | 19 | public static void main(String[] args) throws InterruptedException { 20 | String server = System.getProperty("contactPoint", "127.0.0.1"); 21 | 22 | // Notice, that in the driver 3.6, DSE driver 1.7, there is a static function 'ofHosts' 23 | // for easier construction of white list policies 24 | LoadBalancingPolicy lbpolicy = new WhiteListPolicy(new RoundRobinPolicy(), 25 | Collections.singletonList(new InetSocketAddress(server, DSE_PORT))); 26 | Cluster.Builder builder = Cluster.builder().addContactPoint(server) 27 | .withLoadBalancingPolicy(lbpolicy); 28 | 29 | Cluster cluster = builder.build(); 30 | Session session = cluster.connect(); 31 | 32 | String[] commands = {"drop keyspace if exists whtest;", 33 | "create keyspace whtest WITH replication = {'class': 'SimpleStrategy', 'replication_factor': 1};", 34 | "create table whtest.whtest(id int primary key, t text);", 35 | "create table whtest.whtest2(id int primary key, t text);"}; 36 | 37 | Metadata metadata = cluster.getMetadata(); 38 | for (int i = 0; i < commands.length; i++) { 39 | System.out.println("Executing '" + commands[i] + "'"); 40 | ResultSet rs = session.execute(commands[i]); 41 | if (!rs.getExecutionInfo().isSchemaInAgreement()) { 42 | while (!metadata.checkSchemaAgreement()) { 43 | System.out.println("Schema isn't in agreement, sleep 1 second..."); 44 | Thread.sleep(1000); 45 | } 46 | } 47 | } 48 | // just to be sure, and to show that it could be done via Metadata as well 49 | for (int i = 0; i < 5; i++) { 50 | session.execute(String.format("insert into whtest.whtest(id, t) values(%d, 'test %d');",i, i)); 51 | } 52 | 53 | ResultSet rs = session.execute("select count(*) from whtest.whtest;"); 54 | Row row = rs.one(); 55 | System.out.println("There are " + row.getLong(0) + " rows in the whtest table..."); 56 | 57 | session.close(); 58 | cluster.close(); 59 | } 60 | 61 | } 62 | -------------------------------------------------------------------------------- /spark-oss/src/main/java/com/datastax/alexott/demos/spark/streaming/StructuredStreaming.java: -------------------------------------------------------------------------------- 1 | package com.datastax.alexott.demos.spark.streaming; 2 | 3 | import org.apache.spark.api.java.function.VoidFunction2; 4 | import org.apache.spark.sql.Dataset; 5 | import org.apache.spark.sql.Row; 6 | import org.apache.spark.sql.SaveMode; 7 | import org.apache.spark.sql.SparkSession; 8 | import org.apache.spark.sql.streaming.OutputMode; 9 | import 
org.apache.spark.sql.streaming.StreamingQuery; 10 | import org.apache.spark.sql.streaming.StreamingQueryException; 11 | import org.apache.spark.sql.types.StructType; 12 | import org.spark_project.guava.collect.ImmutableMap; 13 | 14 | import static org.apache.spark.sql.types.DataTypes.IntegerType; 15 | import static org.apache.spark.sql.types.DataTypes.StringType; 16 | import static org.apache.spark.sql.types.DataTypes.TimestampType; 17 | import static org.apache.spark.sql.functions.col; 18 | 19 | public class StructuredStreaming { 20 | public static void main(String[] args) throws StreamingQueryException { 21 | SparkSession spark = SparkSession 22 | .builder() 23 | .appName("CassandraTableCreate") 24 | .getOrCreate(); 25 | 26 | String sourceData = "webhdfs://127.0.0.1:5598/sttest/"; 27 | 28 | 29 | StructType csvSchema = new StructType() 30 | .add("stationid", StringType) 31 | .add("metric", StringType).add("date", TimestampType) 32 | .add("location", StringType).add("", IntegerType) 33 | .add("max", IntegerType).add("mean", IntegerType) 34 | .add("median", IntegerType).add("min", IntegerType) 35 | .add("percentile1", IntegerType).add("percentile5", IntegerType) 36 | .add("percentile95", IntegerType).add("percentile99", IntegerType) 37 | .add("total", IntegerType); 38 | 39 | Dataset inputDF = spark.readStream() 40 | .schema(csvSchema) 41 | .option("maxFilesPerTrigger", 1) 42 | .option("header", true) 43 | .csv(sourceData); 44 | Dataset groupedDF = inputDF 45 | .where(col("location").isNotNull()) 46 | .groupBy(col("location")) 47 | .count(); 48 | 49 | StreamingQuery query = groupedDF.writeStream() 50 | .outputMode(OutputMode.Update()) 51 | .option("checkpointLocation", "webhdfs://127.0.0.1:5598/checkpoint") 52 | .foreachBatch((VoidFunction2, Long>) (df, batchId) -> 53 | df.write() 54 | .format("org.apache.spark.sql.cassandra") 55 | .options(ImmutableMap.of("table", "sttest", "keyspace", "test")) 56 | .mode(SaveMode.Append) 57 | .save() 58 | ) 59 | .start(); 60 | 61 | query.awaitTermination(); 62 | 63 | 64 | } 65 | } 66 | -------------------------------------------------------------------------------- /prometheus-java-driver/pom.xml: -------------------------------------------------------------------------------- 1 | 3 | 4.0.0 4 | 5 | com.datastax.alexott.demos 6 | prom-java-driver 7 | 0.0.1 8 | jar 9 | 10 | http://maven.apache.org 11 | 12 | 13 | UTF-8 14 | 4.5.1 15 | 1.8 16 | 0.8.0 17 | 18 | 19 | 20 | 21 | com.datastax.oss 22 | java-driver-core 23 | ${java.dse.version} 24 | 25 | 26 | io.dropwizard.metrics 27 | metrics-jmx 28 | 4.0.2 29 | 30 | 31 | io.prometheus 32 | simpleclient_dropwizard 33 | ${prometheus.version} 34 | 35 | 36 | io.prometheus 37 | simpleclient_common 38 | ${prometheus.version} 39 | 40 | 41 | io.prometheus 42 | simpleclient_hotspot 43 | ${prometheus.version} 44 | 45 | 46 | io.prometheus 47 | simpleclient_httpserver 48 | ${prometheus.version} 49 | 50 | 51 | 52 | 53 | 54 | 55 | maven-compiler-plugin 56 | 3.6.1 57 | 58 | ${java.version} 59 | ${java.version} 60 | true 61 | 62 | 63 | 64 | org.apache.maven.plugins 65 | maven-assembly-plugin 66 | 3.1.0 67 | 68 | 69 | jar-with-dependencies 70 | 71 | 72 | 73 | 74 | package 75 | 76 | single 77 | 78 | 79 | 80 | 81 | 82 | org.codehaus.mojo 83 | exec-maven-plugin 84 | 1.6.0 85 | 86 | 87 | 88 | 89 | 90 | -------------------------------------------------------------------------------- /driver-1.x/src/main/java/com/datastax/alexott/demos/TestResultSerializer.java: 
-------------------------------------------------------------------------------- 1 | package com.datastax.alexott.demos; 2 | 3 | import com.datastax.driver.core.Cluster; 4 | import com.datastax.driver.core.ResultSet; 5 | import com.datastax.driver.core.Row; 6 | import com.datastax.driver.core.Session; 7 | import com.fasterxml.jackson.core.JsonProcessingException; 8 | import com.fasterxml.jackson.databind.ObjectMapper; 9 | import com.fasterxml.jackson.databind.module.SimpleModule; 10 | 11 | /* 12 | 13 | drop table if exists test.rstest; 14 | drop type if exists test.tudt; 15 | create type test.tudt ( 16 | id int, 17 | t text, 18 | lst frozen> 19 | ); 20 | 21 | create table test.rstest ( 22 | id int primary key, 23 | text text, 24 | date date, 25 | timestamp timestamp, 26 | time time, 27 | uuid uuid, 28 | tuuid timeuuid, 29 | m1 map, 30 | m2 frozen>, 31 | l1 list, 32 | l2 frozen>, 33 | s1 set, 34 | s2 frozen>, 35 | udt test.tudt, 36 | ludt1 list>, 37 | ludt2 frozen>, 38 | blob blob, 39 | bool boolean, 40 | dec decimal, 41 | double double, 42 | float float, 43 | bigint bigint, 44 | smallint smallint, 45 | tinyint tinyint, 46 | varint varint, 47 | ascii ascii, 48 | tuple tuple, 49 | varchar varchar, 50 | nullval text 51 | ); 52 | 53 | insert into test.rstest(id, text, date, timestamp, time, uuid, tuuid, m1, m2, l1, l2, s1, s2, 54 | udt, ludt1, ludt2, blob, bool, dec, double, float, bigint, smallint, tinyint, varint, ascii, tuple, varchar) 55 | values (1, 'text', '2019-01-29', toTimestamp(now()), '04:05:00.234', 123e4567-e89b-12d3-a456-426655440000, 56 | now(), {1:'m1', 2:'m2'}, {'m1':1, 'm2':2}, [1,2,3], [1,2,3], {1,2,3}, {'1','2','3'}, 57 | {id: 1, t: 'text', lst: [1,2,3]}, [{id: 1, t: 'text', lst: [1,2,3]}, {id: 2, t: 'text2'}], 58 | [{id: 1, t: 'text', lst: [1,2,3]}, {id: 2, t: 'text2'}], bigintAsBlob(1024), true, 123562352352.0, 59 | 10.015, 20.030, 123562352352, 10000, 10, 124325345634643900999999, 'ascii', (1, 'text', 10.0), 'varchar'); 60 | 61 | */ 62 | 63 | public class TestResultSerializer { 64 | public static void main(String[] args) throws JsonProcessingException { 65 | String server = System.getProperty("contactPoint", "127.0.0.1"); 66 | try (Cluster cluster = Cluster.builder().addContactPoint(server).build(); 67 | Session session = cluster.connect()) { 68 | 69 | ResultSet rs = session.execute("select json * from test.rstest ;"); 70 | int i = 0; 71 | System.out.print("["); 72 | for (Row row : rs) { 73 | if (i > 0) 74 | System.out.print(","); 75 | i++; 76 | String json = row.getString(0); 77 | System.out.print(json); 78 | } 79 | System.out.println("]"); 80 | 81 | ObjectMapper mapper = new ObjectMapper(); 82 | SimpleModule module = new SimpleModule(); 83 | module.addSerializer(ResultSet.class, new ResultSetSerializer()); 84 | mapper.registerModule(module); 85 | 86 | rs = session.execute("select * from test.rstest ;"); 87 | String json = mapper.writeValueAsString(rs); 88 | System.out.println(json); 89 | } 90 | } 91 | 92 | } 93 | -------------------------------------------------------------------------------- /driver-1.x/src/main/java/com/datastax/alexott/demos/objmapper/ExpMaps.java: -------------------------------------------------------------------------------- 1 | package com.datastax.alexott.demos; 2 | 3 | import com.datastax.driver.core.Cluster; 4 | import com.datastax.driver.core.PreparedStatement; 5 | import com.datastax.driver.core.ResultSet; 6 | import com.datastax.driver.core.Session; 7 | import com.datastax.driver.mapping.Mapper; 8 | import 
com.datastax.driver.mapping.MappingManager; 9 | 10 | import java.text.DateFormat; 11 | import java.util.Collections; 12 | import java.util.HashSet; 13 | import java.util.Locale; 14 | import java.util.Random; 15 | 16 | public class ExpMaps { 17 | 18 | public static void main(String[] args) throws InterruptedException { 19 | String server = System.getProperty("contactPoint", "127.0.0.1"); 20 | Cluster.Builder builder = Cluster.builder().addContactPoint(server); 21 | 22 | Locale locales[] = DateFormat.getAvailableLocales(); 23 | System.out.println("We have " + locales.length + " locales"); 24 | 25 | try (Cluster cluster = builder.build(); 26 | Session session = cluster.connect()) { 27 | 28 | PreparedStatement stmt1 = session.prepare ( 29 | "update srs.entities set pop_a_ = pop_a_ + ? where hcom_geo_id = ?;"); 30 | 31 | PreparedStatement stmt4 = session.prepare ( 32 | "select * from srs.entities_udt where hcom_geo_id = ?;"); 33 | PreparedStatement stmt5 = session.prepare ( 34 | "select * from srs.entities where hcom_geo_id = ?;"); 35 | 36 | MappingManager manager = new MappingManager(session); 37 | Mapper mapper = manager.mapper(ExpEntity.class); 38 | 39 | Random rnd = new Random(); 40 | 41 | int maxCount = 10000; 42 | for (int i = 0; i < maxCount; i++) { 43 | int id = rnd.nextInt(maxCount); 44 | 45 | ResultSet rs; 46 | // insert via update 47 | for (int j = 0; j < locales.length; j++) { 48 | rs = session.execute(stmt1.bind( 49 | Collections.singletonMap(locales[j].getDisplayName(), rnd.nextDouble()), id)); 50 | } 51 | rs = session.execute(stmt5.bind(id)); 52 | if (rs != null) { 53 | if (rs.one() != null) { 54 | } 55 | } 56 | 57 | // 58 | ExpEntity entity = mapper.get(id); 59 | if (entity == null) { 60 | entity = new ExpEntity(); 61 | entity.setHcom_geo_id(id); 62 | entity.setPopularity(new HashSet()); 63 | } 64 | for (int j = 0; j < locales.length; j++) { 65 | entity.getPopularity().add(new ExpPopularity(locales[j].getDisplayName(), 66 | rnd.nextDouble(), rnd.nextDouble())); 67 | } 68 | mapper.save(entity); 69 | 70 | rs = session.execute(stmt4.bind(id)); 71 | if (rs != null) { 72 | if (rs.one() != null) { 73 | } 74 | } 75 | } 76 | } 77 | } 78 | 79 | } 80 | -------------------------------------------------------------------------------- /cassandra-join-spark/src/main/java/json/utils/ExchangeUtils.java: -------------------------------------------------------------------------------- 1 | package json.utils; 2 | 3 | import json.ticks.TickData; 4 | 5 | import com.opencsv.CSVReader; 6 | import com.opencsv.ICSVParser; 7 | import java.io.File; 8 | import java.io.FileFilter; 9 | import java.io.FileNotFoundException; 10 | import java.io.FileReader; 11 | import java.io.IOException; 12 | import java.util.ArrayList; 13 | import java.util.HashMap; 14 | import java.util.List; 15 | import org.slf4j.Logger; 16 | import org.slf4j.LoggerFactory; 17 | 18 | public class ExchangeUtils { 19 | 20 | private static final Logger log = LoggerFactory.getLogger("ExchangeUtils"); 21 | 22 | private static final CharSequence EXCHANGEDATA = "exchangedata"; 23 | 24 | private static CSVReader getCSVReader(File file) throws IOException{ 25 | char quoteChar = ICSVParser.DEFAULT_QUOTE_CHARACTER; 26 | char delimiterChar = ICSVParser.DEFAULT_SEPARATOR; 27 | return new CSVReader(new FileReader(file.getAbsolutePath()), delimiterChar, quoteChar, 0); 28 | } 29 | 30 | private static List getAllFilesThatContain(File searchDir, String containsString){ 31 | List allFilesThatContain = new ArrayList<>(); 32 | 33 | File[] files = 
searchDir.listFiles(new FileFilter() { 34 | public boolean accept(File file) { 35 | return file.isFile(); 36 | } 37 | }); 38 | 39 | for (File file : files) { 40 | if (file.getName().contains(containsString)) { 41 | allFilesThatContain.add(file); 42 | } 43 | } 44 | 45 | return allFilesThatContain; 46 | } 47 | 48 | public static List getExchangeData() { 49 | 50 | List allStocks = new ArrayList<>(); 51 | 52 | // Process all the files from the csv directory 53 | File csvDir = new File(".", "src/main/resources/json/csv"); 54 | 55 | List files = getAllFilesThatContain(csvDir, EXCHANGEDATA.toString()); 56 | 57 | for (File file : files) { 58 | try { 59 | allStocks.addAll(getExchangeData(file)); 60 | } catch (FileNotFoundException e) { 61 | System.out.println("Could not process file : " + file.getAbsolutePath()); 62 | e.printStackTrace(); 63 | System.exit(1); 64 | } catch (IOException e) { 65 | System.out.println("Could not process file : " + file.getAbsolutePath()); 66 | e.printStackTrace(); 67 | System.exit(1); 68 | } catch (InterruptedException e) { 69 | e.printStackTrace(); 70 | System.exit(1); 71 | } 72 | } 73 | return allStocks; 74 | } 75 | 76 | private static List getExchangeData(File file) throws IOException, InterruptedException{ 77 | CSVReader reader = getCSVReader(file); 78 | 79 | String[] items; 80 | 81 | List stocksList = new ArrayList<>(); 82 | 83 | while ((items = reader.readNext()) != null) { 84 | stocksList.add(new TickData( 85 | items[1].trim(), 86 | Double.valueOf(items[2].trim()))); 87 | } 88 | 89 | reader.close(); 90 | return stocksList; 91 | } 92 | } 93 | 94 | -------------------------------------------------------------------------------- /driver-1.x/src/main/java/com/datastax/alexott/demos/graph/GraphLoad.java: -------------------------------------------------------------------------------- 1 | package com.datastax.alexott.demos.graph; 2 | 3 | import com.datastax.driver.dse.DseCluster; 4 | import com.datastax.driver.dse.DseSession; 5 | import com.datastax.driver.dse.graph.GraphOptions; 6 | import com.datastax.driver.dse.graph.GraphStatement; 7 | import com.datastax.driver.dse.graph.SimpleGraphStatement; 8 | import com.datastax.dse.graph.api.DseGraph; 9 | import com.datastax.dse.graph.api.TraversalBatch; 10 | import com.github.javafaker.Faker; 11 | import org.apache.tinkerpop.gremlin.process.traversal.dsl.graph.GraphTraversal; 12 | import org.apache.tinkerpop.gremlin.process.traversal.dsl.graph.GraphTraversalSource; 13 | 14 | import java.util.Random; 15 | import java.util.UUID; 16 | 17 | public class GraphLoad { 18 | static final int NUM_ORGS = 1000; 19 | static final int NUM_PEOPLE = 10000; 20 | static final int NUM_ACCOUNTS = 10000; 21 | 22 | public static void main(String[] args) { 23 | String server = System.getProperty("contactPoint", "127.0.0.1"); 24 | try (DseCluster dseCluster = DseCluster.builder().addContactPoints(server) 25 | .withGraphOptions(new GraphOptions().setGraphName("C720")).build(); 26 | DseSession session = dseCluster.connect()) { 27 | 28 | Faker faker = new Faker(); 29 | Random rnd = new Random(); 30 | 31 | GraphTraversalSource g = DseGraph.traversal(); 32 | for (int i = 0; i < NUM_ORGS; i++) { 33 | System.out.println("Step=" + i); 34 | String istr = Integer.toString(i); 35 | // TraversalBatch batch = DseGraph.batch(); 36 | GraphTraversal v1 = g.addV("organization").property("name", faker.company().name()) 37 | .property("type", faker.company().buzzword()) 38 | .property("id", istr); 39 | // System.out.println("v1=" + v1); 40 | if (v1 == null) { 41 | 
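// Defensive check only: g.addV(...) builds the traversal on the client side, so v1 is not expected
// to be null here. If the per-vertex executeGraph() round-trips below become a bottleneck, the
// commented-out TraversalBatch approach (DseGraph.batch(), batch.add(traversal), then executing the
// batch once) is the usual way to group several mutations into a single server call.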
System.out.println("v1 == null!"); 42 | continue; 43 | } 44 | // batch.add(v1); 45 | session.executeGraph(DseGraph.statementFromTraversal(v1)); 46 | 47 | 48 | if (i > 2) { 49 | String v2str = Integer.toString(rnd.nextInt(i-1)); 50 | session.executeGraph(DseGraph.statementFromTraversal( 51 | g.V().has("organization", "id", istr) 52 | .as("v1").V().has("organization", "id", v2str) 53 | .addE("is_parent").from("v1").property("distance", "1"))); 54 | session.executeGraph(DseGraph.statementFromTraversal( 55 | g.V().has("organization", "id", istr) 56 | .as("v1").V().has("organization", "id", v2str) 57 | .addE("is_direct_parent").from("v1"))); 58 | 59 | if (rnd.nextDouble() > 0.8) { 60 | String v3str = Integer.toString(rnd.nextInt(i-2)); 61 | session.executeGraph(DseGraph.statementFromTraversal( 62 | g.V().has("organization", "id", istr) 63 | .as("v1").V().has("organization", "id", v3str) 64 | .addE("is_parent").from("v1").property("distance", "2"))); 65 | session.executeGraph(DseGraph.statementFromTraversal( 66 | g.V().has("organization", "id", v2str) 67 | .as("v1").V().has("organization", "id", v3str) 68 | .addE("is_parent").from("v1").property("distance", "1"))); 69 | session.executeGraph(DseGraph.statementFromTraversal( 70 | g.V().has("organization", "id", v2str) 71 | .as("v1").V().has("organization", "id", v3str) 72 | .addE("is_direct_parent").from("v1"))); 73 | } 74 | } 75 | } 76 | } 77 | } 78 | } 79 | -------------------------------------------------------------------------------- /driver-1.x/src/main/java/com/datastax/alexott/demos/TokenRangesScan.java: -------------------------------------------------------------------------------- 1 | package com.datastax.alexott.demos; 2 | 3 | import com.datastax.driver.core.Cluster; 4 | import com.datastax.driver.core.Metadata; 5 | import com.datastax.driver.core.ProtocolVersion; 6 | import com.datastax.driver.core.ResultSet; 7 | import com.datastax.driver.core.Row; 8 | import com.datastax.driver.core.Session; 9 | import com.datastax.driver.core.SimpleStatement; 10 | import com.datastax.driver.core.Token; 11 | import com.datastax.driver.core.TokenRange; 12 | 13 | import java.util.ArrayList; 14 | import java.util.Collections; 15 | import java.util.HashMap; 16 | import java.util.List; 17 | import java.util.Map; 18 | 19 | // create table test.range_scan(id bigint, col1 int, col2 bigint, primary key(id, col1)); 20 | 21 | public class TokenRangesScan { 22 | public static void main(String[] args) { 23 | String server = System.getProperty("contactPoint", "127.0.0.1"); 24 | Cluster cluster = Cluster.builder() 25 | .addContactPoint(server) 26 | .build(); 27 | Session session = cluster.connect(); 28 | 29 | Metadata metadata = cluster.getMetadata(); 30 | List ranges = new ArrayList(metadata.getTokenRanges()); 31 | Collections.sort(ranges); 32 | System.out.println("Processing " + (ranges.size() + 1) + " token ranges..."); 33 | 34 | Token minToken = ranges.get(0).getStart(); 35 | String baseQuery = "SELECT id, col1 FROM test.range_scan WHERE "; 36 | Map queries = new HashMap<>(); 37 | // generate queries for every range 38 | for (int i = 0; i < ranges.size(); i++) { 39 | TokenRange range = ranges.get(i); 40 | Token rangeStart = range.getStart(); 41 | Token rangeEnd = range.getEnd(); 42 | System.out.println("i=" + i + ", start=" + rangeStart + ", end=" + rangeEnd); 43 | if (rangeStart.equals(rangeEnd)) { 44 | queries.put(baseQuery + "token(id) >= " + minToken, minToken); 45 | } else if (i == 0) { 46 | queries.put(baseQuery + "token(id) <= " + minToken, minToken); 
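// The branching here handles the wrap-around of the token ring: the range whose end equals minToken
// wraps past the largest token back to the smallest one, so it cannot be expressed as a single
// "token(id) > a AND token(id) <= b" predicate; it is covered instead by two one-sided queries
// ("token(id) <= minToken" just above and "token(id) > start" in the branch below). All other
// ranges map onto a plain half-open interval, and a single range whose start equals its end covers
// the whole ring with one ">= minToken" query.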
47 | queries.put(baseQuery + "token(id) > " + rangeStart + " AND token(id) <= " + rangeEnd, rangeEnd); 48 | } else if (rangeEnd.equals(minToken)) { 49 | queries.put(baseQuery + "token(id) > " + rangeStart, rangeEnd); 50 | } else { 51 | queries.put(baseQuery + "token(id) > " + rangeStart + " AND token(id) <= " + rangeEnd, rangeEnd); 52 | } 53 | } 54 | 55 | // Note: It could be speedup by using async queries, but for illustration it's ok 56 | long rowCount = 0; 57 | // That is needed if OSS driver is used... 58 | // ProtocolVersion protocolVersion = cluster.getConfiguration().getProtocolOptions().getProtocolVersion(); 59 | for (Map.Entry entry : queries.entrySet()) { 60 | SimpleStatement statement = new SimpleStatement(entry.getKey()); 61 | // !!! This function is available only in Java DSE driver, not OSS !!! 62 | statement.setRoutingToken(entry.getValue()); 63 | // for OSS driver, following code should be used 64 | // statement.setRoutingKey(entry.getValue().serialize(protocolVersion)); 65 | ResultSet rs = session.execute(statement); 66 | long rangeCount = 0; 67 | for (Row row : rs) { 68 | rangeCount++; 69 | } 70 | System.out.println("Processed range ending at " + entry.getValue() + ". Row count: " 71 | + rangeCount + ", query: \"" + entry.getKey() + "\""); 72 | rowCount += rangeCount; 73 | } 74 | System.out.println("Total row count: " + rowCount); 75 | } 76 | } 77 | -------------------------------------------------------------------------------- /driver-1.x/src/main/java/com/datastax/alexott/demos/DumpClusterConfig.java: -------------------------------------------------------------------------------- 1 | package com.datastax.alexott.demos; 2 | 3 | import com.datastax.driver.core.Cluster; 4 | import com.datastax.driver.core.Configuration; 5 | import com.datastax.driver.core.HostDistance; 6 | import com.datastax.driver.core.Session; 7 | import com.google.common.base.Strings; 8 | 9 | import java.lang.reflect.InvocationTargetException; 10 | import java.lang.reflect.Method; 11 | import java.util.HashSet; 12 | import java.util.Set; 13 | 14 | public class DumpClusterConfig { 15 | private static final HostDistance distance = HostDistance.LOCAL; 16 | 17 | private static final Set primitiveClasses = new HashSet() {{ 18 | add(Long.class); 19 | add(String.class); 20 | add(Integer.class); 21 | add(Boolean.class); 22 | add(Double.class); 23 | add(Float.class); 24 | }}; 25 | 26 | private static final Set excludedFunctions = new HashSet() {{ 27 | add("getDeclaringClass"); 28 | add("getClass"); 29 | }}; 30 | 31 | private static void dumpClass(Object obj, int level) { 32 | String ident = Strings.repeat(" ", level); 33 | 34 | for (Method method : obj.getClass().getMethods()) { 35 | String name = method.getName(); 36 | if ((name.startsWith("get") || name.startsWith("is")) && !excludedFunctions.contains(name)) { 37 | try { 38 | Object result = null; 39 | if (method.getParameterCount() == 0) { 40 | result = method.invoke(obj); 41 | } else { 42 | Class[] params = method.getParameterTypes(); 43 | if (params.length == 1 && params[0].equals(distance.getDeclaringClass())) { 44 | for (HostDistance distance : HostDistance.values()) { 45 | result = method.invoke(obj, distance); 46 | System.out.println(ident + name + "(" + distance.name() + ")" + "=" + result); 47 | } 48 | continue; 49 | } else { 50 | System.out.println(name + ": method with " + params.length + "arguments"); 51 | } 52 | } 53 | if (result == null) { 54 | System.out.println(ident + name + "=" + result); 55 | continue; 56 | } 57 | Class resultClass = 
result.getClass(); 58 | if (primitiveClasses.contains(resultClass)) { 59 | System.out.println(ident + name + "=" + result); 60 | } else if (result instanceof Enum) { 61 | System.out.println(ident + name + "=" + ((Enum) result).name()); 62 | } else { 63 | System.out.println(ident + name + ": " + resultClass.getName()); 64 | dumpClass(result, level + 2); 65 | } 66 | } catch (IllegalAccessException e) { 67 | System.out.println("Could not determine method: " + method.getName()); 68 | } catch (InvocationTargetException e) { 69 | System.out.println("Could not determine method: " + method.getName()); 70 | } 71 | } 72 | } 73 | } 74 | 75 | public static void dumpConfig(Configuration conf) { 76 | dumpClass(conf, 0); 77 | } 78 | 79 | public static void main(String[] args) { 80 | String server = System.getProperty("contactPoint", "127.0.0.1"); 81 | try (Cluster cluster = Cluster.builder().addContactPoint(server).build(); 82 | Session session = cluster.connect()) { 83 | dumpConfig(cluster.getConfiguration()); 84 | } 85 | } 86 | } 87 | -------------------------------------------------------------------------------- /scc-2.5/pom.xml: -------------------------------------------------------------------------------- 1 | 3 | 4.0.0 4 | 5 | com.datastax.alexott.demos 6 | plg-spark-scc-2.5 7 | 0.0.1 8 | jar 9 | 10 | scc-2.5 11 | http://maven.apache.org 12 | 13 | 14 | UTF-8 15 | 2.11.12 16 | 2.4.6 17 | 2.11 18 | 2.5.1 19 | 1.8 20 | 21 | 22 | 23 | 24 | com.datastax.spark 25 | spark-cassandra-connector_${spark.scala.version} 26 | ${scc.version} 27 | 28 | 29 | org.apache.spark 30 | spark-sql_${spark.scala.version} 31 | ${spark.version} 32 | provided 33 | 34 | 35 | org.apache.spark 36 | spark-mllib_${spark.scala.version} 37 | ${spark.version} 38 | provided 39 | 40 | 41 | org.apache.spark 42 | spark-sql-kafka-0-10_${spark.scala.version} 43 | ${spark.version} 44 | 45 | 46 | org.apache.spark 47 | spark-core_${spark.scala.version} 48 | ${spark.version} 49 | provided 50 | 51 | 52 | org.scala-lang.modules 53 | scala-java8-compat_${spark.scala.version} 54 | 0.9.0 55 | 56 | 57 | 58 | 59 | 60 | 61 | maven-compiler-plugin 62 | 3.8.1 63 | 64 | ${java.version} 65 | ${java.version} 66 | true 67 | 68 | 69 | 70 | net.alchim31.maven 71 | scala-maven-plugin 72 | 4.3.0 73 | 74 | 75 | process-sources 76 | 77 | compile 78 | testCompile 79 | 80 | 81 | ${scala.version} 82 | 83 | 84 | 85 | 86 | 87 | org.apache.maven.plugins 88 | maven-assembly-plugin 89 | 3.2.0 90 | 91 | 92 | jar-with-dependencies 93 | 94 | 95 | 96 | 97 | package 98 | 99 | single 100 | 101 | 102 | 103 | 104 | 105 | 106 | 107 | 108 | -------------------------------------------------------------------------------- /driver-1.x/src/main/java/com/datastax/alexott/demos/QBuilder.java: -------------------------------------------------------------------------------- 1 | package com.datastax.alexott.demos; 2 | 3 | import com.datastax.driver.core.BatchStatement; 4 | import com.datastax.driver.core.Cluster; 5 | import com.datastax.driver.core.ConsistencyLevel; 6 | import com.datastax.driver.core.DataType; 7 | import com.datastax.driver.core.PreparedStatement; 8 | import com.datastax.driver.core.ResultSet; 9 | import com.datastax.driver.core.Row; 10 | import com.datastax.driver.core.Session; 11 | import com.datastax.driver.core.Statement; 12 | import com.datastax.driver.core.querybuilder.BuiltStatement; 13 | import com.datastax.driver.core.querybuilder.Ordering; 14 | import com.datastax.driver.core.querybuilder.QueryBuilder; 15 | import com.datastax.driver.core.querybuilder.Update; 
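// The statements below assume a simple table along these lines; the DDL is not included in the repo,
// so it is inferred from the columns being read and written (id, t, x, txt):
//
//   create table test.test (id int primary key, t text, x int, txt text);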
16 | 17 | import java.util.Arrays; 18 | 19 | import static com.datastax.driver.core.querybuilder.QueryBuilder.*; 20 | 21 | public class QBuilder { 22 | 23 | public static void main(String[] args) { 24 | String server = System.getProperty("contactPoint", "127.0.0.1"); 25 | Cluster cluster = Cluster.builder().addContactPoint(server).build(); 26 | Session session = cluster.connect(); 27 | 28 | BuiltStatement selectAll = QueryBuilder.select().all().from("test", "test").limit(10); 29 | ResultSet rs = session.execute(selectAll); 30 | for (Row row: rs) { 31 | System.out.println(row); 32 | } 33 | 34 | BuiltStatement selectAll2 = QueryBuilder.select().from("test", "test").limit(10); 35 | rs = session.execute(selectAll2); 36 | for (Row row: rs) { 37 | System.out.println(row); 38 | } 39 | 40 | BuiltStatement selectOne = QueryBuilder.select().from("test") 41 | .where(QueryBuilder.eq("id", 1)).limit(1).allowFiltering() 42 | .perPartitionLimit(1).orderBy(desc("id")); 43 | rs = session.execute(selectOne); 44 | for (Row row: rs) { 45 | System.out.println(row); 46 | } 47 | 48 | BuiltStatement selectOne2 = QueryBuilder.select().from("test", "test") 49 | .where(eq("id", bindMarker())); 50 | PreparedStatement preparedStatement = session.prepare(selectOne2); 51 | session.execute(preparedStatement.bind(1)); 52 | 53 | BuiltStatement selectSome = QueryBuilder.select().from("test", "test") 54 | .where(in("id", 1, 2)).and(in("id", 1, 2)); 55 | rs = session.execute(selectSome); 56 | for (Row row: rs) { 57 | System.out.println(row); 58 | } 59 | 60 | Statement updateStatement = QueryBuilder.update("test").with(set("t", "test 1")) 61 | .and(set("x", 10)).where(eq("id", 1)); 62 | 63 | QueryBuilder.select().cast(column("id"), DataType.varchar()); 64 | 65 | BuiltStatement ttlAndWriteTime = QueryBuilder.select().column("id").column("t") 66 | .ttl("t").as("id_ttl").writeTime("t") 67 | .from("test", "test"); 68 | rs = session.execute(ttlAndWriteTime); 69 | for (Row row: rs) { 70 | System.out.println(row); 71 | } 72 | 73 | BuiltStatement sum = QueryBuilder.select().fcall("ttl", column("t")).as("sum_id") 74 | .from("test", "test"); 75 | rs = session.execute(sum); 76 | for (Row row: rs) { 77 | System.out.println(row); 78 | } 79 | 80 | BuiltStatement deleteStatemet = QueryBuilder.delete().from("test", "test") 81 | .where(eq("id", "1")).and(eq("txt", "test")); 82 | QueryBuilder.delete("t").from("test"); 83 | 84 | QueryBuilder.insertInto("test").values(Arrays.asList("id", "t"), Arrays.asList(4, "test 4")); 85 | QueryBuilder.insertInto("test").value("id", 4).ifNotExists(); 86 | 87 | QueryBuilder.insertInto("test").json("{\"id\":4, \"t\":\"test 4\"}").using(ttl(10)).and(timestamp(1000)); 88 | 89 | 90 | QueryBuilder.batch(selectAll2, selectOne2); 91 | 92 | 93 | session.close(); 94 | cluster.close(); 95 | } 96 | } 97 | -------------------------------------------------------------------------------- /spark-oss/src/main/scala/com/datastax/alexott/streaming/StructuredStreamingForEach.scala: -------------------------------------------------------------------------------- 1 | package com.datastax.alexott.streaming 2 | 3 | import java.util.concurrent.ConcurrentHashMap 4 | 5 | import com.datastax.driver.core.PreparedStatement 6 | import com.datastax.spark.connector.cql.CassandraConnector 7 | import org.apache.spark.SparkContext 8 | import org.apache.spark.sql.functions._ 9 | import org.apache.spark.sql.streaming.OutputMode 10 | import org.apache.spark.sql.types._ 11 | import org.apache.spark.sql.{ForeachWriter, SparkSession} 12 | 13 | // 
need to have table created with following CQL: 14 | // create table test.sttest(location text primary key, count int); 15 | 16 | class CassandraSinkForeach() extends ForeachWriter[org.apache.spark.sql.Row] { 17 | val insertStatement: String = "insert into test.sttest(location, count) values(?,?)" 18 | 19 | @transient 20 | var cassandraConnector: CassandraConnector = null; 21 | @transient 22 | var preparedStatements: ConcurrentHashMap[String, PreparedStatement] = null 23 | 24 | def open(partitionId: Long, version: Long): Boolean = { 25 | if (cassandraConnector == null) { 26 | cassandraConnector = CassandraConnector(SparkSession.builder.getOrCreate.sparkContext.getConf) 27 | } 28 | if (preparedStatements == null) { 29 | preparedStatements = new ConcurrentHashMap[String, PreparedStatement]() 30 | } 31 | true 32 | } 33 | 34 | def process(record: org.apache.spark.sql.Row) = { 35 | val pstmt = getStatement(insertStatement) 36 | val bstmt = pstmt.bind(record.getString(0), new java.lang.Integer(record.getLong(1).toInt)) 37 | cassandraConnector.withSessionDo(session => session.execute(bstmt)) 38 | } 39 | 40 | def close(errorOrNull: Throwable): Unit = { 41 | } 42 | 43 | def getStatement(stmt: String): PreparedStatement = { 44 | if (preparedStatements.containsKey(stmt)) { 45 | preparedStatements.get(stmt) 46 | } else { 47 | val pstmt = cassandraConnector.withSessionDo(session => session.prepare(stmt)) 48 | preparedStatements.putIfAbsent(stmt, pstmt) 49 | pstmt 50 | } 51 | } 52 | } 53 | 54 | object StructuredStreamingForEach { 55 | 56 | def main(args: Array[String]): Unit = { 57 | 58 | val sc = new SparkContext() 59 | val spark = SparkSession.builder().config(sc.getConf).getOrCreate() 60 | import spark.implicits._ 61 | 62 | 63 | val inputPath = if (args.isEmpty) { 64 | "webhdfs://127.0.0.1:5598/sttest/" 65 | } else { 66 | args(0) 67 | } 68 | 69 | // Input data are from DSE distribution, file: demos/weather_sensors/resources/daily.csv 70 | // stationid,metric,date,location,max,mean,median,min,percentile1,percentile5,percentile95,percentile99,total 71 | // LAE,barometricpressure,2014-01-01 00:00:00+0000,Nadzab,950,944,944,940,940,940,948,950,1360374 72 | 73 | val csvSchema = new StructType().add("stationid", StringType) 74 | .add("metric", StringType).add("date", TimestampType) 75 | .add("location", StringType) 76 | .add("max", IntegerType).add("mean", IntegerType) 77 | .add("median", IntegerType).add("min", IntegerType) 78 | .add("percentile1", IntegerType).add("percentile5", IntegerType) 79 | .add("percentile95", IntegerType).add("percentile99", IntegerType) 80 | .add("total", IntegerType) 81 | 82 | val streamingInputDF = spark.readStream 83 | .schema(csvSchema) 84 | .option("maxFilesPerTrigger", 1) 85 | .option("header", true) 86 | .csv(inputPath) 87 | 88 | val streamingCountsDF = streamingInputDF 89 | .where(col("location").isNotNull) 90 | .groupBy($"location") 91 | .count() 92 | 93 | val query = streamingCountsDF.writeStream 94 | .outputMode(OutputMode.Update) 95 | .option("checkpointLocation", "webhdfs://127.0.0.1:5598/checkpoint") 96 | .foreach(new CassandraSinkForeach()) 97 | .start() 98 | 99 | 100 | query.awaitTermination() 101 | 102 | } 103 | } 104 | -------------------------------------------------------------------------------- /driver-1.x/src/main/java/com/datastax/alexott/demos/ResultSetSerializer.java: -------------------------------------------------------------------------------- 1 | package com.datastax.alexott.demos; 2 | 3 | import java.io.IOException; 4 | import 
java.time.LocalTime; 5 | import java.time.format.DateTimeFormatter; 6 | import java.util.Base64; 7 | import java.util.List; 8 | 9 | import com.datastax.driver.core.ColumnDefinitions; 10 | import com.datastax.driver.core.DataType; 11 | import com.datastax.driver.core.ResultSet; 12 | import com.datastax.driver.core.Row; 13 | import com.fasterxml.jackson.core.JsonGenerator; 14 | import com.fasterxml.jackson.databind.SerializerProvider; 15 | import com.fasterxml.jackson.databind.ser.std.StdSerializer; 16 | 17 | @SuppressWarnings("serial") 18 | public class ResultSetSerializer extends StdSerializer { 19 | private static final Base64.Encoder B64_ENCODER = Base64.getEncoder(); 20 | public static final DateTimeFormatter TIMESTAMP_FORMATTER = DateTimeFormatter.ISO_INSTANT; 21 | public static final DateTimeFormatter TIME_FORMATTER = DateTimeFormatter.ISO_LOCAL_TIME; 22 | 23 | public ResultSetSerializer() { 24 | this(null); 25 | } 26 | 27 | public ResultSetSerializer(Class t) { 28 | super(t); 29 | } 30 | 31 | void handleCollection(Row row, int i, String name, DataType dt, JsonGenerator jgen) throws IOException { 32 | jgen.writeStringField(name, row.getObject(i).toString()); 33 | } 34 | 35 | // TODO: cache UDT definitions... 36 | 37 | void writeItem(Row row, int i, String name, DataType dt, JsonGenerator jgen) throws IOException { 38 | // TODO: use map lookup instead? 39 | // TODO: how to handle UDTs correctly? 40 | if (DataType.cboolean().equals(dt)) { 41 | jgen.writeBooleanField(name, row.getBool(i)); 42 | } else if (DataType.cint().equals(dt)) { 43 | jgen.writeNumberField(name, row.getInt(i)); 44 | } else if (DataType.cdouble().equals(dt)) { 45 | jgen.writeNumberField(name, row.getDouble(i)); 46 | } else if (DataType.cfloat().equals(dt)) { 47 | jgen.writeNumberField(name, row.getFloat(i)); 48 | } else if (DataType.counter().equals(dt) || DataType.bigint().equals(dt)) { 49 | jgen.writeNumberField(name, row.getLong(i)); 50 | } else if (DataType.smallint().equals(dt)) { 51 | jgen.writeNumberField(name, row.getShort(i)); 52 | } else if (DataType.tinyint().equals(dt)) { 53 | jgen.writeNumberField(name, row.getByte(i)); 54 | } else if (DataType.timestamp().equals(dt)) { 55 | String ts = TIMESTAMP_FORMATTER.format(row.getTimestamp(i).toInstant()); 56 | jgen.writeStringField(name, ts); 57 | } else if (DataType.date().equals(dt)) { 58 | jgen.writeStringField(name, row.getDate(i).toString()); 59 | } else if (DataType.time().equals(dt)) { 60 | LocalTime tm = LocalTime.ofNanoOfDay(row.getTime(i)); 61 | jgen.writeStringField(name, TIME_FORMATTER.format(tm)); 62 | } else if (DataType.blob().equals(dt)) { 63 | jgen.writeStringField(name, B64_ENCODER.encodeToString(row.getBytes(i).array())); 64 | } else if (dt.isCollection()) { 65 | handleCollection(row, i, name, dt, jgen); 66 | } else { 67 | jgen.writeStringField(name, row.getObject(i).toString()); 68 | } 69 | } 70 | 71 | @Override 72 | public void serialize(ResultSet rs, JsonGenerator jgen, SerializerProvider provider) throws IOException { 73 | ColumnDefinitions cd = rs.getColumnDefinitions(); 74 | List lcd = cd.asList(); 75 | int lsize = lcd.size(); 76 | String[] names = new String[lsize]; 77 | DataType[] types = new DataType[lsize]; 78 | for (int i = 0; i < lsize; i++) { 79 | ColumnDefinitions.Definition cdef = lcd.get(i); 80 | names[i] = cdef.getName(); 81 | types[i] = cdef.getType(); 82 | } 83 | jgen.writeStartArray(); 84 | for (Row row : rs) { 85 | jgen.writeStartObject(); 86 | for (int i = 0; i < lsize; i++) { 87 | String name = names[i]; 88 | if 
(row.isNull(i)) { 89 | jgen.writeNullField(name); 90 | } else { 91 | writeItem(row, i, name, types[i], jgen); 92 | } 93 | } 94 | jgen.writeEndObject(); 95 | } 96 | jgen.writeEndArray(); 97 | } 98 | 99 | } 100 | -------------------------------------------------------------------------------- /spark-dse/pom.xml: -------------------------------------------------------------------------------- 1 | 3 | 4.0.0 4 | 5 | com.datastax.alexott.demos 6 | dse-plg-spark 7 | 0.0.1 8 | jar 9 | 10 | dse-playground-spark 11 | http://maven.apache.org 12 | 13 | 14 | UTF-8 15 | 1.8 16 | 2.11.8 17 | 2.11 18 | 2.2.3 19 | 6.7.8 20 | 21 | 22 | 23 | 24 | com.datastax.dse 25 | dse-spark-dependencies 26 | ${dse.spark.version} 27 | provided 28 | 29 | 30 | org.apache.spark 31 | spark-sql_${spark.scala.version} 32 | ${spark.version} 33 | provided 34 | 35 | 36 | org.apache.spark 37 | spark-sql-kafka-0-10_${spark.scala.version} 38 | ${spark.version} 39 | 40 | 41 | org.apache.spark 42 | spark-core_${spark.scala.version} 43 | ${spark.version} 44 | provided 45 | 46 | 47 | org.scala-lang.modules 48 | scala-java8-compat_${spark.scala.version} 49 | 0.9.0 50 | 51 | 52 | 53 | 54 | 55 | DataStax-Repo 56 | https://repo.datastax.com/public-repos/ 57 | 58 | 59 | 60 | 61 | 62 | 63 | maven-compiler-plugin 64 | 3.8.1 65 | 66 | ${java.version} 67 | ${java.version} 68 | true 69 | 70 | 71 | 72 | net.alchim31.maven 73 | scala-maven-plugin 74 | 4.3.0 75 | 76 | 77 | process-sources 78 | 79 | compile 80 | testCompile 81 | 82 | 83 | ${scala.version} 84 | 85 | 88 | 89 | 90 | 91 | 92 | org.apache.maven.plugins 93 | maven-assembly-plugin 94 | 3.2.0 95 | 96 | 97 | jar-with-dependencies 98 | 99 | 100 | 101 | 102 | package 103 | 104 | single 105 | 106 | 107 | 108 | 109 | 110 | 111 | 112 | 113 | -------------------------------------------------------------------------------- /cassandra-join-spark/src/main/scala/com/datastax/alexott/demos/streaming/StockTickersJoinRDD.scala: -------------------------------------------------------------------------------- 1 | package com.datastax.alexott.demos.streaming 2 | 3 | import java.time.{Instant, ZoneId} 4 | import java.time.format.DateTimeFormatter 5 | 6 | import org.apache.spark.SparkContext 7 | import org.apache.spark.streaming.{Seconds, StreamingContext} 8 | import org.apache.kafka.common.serialization.StringDeserializer 9 | import org.apache.spark.streaming.kafka010.KafkaUtils 10 | import org.apache.spark.streaming.kafka010.LocationStrategies.PreferConsistent 11 | import org.apache.spark.streaming.kafka010.ConsumerStrategies.Subscribe 12 | import com.datastax.spark.connector._ 13 | 14 | import scala.util.parsing.json.JSON 15 | 16 | case class StockData(symbol: String, timestamp: Instant, price: Double) extends Serializable 17 | case class StockInfo(symbol: String, exchange: String, name: String, industry: String, 18 | base_price: Double) extends Serializable 19 | case class JoinedData(symbol: String, exchange: String, name: String, industry: String, 20 | base_price: Double, timestamp: Instant, price: Double) extends Serializable 21 | 22 | object StockTickersJoinRDD { 23 | 24 | val formatter = DateTimeFormatter.ofPattern("yyyy-MM-dd'T'HH:mm:ss.SSS") 25 | .withZone(ZoneId.of("Europe/Berlin")) 26 | 27 | /* 28 | Very naive parsing of JSON 29 | */ 30 | def parseJson(input: String): Seq[StockData] ={ 31 | val result = JSON.parseFull(input) 32 | result match { 33 | case Some(map: Map[String, Any]) => 34 | Seq(StockData(map.get("symbol").get.asInstanceOf[String], 35 | 
Instant.from(formatter.parse(map.get("datetime").get.asInstanceOf[String])), 36 | map.get("value").get.asInstanceOf[Double])) 37 | case None => { 38 | println("Parsing failed") 39 | Seq() 40 | } 41 | case other => { 42 | println("Unknown data structure: " + other) 43 | Seq() 44 | } 45 | } 46 | 47 | } 48 | 49 | def main(args: Array[String]): Unit = { 50 | if (args.length < 2) { 51 | println("Usage: StockTickersJoinDataFrames kafka-servers topic-name") 52 | System.exit(1) 53 | } 54 | val kafkaServes = args(0) 55 | val topicName = args(1) 56 | val sc = new SparkContext() 57 | val ssc = new StreamingContext(sc, Seconds(10)) 58 | 59 | val kafkaParams = Map[String, Object]( 60 | "bootstrap.servers" -> kafkaServes, 61 | "key.deserializer" -> classOf[StringDeserializer], 62 | "value.deserializer" -> classOf[StringDeserializer], 63 | "group.id" -> "StockTickersJoinRDD", 64 | "auto.offset.reset" -> "latest", 65 | "enable.auto.commit" -> (true: java.lang.Boolean) 66 | ) 67 | val topics = Array(topicName) 68 | 69 | val stream = KafkaUtils.createDirectStream[String, String]( 70 | ssc, PreferConsistent, Subscribe[String, String](topics, kafkaParams) 71 | ) 72 | 73 | val parsedData = stream.flatMap(x => parseJson(x.value())) 74 | val transformedData = parsedData.transform(rdd => { 75 | // we're using leftJoinWithCassandraTable to be able to find data for which we don't have information 76 | // in Cassandra - in this case, the second part of tuple will be empty 77 | val joined = rdd.leftJoinWithCassandraTable[StockInfo]("test", "stock_info") 78 | joined.persist() 79 | val missingInfoCount = joined.filter(x => x._2.isEmpty).count() 80 | val stocksWithInfo = joined.filter(x => x._2.isDefined) 81 | val existingInfoCount = stocksWithInfo.count() 82 | println(s"There are $missingInfoCount stock tickers without information in Cassandra") 83 | println(s"There are $existingInfoCount stock tickers with information in Cassandra") 84 | val combined = stocksWithInfo.map(x => { 85 | val i = x._2.get 86 | val d = x._1 87 | JoinedData(i.symbol, i.exchange, i.name, i.industry, i.base_price, d.timestamp, d.price) 88 | }) 89 | joined.unpersist() 90 | combined 91 | }) 92 | transformedData.foreachRDD(rdd => rdd.foreach(println)) 93 | 94 | ssc.start() 95 | ssc.awaitTermination() 96 | } 97 | } 98 | -------------------------------------------------------------------------------- /spark-oss/pom.xml: -------------------------------------------------------------------------------- 1 | 3 | 4.0.0 4 | 5 | com.datastax.alexott.demos 6 | oss-plg-spark 7 | 0.0.1 8 | jar 9 | 10 | oss-playground-spark 11 | http://maven.apache.org 12 | 13 | 14 | UTF-8 15 | 2.11.12 16 | 2.4.5 17 | 2.11 18 | 2.4.3 19 | 1.8 20 | 21 | 22 | 23 | 24 | com.datastax.spark 25 | spark-cassandra-connector_${spark.scala.version} 26 | ${scc.version} 27 | 28 | 29 | org.apache.spark 30 | spark-sql_${spark.scala.version} 31 | ${spark.version} 32 | provided 33 | 34 | 35 | org.apache.spark 36 | spark-mllib_${spark.scala.version} 37 | ${spark.version} 38 | provided 39 | 40 | 41 | org.apache.spark 42 | spark-sql-kafka-0-10_${spark.scala.version} 43 | ${spark.version} 44 | 45 | 46 | org.apache.spark 47 | spark-core_${spark.scala.version} 48 | ${spark.version} 49 | provided 50 | 51 | 52 | org.scala-lang.modules 53 | scala-java8-compat_${spark.scala.version} 54 | 0.9.0 55 | 56 | 57 | 58 | 59 | 60 | 61 | maven-compiler-plugin 62 | 3.8.1 63 | 64 | ${java.version} 65 | ${java.version} 66 | true 67 | 68 | 69 | 70 | net.alchim31.maven 71 | scala-maven-plugin 72 | 4.3.0 73 | 74 | 75 | 
process-sources 76 | 77 | compile 78 | testCompile 79 | 80 | 81 | ${scala.version} 82 | 83 | 86 | 87 | 88 | 89 | 90 | org.apache.maven.plugins 91 | maven-assembly-plugin 92 | 3.2.0 93 | 94 | 95 | jar-with-dependencies 96 | 97 | 98 | 99 | 100 | package 101 | 102 | single 103 | 104 | 105 | 106 | 107 | 108 | 109 | 110 | 111 | -------------------------------------------------------------------------------- /driver-1.x/src/main/java/com/datastax/alexott/demos/TestBatches.java: -------------------------------------------------------------------------------- 1 | package com.datastax.alexott.demos; 2 | 3 | import com.datastax.driver.core.BatchStatement; 4 | import com.datastax.driver.core.BoundStatement; 5 | import com.datastax.driver.core.Cluster; 6 | import com.datastax.driver.core.CodecRegistry; 7 | import com.datastax.driver.core.PreparedStatement; 8 | import com.datastax.driver.core.ProtocolVersion; 9 | import com.datastax.driver.core.Session; 10 | import com.datastax.driver.core.Statement; 11 | import com.datastax.driver.core.exceptions.QueryExecutionException; 12 | import com.datastax.driver.mapping.Mapper; 13 | import com.datastax.driver.mapping.MappingManager; 14 | import org.apache.ivy.util.StringUtils; 15 | 16 | // create table test.btest (id int, c1 int, t text, primary key (id, c1)); 17 | 18 | public class TestBatches { 19 | 20 | private static final int MAX_BATCH_SIZE = 2 * 1024 * 1024; // 2Mb 21 | 22 | public static void main(String[] args) { 23 | String server = System.getProperty("contactPoint", "127.0.0.1"); 24 | try(Cluster cluster = Cluster.builder().addContactPoint(server).build(); 25 | Session session = cluster.connect()) { 26 | 27 | PreparedStatement pStmt = session.prepare("insert into test.btest(id, c1, t) values(?, ?, ?);"); 28 | String str = StringUtils.repeat("x", 1024); 29 | { // unlogged batch into single partition - it shouldn't fail 30 | BatchStatement batchStatement = new BatchStatement(); 31 | for (int j = 0; j < 5000; j++) { 32 | BoundStatement boundStatement = pStmt.bind(1, j, str); 33 | batchStatement.add(boundStatement); 34 | } 35 | try { 36 | session.execute(batchStatement); 37 | System.out.println("Single-partition batch executed"); 38 | } catch (Exception ex) { 39 | System.out.println("Got exception for single-partition batch: " + ex.getMessage()); 40 | } 41 | } 42 | { // unlogged batch into single partition - it should fail with big mutation error 43 | BatchStatement batchStatement = new BatchStatement(); 44 | for (int j = 0; j < 50000; j++) { 45 | BoundStatement boundStatement = pStmt.bind(1, j, str); 46 | batchStatement.add(boundStatement); 47 | } 48 | try { 49 | session.execute(batchStatement); 50 | System.out.println("Big Single-partition batch executed"); 51 | } catch (Exception ex) { 52 | System.out.println("Got exception for big single-partition batch: " + ex.getMessage()); 53 | } 54 | } 55 | { // unlogged batch into single partition - it should fail 56 | BatchStatement batchStatement = new BatchStatement(); 57 | for (int j = 0; j < 5000; j++) { 58 | BoundStatement boundStatement = pStmt.bind(j, j, str); 59 | batchStatement.add(boundStatement); 60 | } 61 | try { 62 | session.execute(batchStatement); 63 | System.out.println("Multi-partition batch executed"); 64 | } catch (Exception ex) { 65 | System.out.println("Got exception for multi-partition batch: " + ex.getMessage()); 66 | } 67 | } 68 | 69 | { // unlogged batch into single partition - it shouldn't fail 70 | BatchStatement batchStatement = new BatchStatement(); 71 | ProtocolVersion 
protocolVersion = cluster.getConfiguration().getProtocolOptions().getProtocolVersion(); 72 | CodecRegistry codecRegistry = cluster.getConfiguration().getCodecRegistry(); 73 | int currentBatchSize = 0; 74 | for (int j = 0; j < 5000; j++) { 75 | BoundStatement boundStatement = pStmt.bind(j, j, str); 76 | int stmtSize = boundStatement.requestSizeInBytes(protocolVersion, codecRegistry); 77 | if ((currentBatchSize + stmtSize) > MAX_BATCH_SIZE) { 78 | session.execute(batchStatement); 79 | batchStatement = new BatchStatement(); 80 | currentBatchSize = 0; 81 | } 82 | batchStatement.add(boundStatement); 83 | currentBatchSize += stmtSize; 84 | } 85 | if (batchStatement.size() > 0) { 86 | session.execute(batchStatement); 87 | } 88 | } 89 | 90 | 91 | } 92 | } 93 | 94 | } 95 | -------------------------------------------------------------------------------- /driver-1.x/src/main/java/com/datastax/alexott/demos/AlterTableWithChecks.java: -------------------------------------------------------------------------------- 1 | package com.datastax.alexott.demos; 2 | 3 | import com.datastax.driver.core.Cluster; 4 | import com.datastax.driver.core.Metadata; 5 | import com.datastax.driver.core.ResultSet; 6 | import com.datastax.driver.core.Row; 7 | import com.datastax.driver.core.Session; 8 | import com.datastax.driver.core.Statement; 9 | import com.datastax.driver.core.exceptions.InvalidQueryException; 10 | import com.datastax.driver.core.policies.LoadBalancingPolicy; 11 | import com.datastax.driver.core.policies.RoundRobinPolicy; 12 | import com.datastax.driver.core.policies.WhiteListPolicy; 13 | import org.apache.commons.lang3.StringUtils; 14 | 15 | import java.net.InetSocketAddress; 16 | import java.util.Collections; 17 | import java.util.regex.Matcher; 18 | import java.util.regex.Pattern; 19 | 20 | public class AlterTableWithChecks { 21 | static final int DSE_PORT = 9042; 22 | 23 | private static Pattern DDL_PATTERN = Pattern.compile("^(create|alter|drop)\\s+.*", 24 | Pattern.CASE_INSENSITIVE); 25 | private static Pattern COL_EXISTS = Pattern.compile("Invalid column name .+ because it conflicts with an existing column", 26 | Pattern.CASE_INSENSITIVE); 27 | private static Pattern COL_DOESNT_EXIST = Pattern.compile("Column .+ was not found in table ", 28 | Pattern.CASE_INSENSITIVE); 29 | private static Pattern COL_DOESNT_EXIST_RENAME = Pattern.compile("Cannot rename unknown column .+ in ", 30 | Pattern.CASE_INSENSITIVE); 31 | private static Pattern COL_EXISTS_RENAME = Pattern.compile("Cannot rename column .+ to .+ in keyspace .+; another column of that name already exist", 32 | Pattern.CASE_INSENSITIVE); 33 | 34 | public static ResultSet execute(Session session, String stmt) throws InterruptedException { 35 | Matcher matcher = DDL_PATTERN.matcher(stmt); 36 | if (matcher.matches()) { 37 | System.out.println("Executing DDL: " + stmt); 38 | Metadata metadata = session.getCluster().getMetadata(); 39 | ResultSet rs = null; 40 | try { 41 | rs = session.execute(stmt); 42 | } catch (InvalidQueryException ex) { 43 | String msg = ex.getMessage(); 44 | // THIS IS NOT RELIABLE! 
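// Matching on driver error messages, as done just below, is fragile: the texts in the patterns
// above come from particular Cassandra/DSE releases and can change without notice, so an
// unrelated failure could be silently skipped. A more robust alternative (a sketch, not part of
// the original demo) is to consult the schema metadata that is already available here before
// issuing the DDL, roughly:
//   TableMetadata tm = metadata.getKeyspace("whtest").getTable("whtest");
//   boolean columnExists = tm != null && tm.getColumn("abcd") != null;
// and only execute the ALTER when the check shows it is actually needed. The string matching is
// kept below to demonstrate the error-driven approach.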
45 | if (!(COL_EXISTS.matcher(msg).find() || 46 | COL_DOESNT_EXIST.matcher(msg).find() || 47 | COL_DOESNT_EXIST_RENAME.matcher(msg).find() || 48 | COL_EXISTS_RENAME.matcher(msg).find())) { 49 | throw ex; 50 | } 51 | System.out.println("Skipping error: " + msg + ", for query: " + stmt); 52 | } 53 | if (rs != null && !rs.getExecutionInfo().isSchemaInAgreement()) { 54 | while (!metadata.checkSchemaAgreement()) { 55 | System.out.println("Schema isn't in agreement, sleep 1 second..."); 56 | Thread.sleep(1000); 57 | } 58 | } 59 | return rs; 60 | } else { 61 | return session.execute(stmt); 62 | } 63 | } 64 | 65 | public static void main(String[] args) throws InterruptedException { 66 | String server = System.getProperty("contactPoint", "127.0.0.1"); 67 | 68 | try(Cluster cluster = Cluster.builder().addContactPoint(server).build(); 69 | Session session = cluster.connect()) { 70 | 71 | String[] commands = {"drop keyspace if exists whtest;", 72 | "create keyspace whtest WITH replication = {'class': 'SimpleStrategy', 'replication_factor': 1};", 73 | "create table whtest.whtest(id int primary key, t text);", 74 | "alter table whtest.whtest add t int;", 75 | "alter table whtest.whtest drop abcd;", 76 | "alter table whtest.whtest rename abcd to abc;", 77 | "alter table whtest.whtest rename id to t;", 78 | "create table whtest.whtest2(id int primary key, t text);"}; 79 | 80 | Metadata metadata = cluster.getMetadata(); 81 | for (int i = 0; i < commands.length; i++) { 82 | System.out.println("Executing '" + commands[i] + "'"); 83 | execute(session, commands[i]); 84 | } 85 | // just to be sure, and to show that it could be done via Metadata as well 86 | for (int i = 0; i < 5; i++) { 87 | session.execute(String.format("insert into whtest.whtest(id, t) values(%d, 'test %d');", i, i)); 88 | } 89 | 90 | ResultSet rs = session.execute("select count(*) from whtest.whtest;"); 91 | Row row = rs.one(); 92 | System.out.println("There are " + row.getLong(0) + " rows in the whtest table..."); 93 | 94 | } 95 | } 96 | 97 | } 98 | -------------------------------------------------------------------------------- /driver-4.x/src/main/java/com/datastax/alexott/demos/DCDetectingLBPolicy.java: -------------------------------------------------------------------------------- 1 | package com.datastax.alexott.demos; 2 | 3 | import com.datastax.oss.driver.api.core.CqlSession; 4 | import com.datastax.oss.driver.api.core.config.DefaultDriverOption; 5 | import com.datastax.oss.driver.api.core.config.DriverConfigLoader; 6 | import com.datastax.oss.driver.api.core.config.ProgrammaticDriverConfigLoaderBuilder; 7 | import com.datastax.oss.driver.api.core.context.DriverContext; 8 | import com.datastax.oss.driver.api.core.loadbalancing.LoadBalancingPolicy; 9 | import com.datastax.oss.driver.api.core.metadata.Node; 10 | import com.datastax.oss.driver.api.core.session.Request; 11 | import com.datastax.oss.driver.api.core.session.Session; 12 | import edu.umd.cs.findbugs.annotations.NonNull; 13 | import edu.umd.cs.findbugs.annotations.Nullable; 14 | 15 | import java.net.InetSocketAddress; 16 | import java.util.Arrays; 17 | import java.util.HashMap; 18 | import java.util.Map; 19 | import java.util.Queue; 20 | import java.util.Set; 21 | import java.util.TreeSet; 22 | import java.util.UUID; 23 | import java.util.stream.Collectors; 24 | 25 | /** 26 | * This is just an exercise, before discovering of the JAVA-2459 that adds DcInferringLoadBalancingPolicy - use it instead 27 | */ 28 | public class DCDetectingLBPolicy implements LoadBalancingPolicy { 
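// How this works: the driver calls init() with the full node map once the session is being
// initialized; the policy only keeps a reference to that map, and getDCName() then resolves each
// contact point (by hostname or IP address) to the data center reported in the node metadata,
// failing if the contact points span several DCs or none can be matched. Note that
// newQueryPlan() returns null, so this class is only usable for DC detection via detectDcName()
// below, never as a real load-balancing policy for executing queries.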
29 | Map nodesMap; 30 | 31 | public DCDetectingLBPolicy(@NonNull DriverContext context, @NonNull String profileName) { 32 | } 33 | 34 | @Override 35 | public void init(@NonNull Map map, @NonNull DistanceReporter distanceReporter) { 36 | nodesMap = map; 37 | } 38 | 39 | public String getDCName(String... contactPoints) { 40 | if (nodesMap == null) { 41 | throw new RuntimeException("DCDetectingLBPolicy wasn't initialized yet!"); 42 | } 43 | Map nodesToDcs = new HashMap<>(nodesMap.size()); 44 | for (Node node: nodesMap.values()) { 45 | if (node.getBroadcastRpcAddress().isPresent()) { 46 | InetSocketAddress address = node.getBroadcastRpcAddress().get(); 47 | nodesToDcs.put(address.getHostString(), node.getDatacenter()); 48 | nodesToDcs.put(address.getAddress().getHostAddress(), node.getDatacenter()); 49 | } else if (node.getBroadcastAddress().isPresent()) { 50 | InetSocketAddress address = node.getBroadcastAddress().get(); 51 | nodesToDcs.put(address.getHostString(), node.getDatacenter()); 52 | nodesToDcs.put(address.getAddress().getHostAddress(), node.getDatacenter()); 53 | } else if (node.getListenAddress().isPresent()) { 54 | InetSocketAddress address = node.getListenAddress().get(); 55 | nodesToDcs.put(address.getHostString(), node.getDatacenter()); 56 | nodesToDcs.put(address.getAddress().getHostAddress(), node.getDatacenter()); 57 | } else { 58 | throw new RuntimeException("Can't get hostname or IP for a node " + node.getHostId()); 59 | } 60 | } 61 | 62 | Set dcs = new TreeSet<>(); 63 | for (String cp: contactPoints) { 64 | String dc = nodesToDcs.get(cp); 65 | if (dc != null) { 66 | dcs.add(dc); 67 | } 68 | } 69 | if (dcs.size() > 1) { 70 | throw new RuntimeException("Contact points belong to different DCs: " + dcs.stream().collect(Collectors.joining(","))); 71 | } 72 | if (dcs.isEmpty()) { 73 | throw new RuntimeException("Can't detect DC from the contact points provided"); 74 | } 75 | 76 | return dcs.iterator().next(); 77 | } 78 | 79 | public String getDCName(final String contactPoints) { 80 | return getDCName(contactPoints.split(",")); 81 | } 82 | 83 | @NonNull 84 | @Override 85 | public Queue newQueryPlan(@Nullable Request request, @Nullable Session session) { 86 | return null; 87 | } 88 | 89 | @Override 90 | public void onAdd(@NonNull Node node) { 91 | } 92 | 93 | @Override 94 | public void onUp(@NonNull Node node) { 95 | } 96 | 97 | @Override 98 | public void onDown(@NonNull Node node) { 99 | } 100 | 101 | @Override 102 | public void onRemove(@NonNull Node node) { 103 | } 104 | 105 | @Override 106 | public void close() { 107 | } 108 | 109 | public static String detectDcName(String... 
contactPoints) { 110 | ProgrammaticDriverConfigLoaderBuilder configBuilder = DriverConfigLoader.programmaticBuilder(); 111 | configBuilder.withClass(DefaultDriverOption.LOAD_BALANCING_POLICY_CLASS, DCDetectingLBPolicy.class); 112 | 113 | DriverConfigLoader loader = configBuilder.endProfile().build(); 114 | try(CqlSession session = CqlSession.builder() 115 | .addContactPoints(Arrays.stream(contactPoints) 116 | .map(x -> new InetSocketAddress(x, 9042)).collect(Collectors.toList())) 117 | .withConfigLoader(loader) 118 | .build()) { 119 | 120 | DCDetectingLBPolicy lbp = (DCDetectingLBPolicy) session.getContext().getLoadBalancingPolicies().values().iterator().next(); 121 | 122 | return lbp.getDCName(contactPoints); 123 | } 124 | } 125 | 126 | public static String detectDcName(String contactPoints) { 127 | return detectDcName(contactPoints.split(",")); 128 | } 129 | } 130 | -------------------------------------------------------------------------------- /cassandra-join-spark/pom.xml: -------------------------------------------------------------------------------- 1 | 3 | 4.0.0 4 | 5 | com.datastax.alexott.demos 6 | cassandra-join-spark 7 | 0.0.1 8 | jar 9 | 10 | scc-2.5-joins 11 | http://maven.apache.org 12 | 13 | 14 | UTF-8 15 | 2.11.12 16 | 2.4.6 17 | 2.11 18 | 2.5.1 19 | 1.8 20 | 2.3.0 21 | 22 | 23 | 24 | 25 | com.datastax.spark 26 | spark-cassandra-connector_${spark.scala.version} 27 | ${scc.version} 28 | 29 | 30 | org.apache.spark 31 | spark-sql_${spark.scala.version} 32 | ${spark.version} 33 | provided 34 | 35 | 36 | org.apache.spark 37 | spark-sql-kafka-0-10_${spark.scala.version} 38 | ${spark.version} 39 | 40 | 41 | org.apache.spark 42 | spark-core_${spark.scala.version} 43 | ${spark.version} 44 | provided 45 | 46 | 47 | org.apache.spark 48 | spark-streaming_${spark.scala.version} 49 | ${spark.version} 50 | provided 51 | 52 | 53 | org.apache.spark 54 | spark-streaming-kafka-0-10_${spark.scala.version} 55 | ${spark.version} 56 | 57 | 58 | org.scala-lang.modules 59 | scala-java8-compat_${spark.scala.version} 60 | 0.9.0 61 | 62 | 63 | 64 | org.apache.kafka 65 | kafka-clients 66 | ${kafka.version} 67 | 68 | 69 | 70 | org.apache.kafka 71 | connect-json 72 | ${kafka.version} 73 | 74 | 75 | org.apache.logging.log4j 76 | log4j-api 77 | 2.7 78 | 79 | 80 | org.apache.logging.log4j 81 | log4j-core 82 | 2.7 83 | 84 | 85 | org.apache.logging.log4j 86 | log4j-slf4j-impl 87 | 2.7 88 | 89 | 90 | com.opencsv 91 | opencsv 92 | 3.10 93 | 94 | 95 | 96 | 97 | 98 | 99 | maven-compiler-plugin 100 | 3.8.1 101 | 102 | ${java.version} 103 | ${java.version} 104 | true 105 | 106 | 107 | 108 | net.alchim31.maven 109 | scala-maven-plugin 110 | 4.3.0 111 | 112 | 113 | process-sources 114 | 115 | compile 116 | testCompile 117 | 118 | 119 | ${scala.version} 120 | 121 | 122 | 123 | 124 | 125 | org.apache.maven.plugins 126 | maven-assembly-plugin 127 | 3.2.0 128 | 129 | 130 | jar-with-dependencies 131 | 132 | 133 | 134 | 135 | package 136 | 137 | single 138 | 139 | 140 | 141 | 142 | 143 | 144 | 145 | 146 | -------------------------------------------------------------------------------- /driver-1.x/src/main/scala/com/datastax/alexott/ObjMapperTest.scala: -------------------------------------------------------------------------------- 1 | import java.time.Instant 2 | 3 | import com.datastax.driver.core.Cluster 4 | import com.datastax.driver.mapping.{MappingManager, Result} 5 | import com.datastax.driver.mapping.annotations.{Accessor, ClusteringColumn, Column, Field, Param, PartitionKey, Query, Table, UDT} 6 | 7 | import 
scala.annotation.meta.field 8 | import scala.collection.JavaConverters 9 | 10 | // tables and data definition... 11 | // create table test.scala_test(id int primary key, t text, tm timestamp); 12 | // insert into test.scala_test(id,t,tm) values (1,'t1','2018-11-07T00:00:00Z') ; 13 | 14 | // create table test.scala_test_complex(p1 int, p2 int, c1 int, c2 int, t text, tm timestamp, primary key ((p1,p2), c1, c2)); 15 | // insert into test.scala_test_complex(p1, p2, c1, c2, t,tm) values (0,1,0,1,'t1','2018-11-07T00:00:00Z') ; 16 | // insert into test.scala_test_complex(p1, p2, c1, c2, t,tm) values (0,1,1,1,'t1','2018-11-08T10:00:00Z') ; 17 | 18 | // create type test.scala_udt(id int, t text); 19 | // create table test.scala_test_udt(id int primary key, udt frozen); 20 | // insert into test.scala_test_udt (id, udt) values (1, {id: 1, t: 't1'}); 21 | 22 | 23 | @Table(name = "scala_test") 24 | class TableObject { 25 | @PartitionKey 26 | var id: Integer = 0; 27 | var t: String = ""; 28 | var tm: java.util.Date = new java.util.Date(); 29 | 30 | def this(idval: Integer, tval: String, tmval: java.util.Date) = { 31 | this(); 32 | this.id = idval; 33 | this.t = tval; 34 | this.tm = tmval; 35 | } 36 | 37 | override def toString: String = { 38 | "{id=" + id + ", t='" + t + "', tm='" + tm + "'}" 39 | } 40 | } 41 | 42 | @Table(name = "scala_test") 43 | class TableObjectImmutable(@PartitionKey id: Integer, t: String, tm: java.util.Date) { 44 | override def toString: String = { 45 | "{id=" + id + ", t='" + t + "', tm='" + tm + "'}" 46 | } 47 | } 48 | 49 | 50 | @Table(name = "scala_test") 51 | case class TableObjectCaseClass(@(PartitionKey @field) id: Integer, t: String, tm: java.util.Date) { 52 | def this() { 53 | this(0, "", new java.util.Date()) 54 | } 55 | } 56 | 57 | // case class with renamed field 58 | @Table(name = "scala_test") 59 | case class TableObjectCaseClassRenamed(@(PartitionKey @field) id: Integer, 60 | @(Column @field)(name = "t") text: String, tm: java.util.Date) { 61 | def this() { 62 | this(0, "", new java.util.Date()) 63 | } 64 | } 65 | 66 | @Table(name = "scala_test_complex", keyspace = "test") 67 | case class TableObjectCaseClassClustered(@(PartitionKey @field)(value = 0) p1: Integer, 68 | @(PartitionKey @field)(value = 1) p2: Integer, 69 | @(ClusteringColumn @field)(value = 0) c1: java.lang.Integer, 70 | @(ClusteringColumn @field)(value = 1) c2: java.lang.Integer, 71 | t: String, 72 | tm: java.util.Date) { 73 | def this() { 74 | this(0, 1, 0, 1, "", new java.util.Date()) 75 | } 76 | } 77 | 78 | @UDT(name = "scala_udt") 79 | case class UdtCaseClass(id: Integer, @(Field @field)(name = "t") text: String) { 80 | def this() { 81 | this(0, "") 82 | } 83 | } 84 | 85 | @Table(name = "scala_test_udt") 86 | case class TableObjectCaseClassWithUDT(@(PartitionKey @field) id: Integer, 87 | udt: UdtCaseClass) { 88 | def this() { 89 | this(0, UdtCaseClass(0, "")) 90 | } 91 | } 92 | 93 | 94 | @Accessor 95 | trait ObjectAccessor { 96 | @Query("SELECT * from scala_test_complex where p1 = :p1 and p2 = :p2") 97 | def getByPartKey(@Param p1: Integer, @Param p2: Integer): Result[TableObjectCaseClassClustered] 98 | 99 | @Query("DELETE from scala_test_complex where p1 = :p1 and p2 = :p2") 100 | def deleteByPartKey(@Param p1: Integer, @Param p2: Integer) 101 | } 102 | 103 | 104 | object ObjMapperTest { 105 | 106 | def main(args: Array[String]): Unit = { 107 | 108 | val cluster = Cluster.builder().addContactPoint("127.0.0.1").build(); 109 | val session = cluster.connect("test") 110 | val manager = new 
MappingManager(session) 111 | 112 | val mapperClass = manager.mapper(classOf[TableObject]) 113 | val objClass = mapperClass.get(new Integer(1)) 114 | println("Obj(1)='" + objClass + "'") 115 | mapperClass.save(new TableObject(2, "t2", java.util.Date.from(Instant.now()))) 116 | val objClass2 = mapperClass.get(new Integer(2)) 117 | println("Obj(2)='" + objClass2 + "'") 118 | mapperClass.delete(objClass2) 119 | 120 | val mapperClassImmutable = manager.mapper(classOf[TableObject]) 121 | val objClassImm = mapperClassImmutable.get(new Integer(1)) 122 | println("ObjImm(1)='" + objClassImm + "'") 123 | 124 | val mapperCaseClass = manager.mapper(classOf[TableObjectCaseClass]) 125 | val objCaseClass = mapperCaseClass.get(new Integer(1)) 126 | println("Obj(1)='" + objCaseClass + "'") 127 | 128 | val mapperCaseClassRenamed = manager.mapper(classOf[TableObjectCaseClassRenamed]) 129 | val objCaseClassRenamed = mapperCaseClassRenamed.get(new Integer(1)) 130 | println("Obj(1)='" + objCaseClassRenamed + "'") 131 | 132 | mapperCaseClassRenamed.save(TableObjectCaseClassRenamed(2, "test 2", new java.util.Date())) 133 | 134 | val mapperCaseClassClustered = manager.mapper(classOf[TableObjectCaseClassClustered]) 135 | val objCaseClass2 = mapperCaseClassClustered.get(new Integer(0), new Integer(1), 136 | new Integer(0), new Integer(1)) 137 | println("Obj2((0,1),0,1)='" + objCaseClass2 + "'") 138 | 139 | val mapperForUdtCaseClass = manager.mapper(classOf[TableObjectCaseClassWithUDT]) 140 | val objectCaseClassWithUDT = mapperForUdtCaseClass.get(new Integer(1)) 141 | println("ObjWithUdt(1)='" + objectCaseClassWithUDT + "'") 142 | 143 | val accessor = manager.createAccessor(classOf[ObjectAccessor]) 144 | val rs = accessor.getByPartKey(0, 1) 145 | for (r <- JavaConverters.asScalaIteratorConverter(rs.iterator()).asScala) { 146 | println("r=" + r) 147 | } 148 | 149 | accessor.deleteByPartKey(0,0) 150 | 151 | session.close() 152 | cluster.close() 153 | 154 | } 155 | 156 | } --------------------------------------------------------------------------------