├── exsto ├── .gitignore ├── defaults.cfg ├── adhoc.sh ├── regex.py ├── filter.py ├── etl.py ├── parse.py ├── test │ ├── simple.json │ ├── forward.json │ ├── replied.json │ └── ipad.json ├── scrape.py ├── dbc │ ├── 0.prep_data.scala │ ├── 4.SocialGraph.scala │ ├── 3.Meetups.py │ ├── 2.TextRank.scala │ └── 1.ETL_python.py ├── graph2.scala ├── adhoc.py ├── graph1.scala ├── README.md ├── TextRank.py ├── ETL.md └── exsto.py ├── data └── join │ ├── clk.tsv │ └── reg.tsv ├── README.md ├── src └── main │ ├── python │ ├── nwc.py │ └── snwc.py │ └── scala │ └── com │ └── databricks │ └── apps │ └── graphx │ └── sssp.scala ├── textrank ├── mihalcea.json ├── mihalcea.txt └── TextRank.scala ├── .gitignore └── LICENSE.md /exsto/.gitignore: -------------------------------------------------------------------------------- 1 | *.pyc 2 | *~ 3 | *.parquet/ 4 | parsed/ 5 | data/ 6 | -------------------------------------------------------------------------------- /data/join/clk.tsv: -------------------------------------------------------------------------------- 1 | 2014-03-04 15dfb8e6cc4111e3a5bb600308919594 11 2 | 2014-03-06 81da510acc4111e387f3600308919594 61 -------------------------------------------------------------------------------- /data/join/reg.tsv: -------------------------------------------------------------------------------- 1 | 2014-03-02 15dfb8e6cc4111e3a5bb600308919594 1 33.6599436237 -117.958125229 2 | 2014-03-04 81da510acc4111e387f3600308919594 2 33.8570099635 -117.855744398 -------------------------------------------------------------------------------- /exsto/defaults.cfg: -------------------------------------------------------------------------------- 1 | [scraper] 2 | iterations: 2500 3 | nap_time: 2 4 | base_url: http://mail-archives.apache.org 5 | start_url: /mod_mbox/spark-user/201503.mbox/<1427773728882-22311.post%40n3.nabble.com> 6 | 7 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | Spark Exercises 2 | =============== 3 | 4 | Coding exercises for [Apache Spark workshops by Databricks](http://databricks.com/spark-training) 5 | 6 | Authors: 7 | * [Paco Nathan](http://liber118.com/pxn/) 8 | -------------------------------------------------------------------------------- /exsto/adhoc.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash -x 2 | 3 | export SPARK_HOME=~/opt/spark 4 | 5 | rm -rf reply_edge.parquet reply_node.parquet 6 | rm -rf graf_edge.parquet graf_node.parquet 7 | 8 | SPARK_LOCAL_IP=127.0.0.1 \ 9 | $SPARK_HOME/bin/spark-submit ./adhoc.py -------------------------------------------------------------------------------- /exsto/regex.py: -------------------------------------------------------------------------------- 1 | import re 2 | 3 | PAT_PUNCT = re.compile(r'^\W+$') 4 | PAT_SPACE = re.compile(r'\S+$') 5 | 6 | ex = [ 7 | '________________________________', 8 | '.' 
9 | ] 10 | 11 | for s in ex: 12 | print s 13 | print "reg", PAT_PUNCT.match(s) 14 | print "foo", PAT_SPACE.match(s) 15 | -------------------------------------------------------------------------------- /exsto/filter.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # encoding: utf-8 3 | 4 | import exsto 5 | import json 6 | import os 7 | import sys 8 | 9 | 10 | def main (): 11 | path = sys.argv[1] 12 | 13 | if os.path.isdir(path): 14 | exsto.test_filter(path) 15 | else: 16 | with open(path, 'r') as f: 17 | for line in f.readlines(): 18 | meta = json.loads(line) 19 | print exsto.pretty_print(exsto.filter_quotes(meta["text"])) 20 | 21 | 22 | if __name__ == "__main__": 23 | main() 24 | -------------------------------------------------------------------------------- /src/main/python/nwc.py: -------------------------------------------------------------------------------- 1 | import sys 2 | 3 | from pyspark import SparkContext 4 | from pyspark.streaming import StreamingContext 5 | 6 | sc = SparkContext(appName="PyStreamNWC", master="local[*]") 7 | ssc = StreamingContext(sc, 5) 8 | 9 | lines = ssc.socketTextStream(sys.argv[1], int(sys.argv[2])) 10 | 11 | counts = lines.flatMap(lambda line: line.split(" ")) \ 12 | .map(lambda word: (word, 1)) \ 13 | .reduceByKey(lambda a, b: a+b) 14 | 15 | counts.pprint() 16 | 17 | ssc.start() 18 | ssc.awaitTermination() 19 | -------------------------------------------------------------------------------- /textrank/mihalcea.json: -------------------------------------------------------------------------------- 1 | ["Compatibility of systems of linear constraints over the set of natural numbers.\nCriteria of compatibility of a system of linear Diophantine equations, strict\ninequations, and nonstrict inequations are considered. Upper bounds for\ncomponents of a minimal set of solutions and algorithms of construction of\nminimal generating sets of solutions for all types of systems are given. \nThese criteria and the corresponding algorithms for constructing a minimal\nsupporting set of solutions can be used in solving all the considered types\nsystems and systems of mixed types."] 2 | -------------------------------------------------------------------------------- /textrank/mihalcea.txt: -------------------------------------------------------------------------------- 1 | Compatibility of systems of linear constraints over the set of natural numbers. 2 | Criteria of compatibility of a system of linear Diophantine equations, strict 3 | inequations, and nonstrict inequations are considered. Upper bounds for 4 | components of a minimal set of solutions and algorithms of construction of 5 | minimal generating sets of solutions for all types of systems are given. 6 | These criteria and the corresponding algorithms for constructing a minimal 7 | supporting set of solutions can be used in solving all the considered types 8 | systems and systems of mixed types. 
9 | -------------------------------------------------------------------------------- /exsto/etl.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # encoding: utf-8 3 | 4 | import exsto 5 | import json 6 | import sys 7 | 8 | DEBUG = False # True 9 | 10 | 11 | def main (): 12 | global DEBUG 13 | path = sys.argv[1] 14 | 15 | with open(path, 'r') as f: 16 | for line in f.readlines(): 17 | meta = json.loads(line) 18 | 19 | for graf_text in exsto.filter_quotes(meta["text"]): 20 | try: 21 | for sent in exsto.parse_graf(meta["id"], graf_text): 22 | print exsto.pretty_print(sent) 23 | except (IndexError) as e: 24 | if DEBUG: 25 | print "IndexError: " + str(e) 26 | print graf_text 27 | 28 | if __name__ == "__main__": 29 | main() 30 | -------------------------------------------------------------------------------- /exsto/parse.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # encoding: utf-8 3 | 4 | import exsto 5 | import json 6 | import sys 7 | 8 | 9 | DEBUG = False # True 10 | 11 | def main(): 12 | global DEBUG 13 | 14 | path = sys.argv[1] 15 | 16 | with open(path, 'r') as f: 17 | for line in f.readlines(): 18 | meta = json.loads(line) 19 | base = 0 20 | 21 | for graf_text in exsto.filter_quotes(meta["text"]): 22 | if DEBUG: 23 | print graf_text 24 | 25 | grafs, new_base = exsto.parse_graf(meta["id"], graf_text, base) 26 | base = new_base 27 | 28 | for graf in grafs: 29 | print exsto.pretty_print(graf) 30 | 31 | 32 | if __name__ == "__main__": 33 | main() 34 | -------------------------------------------------------------------------------- /src/main/python/snwc.py: -------------------------------------------------------------------------------- 1 | import sys 2 | 3 | from pyspark import SparkContext 4 | from pyspark.streaming import StreamingContext 5 | 6 | def updateFunc (new_values, last_sum): 7 | return sum(new_values) + (last_sum or 0) 8 | 9 | sc = SparkContext(appName="PyStreamNWC", master="local[*]") 10 | ssc = StreamingContext(sc, 5) 11 | ssc.checkpoint("checkpoint") 12 | 13 | lines = ssc.socketTextStream(sys.argv[1], int(sys.argv[2])) 14 | 15 | counts = lines.flatMap(lambda line: line.split(" ")) \ 16 | .map(lambda word: (word, 1)) \ 17 | .updateStateByKey(updateFunc) \ 18 | .transform(lambda x: x.sortByKey()) 19 | 20 | counts.pprint() 21 | 22 | ssc.start() 23 | ssc.awaitTermination() 24 | -------------------------------------------------------------------------------- /exsto/test/simple.json: -------------------------------------------------------------------------------- 1 | {"date": "2014-11-01T00:12:02+00:00", "id": "09623D4D-A3B5-4EF4-BC13-BEE12F7F4954", "next_thread": "CAPX+=eFM5n5xq=CJALMjsYPnR+duvGaYaoZVCNaLzZ6qOHnU2g", "next_url": "http://mail-archives.apache.org/mod_mbox/spark-user/201411.mbox/%3cCAPX+=eFM5n5xq=CJALMjsYPnR+duvGaYaoZVCNaLzZ6qOHnU2g@mail.gmail.com%3e", "prev_thread": "", "sender": "Social Marketing ", "subject": "Spark Meetup in Singapore", "text": "\nDear Sir/Madam,\n\nWe want to become an organiser of Singapore Meetup to promote the regional SPARK and big data\ncommunity in ASEAN area.\n\nMy name is Songtao, I am a big data consultant in Singapore and have great passion for Spark\ntechnologies.\n\n\n\nThanks,\nSongtao\n---------------------------------------------------------------------\nTo unsubscribe, e-mail: user-unsubscribe@spark.apache.org\nFor additional commands, e-mail: user-help@spark.apache.org\n\n\n"} 2 | 
-------------------------------------------------------------------------------- /exsto/scrape.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # encoding: utf-8 3 | 4 | import ConfigParser 5 | import exsto 6 | import sys 7 | import time 8 | 9 | 10 | def main (): 11 | config = ConfigParser.ConfigParser() 12 | config.read("defaults.cfg") 13 | 14 | iterations = config.getint("scraper", "iterations") 15 | nap_time = config.getint("scraper", "nap_time") 16 | base_url = config.get("scraper", "base_url") 17 | url = base_url + config.get("scraper", "start_url") 18 | 19 | with open(sys.argv[1], 'w') as f: 20 | for i in xrange(0, iterations): 21 | if len(url) < 1: 22 | break 23 | else: 24 | root = exsto.scrape_url(url) 25 | meta = exsto.parse_email(root, base_url) 26 | 27 | f.write(exsto.pretty_print(meta)) 28 | f.write('\n') 29 | 30 | url = meta["next_url"] 31 | time.sleep(nap_time) 32 | 33 | 34 | if __name__ == "__main__": 35 | main() 36 | -------------------------------------------------------------------------------- /exsto/dbc/0.prep_data.scala: -------------------------------------------------------------------------------- 1 | // Databricks notebook source exported at Mon, 9 Feb 2015 04:37:26 UTC 2 | // MAGIC %md ## Set up your S3 credentials 3 | // MAGIC NB: URL encode since AWS SecretKey can contain "/" and other characters 4 | 5 | // COMMAND ---------- 6 | 7 | dbutils.fs.unmount(s"/$MountName") 8 | 9 | // COMMAND ---------- 10 | 11 | import java.net.URLEncoder 12 | val AccessKey = "YOUR_ACCESS_KEY" 13 | val SecretKey = URLEncoder.encode("YOUR_SECRET_KEY", "UTF-8") 14 | val AwsBucketName = "paco.dbfs.public" 15 | val MountName = "mnt/paco" 16 | 17 | // COMMAND ---------- 18 | 19 | // MAGIC %md ## Mount the S3 bucket 20 | 21 | // COMMAND ---------- 22 | 23 | dbutils.fs.mount(s"s3n://$AccessKey:$SecretKey@$AwsBucketName", s"/$MountName") 24 | 25 | // COMMAND ---------- 26 | 27 | // MAGIC %md ## List the mounted contents 28 | 29 | // COMMAND ---------- 30 | 31 | display(dbutils.fs.ls(s"/$MountName/exsto/parsed")) 32 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | twitter4j.properties 2 | data/*.parquet 3 | *~ 4 | 5 | # Byte-compiled / optimized / DLL files 6 | __pycache__/ 7 | *.py[cod] 8 | 9 | # C extensions 10 | *.so 11 | 12 | # Distribution / packaging 13 | .Python 14 | env/ 15 | build/ 16 | develop-eggs/ 17 | dist/ 18 | downloads/ 19 | eggs/ 20 | lib/ 21 | lib64/ 22 | parts/ 23 | sdist/ 24 | var/ 25 | *.egg-info/ 26 | .installed.cfg 27 | *.egg 28 | 29 | # PyInstaller 30 | # Usually these files are written by a python script from a template 31 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 
32 | *.manifest 33 | *.spec 34 | 35 | # Installer logs 36 | pip-log.txt 37 | pip-delete-this-directory.txt 38 | 39 | # Unit test / coverage reports 40 | htmlcov/ 41 | .tox/ 42 | .coverage 43 | .cache 44 | nosetests.xml 45 | coverage.xml 46 | 47 | # Translations 48 | *.mo 49 | *.pot 50 | 51 | # Django stuff: 52 | *.log 53 | 54 | # Sphinx documentation 55 | docs/_build/ 56 | 57 | # PyBuilder 58 | target/ 59 | -------------------------------------------------------------------------------- /src/main/scala/com/databricks/apps/graphx/sssp.scala: -------------------------------------------------------------------------------- 1 | // SSSP impl in Graphx using Pregel 2 | //https://en.wikipedia.org/wiki/Dijkstra%27s_algorithm 3 | //http://spark.apache.org/docs/latest/graphx-programming-guide.html#pregel-api 4 | //http://stackoverflow.com/questions/23700124/how-to-get-sssp-actual-path-by-apache-spark-graphx 5 | 6 | import org.apache.spark.graphx._ 7 | import org.apache.spark.graphx.util.GraphGenerators 8 | 9 | val graph = GraphGenerators.logNormalGraph(sc, numVertices = 5, numEParts = sc.defaultParallelism, mu = 4.0, sigma = 1.3).mapEdges(e => e.attr.toDouble) 10 | graph.edges.foreach(println) 11 | 12 | // initialize all vertices except the root to have distance infinity 13 | val sourceId: VertexId = 0 14 | val initialGraph : Graph[(Double, List[VertexId]), Double] = graph.mapVertices((id, _) => if (id == sourceId) (0.0, List[VertexId](sourceId)) else (Double.PositiveInfinity, List[VertexId]())) 15 | 16 | val sssp = initialGraph.pregel((Double.PositiveInfinity, List[VertexId]()), Int.MaxValue, EdgeDirection.Out)( 17 | // vertex program 18 | (id, dist, newDist) => if (dist._1 < newDist._1) dist else newDist, 19 | 20 | // send message 21 | triplet => { 22 | if (triplet.srcAttr._1 < triplet.dstAttr._1 - triplet.attr ) { 23 | Iterator((triplet.dstId, (triplet.srcAttr._1 + triplet.attr , triplet.srcAttr._2 :+ triplet.dstId))) 24 | } else { 25 | Iterator.empty 26 | } 27 | }, 28 | 29 | // merge message 30 | (a, b) => if (a._1 < b._1) a else b) 31 | 32 | println(sssp.vertices.collect.mkString("\n") 33 | ) 34 | -------------------------------------------------------------------------------- /exsto/test/forward.json: -------------------------------------------------------------------------------- 1 | {"date": "2014-11-01T00:30:50+00:00", "id": "CALk5AbqSEhiDuD3PBTaX1GVKEr2ojYjACj9WyHvcd0qt2scMQw", "next_thread": "CALrNVjUR6eqofD-xDc61a14rp1ojWjQ=c1AqKx685iJc2V2YxQ", "next_url": "http://mail-archives.apache.org/mod_mbox/spark-user/201411.mbox/%3cCALrNVjUR6eqofD-xDc61a14rp1ojWjQ=c1AqKx685iJc2V2YxQ@mail.gmail.com%3e", "prev_thread": "CAPX+=eFM5n5xq=CJALMjsYPnR+duvGaYaoZVCNaLzZ6qOHnU2g", "sender": "Kevin Paul ", "subject": "Some of the statistics function in SparkSQL is very slow", "text": "\nHi all, some of the statistics function that I tried in HiveContext is\nvery slow, notably percentile, var_sampl, the symptom is same as what\nI describe in my previous email, when I do schemaRDD.collect on the\nresulting RDD, the shuffle size is around 1000GB, could I do anything\nelse to speed up this?\n\nThanks,\nKevin Paul\n---------- Forwarded message ----------\nFrom: Kevin Paul \nDate: Sat, Oct 25, 2014 at 8:48 PM\nSubject: HiveSQL percentile is query slow\nTo: user \n\n\nHi all, I tried to run the following sql command in HiveContext with\nmy table loaded into memory:\n SELECT percentile(myColumn, array(0.1, 0.5)) FROM myTable\n\nThe query took more than 5 minutes to complete, but the query like\n SELECT 
min(myColumn), max(myColumn) FROM myTable\nonly took around 10 seconds to run.\n\nMy Spark version is 1.2.0 SNAPSHOT, the cluster is 10 slaves, and the\ndataset is 10G, and I'm running on Yarn-client mode.\nThe query took two stages to run:\n 1st. is mapPartitions at Exchanged.scala:86 with duration 9s\n 2nd. is collect at SparkPlan.scala: 85 with duration 5.3 min\n\nI attach the Summary Metrics for the collect task here\nThanks,\nKevin Paul\n\n"} 2 | -------------------------------------------------------------------------------- /exsto/graph2.scala: -------------------------------------------------------------------------------- 1 | import org.apache.spark.graphx._ 2 | import org.apache.spark.rdd.RDD 3 | 4 | val sqlCtx = new org.apache.spark.sql.SQLContext(sc) 5 | import sqlCtx._ 6 | 7 | val edge = sqlCtx.parquetFile("reply_edge.parquet") 8 | edge.registerTempTable("edge") 9 | 10 | val node = sqlCtx.parquetFile("reply_node.parquet") 11 | node.registerTempTable("node") 12 | 13 | edge.schemaString 14 | node.schemaString 15 | 16 | 17 | val sql = "SELECT id, sender FROM node" 18 | 19 | val n = sqlCtx.sql(sql).distinct() 20 | val nodes: RDD[(Long, String)] = n.map{ p => 21 | (p(0).asInstanceOf[Long], p(1).asInstanceOf[String]) 22 | } 23 | nodes.collect() 24 | 25 | 26 | val sql = "SELECT replier, sender, num FROM edge" 27 | 28 | val e = sqlCtx.sql(sql).distinct() 29 | val edges: RDD[Edge[Int]] = e.map{ p => 30 | Edge(p(0).asInstanceOf[Long], p(1).asInstanceOf[Long], p(2).asInstanceOf[Int]) 31 | } 32 | edges.collect() 33 | 34 | 35 | // run graph analytics 36 | 37 | val g: Graph[String, Int] = Graph(nodes, edges) 38 | val r = g.pageRank(0.0001).vertices 39 | 40 | r.join(nodes).sortBy(_._2._1, ascending=false).foreach(println) 41 | 42 | // define a reduce operation to compute the highest degree vertex 43 | 44 | def max(a: (VertexId, Int), b: (VertexId, Int)): (VertexId, Int) = { 45 | if (a._2 > b._2) a else b 46 | } 47 | 48 | // compute the max degrees 49 | 50 | val maxInDegree: (VertexId, Int) = g.inDegrees.reduce(max) 51 | val maxOutDegree: (VertexId, Int) = g.outDegrees.reduce(max) 52 | val maxDegrees: (VertexId, Int) = g.degrees.reduce(max) 53 | 54 | val node_map: scala.collection.Map[Long, String] = node. 55 | map(p => (p(0).asInstanceOf[Long], p(1).asInstanceOf[String])).collectAsMap() 56 | 57 | // connected components 58 | 59 | val scc = g.stronglyConnectedComponents(10).vertices 60 | node.join(scc).foreach(println) 61 | -------------------------------------------------------------------------------- /exsto/test/replied.json: -------------------------------------------------------------------------------- 1 | {"date": "2014-11-01T01:20:17+00:00", "id": "CALrNVjUR6eqofD-xDc61a14rp1ojWjQ=c1AqKx685iJc2V2YxQ", "next_thread": "CALrNVjVQZwRxOMiEyrrazyZGNRjTc14sX=EtnQJHQANN3=86ag", "next_url": "http://mail-archives.apache.org/mod_mbox/spark-user/201411.mbox/%3cCALrNVjVQZwRxOMiEyrrazyZGNRjTc14sX=EtnQJHQANN3=86ag@mail.gmail.com%3e", "prev_thread": "CALk5AbqSEhiDuD3PBTaX1GVKEr2ojYjACj9WyHvcd0qt2scMQw", "sender": "Soumya Simanta ", "subject": "Re: SparkSQL performance", "text": "\nI agree. My personal experience with Spark core is that it performs really\nwell once you tune it properly.\n\nAs far I understand SparkSQL under the hood performs many of these\noptimizations (order of Spark operations) and uses a more efficient storage\nformat. Is this assumption correct?\n\nHas anyone done any comparison of SparkSQL with Impala ? 
The fact that many\nof the queries don't even finish in the benchmark is quite surprising and\nhard to believe.\n\nA few months ago there were a few emails about Spark not being able to\nhandle large volumes (TBs) of data. That myth was busted recently when the\nfolks at Databricks published their sorting record results.\n\n\nThanks\n-Soumya\n\n\n\n\n\n\nOn Fri, Oct 31, 2014 at 7:35 PM, Du Li wrote:\n\n> We have seen all kinds of results published that often contradict each\n> other. My take is that the authors often know more tricks about how to tune\n> their own/familiar products than the others. So the product on focus is\n> tuned for ideal performance while the competitors are not. The authors are\n> not necessarily biased but as a consequence the results are.\n>\n> Ideally it\u2019s critical for the user community to be informed of all the\n> in-depth tuning tricks of all products. However, realistically, there is a\n> big gap in terms of documentation. Hope the Spark folks will make a\n> difference. :-)\n>\n> Du\n>\n>\n> From: Soumya Simanta \n> Date: Friday, October 31, 2014 at 4:04 PM\n> To: \"user@spark.apache.org\" \n> Subject: SparkSQL performance\n>\n> I was really surprised to see the results here, esp. SparkSQL \"not\n> completing\"\n> http://www.citusdata.com/blog/86-making-postgresql-scale-hadoop-style\n>\n> I was under the impression that SparkSQL performs really well because it\n> can optimize the RDD operations and load only the columns that are\n> required. This essentially means in most cases SparkSQL should be as fast\n> as Spark is.\n>\n> I would be very interested to hear what others in the group have to say\n> about this.\n>\n> Thanks\n> -Soumya\n>\n>\n>\n\n"} 2 | -------------------------------------------------------------------------------- /exsto/dbc/4.SocialGraph.scala: -------------------------------------------------------------------------------- 1 | // Databricks notebook source exported at Thu, 4 Jun 2015 04:39:21 UTC 2 | // MAGIC %md 3 | // MAGIC # Construct a Social Graph of Sender/Replier 4 | 5 | // COMMAND ---------- 6 | 7 | val nodes = sqlContext.parquetFile("/mnt/paco/exsto/graph/reply_node.parquet") 8 | node.registerTempTable("node") 9 | 10 | val edges = sqlContext.parquetFile("/mnt/paco/exsto/graph/reply_edge.parquet") 11 | edge.registerTempTable("edge") 12 | 13 | // COMMAND ---------- 14 | 15 | import org.apache.spark.graphx._ 16 | import org.apache.spark.graphx._ 17 | import org.apache.spark.rdd.RDD 18 | 19 | 20 | // COMMAND ---------- 21 | 22 | val edgeRDD = edges.map{ p => 23 | Edge(p(0).asInstanceOf[Long], p(1).asInstanceOf[Long], p(2).asInstanceOf[Int]) 24 | }.distinct() 25 | 26 | // COMMAND ---------- 27 | 28 | val nodeRDD = nodes.map{ p => 29 | (p(0).asInstanceOf[Long], p(1).asInstanceOf[String]) 30 | }.distinct() 31 | 32 | 33 | // COMMAND ---------- 34 | 35 | val g: Graph[String, Int] = Graph(nodeRDD, edgeRDD) 36 | 37 | 38 | // COMMAND ---------- 39 | 40 | // MAGIC %md 41 | // MAGIC Now run *PageRank* on this graph to find the top-ranked email repliers 42 | 43 | // COMMAND ---------- 44 | 45 | case class Rank (id: Long, rank: Double) 46 | 47 | val rank_df = g.pageRank(0.0001).vertices.map(x => Rank(x._1.asInstanceOf[Long], x._2)).toDF() 48 | rank_df.registerTempTable("rank") 49 | 50 | // COMMAND ---------- 51 | 52 | // MAGIC %sql 53 | // MAGIC SELECT rank.rank, node.sender 54 | // MAGIC FROM rank JOIN node ON (rank.id = node.id) 55 | // MAGIC ORDER BY rank.rank DESC 56 | // MAGIC LIMIT 20 57 | 58 | // COMMAND ---------- 59 | 60 | // MAGIC 
%md 61 | // MAGIC Let's get some metrics about the social graph... 62 | 63 | // COMMAND ---------- 64 | 65 | import org.apache.spark.graphx.VertexId 66 | 67 | // define a reduce operation to compute the highest degree vertex 68 | def max(a: (VertexId, Int), b: (VertexId, Int)): (VertexId, Int) = { 69 | if (a._2 > b._2) a else b 70 | } 71 | 72 | // compute the max degrees 73 | val maxInDegree: (VertexId, Int) = g.inDegrees.reduce(max) 74 | val maxOutDegree: (VertexId, Int) = g.outDegrees.reduce(max) 75 | val maxDegrees: (VertexId, Int) = g.degrees.reduce(max) 76 | 77 | // COMMAND ---------- 78 | 79 | // connected components 80 | val cc = g.stronglyConnectedComponents(100).vertices 81 | cc.take(2) 82 | 83 | // COMMAND ---------- 84 | 85 | case class Component (name: String, component: Long) 86 | 87 | val cc_df = nodeRDD.join(cc).map { 88 | case (id, (name, cc)) => Component(name, cc) 89 | }.toDF() 90 | 91 | cc_df.registerTempTable("cc") 92 | 93 | // COMMAND ---------- 94 | 95 | // MAGIC %sql 96 | // MAGIC SELECT component, COUNT(*) AS num 97 | // MAGIC FROM cc 98 | // MAGIC GROUP BY component 99 | // MAGIC ORDER BY num DESC 100 | 101 | // COMMAND ---------- 102 | 103 | 104 | -------------------------------------------------------------------------------- /textrank/TextRank.scala: -------------------------------------------------------------------------------- 1 | import org.apache.spark.graphx._ 2 | import org.apache.spark.rdd.RDD 3 | 4 | val sqlContext = new org.apache.spark.sql.SQLContext(sc) 5 | import sqlContext._ 6 | 7 | // build the graph 8 | 9 | val word = sqlContext.parquetFile("word.parquet") 10 | word.registerTempTable("word") 11 | 12 | val edge = sqlContext.parquetFile("edge.parquet") 13 | edge.registerTempTable("edge") 14 | 15 | sql("SELECT * FROM word").take(5) 16 | sql("SELECT * FROM edge").take(5) 17 | 18 | val n = sql("SELECT id, stem FROM word").distinct() 19 | val nodes: RDD[(Long, String)] = n.map(p => (p(0).asInstanceOf[Long], p(1).asInstanceOf[String])) 20 | 21 | val e = sql("SELECT * FROM edge") 22 | val edges: RDD[Edge[Int]] = e.map(p => Edge(p(0).asInstanceOf[Long], p(1).asInstanceOf[Long], 0)) 23 | 24 | // run PageRank 25 | 26 | val g: Graph[String, Int] = Graph(nodes, edges) 27 | val r = g.pageRank(0.0001).vertices 28 | 29 | r.join(nodes).sortBy(_._2._1, ascending=false).foreach(println) 30 | 31 | // save the ranks 32 | 33 | case class Rank(id: Int, rank: Float) 34 | val rank = r.map(p => Rank(p._1.toInt, p._2.toFloat)) 35 | 36 | rank.registerTempTable("rank") 37 | rank.saveAsParquetFile("rank.parquet") 38 | 39 | ////////////////////////////////////////////////////////////////////// 40 | 41 | def median[T](s: Seq[T])(implicit n: Fractional[T]) = { 42 | import n._ 43 | val (lower, upper) = s.sortWith(_<_).splitAt(s.size / 2) 44 | if (s.size % 2 == 0) (lower.last + upper.head) / fromInt(2) else upper.head 45 | } 46 | 47 | node.schema 48 | edge.schema 49 | rank.schema 50 | 51 | val s = sql("SELECT w.index, w.word, r.rank FROM word w JOIN rank r ON w.id = r.id ORDER BY w.index").collect() 52 | 53 | val min_rank = median(r.map(_._2).collect()) 54 | 55 | var span:List[String] = List() 56 | var last_index = -1 57 | var rank_sum = 0.0 58 | 59 | var phrases:collection.mutable.Map[String, Double] = collection.mutable.Map() 60 | 61 | s.foreach { x => 62 | //println (x) 63 | val index = x.getInt(0) 64 | val word = x.getString(1) 65 | val rank = x.getFloat(2) 66 | 67 | var isStop = false 68 | 69 | // test for break from past 70 | if (span.size > 0 && rank < min_rank) isStop = 
true 71 | if (span.size > 0 && (index - last_index > 1)) isStop = true 72 | 73 | // clear accumulation 74 | if (isStop) { 75 | val phrase = span.mkString(" ") 76 | phrases += (phrase -> rank_sum) 77 | //println(phrase, rank_sum) 78 | 79 | span = List() 80 | last_index = index 81 | rank_sum = 0.0 82 | } 83 | 84 | // start or append 85 | if (rank >= min_rank) { 86 | span = span :+ word 87 | last_index = index 88 | rank_sum += rank 89 | } 90 | } 91 | 92 | // summarize the text as a list of ranked keyphrases 93 | 94 | var summary = sc.parallelize(phrases.toSeq).distinct().sortBy(_._2, ascending=false).cache() 95 | val min_rank = median(summary.map(_._2).collect().toSeq) 96 | summary = summary.filter(_._2 >= min_rank) 97 | 98 | val sum = summary.map(_._2).reduce(_ + _) 99 | summary = summary.map(x => (x._1, x._2 / sum)) 100 | summary.collect() 101 | -------------------------------------------------------------------------------- /exsto/adhoc.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # encoding: utf-8 3 | 4 | import json 5 | import sys 6 | 7 | 8 | from pyspark import SparkContext 9 | sc = SparkContext(appName="Exsto", master="local[*]") 10 | 11 | from pyspark.sql import SQLContext, Row 12 | sqlCtx = SQLContext(sc) 13 | 14 | msg = sqlCtx.jsonFile("data").cache() 15 | msg.registerTempTable("msg") 16 | 17 | 18 | # Question: Who are the senders? 19 | 20 | who = msg.map(lambda x: x.sender).distinct().zipWithUniqueId() 21 | who.take(10) 22 | 23 | whoMap = who.collectAsMap() 24 | 25 | print "\nsenders" 26 | print len(whoMap) 27 | 28 | 29 | # Question: Who are the top K senders? 30 | 31 | from operator import add 32 | 33 | top_sender = msg.map(lambda x: (x.sender, 1,)).reduceByKey(add) \ 34 | .map(lambda (a, b): (b, a)) \ 35 | .sortByKey(0, 1) \ 36 | .map(lambda (a, b): (b, a)) 37 | 38 | print "\ntop senders" 39 | print top_sender.take(11) 40 | 41 | 42 | # Question: Which are the top K conversations? 
43 | 44 | import itertools 45 | 46 | def nitems (replier, senders): 47 | for sender, g in itertools.groupby(senders): 48 | yield len(list(g)), (replier, sender,) 49 | 50 | senders = msg.map(lambda x: (x.id, x.sender,)) 51 | replies = msg.map(lambda x: (x.prev_thread, x.sender,)) 52 | 53 | convo = replies.join(senders).values() \ 54 | .filter(lambda (a, b): a != b) 55 | 56 | top_convo = convo.groupByKey() \ 57 | .flatMap(lambda (a, b): list(nitems(a, b))) \ 58 | .sortByKey(0) 59 | 60 | print "\ntop convo" 61 | print top_convo.take(10) 62 | 63 | 64 | # Prepare for Sender/Reply Graph Analysis 65 | 66 | edge = top_convo.map(lambda (a, b): (whoMap.get(b[0]), whoMap.get(b[1]), a,)) 67 | edgeSchema = edge.map(lambda p: Row(replier=long(p[0]), sender=long(p[1]), num=int(p[2]))) 68 | edgeTable = sqlCtx.inferSchema(edgeSchema) 69 | edgeTable.saveAsParquetFile("reply_edge.parquet") 70 | 71 | node = who.map(lambda (a, b): (b, a)) 72 | nodeSchema = node.map(lambda p: Row(id=long(p[0]), sender=p[1])) 73 | nodeTable = sqlCtx.inferSchema(nodeSchema) 74 | nodeTable.saveAsParquetFile("reply_node.parquet") 75 | 76 | 77 | # Prepare for TextRank Analysis per paragraph 78 | 79 | def map_graf_edges (x): 80 | j = json.loads(x) 81 | 82 | for pair in j["tile"]: 83 | n0 = int(pair[0]) 84 | n1 = int(pair[1]) 85 | 86 | if n0 > 0 and n1 > 0: 87 | yield (j["id"], n0, n1,) 88 | yield (j["id"], n1, n0,) 89 | 90 | graf = sc.textFile("parsed").flatMap(map_graf_edges) 91 | n = graf.count() 92 | print "\ngraf edges", n 93 | 94 | edgeSchema = graf.map(lambda p: Row(id=p[0], node0=int(p[1]), node1=int(p[2]))) 95 | 96 | edgeTable = sqlCtx.inferSchema(edgeSchema) 97 | edgeTable.saveAsParquetFile("graf_edge.parquet") 98 | 99 | 100 | def map_graf_nodes (x): 101 | j = json.loads(x) 102 | 103 | for word in j["graf"]: 104 | yield [j["id"]] + word 105 | 106 | graf = sc.textFile("parsed").flatMap(map_graf_nodes) 107 | n = graf.count() 108 | print "\ngraf nodes", n 109 | 110 | nodeSchema = graf.map(lambda p: Row(id=p[0], node_id=int(p[1]), raw=p[2], root=p[3], pos=p[4], keep=p[5], num=int(p[6]))) 111 | 112 | nodeTable = sqlCtx.inferSchema(nodeSchema) 113 | nodeTable.saveAsParquetFile("graf_node.parquet") 114 | -------------------------------------------------------------------------------- /exsto/graph1.scala: -------------------------------------------------------------------------------- 1 | import org.apache.spark.graphx._ 2 | import org.apache.spark.rdd.RDD 3 | 4 | val sqlCtx = new org.apache.spark.sql.SQLContext(sc) 5 | import sqlCtx._ 6 | 7 | val edge = sqlCtx.parquetFile("graf_edge.parquet") 8 | edge.registerTempTable("edge") 9 | 10 | val node = sqlCtx.parquetFile("graf_node.parquet") 11 | node.registerTempTable("node") 12 | 13 | // Let's pick one message as an example -- 14 | // at scale we'd parallelize this 15 | 16 | val msg_id = "CA+B-+fyrBU1yGZAYJM_u=gnBVtzB=sXoBHkhmS-6L1n8K5Hhbw" 17 | 18 | 19 | val sql = """ 20 | SELECT node_id, root 21 | FROM node 22 | WHERE id='%s' AND keep='1' 23 | """.format(msg_id) 24 | 25 | val n = sqlCtx.sql(sql.stripMargin).distinct() 26 | val nodes: RDD[(Long, String)] = n.map{ p => 27 | (p(0).asInstanceOf[Int].toLong, p(1).asInstanceOf[String]) 28 | } 29 | nodes.collect() 30 | 31 | 32 | val sql = """ 33 | SELECT node0, node1 34 | FROM edge 35 | WHERE id='%s' 36 | """.format(msg_id) 37 | 38 | val e = sqlCtx.sql(sql.stripMargin).distinct() 39 | val edges: RDD[Edge[Int]] = e.map{ p => 40 | Edge(p(0).asInstanceOf[Int].toLong, p(1).asInstanceOf[Int].toLong, 0) 41 | } 42 | edges.collect() 43 | 44 | // 
run PageRank 45 | 46 | val g: Graph[String, Int] = Graph(nodes, edges) 47 | val r = g.pageRank(0.0001).vertices 48 | 49 | r.join(nodes).sortBy(_._2._1, ascending=false).foreach(println) 50 | 51 | // save the ranks 52 | 53 | case class Rank(id: Int, rank: Float) 54 | val rank = r.map(p => Rank(p._1.toInt, p._2.toFloat)) 55 | 56 | rank.registerTempTable("rank") 57 | 58 | 59 | ////////////////////////////////////////////////////////////////////// 60 | 61 | def median[T](s: Seq[T])(implicit n: Fractional[T]) = { 62 | import n._ 63 | val (lower, upper) = s.sortWith(_<_).splitAt(s.size / 2) 64 | if (s.size % 2 == 0) (lower.last + upper.head) / fromInt(2) else upper.head 65 | } 66 | 67 | node.schema 68 | edge.schema 69 | rank.schema 70 | 71 | val sql = """ 72 | SELECT n.num, n.raw, r.rank 73 | FROM node n JOIN rank r ON n.node_id = r.id 74 | WHERE n.id='%s' AND n.keep='1' 75 | ORDER BY n.num 76 | """.format(msg_id) 77 | 78 | val s = sqlCtx.sql(sql.stripMargin).collect() 79 | 80 | val min_rank = median(r.map(_._2).collect()) 81 | 82 | var span:List[String] = List() 83 | var last_index = -1 84 | var rank_sum = 0.0 85 | 86 | var phrases:collection.mutable.Map[String, Double] = collection.mutable.Map() 87 | 88 | s.foreach { x => 89 | //println (x) 90 | val index = x.getInt(0) 91 | val word = x.getString(1) 92 | val rank = x.getFloat(2) 93 | 94 | var isStop = false 95 | 96 | // test for break from past 97 | if (span.size > 0 && rank < min_rank) isStop = true 98 | if (span.size > 0 && (index - last_index > 1)) isStop = true 99 | 100 | // clear accumulation 101 | if (isStop) { 102 | val phrase = span.mkString(" ") 103 | phrases += (phrase -> rank_sum) 104 | //println(phrase, rank_sum) 105 | 106 | span = List() 107 | last_index = index 108 | rank_sum = 0.0 109 | } 110 | 111 | // start or append 112 | if (rank >= min_rank) { 113 | span = span :+ word 114 | last_index = index 115 | rank_sum += rank 116 | } 117 | } 118 | 119 | // summarize the text as a list of ranked keyphrases 120 | 121 | var summary = sc.parallelize(phrases.toSeq).distinct().sortBy(_._2, ascending=false).cache() 122 | 123 | // take top 50 percentile 124 | // NOT USED FOR SMALL MESSAGES 125 | 126 | val min_rank = median(summary.map(_._2).collect().toSeq) 127 | summary = summary.filter(_._2 >= min_rank) 128 | 129 | val sum = summary.map(_._2).reduce(_ + _) 130 | summary = summary.map(x => (x._1, x._2 / sum)) 131 | summary.collect() 132 | -------------------------------------------------------------------------------- /exsto/README.md: -------------------------------------------------------------------------------- 1 | # Microservices, Containers, and Machine Learning 2 | 3 | A frequently asked question on the [Apache Spark](http://spark.apache.org/) 4 | user email list concerns where to find data sets for evaluating the code. 5 | Oddly enough, the collection of archived messages for this email list 6 | provides an excellent data set to evaluate machine learning, graph 7 | algorithms, text analytics, time-series analysis, etc. 8 | 9 | Herein, an open source developer community considers itself algorithmically. 10 | This project shows work-in-progress for how to surface data insights from 11 | the developer email forums for an Apache open source project. 12 | It leverages advanced technologies for natural language processing, machine 13 | learning, graph algorithms, time series analysis, etc. 14 | As an example, we use data from the `` 15 | [email list archives](http://mail-archives.apache.org) to help understand 16 | its community better.
17 | 18 | See [DataDayTexas 2015 session talk] 19 | (http://www.slideshare.net/pacoid/microservices-containers-and-machine-learning) 20 | 21 | In particular, we will show production use of NLP tooling in Python, 22 | integrated with 23 | [MLlib](http://spark.apache.org/docs/latest/mllib-guide.html) 24 | (machine learning) and 25 | [GraphX](http://spark.apache.org/docs/latest/graphx-programming-guide.html) 26 | (graph algorithms) in Apache Spark. 27 | Machine learning approaches used include: 28 | [Word2Vec](https://code.google.com/p/word2vec/), 29 | [TextRank](http://web.eecs.umich.edu/~mihalcea/papers/mihalcea.emnlp04.pdf), 30 | Connected Components, Streaming K-Means, etc. 31 | 32 | Keep in mind that "One Size Fits All" is an anti-pattern, especially for 33 | Big Data tools. 34 | This project illustrates how to leverage microservices and containers to 35 | scale out the code+data components that do not fit well in Spark, Hadoop, etc. 36 | 37 | In addition to Spark, other technologies used include: 38 | [Mesos](http://mesos.apache.org/), 39 | [Docker](https://www.docker.com/), 40 | [Anaconda](http://continuum.io/downloads), 41 | [Flask](http://flask.pocoo.org/), 42 | [NLTK](http://www.nltk.org/), 43 | [TextBlob](https://textblob.readthedocs.org/en/dev/). 44 | 45 | 46 | ## Dependencies 47 | 48 | * https://github.com/opentable/docker-anaconda 49 | 50 | ```bash 51 | conda config --add channels https://conda.binstar.org/sloria 52 | conda install textblob 53 | python -m textblob.download_corpora 54 | python -m nltk.downloader -d ~/nltk_data all 55 | pip install -U textblob textblob-aptagger 56 | pip install lxml 57 | pip install python-dateutil 58 | pip install Flask 59 | ``` 60 | 61 | NLTK and TextBlob require some 62 | [data downloads](https://s3.amazonaws.com/textblob/nltk_data.tar.gz) 63 | which may also require updating the NLTK data path: 64 | 65 | ```python 66 | import nltk 67 | nltk.data.path.append("~/nltk_data/") 68 | ``` 69 | 70 | 71 | ## Running 72 | 73 | To change the project configuration, simply edit the `defaults.cfg` 74 | file. 75 | 76 | 77 | ### scrape the email list 78 | 79 | ```bash 80 | ./scrape.py data/foo.json 81 | ``` 82 | 83 | ### parse the email text 84 | 85 | ```bash 86 | ./parse.py data/foo.json parsed/foo.json 87 | ``` 88 | 89 | 90 | # What's in a name? 91 | 92 | The word [exsto](http://en.wiktionary.org/wiki/exsto) is the Latin 93 | verb meaning "to stand out", in its present active form. 94 | 95 | 96 | # Research Topics 97 | 98 | ### machine learning 99 | 100 | * [TextRank](http://web.eecs.umich.edu/~mihalcea/papers/mihalcea.emnlp04.pdf) 101 | * [Word2Vec use cases](http://www.yseam.com/blog/WV.html) 102 | * [Word2Vec vs. 
GloVe](http://radimrehurek.com/2014/12/making-sense-of-word2vec/) 103 | 104 | ### microservices and containers 105 | 106 | * [The Strengths and Weaknesses of Microservices](http://www.infoq.com/news/2014/05/microservices) 107 | * [Microservices architecture](http://martinfowler.com/articles/microservices.html) 108 | * [Adrian Crockcroft @ DockerCon](https://blog.docker.com/2014/12/dockercon-europe-keynote-state-of-the-art-in-microservices-by-adrian-cockcroft-battery-ventures/) 109 | * [Weave](https://github.com/zettio/weave) 110 | -------------------------------------------------------------------------------- /exsto/TextRank.py: -------------------------------------------------------------------------------- 1 | # TextRank, based on: 2 | # http://web.eecs.umich.edu/~mihalcea/papers/mihalcea.emnlp04.pdf 3 | 4 | from itertools import tee, izip 5 | from nltk import stem 6 | from text.blob import TextBlob as tb 7 | from textblob_aptagger import PerceptronTagger 8 | import nltk.data 9 | import numpy as np 10 | import sys 11 | 12 | 13 | TOKENIZER = nltk.data.load('tokenizers/punkt/english.pickle') 14 | TAGGER = PerceptronTagger() 15 | STEMMER = stem.porter.PorterStemmer() 16 | 17 | 18 | def pos_tag (s): 19 | """high-performance part-of-speech tagger""" 20 | global TAGGER 21 | return TAGGER.tag(s) 22 | 23 | 24 | def wrap_words (pair): 25 | """wrap each (word, tag) pair as an object with fully indexed metadata""" 26 | global STEMMER 27 | index = pair[0] 28 | result = [] 29 | for word, tag in pair[1]: 30 | word = word.lower() 31 | stem = STEMMER.stem(word) 32 | if stem == "": 33 | stem = word 34 | keep = tag in ('JJ', 'NN', 'NNS', 'NNP',) 35 | result.append({ "id": 0, "index": index, "stem": stem, "word": word, "tag": tag, "keep": keep }) 36 | index += 1 37 | return result 38 | 39 | 40 | ###################################################################### 41 | ## build a graph from raw text 42 | 43 | TEXT = """ 44 | Compatibility of systems of linear constraints over the set of natural numbers. 45 | Criteria of compatibility of a system of linear Diophantine equations, strict 46 | inequations, and nonstrict inequations are considered. Upper bounds for 47 | components of a minimal set of solutions and algorithms of construction of 48 | minimal generating sets of solutions for all types of systems are given. 49 | These criteria and the corresponding algorithms for constructing a minimal 50 | supporting set of solutions can be used in solving all the considered types 51 | systems and systems of mixed types. 
52 | """ 53 | 54 | from pyspark import SparkContext 55 | sc = SparkContext(appName="TextRank", master="local[*]") 56 | 57 | sent = sc.parallelize(TOKENIZER.tokenize(TEXT)).map(pos_tag).cache() 58 | sent.collect() 59 | 60 | base = list(np.cumsum(np.array(sent.map(len).collect()))) 61 | base.insert(0, 0) 62 | base.pop() 63 | sent_length = sc.parallelize(base) 64 | 65 | tagged_doc = sent_length.zip(sent).map(wrap_words) 66 | 67 | 68 | ###################################################################### 69 | 70 | from pyspark.sql import SQLContext, Row 71 | sqlCtx = SQLContext(sc) 72 | 73 | def enum_words (s): 74 | for word in s: 75 | yield word 76 | 77 | words = tagged_doc.flatMap(enum_words) 78 | pair_words = words.keyBy(lambda w: w["stem"]) 79 | uniq_words = words.map(lambda w: w["stem"]).distinct().zipWithUniqueId() 80 | 81 | uniq = sc.broadcast(dict(uniq_words.collect())) 82 | 83 | 84 | def id_words (pair): 85 | (key, val) = pair 86 | word = val[0] 87 | id = val[1] 88 | word["id"] = id 89 | return word 90 | 91 | id_doc = pair_words.join(uniq_words).map(id_words) 92 | id_words = id_doc.map(lambda w: (w["id"], w["index"], w["word"], w["stem"], w["tag"])) 93 | 94 | wordSchema = id_words.map(lambda p: Row(id=long(p[0]), index=int(p[1]), word=p[2], stem=p[3], tag=p[4])) 95 | wordTable = sqlCtx.inferSchema(wordSchema) 96 | 97 | wordTable.registerTempTable("word") 98 | wordTable.saveAsParquetFile("word.parquet") 99 | 100 | 101 | ###################################################################### 102 | 103 | def sliding_window (iterable, size): 104 | """apply a sliding window to produce 'size' tiles""" 105 | iters = tee(iterable, size) 106 | for i in xrange(1, size): 107 | for each in iters[i:]: 108 | next(each, None) 109 | return list(izip(*iters)) 110 | 111 | 112 | def keep_pair (pair): 113 | """filter the relevant linked word pairs""" 114 | return pair[0]["keep"] and pair[1]["keep"] and (pair[0]["word"] != pair[1]["word"]) 115 | 116 | 117 | def link_words (seq): 118 | """attempt to link words in a sentence""" 119 | return [ (seq[0], word) for word in seq[1:] ] 120 | 121 | 122 | tiled = tagged_doc.flatMap(lambda s: sliding_window(s, 3)).flatMap(link_words).filter(keep_pair) 123 | 124 | t0 = tiled.map(lambda l: (uniq.value[l[0]["stem"]], uniq.value[l[1]["stem"]],)) 125 | t1 = tiled.map(lambda l: (uniq.value[l[1]["stem"]], uniq.value[l[0]["stem"]],)) 126 | 127 | neighbors = t0.union(t1) 128 | 129 | edgeSchema = neighbors.map(lambda p: Row(n0=long(p[0]), n1=long(p[1]))) 130 | edgeTable = sqlCtx.inferSchema(edgeSchema) 131 | 132 | edgeTable.registerTempTable("edge") 133 | edgeTable.saveAsParquetFile("edge.parquet") 134 | -------------------------------------------------------------------------------- /exsto/ETL.md: -------------------------------------------------------------------------------- 1 | ## ETL in PySpark with Spark SQL 2 | 3 | Let's use PySpark and Spark SQL to prepare the data for ML and graph 4 | analysis. 5 | We can perform *data discovery* while reshaping the data for later 6 | work. 7 | These early results can help guide our deeper analysis. 8 | 9 | NB: if this ETL needs to run outside of the `bin/pyspark` shell, first 10 | set up a `SparkContext` variable: 11 | 12 | ```python 13 | from pyspark import SparkContext 14 | sc = SparkContext(appName="Exsto", master="local[*]") 15 | ``` 16 | 17 | Import the JSON data produced by the scraper and register its schema 18 | for ad-hoc SQL queries later. 
19 | Each message has the fields: 20 | `date`, `sender`, `id`, `next_thread`, `prev_thread`, `next_url`, `subject`, `text` 21 | 22 | ```python 23 | from pyspark.sql import SQLContext, Row 24 | sqlCtx = SQLContext(sc) 25 | 26 | msg = sqlCtx.jsonFile("data").cache() 27 | msg.registerTempTable("msg") 28 | ``` 29 | 30 | NB: note the persistence used for the JSON message data. 31 | We may need to unpersist at a later stage of this ETL work. 32 | 33 | ### Question: Who are the senders? 34 | 35 | Who are the people in the developer community sending email to the list? 36 | We will use this as a dimension in our analysis and reporting. 37 | Let's create a map, with a unique ID for each email address -- 38 | this will be required for the graph analysis. 39 | It may come in handy later for some 40 | [named-entity recognition](https://en.wikipedia.org/wiki/Named-entity_recognition). 41 | 42 | ```python 43 | who = msg.map(lambda x: x.sender).distinct().zipWithUniqueId() 44 | who.take(10) 45 | 46 | whoMap = who.collectAsMap() 47 | 48 | print "\nsenders" 49 | print len(whoMap) 50 | ``` 51 | 52 | ### Question: Who are the top K senders? 53 | 54 | [Apache Spark](http://spark.apache.org/) is one of the most 55 | active open source developer communities on Apache, so it 56 | will tend to have several thousand people engaged. 57 | Let's identify the most active ones. 58 | Then we can show a leaderboard and track changes in it over time. 59 | 60 | ```python 61 | from operator import add 62 | 63 | top_sender = msg.map(lambda x: (x.sender, 1,)).reduceByKey(add) \ 64 | .map(lambda (a, b): (b, a)) \ 65 | .sortByKey(0, 1) \ 66 | .map(lambda (a, b): (b, a)) 67 | 68 | print "\ntop senders" 69 | print top_sender.take(11) 70 | ``` 71 | 72 | You may notice that code... it comes from *word count*. 73 | 74 | 75 | ### Question: Which are the top K conversations? 76 | 77 | Clearly, some people discuss more on the email list than others. 78 | Let's identify *who* those people are. 79 | Later we can leverage our graph analysis to determine *what* they discuss. 80 | 81 | NB: note the use case for `groupByKey` transformations; 82 | sometimes its usage is indicated.
83 | 84 | ```python 85 | import itertools 86 | 87 | def nitems (replier, senders): 88 | for sender, g in itertools.groupby(senders): 89 | yield len(list(g)), (replier, sender,) 90 | 91 | senders = msg.map(lambda x: (x.id, x.sender,)) 92 | replies = msg.map(lambda x: (x.prev_thread, x.sender,)) 93 | 94 | convo = replies.join(senders).values() \ 95 | .filter(lambda (a, b): a != b) 96 | 97 | top_convo = convo.groupByKey() \ 98 | .flatMap(lambda (a, b): list(nitems(a, b))) \ 99 | .sortByKey(0) 100 | 101 | print "\ntop convo" 102 | print top_convo.take(10) 103 | ``` 104 | 105 | ### Prepare for Sender/Reply Graph Analysis 106 | 107 | Given the RDDs that we have created to help answer some of the 108 | questions so far, let's persist those data sets using 109 | [Parquet](http://parquet.io) -- 110 | starting with the graph of sender/message/reply: 111 | 112 | ```python 113 | edge = top_convo.map(lambda (a, b): (whoMap.get(b[0]), whoMap.get(b[1]), a,)) 114 | edgeSchema = edge.map(lambda p: Row(replier=p[0], sender=p[1], count=int(p[2]))) 115 | edgeTable = sqlCtx.inferSchema(edgeSchema) 116 | edgeTable.saveAsParquetFile("reply_edge.parquet") 117 | 118 | node = who.map(lambda (a, b): (b, a)) 119 | nodeSchema = node.map(lambda p: Row(id=int(p[0]), sender=p[1])) 120 | nodeTable = sqlCtx.inferSchema(nodeSchema) 121 | nodeTable.saveAsParquetFile("reply_node.parquet") 122 | ``` 123 | 124 | 125 | ### Prepare for TextRank Analysis per paragraph 126 | 127 | ```python 128 | def map_graf_edges (x): 129 | j = json.loads(x) 130 | 131 | for pair in j["tile"]: 132 | n0 = int(pair[0]) 133 | n1 = int(pair[1]) 134 | 135 | if n0 > 0 and n1 > 0: 136 | yield (j["id"], n0, n1,) 137 | yield (j["id"], n1, n0,) 138 | 139 | graf = sc.textFile("parsed") 140 | n = graf.flatMap(map_graf_edges).count() 141 | print "\ngraf edges", n 142 | 143 | edgeSchema = graf.map(lambda p: Row(id=p[0], node0=p[1], node1=p[2])) 144 | 145 | edgeTable = sqlCtx.inferSchema(edgeSchema) 146 | edgeTable.saveAsParquetFile("graf_edge.parquet") 147 | ``` 148 | 149 | ```python 150 | def map_graf_nodes (x): 151 | j = json.loads(x) 152 | 153 | for word in j["graf"]: 154 | yield [j["id"]] + word 155 | 156 | graf = sc.textFile("parsed") 157 | n = graf.flatMap(map_graf_nodes).count() 158 | print "\ngraf nodes", n 159 | 160 | nodeSchema = graf.map(lambda p: Row(id=p[0], node_id=p[1], raw=p[2], root=p[3], pos=p[4], keep=p[5], num=p[6])) 161 | 162 | nodeTable = sqlCtx.inferSchema(nodeSchema) 163 | nodeTable.saveAsParquetFile("graf_node.parquet") 164 | ``` 165 | -------------------------------------------------------------------------------- /exsto/test/ipad.json: -------------------------------------------------------------------------------- 1 | {"date": "2014-11-01T16:36:35+00:00", "id": "4566CED7-C0DF-4AA5-863D-F4784A99067B", "next_thread": "CAA_qdLq4ei7tzSpmWEUBMXaJYGJ7KYzNA4oNKV+a9QdbHVrjkA", "next_url": "http://mail-archives.apache.org/mod_mbox/spark-user/201411.mbox/%3cCALEj8eMRR2dO4Gmdor-4FOYZR8V6yFEtkha-Yo0sRPF70Xn5Fg@mail.gmail.com%3e", "prev_thread": "CAA_qdLqJzUPkiFkSNRLrpUBOFqCnWLLAbQuH4kbK1A956MN8bA", "sender": "Jean-Pascal Billaud ...@tellapart.com>", "subject": "Re: SparkSQL + Hive Cached Table Exception", "text": "\nGreat! Thanks.\n\nSent from my iPad\n\n> On Nov 1, 2014, at 8:35 AM, Cheng Lian wrote:\n> \n> Hi Jean,\n> \n> Thanks for reporting this. 
This is indeed a bug: some column types (Binary, Array, Map\nand Struct, and unfortunately for some reason, Boolean), a NoopColumnStats is used to collect\ncolumn statistics, which causes this issue. Filed SPARK-4182 to track this issue, will fix\nthis ASAP.\n> \n> Cheng\n> \n>> On Fri, Oct 31, 2014 at 7:04 AM, Jean-Pascal Billaud wrote:\n>> Hi,\n>> \n>> While testing SparkSQL on top of our Hive metastore, I am getting some java.lang.ArrayIndexOutOfBoundsException\nwhile reusing a cached RDD table.\n>> \n>> Basically, I have a table \"mtable\" partitioned by some \"date\" field in hive and below\nis the scala code I am running in spark-shell:\n>> \n>> val sqlContext = new org.apache.spark.sql.hive.HiveContext(sc);\n>> val rdd_mtable = sqlContext.sql(\"select * from mtable where date=20141028\");\n>> rdd_mtable.registerTempTable(\"rdd_mtable\");\n>> sqlContext.cacheTable(\"rdd_mtable\");\n>> sqlContext.sql(\"select count(*) from rdd_mtable\").collect(); <-- OK\n>> sqlContext.sql(\"select count(*) from rdd_mtable\").collect(); <-- Exception\n>> \n>> So the first collect() is working just fine, however running the second collect()\nwhich I expect use the cached RDD throws some java.lang.ArrayIndexOutOfBoundsException, see\nthe backtrace at the end of this email. It seems the columnar traversal is crashing for some\nreasons. FYI, I am using spark ToT (234de9232bcfa212317a8073c4a82c3863b36b14).\n>> \n>> java.lang.ArrayIndexOutOfBoundsException: 14\n>> \tat org.apache.spark.sql.catalyst.expressions.GenericRow.apply(Row.scala:142)\n>> \tat org.apache.spark.sql.catalyst.expressions.BoundReference.eval(BoundAttribute.scala:37)\n>> \tat org.apache.spark.sql.catalyst.expressions.Expression.n2(Expression.scala:108)\n>> \tat org.apache.spark.sql.catalyst.expressions.Add.eval(arithmetic.scala:89)\n>> \tat org.apache.spark.sql.columnar.InMemoryRelation$$anonfun$computeSizeInBytes$1.apply(InMemoryColumnarTableScan.scala:66)\n>> \tat org.apache.spark.sql.columnar.InMemoryRelation$$anonfun$computeSizeInBytes$1.apply(InMemoryColumnarTableScan.scala:66)\n>> \tat scala.collection.TraversableLike$$anonfun$map$1.apply(TraversableLike.scala:244)\n>> \tat scala.collection.TraversableLike$$anonfun$map$1.apply(TraversableLike.scala:244)\n>> \tat scala.collection.mutable.ResizableArray$class.foreach(ResizableArray.scala:59)\n>> \tat scala.collection.mutable.ArrayBuffer.foreach(ArrayBuffer.scala:47)\n>> \tat scala.collection.TraversableLike$class.map(TraversableLike.scala:244)\n>> \tat scala.collection.AbstractTraversable.map(Traversable.scala:105)\n>> \tat org.apache.spark.sql.columnar.InMemoryRelation.computeSizeInBytes(InMemoryColumnarTableScan.scala:66)\n>> \tat org.apache.spark.sql.columnar.InMemoryRelation.statistics(InMemoryColumnarTableScan.scala:87)\n>> \tat org.apache.spark.sql.columnar.InMemoryRelation.statisticsToBePropagated(InMemoryColumnarTableScan.scala:73)\n>> \tat org.apache.spark.sql.columnar.InMemoryRelation.withOutput(InMemoryColumnarTableScan.scala:147)\n>> \tat org.apache.spark.sql.CacheManager$$anonfun$useCachedData$1$$anonfun$applyOrElse$1.apply(CacheManager.scala:122)\n>> \tat org.apache.spark.sql.CacheManager$$anonfun$useCachedData$1$$anonfun$applyOrElse$1.apply(CacheManager.scala:122)\n>> \tat scala.Option.map(Option.scala:145)\n>> \tat org.apache.spark.sql.CacheManager$$anonfun$useCachedData$1.applyOrElse(CacheManager.scala:122)\n>> \tat org.apache.spark.sql.CacheManager$$anonfun$useCachedData$1.applyOrElse(CacheManager.scala:119)\n>> \tat 
org.apache.spark.sql.catalyst.trees.TreeNode.transformDown(TreeNode.scala:144)\n>> \tat org.apache.spark.sql.catalyst.trees.TreeNode$$anonfun$4.apply(TreeNode.scala:162)\n>> \tat scala.collection.Iterator$$anon$11.next(Iterator.scala:328)\n>> \tat scala.collection.Iterator$class.foreach(Iterator.scala:727)\n>> \tat scala.collection.AbstractIterator.foreach(Iterator.scala:1157)\n>> \tat scala.collection.generic.Growable$class.$plus$plus$eq(Growable.scala:48)\n>> \tat scala.collection.mutable.ArrayBuffer.$plus$plus$eq(ArrayBuffer.scala:103)\n>> \tat scala.collection.mutable.ArrayBuffer.$plus$plus$eq(ArrayBuffer.scala:47)\n>> \tat scala.collection.TraversableOnce$class.to(TraversableOnce.scala:273)\n>> \tat scala.collection.AbstractIterator.to(Iterator.scala:1157)\n>> \tat scala.collection.TraversableOnce$class.toBuffer(TraversableOnce.scala:265)\n>> \tat scala.collection.AbstractIterator.toBuffer(Iterator.scala:1157)\n>> \tat scala.collection.TraversableOnce$class.toArray(TraversableOnce.scala:252)\n>> \tat scala.collection.AbstractIterator.toArray(Iterator.scala:1157)\n>> \tat org.apache.spark.sql.catalyst.trees.TreeNode.transformChildrenDown(TreeNode.scala:191)\n>> \tat org.apache.spark.sql.catalyst.trees.TreeNode.transformDown(TreeNode.scala:147)\n>> \tat org.apache.spark.sql.CacheManager$class.useCachedData(CacheManager.scala:119)\n>> \tat org.apache.spark.sql.SQLContext.useCachedData(SQLContext.scala:49)\n>> \tat org.apache.spark.sql.SQLContext$QueryExecution.withCachedData$lzycompute(SQLContext.scala:376)\n>> \tat org.apache.spark.sql.SQLContext$QueryExecution.withCachedData(SQLContext.scala:376)\n>> \tat org.apache.spark.sql.SQLContext$QueryExecution.optimizedPlan$lzycompute(SQLContext.scala:377)\n>> \tat org.apache.spark.sql.SQLContext$QueryExecution.optimizedPlan(SQLContext.scala:377)\n>> \tat org.apache.spark.sql.SQLContext$QueryExecution.sparkPlan$lzycompute(SQLContext.scala:382)\n>> \tat org.apache.spark.sql.SQLContext$QueryExecution.sparkPlan(SQLContext.scala:380)\n>> \tat org.apache.spark.sql.SQLContext$QueryExecution.executedPlan$lzycompute(SQLContext.scala:386)\n>> \tat org.apache.spark.sql.SQLContext$QueryExecution.executedPlan(SQLContext.scala:386)\n>> \n>> Thanks,\n> \n\n"} 2 | -------------------------------------------------------------------------------- /exsto/dbc/3.Meetups.py: -------------------------------------------------------------------------------- 1 | # Databricks notebook source exported at Thu, 4 Jun 2015 02:07:58 UTC 2 | display(dbutils.fs.ls("/mnt/paco/events")) 3 | 4 | # COMMAND ---------- 5 | 6 | evt = sc.textFile("/mnt/paco/events") \ 7 | .map(lambda x: x.split("\t")) \ 8 | .filter(lambda x: x[0] != "date") 9 | 10 | # COMMAND ---------- 11 | 12 | evt.take(2) 13 | 14 | # COMMAND ---------- 15 | 16 | e = evt.map(lambda p: (p[0], p[2], p[6], p[6].find("/spark-users/"),)).filter(lambda x: x[3] > -1) 17 | e_df = e.toDF() 18 | e_df.registerTempTable("e") 19 | e_df.show() 20 | 21 | # COMMAND ---------- 22 | 23 | # MAGIC %sql 24 | # MAGIC SELECT * 25 | # MAGIC FROM e 26 | 27 | # COMMAND ---------- 28 | 29 | from pyspark.sql import Row 30 | from pyspark.sql.types import * 31 | import re 32 | 33 | def getBaseUrl (url): 34 | p = re.compile("^(http?\:\/\/.*\.meetup\.com\/[\w\-]+\/).*$") 35 | m = p.match(url) 36 | 37 | if m: 38 | return m.group(1) 39 | else: 40 | return "" 41 | 42 | def convInt (reg): 43 | try: 44 | return int(reg) 45 | except ValueError: 46 | return 0 47 | 48 | evt_schema = StructType([ 49 | StructField("date", StringType(), True), 50 | 
StructField("time", StringType(), True), 51 | StructField("title", StringType(), True), 52 | StructField("speakers", StringType(), True), 53 | StructField("affil", StringType(), True), 54 | StructField("addr", StringType(), True), 55 | StructField("url", StringType(), True), 56 | StructField("base_url", StringType(), True), 57 | StructField("reg", IntegerType(), True), 58 | StructField("city", StringType(), True) 59 | ]) 60 | 61 | evt_rdd = evt.map(lambda p: (p[0], p[1], p[2], p[3], p[4], p[5], p[6], getBaseUrl(p[6]), convInt(p[8]), p[9])) 62 | evt_df = sqlContext.createDataFrame(evt_rdd, evt_schema) 63 | evt_df.registerTempTable("events") 64 | 65 | # COMMAND ---------- 66 | 67 | # MAGIC %sql SELECT date, city, title, speakers, affil FROM events LIMIT 5 68 | 69 | # COMMAND ---------- 70 | 71 | # MAGIC %md 72 | # MAGIC # Which are the most active meetups, by number of events? 73 | 74 | # COMMAND ---------- 75 | 76 | # MAGIC %sql 77 | # MAGIC SELECT 78 | # MAGIC base_url, COUNT(*) AS num 79 | # MAGIC FROM events 80 | # MAGIC WHERE base_url <> "" 81 | # MAGIC GROUP BY base_url 82 | # MAGIC ORDER BY num DESC 83 | 84 | # COMMAND ---------- 85 | 86 | # MAGIC %md 87 | # MAGIC # Which meetups have the highest attendance? 88 | 89 | # COMMAND ---------- 90 | 91 | # ug! perhaps there's a bug in PySpark SQL that munges data in the form of 92 | # "Fubar Jones" 93 | # because only the data within quotes shows in the SQL query results 94 | 95 | def splitContact (contact): 96 | p = re.compile("^(.*)\s+\<(.*)\>.*$") 97 | m = p.match(contact) 98 | 99 | if m: 100 | return [m.group(1).replace('"', ''), m.group(2)] 101 | else: 102 | return ["", ""] 103 | 104 | # COMMAND ---------- 105 | 106 | meta_rdd = sc.textFile("/mnt/paco/meetup_metadata.tsv") \ 107 | .map(lambda x: x.split("\t")) \ 108 | .filter(lambda x: x[0] != "region") \ 109 | .map(lambda x: [x[5], x[0]] + splitContact(x[6])) 110 | 111 | meta_df = sqlContext.createDataFrame(meta_rdd, ["base_url", "region", "contact", "email"]) 112 | meta_df.registerTempTable("meta") 113 | 114 | meta_rdd.take(5) 115 | 116 | # COMMAND ---------- 117 | 118 | # MAGIC %sql 119 | # MAGIC SELECT * FROM meta WHERE contact <> "" LIMIT 5 120 | 121 | # COMMAND ---------- 122 | 123 | # MAGIC %sql 124 | # MAGIC SELECT * 125 | # MAGIC FROM events 126 | # MAGIC WHERE events.base_url = "http://www.meetup.com/spark-users/" 127 | 128 | # COMMAND ---------- 129 | 130 | # MAGIC %sql 131 | # MAGIC SELECT 132 | # MAGIC events.base_url, SUM(events.reg) AS attendance, COUNT(*) AS num, events.city, meta.region, meta.contact, meta.email 133 | # MAGIC FROM events LEFT JOIN meta ON (events.base_url = meta.base_url) 134 | # MAGIC WHERE events.base_url <> "" 135 | # MAGIC GROUP BY events.base_url, meta.region, events.city, meta.contact, meta.email 136 | # MAGIC ORDER BY attendance DESC 137 | 138 | # COMMAND ---------- 139 | 140 | # MAGIC %md 141 | # MAGIC # Which are the top cities? 142 | 143 | # COMMAND ---------- 144 | 145 | # MAGIC %sql 146 | # MAGIC SELECT 147 | # MAGIC city, COUNT(*) AS num 148 | # MAGIC FROM events 149 | # MAGIC GROUP BY city 150 | # MAGIC ORDER BY num DESC 151 | # MAGIC LIMIT 30 152 | 153 | # COMMAND ---------- 154 | 155 | # MAGIC %md 156 | # MAGIC # Which are the top company affiliations? 
157 | 158 | # COMMAND ---------- 159 | 160 | # MAGIC %sql 161 | # MAGIC SELECT 162 | # MAGIC affil, COUNT(*) AS num 163 | # MAGIC FROM events 164 | # MAGIC GROUP BY affil 165 | # MAGIC ORDER BY num DESC 166 | # MAGIC LIMIT 10 167 | 168 | # COMMAND ---------- 169 | 170 | # MAGIC %md 171 | # MAGIC # Which companies are the most engaged with community events? 172 | 173 | # COMMAND ---------- 174 | 175 | evt_df.printSchema() 176 | 177 | # COMMAND ---------- 178 | 179 | sql = """ 180 | SELECT affil, COUNT(*) AS evt_count, MIN(date) AS earliest, MAX(date) AS latest 181 | FROM events 182 | GROUP BY affil 183 | """ 184 | 185 | from dateutil import parser 186 | 187 | def days_hours_minutes (td): 188 | return float(td.days) + float(td.seconds) / 3600 + (float(td.seconds) / 60) % 60 189 | 190 | lead_rdd = sqlContext.sql(sql) \ 191 | .map(lambda x: (x[0], int(x[1]), days_hours_minutes(parser.parse(x[3]) - parser.parse(x[2])))) 192 | 193 | lead_schema = StructType([ 194 | StructField("affil", StringType(), True), 195 | StructField("count", IntegerType(), True), 196 | StructField("duration", FloatType(), True) 197 | ]) 198 | 199 | lead_df = sqlContext.createDataFrame(lead_rdd, lead_schema) 200 | lead_df.registerTempTable("leaders") 201 | 202 | # COMMAND ---------- 203 | 204 | # MAGIC %sql 205 | # MAGIC SELECT affil, count, duration 206 | # MAGIC FROM leaders 207 | # MAGIC ORDER BY count DESC 208 | # MAGIC LIMIT 10 209 | 210 | # COMMAND ---------- 211 | 212 | # MAGIC %run /_SparkCamp/Exsto/pythonUtils 213 | 214 | # COMMAND ---------- 215 | 216 | # MAGIC %md Importing usefull libraries 217 | 218 | # COMMAND ---------- 219 | 220 | from ggplot import * 221 | from pyspark.mllib.clustering import KMeans 222 | from numpy import array 223 | from math import log, exp 224 | from pyspark.mllib.linalg import DenseVector, Vectors 225 | from pyspark.sql import Row 226 | import pandas 227 | 228 | # COMMAND ---------- 229 | 230 | # MAGIC %md Functions to transform SchemaRDD to dictionary and dataframe 231 | 232 | # COMMAND ---------- 233 | 234 | def toDict(rows): 235 | return [dict(zip(r.__FIELDS__, r)) for r in rows] 236 | 237 | def toDataFrame(rows): 238 | return pandas.DataFrame(toDict(rows)) 239 | 240 | # COMMAND ---------- 241 | 242 | # MAGIC %md Functions for sampling 243 | 244 | # COMMAND ---------- 245 | 246 | def getSampleRate(d, num): 247 | count = int(d['count']) 248 | year = d['year'] 249 | ratio = 1.0 if count < num else 1.0 * num / count 250 | return (year, ratio) 251 | 252 | def getFractions(c, num): 253 | return dict([getSampleRate(d, num) for d in toDict(c)]) 254 | 255 | # COMMAND ---------- 256 | 257 | sql = """ 258 | SELECT affil, count, duration 259 | FROM leaders 260 | ORDER BY count DESC 261 | LIMIT 10 262 | """ 263 | 264 | df = sqlContext.sql(sql).toPandas() 265 | 266 | # COMMAND ---------- 267 | 268 | df.head() 269 | 270 | # COMMAND ---------- 271 | 272 | plot = ggplot(df, aes(x='duration', y='count', label='affil')) \ 273 | + geom_text(hjust=0.5, vjust=0.5) \ 274 | + geom_point() 275 | display(plot) 276 | 277 | # COMMAND ---------- 278 | 279 | # MAGIC %md 280 | # MAGIC # Who are the most frequent speakers? 
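# COMMAND ----------

# MAGIC %md _Aside:_ the sampling helpers defined earlier (`getSampleRate`, `getFractions`) don't appear to be exercised in this notebook. The next cell is an illustrative sketch of how they could feed `RDD.sampleByKey` for stratified sampling by year; the inputs are hypothetical stand-ins, not data from this notebook. The speaker counts follow right after.

# COMMAND ----------

# illustrative sketch only -- hypothetical inputs, not data from this notebook
summary = [{'year': '2013', 'count': 40}, {'year': '2014', 'count': 400}]
fractions = dict([getSampleRate(d, 100) for d in summary])   # {'2013': 1.0, '2014': 0.25}

keyed = sc.parallelize([('2013', i) for i in range(40)] + [('2014', i) for i in range(400)])
keyed.sampleByKey(False, fractions, 42).countByKey()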
281 | 282 | # COMMAND ---------- 283 | 284 | from operator import add 285 | 286 | speak_rdd = evt.flatMap(lambda x: x[3].split(", ")) \ 287 | .map(lambda x: (x, 1)).reduceByKey(add) 288 | 289 | speak_schema = StructType([ 290 | StructField("speaker", StringType(), True), 291 | StructField("count", IntegerType(), True) 292 | ]) 293 | 294 | speak_df = sqlContext.createDataFrame(speak_rdd, speak_schema) 295 | speak_df.registerTempTable("speakers") 296 | 297 | # COMMAND ---------- 298 | 299 | # MAGIC %sql 300 | # MAGIC SELECT * 301 | # MAGIC FROM speakers 302 | # MAGIC ORDER BY count DESC 303 | # MAGIC LIMIT 23 304 | -------------------------------------------------------------------------------- /exsto/exsto.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # encoding: utf-8 3 | 4 | import dateutil.parser as dp 5 | import hashlib 6 | import json 7 | import lxml.html 8 | import os 9 | import re 10 | import string 11 | import textblob 12 | import textblob_aptagger as tag 13 | import urllib 14 | 15 | DEBUG = False # True 16 | 17 | 18 | ###################################################################### 19 | ## scrape the Apache mailing list archives 20 | 21 | PAT_EMAIL_ID = re.compile("^.*\%3c(.*)\@.*$") 22 | 23 | 24 | def scrape_url (url): 25 | """get the HTML and parse it as an XML doc""" 26 | text = urllib.urlopen(url).read() 27 | text = filter(lambda x: x in string.printable, text) 28 | root = lxml.html.document_fromstring(text) 29 | 30 | return root 31 | 32 | 33 | def parse_email (root, base_url): 34 | """parse email fields from an lxml root""" 35 | global PAT_EMAIL_ID 36 | meta = {} 37 | 38 | path = "/html/head/title" 39 | meta["subject"] = root.xpath(path)[0].text 40 | 41 | path = "/html/body/table/tbody/tr[@class='from']/td[@class='right']" 42 | meta["sender"] = root.xpath(path)[0].text 43 | 44 | path = "/html/body/table/tbody/tr[@class='date']/td[@class='right']" 45 | meta["date"] = dp.parse(root.xpath(path)[0].text).isoformat() 46 | 47 | path = "/html/body/table/tbody/tr[@class='raw']/td[@class='right']/a" 48 | link = root.xpath(path)[0].get("href") 49 | meta["id"] = PAT_EMAIL_ID.match(link).group(1) 50 | 51 | path = "/html/body/table/tbody/tr[@class='contents']/td/pre" 52 | meta["text"] = root.xpath(path)[0].text 53 | 54 | # parse the optional elements 55 | 56 | path = "/html/body/table/thead/tr/th[@class='nav']/a[@title='Next by date']" 57 | refs = root.xpath(path) 58 | 59 | if len(refs) > 0: 60 | link = refs[0].get("href") 61 | meta["next_url"] = base_url + link 62 | else: 63 | meta["next_url"] = "" 64 | 65 | path = "/html/body/table/thead/tr/th[@class='nav']/a[@title='Previous by thread']" 66 | refs = root.xpath(path) 67 | 68 | if len(refs) > 0: 69 | link = refs[0].get("href") 70 | meta["prev_thread"] = PAT_EMAIL_ID.match(link).group(1) 71 | else: 72 | meta["prev_thread"] = "" 73 | 74 | path = "/html/body/table/thead/tr/th[@class='nav']/a[@title='Next by thread']" 75 | refs = root.xpath(path) 76 | 77 | if len(refs) > 0: 78 | link = refs[0].get("href") 79 | meta["next_thread"] = PAT_EMAIL_ID.match(link).group(1) 80 | else: 81 | meta["next_thread"] = "" 82 | 83 | return meta 84 | 85 | 86 | ###################################################################### 87 | ## filter the novel text versus quoted text in an email message 88 | 89 | PAT_FORWARD = re.compile("\n\-+ Forwarded message \-+\n") 90 | PAT_REPLIED = re.compile("\nOn.*\d+.*\n?wrote\:\n+\>") 91 | PAT_UNSUBSC = re.compile("\n\-+\nTo unsubscribe,.*\nFor 
additional commands,.*") 92 | 93 | 94 | def split_grafs (lines): 95 | """segment the raw text into paragraphs""" 96 | graf = [] 97 | 98 | for line in lines: 99 | line = line.strip() 100 | 101 | if len(line) < 1: 102 | if len(graf) > 0: 103 | yield "\n".join(graf) 104 | graf = [] 105 | else: 106 | graf.append(line) 107 | 108 | if len(graf) > 0: 109 | yield "\n".join(graf) 110 | 111 | 112 | def filter_quotes (text): 113 | """filter the quoted text out of a message""" 114 | global DEBUG 115 | global PAT_FORWARD, PAT_REPLIED, PAT_UNSUBSC 116 | 117 | text = filter(lambda x: x in string.printable, text) 118 | 119 | if DEBUG: 120 | print text 121 | 122 | # strip off quoted text in a forward 123 | m = PAT_FORWARD.split(text, re.M) 124 | 125 | if m and len(m) > 1: 126 | text = m[0] 127 | 128 | # strip off quoted text in a reply 129 | m = PAT_REPLIED.split(text, re.M) 130 | 131 | if m and len(m) > 1: 132 | text = m[0] 133 | 134 | # strip off any trailing unsubscription notice 135 | m = PAT_UNSUBSC.split(text, re.M) 136 | 137 | if m: 138 | text = m[0] 139 | 140 | # replace any remaining quoted text with blank lines 141 | lines = [] 142 | 143 | for line in text.split("\n"): 144 | if line.startswith(">"): 145 | lines.append("") 146 | else: 147 | lines.append(line) 148 | 149 | return list(split_grafs(lines)) 150 | 151 | 152 | def test_filter (path): 153 | """run the unit tests for known quoting styles""" 154 | global DEBUG 155 | DEBUG = True 156 | 157 | for root, dirs, files in os.walk(path): 158 | for file in files: 159 | with open(path + file, 'r') as f: 160 | line = f.readline() 161 | meta = json.loads(line) 162 | grafs = filter_quotes(meta["text"]) 163 | 164 | if not grafs or len(grafs) < 1: 165 | raise Exception("no results") 166 | else: 167 | print grafs 168 | 169 | 170 | ###################################################################### 171 | ## parse and markup text paragraphs for semantic analysis 172 | 173 | PAT_PUNCT = re.compile(r'^\W+$') 174 | PAT_SPACE = re.compile(r'\_+$') 175 | 176 | POS_KEEPS = ['v', 'n', 'j'] 177 | POS_LEMMA = ['v', 'n'] 178 | TAGGER = tag.PerceptronTagger() 179 | UNIQ_WORDS = { ".": 0 } 180 | 181 | 182 | def is_not_word (word): 183 | return PAT_PUNCT.match(word) or PAT_SPACE.match(word) 184 | 185 | 186 | def get_word_id (root): 187 | """lookup/assign a unique identify for each word""" 188 | global UNIQ_WORDS 189 | 190 | # in practice, this should use a microservice via some robust 191 | # distributed cache, e.g., Cassandra, Redis, etc. 
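    # A minimal sketch of what that could look like, assuming a running Redis
    # instance and the third-party `redis` client (names and keys below are
    # hypothetical, not part of this project):
    #
    #   r = redis.StrictRedis(host="localhost", port=6379)
    #   wid = r.get(root)
    #   if wid is None:
    #       wid = r.incr("exsto:uniq_word_counter")
    #       r.set(root, wid)
    #   return int(wid)
    #
    # The in-memory dict used below is fine for a single-process demo, but it
    # is not shared across Spark workers.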
192 | 193 | if root not in UNIQ_WORDS: 194 | UNIQ_WORDS[root] = len(UNIQ_WORDS) 195 | 196 | return UNIQ_WORDS[root] 197 | 198 | 199 | def get_tiles (graf, size=3): 200 | """generate word pairs for the TextRank graph""" 201 | graf_len = len(graf) 202 | 203 | for i in xrange(0, graf_len): 204 | w0 = graf[i] 205 | 206 | for j in xrange(i + 1, min(graf_len, i + 1 + size)): 207 | w1 = graf[j] 208 | 209 | if w0[4] == w1[4] == 1: 210 | yield (w0[0], w1[0],) 211 | 212 | 213 | def parse_graf (msg_id, text, base): 214 | """parse and markup each sentence in the given paragraph""" 215 | global DEBUG 216 | global POS_KEEPS, POS_LEMMA, TAGGER 217 | 218 | markup = [] 219 | i = base 220 | 221 | for s in textblob.TextBlob(text).sentences: 222 | graf = [] 223 | m = hashlib.sha1() 224 | 225 | pos = TAGGER.tag(str(s)) 226 | p_idx = 0 227 | w_idx = 0 228 | 229 | while p_idx < len(pos): 230 | p = pos[p_idx] 231 | 232 | if DEBUG: 233 | print "IDX", p_idx, p 234 | print "reg", is_not_word(p[0]) 235 | print " ", w_idx, len(s.words), s.words 236 | print graf 237 | 238 | if is_not_word(p[0]) or (p[1] == "SYM"): 239 | if (w_idx == len(s.words) - 1): 240 | w = p[0] 241 | t = '.' 242 | else: 243 | p_idx += 1 244 | continue 245 | elif w_idx < len(s.words): 246 | w = s.words[w_idx] 247 | t = p[1].lower()[0] 248 | w_idx += 1 249 | 250 | if t in POS_LEMMA: 251 | l = str(w.singularize().lemmatize(t)).lower() 252 | elif t != '.': 253 | l = str(w).lower() 254 | else: 255 | l = w 256 | 257 | keep = 1 if t in POS_KEEPS else 0 258 | m.update(l) 259 | 260 | id = get_word_id(l) if keep == 1 else 0 261 | graf.append((id, w, l, p[1], keep, i,)) 262 | 263 | i += 1 264 | p_idx += 1 265 | 266 | # tile the pairs for TextRank 267 | tile = list(get_tiles(graf)) 268 | 269 | #"lang": s.detect_language(), 270 | markup.append({ 271 | "id": msg_id, 272 | "size": len(graf), 273 | "sha1": m.hexdigest(), 274 | "polr": s.sentiment.polarity, 275 | "subj": s.sentiment.subjectivity, 276 | "graf": graf, 277 | "tile": tile 278 | }) 279 | 280 | return markup, i 281 | 282 | 283 | ###################################################################### 284 | ## common utilities 285 | 286 | def pretty_print (obj, indent=False): 287 | """pretty print a JSON object""" 288 | 289 | if indent: 290 | return json.dumps(obj, sort_keys=True, indent=2, separators=(',', ': ')) 291 | else: 292 | return json.dumps(obj, sort_keys=True) 293 | -------------------------------------------------------------------------------- /exsto/dbc/2.TextRank.scala: -------------------------------------------------------------------------------- 1 | // Databricks notebook source exported at Thu, 4 Jun 2015 02:07:20 UTC 2 | // MAGIC %md ## Augmented TextRank in Spark 3 | // MAGIC The following is a 4 | // MAGIC [Spark](http://spark.apache.org/) 5 | // MAGIC implementation of 6 | // MAGIC [TextRank](http://web.eecs.umich.edu/~mihalcea/papers/mihalcea.emnlp04.pdf) 7 | // MAGIC by Mihalcea, et al. 8 | // MAGIC The graph used in the algorithm is enriched by replacing the original authors' *Porter stemmer* approach with lemmatization from 9 | // MAGIC [WordNet](http://wordnet.princeton.edu/). 10 | // MAGIC 11 | // MAGIC This algorithm generates a *graph* from a text document, linking together related words, then runs 12 | // MAGIC [PageRank](http://en.wikipedia.org/wiki/PageRank) 13 | // MAGIC on that graph to determine the high-ranked keyphrases., 14 | // MAGIC Those keyphrases summarize the text document, similar to how an human editor would summarize for an academic paper. 
15 | // MAGIC 16 | // MAGIC See [https://github.com/ceteri/spark-exercises/tree/master/exsto](https://github.com/ceteri/spark-exercises/tree/master/exsto) 17 | // MAGIC and also the earlier [Hadoop implementation](https://github.com/ceteri/textrank) 18 | // MAGIC which leveraged *semantic relations* by extending the graph using 19 | // MAGIC [hypernyms](https://en.wikipedia.org/wiki/Hyponymy_and_hypernymy) from WordNet as well. 20 | // MAGIC 21 | // MAGIC First, we need to create *base RDDs* from the Parquet files that we stored in DBFS during the ETL phase... 22 | 23 | // COMMAND ---------- 24 | 25 | val edge = sqlContext.parquetFile("/mnt/paco/exsto/graph/graf_edge.parquet") 26 | edge.registerTempTable("edge") 27 | 28 | val node = sqlContext.parquetFile("/mnt/paco/exsto/graph/graf_node.parquet") 29 | node.registerTempTable("node") 30 | 31 | // COMMAND ---------- 32 | 33 | // MAGIC %md Let's pick one message as an example -- at scale we would parallelize this to run for all the messages. 34 | 35 | // COMMAND ---------- 36 | 37 | val msg_id = "CA+B-+fyrBU1yGZAYJM_u=gnBVtzB=sXoBHkhmS-6L1n8K5Hhbw" 38 | 39 | // COMMAND ---------- 40 | 41 | // MAGIC %md Our use of [GraphX](https://spark.apache.org/graphx/) requires some imports... 42 | 43 | // COMMAND ---------- 44 | 45 | import org.apache.spark.graphx._ 46 | import org.apache.spark.rdd.RDD 47 | 48 | // COMMAND ---------- 49 | 50 | // MAGIC %md Next we run a query in [Spark SQL](https://spark.apache.org/sql/) to deserialize just the fields that we need from the Parquet files to generate the graph nodes... 51 | 52 | // COMMAND ---------- 53 | 54 | val sql = """ 55 | SELECT node_id, root 56 | FROM node 57 | WHERE id='%s' AND keep=1 58 | """.format(msg_id) 59 | 60 | val n = sqlContext.sql(sql.stripMargin) 61 | 62 | // COMMAND ---------- 63 | 64 | val nodes: RDD[(Long, String)] = n.map{ p => 65 | (p(0).asInstanceOf[Long], p(1).asInstanceOf[String]) 66 | }.distinct() 67 | 68 | nodes.collect() 69 | 70 | // COMMAND ---------- 71 | 72 | // MAGIC %md Likewise for the edges in the graph... 73 | 74 | // COMMAND ---------- 75 | 76 | val sql = """ 77 | SELECT node0, node1 78 | FROM edge 79 | WHERE id='%s' 80 | """.format(msg_id) 81 | 82 | // COMMAND ---------- 83 | 84 | val e = sqlContext.sql(sql.stripMargin) 85 | 86 | val edges: RDD[Edge[Int]] = e.map{ p => 87 | Edge(p(0).asInstanceOf[Long], p(1).asInstanceOf[Long], 0) 88 | }.distinct() 89 | 90 | edges.collect() 91 | 92 | // COMMAND ---------- 93 | 94 | // MAGIC %md ### Graph Analytics 95 | // MAGIC We compose a graph from the `node` and `edge` RDDs and run [PageRank](http://spark.apache.org/docs/latest/graphx-programming-guide.html#pagerank) on it... 96 | 97 | // COMMAND ---------- 98 | 99 | val g: Graph[String, Int] = Graph(nodes, edges) 100 | val r = g.pageRank(0.0001).vertices 101 | 102 | // COMMAND ---------- 103 | 104 | // MAGIC %md Save the resulting ranks for each word of interest... 105 | 106 | // COMMAND ---------- 107 | 108 | case class Rank(id: Int, rank: Double, word: String) 109 | 110 | val rank = r.join(nodes).map { 111 | case (node_id, (rank, word)) => Rank(node_id.toInt, rank, word) 112 | } 113 | 114 | rank.toDF().registerTempTable("rank") 115 | 116 | // COMMAND ---------- 117 | 118 | // MAGIC %sql 119 | // MAGIC SELECT word, id, rank 120 | // MAGIC FROM rank 121 | // MAGIC ORDER BY rank DESC 122 | 123 | // COMMAND ---------- 124 | 125 | // MAGIC %md 126 | // MAGIC Okay, from purely a keyword perspective we've got some rankings. 
However, to make these useful, we need to go back to the sequence of the words in the text and pull out the top-ranked phrases overall. We'll create `rankMap` to use for that... 127 | 128 | // COMMAND ---------- 129 | 130 | val rankMap = rank.map(r => (r.id, r.rank)).collectAsMap() 131 | 132 | // COMMAND ---------- 133 | 134 | // MAGIC %md ### Email Summarization: Extracting Key Phrases 135 | // MAGIC Next we got back to the parsed text and use the *TextRank* rankings to extract key phrases for each email message. 136 | // MAGIC 137 | // MAGIC First, let's examing the parsed text for the example message... 138 | 139 | // COMMAND ---------- 140 | 141 | val parsed = sqlContext.jsonFile("/mnt/paco/exsto/parsed/") 142 | parsed.registerTempTable("parsed") 143 | 144 | // COMMAND ---------- 145 | 146 | parsed.printSchema 147 | 148 | // COMMAND ---------- 149 | 150 | // MAGIC %sql 151 | // MAGIC SELECT graf, tile, size, polr, subj 152 | // MAGIC FROM parsed 153 | // MAGIC WHERE id='CA+B-+fyrBU1yGZAYJM_u=gnBVtzB=sXoBHkhmS-6L1n8K5Hhbw' 154 | 155 | // COMMAND ---------- 156 | 157 | // MAGIC %md Fortunately, the stored Parquet files have that data available in an efficient way... 158 | 159 | // COMMAND ---------- 160 | 161 | node.printSchema 162 | 163 | // COMMAND ---------- 164 | 165 | val sql = """ 166 | SELECT num, node_id, raw, pos, keep 167 | FROM node 168 | WHERE id='%s' 169 | ORDER BY num ASC 170 | """.format(msg_id) 171 | 172 | val para = sqlContext.sql(sql) 173 | 174 | // COMMAND ---------- 175 | 176 | // MAGIC %md The parsed text for the given message looks like the following sequence... 177 | 178 | // COMMAND ---------- 179 | 180 | val paraSeq = para 181 | .map(r => (r(0).asInstanceOf[Int], r(1).asInstanceOf[Long], r(2).toString, r(3).toString, r(4).asInstanceOf[Int])) 182 | .collect 183 | .toSeq 184 | 185 | // COMMAND ---------- 186 | 187 | // MAGIC %md We define a function to extract key phrases from that sequence... 188 | 189 | // COMMAND ---------- 190 | 191 | // use in parallelized version 192 | 193 | def extractPhrases (s: Seq[(Int, Long, String, String, Int)]): Seq[(String, Double)] = { 194 | var last_idx: Int = -1 195 | var span: List[String] = List() 196 | var rank_sum: Double = 0.0 197 | var noun_count: Int = 0 198 | var phrases: collection.mutable.Map[String, Double] = collection.mutable.Map() 199 | 200 | s.foreach { row => 201 | val(w_idx, node_id, word, pos, keep) = row 202 | 203 | if (keep == 1) { 204 | if (w_idx - last_idx > 1) { 205 | if (noun_count > 0) phrases += (span.mkString(" ").toLowerCase() -> rank_sum) 206 | 207 | span = List() 208 | rank_sum = 0.0 209 | noun_count = 0 210 | } 211 | 212 | val rank = rankMap.get(node_id.toInt).getOrElse(0.0).asInstanceOf[Float] 213 | //println(w_idx, node_id, word, pos, rank) 214 | 215 | last_idx = w_idx 216 | span = span :+ word 217 | rank_sum += rank 218 | 219 | if (pos.startsWith("N")) noun_count += 1 220 | } 221 | } 222 | 223 | if (noun_count > 0) phrases += (span.mkString(" ").toLowerCase() -> rank_sum) 224 | 225 | // normalize the ranks 226 | val sum = phrases.values.reduceLeft[Double](_ + _) 227 | val norm_ranks: collection.mutable.Map[String, Double] = collection.mutable.Map() 228 | 229 | phrases foreach {case (phrase, rank) => norm_ranks += (phrase -> rank / sum)} 230 | norm_ranks.toSeq 231 | } 232 | 233 | // COMMAND ---------- 234 | 235 | // MAGIC %md Now let's create an RDD from the extracted phrases and use SQL to show the results... 
236 | 237 | // COMMAND ---------- 238 | 239 | case class Phrase(phrase: String, norm_rank: Double) 240 | 241 | val phraseRdd = sc.parallelize(extractPhrases(paraSeq)).map(p => Phrase(p._1, p._2)).toDF() 242 | phraseRdd.registerTempTable("phrase") 243 | 244 | // COMMAND ---------- 245 | 246 | // MAGIC %sql 247 | // MAGIC SELECT * FROM phrase 248 | // MAGIC ORDER BY norm_rank DESC 249 | 250 | // COMMAND ---------- 251 | 252 | // MAGIC %md ### Evaluation 253 | // MAGIC How do those results compare with what a human reader might extract from the message text? 254 | 255 | // COMMAND ---------- 256 | 257 | // MAGIC %sql 258 | // MAGIC SELECT text 259 | // MAGIC FROM msg 260 | // MAGIC WHERE id='CA+B-+fyrBU1yGZAYJM_u=gnBVtzB=sXoBHkhmS-6L1n8K5Hhbw' 261 | 262 | // COMMAND ---------- 263 | 264 | // MAGIC %md Just for kicks, let's compare the results of *TextRank* with the *term frequencies* that would result from a **WordCount** ... 265 | 266 | // COMMAND ---------- 267 | 268 | para.map(x => (x(2).asInstanceOf[String].toLowerCase(), 1)) 269 | .reduceByKey(_ + _) 270 | .map(item => item.swap) 271 | .sortByKey(false, 1) 272 | .map(item => item.swap) 273 | .collect 274 | .foreach(println) 275 | 276 | // COMMAND ---------- 277 | 278 | // MAGIC %md Um, yeah. So that happened. 279 | // MAGIC 280 | // MAGIC That's why you probably want to clean-up and enrich the results of text analytics before other algorithms consume them downstream as *features*. 281 | // MAGIC Otherwise, `GIGO` as they say. 282 | -------------------------------------------------------------------------------- /exsto/dbc/1.ETL_python.py: -------------------------------------------------------------------------------- 1 | # Databricks notebook source exported at Thu, 4 Jun 2015 02:03:35 UTC 2 | # MAGIC %md 3 | # MAGIC ## ETL in PySpark with Spark SQL 4 | # MAGIC 5 | # MAGIC Let's use PySpark and Spark SQL to prepare the data for ML and graph 6 | # MAGIC analysis. 7 | # MAGIC We can perform *data discovery* while reshaping the data for later 8 | # MAGIC work. 9 | # MAGIC These early results can help guide our deeper analysis. 10 | # MAGIC 11 | # MAGIC See also: overview of how to use this data in 12 | # MAGIC [Exsto: ETL in PySpark with Spark SQL](https://github.com/ceteri/spark-exercises/blob/master/exsto/ETL.md) 13 | 14 | # COMMAND ---------- 15 | 16 | # MAGIC %md 17 | # MAGIC Import the JSON data produced by the scraper and register its schema 18 | # MAGIC for ad-hoc SQL queries later. 19 | # MAGIC Each message has the fields: 20 | # MAGIC `date`, `sender`, `id`, `next_thread`, `prev_thread`, `next_url`, `subject`, `text` 21 | 22 | # COMMAND ---------- 23 | 24 | msg = sqlContext.jsonFile("/mnt/paco/exsto/original/").cache() 25 | msg.registerTempTable("msg") 26 | msg.count() 27 | 28 | # COMMAND ---------- 29 | 30 | # MAGIC %md 31 | # MAGIC NB: persistence gets used to cache the JSON message data. 32 | # MAGIC We may need to unpersist at a later stage of this ETL work. 33 | 34 | # COMMAND ---------- 35 | 36 | msg.first() 37 | 38 | # COMMAND ---------- 39 | 40 | msg.printSchema() 41 | 42 | # COMMAND ---------- 43 | 44 | # MAGIC %md ### Question: Who are the senders? 45 | # MAGIC 46 | # MAGIC Who are the people in the developer community sending email to the list? 47 | # MAGIC We will use this repeatedly as a dimension in our analysis and reporting. 48 | # MAGIC 49 | # MAGIC Let's create a map, with a unique ID for each email address -- along with an inverse lookup. 50 | # MAGIC This will be required for the graph analysis later. 
51 | # MAGIC It may also come in handy for resolving some 52 | # MAGIC [named-entity recognition](https://en.wikipedia.org/wiki/Named-entity_recognition) 53 | # MAGIC issues, i.e., cleaning up the data where people may be using multiple email addresses. 54 | # MAGIC 55 | # MAGIC Note that we use that map as a [broadcast variable](http://spark.apache.org/docs/latest/programming-guide.html#broadcast-variables). 56 | 57 | # COMMAND ---------- 58 | 59 | who = msg.map(lambda x: x.sender).distinct().zipWithUniqueId() 60 | who_dict = who.collectAsMap() 61 | 62 | whoMap = sc.broadcast(who_dict) 63 | whoInv = sc.broadcast({v: k for k, v in who_dict.items()}) 64 | 65 | print "senders:", len(whoMap.value) 66 | print whoMap.value 67 | 68 | # COMMAND ---------- 69 | 70 | # MAGIC %md ### Question: Who are the top K senders? 71 | # MAGIC 72 | # MAGIC [Apache Spark](http://spark.apache.org/) is one of the most 73 | # MAGIC active open source developer communities on Apache, so it 74 | # MAGIC will tend to have several thousand people engaged. 75 | # MAGIC 76 | # MAGIC Let's identify the most active ones. 77 | # MAGIC Then we can show a leaderboard and track changes in it over time. 78 | 79 | # COMMAND ---------- 80 | 81 | from operator import add 82 | 83 | top_sender = msg.map(lambda x: (x.sender, 1,)).reduceByKey(add) \ 84 | .map(lambda (a, b): (b, a)) \ 85 | .sortByKey(0, 1) \ 86 | .map(lambda (a, b): (b, a)) 87 | 88 | print "top senders:", top_sender.take(11) 89 | 90 | # COMMAND ---------- 91 | 92 | # MAGIC %md 93 | # MAGIC Did you notice anything familiar about that code? 94 | # MAGIC It comes from _word count_. 95 | # MAGIC 96 | # MAGIC Alternatively, let's take a look at how to create that leaderboard using SQL... 97 | 98 | # COMMAND ---------- 99 | 100 | # MAGIC %sql 101 | # MAGIC SELECT sender, COUNT(id) AS msg_count, MIN(date) AS earliest, MAX(date) AS latest 102 | # MAGIC FROM msg 103 | # MAGIC GROUP BY sender 104 | # MAGIC ORDER BY msg_count DESC 105 | # MAGIC LIMIT 25 106 | 107 | # COMMAND ---------- 108 | 109 | # MAGIC %md It would be interesting to break that down a bit, and see how the *count* of messages sent compares with the *duration* of time in which the sender was engaged with the email list... 110 | 111 | # COMMAND ---------- 112 | 113 | from dateutil import parser 114 | from pyspark.sql import Row 115 | from pyspark.sql.types import * 116 | 117 | def days_hours_minutes (td): 118 | return float(td.days) + float(td.seconds) / 3600 + (float(td.seconds) / 60) % 60 119 | 120 | sql = """ 121 | SELECT sender, COUNT(id) AS msg_count, MIN(date) AS earliest, MAX(date) AS latest 122 | FROM msg 123 | GROUP BY sender 124 | """ 125 | 126 | leaders = sqlContext.sql(sql) \ 127 | .map(lambda x: (x[0], int(x[1]), days_hours_minutes(parser.parse(x[3]) - parser.parse(x[2])))) 128 | 129 | fields = [StructField("sender", StringType(), True), StructField("count", IntegerType(), True), StructField("duration", FloatType(), True)] 130 | schema = StructType(fields) 131 | 132 | leadTable = sqlContext.createDataFrame(leaders, schema) 133 | leadTable.registerTempTable("leaders") 134 | 135 | # COMMAND ---------- 136 | 137 | # MAGIC %sql 138 | # MAGIC SELECT sender, count, duration 139 | # MAGIC FROM leaders 140 | # MAGIC ORDER BY count DESC 141 | # MAGIC LIMIT 30 142 | 143 | # COMMAND ---------- 144 | 145 | # MAGIC %md Let's try to learn more about the structure of relationships among the people conversing on the list... 
146 | 147 | # COMMAND ---------- 148 | 149 | # MAGIC %sql 150 | # MAGIC SELECT c0.subject, c0.sender, c1.sender AS receiver 151 | # MAGIC FROM msg c0 JOIN msg c1 ON c0.id = c1.prev_thread 152 | # MAGIC LIMIT 10 153 | 154 | # COMMAND ---------- 155 | 156 | # MAGIC %md Sometimes people answer their own messages... 157 | 158 | # COMMAND ---------- 159 | 160 | # MAGIC %sql 161 | # MAGIC SELECT c0.subject, c0.sender, c0.id 162 | # MAGIC FROM msg c0 JOIN msg c1 ON c0.id = c1.prev_thread 163 | # MAGIC WHERE c0.sender = c1.sender 164 | # MAGIC LIMIT 10 165 | 166 | # COMMAND ---------- 167 | 168 | # MAGIC %sql 169 | # MAGIC SELECT COUNT(*) 170 | # MAGIC FROM msg c0 JOIN msg c1 ON c0.id = c1.prev_thread 171 | # MAGIC WHERE c0.sender = c1.sender 172 | 173 | # COMMAND ---------- 174 | 175 | # MAGIC %sql 176 | # MAGIC SELECT COUNT(c0.subject) AS num, c0.sender AS sender 177 | # MAGIC FROM msg c0 JOIN msg c1 ON c0.id = c1.prev_thread 178 | # MAGIC WHERE c0.sender = c1.sender 179 | # MAGIC GROUP BY c0.sender 180 | # MAGIC ORDER BY num DESC 181 | 182 | # COMMAND ---------- 183 | 184 | # MAGIC %md ### Question: Which are the top K conversations? 185 | # MAGIC 186 | # MAGIC Clearly, some people discuss over the email list more than others. 187 | # MAGIC 188 | # MAGIC Let's identify *who* those people are. 189 | # MAGIC We can also determine who they in turn discuss with the most. 190 | # MAGIC Later we can leverage our graph analysis to determine *what* they discuss. 191 | # MAGIC 192 | # MAGIC Here is a great place to make use of our `whoMap` broadcast variable, since it's better to be sorting integers at scale than to need to sort many strings. 193 | # MAGIC 194 | # MAGIC Note the use case for the [groupByKey](http://spark.apache.org/docs/latest/programming-guide.html#transformations) transformation. 195 | # MAGIC Generally we [prefer to avoid it]((http://databricks.gitbooks.io/databricks-spark-knowledge-base/content/best_practices/prefer_reducebykey_over_groupbykey.html), but this is a good illustration of where its usage is indicated. 196 | 197 | # COMMAND ---------- 198 | 199 | import itertools 200 | 201 | senders = msg.map(lambda x: (x.id, whoMap.value.get(x.sender),)).distinct() 202 | replies = msg.map(lambda x: (x.prev_thread, whoMap.value.get(x.sender),)).distinct() 203 | 204 | convo = replies.join(senders).values() \ 205 | .filter(lambda (a, b): a != b) 206 | 207 | def nitems (replier, senders): 208 | for sender, g in itertools.groupby(senders): 209 | yield len(list(g)), (replier, sender,) 210 | 211 | # COMMAND ---------- 212 | 213 | top_convo = convo.groupByKey() \ 214 | .flatMap(lambda (a, b): list(nitems(a, b))) \ 215 | .sortByKey(0) 216 | 217 | print "top convo", top_convo.take(10) 218 | 219 | # COMMAND ---------- 220 | 221 | conv = top_convo.map(lambda p: (p[0], whoInv.value.get(p[1][0]), whoInv.value.get(p[1][1]),)) 222 | 223 | fields = [StructField("count", IntegerType(), True), StructField("sender", StringType(), True), StructField("replier", StringType(), True)] 224 | schema = StructType(fields) 225 | 226 | convTable = sqlContext.createDataFrame(conv, schema) 227 | convTable.registerTempTable("conv") 228 | 229 | # COMMAND ---------- 230 | 231 | # MAGIC %sql 232 | # MAGIC SELECT count, sender, replier 233 | # MAGIC FROM conv 234 | # MAGIC ORDER BY count DESC 235 | 236 | # COMMAND ---------- 237 | 238 | # MAGIC %md Just curious... how many "dead end" threads during that period? In other words, how many messages that had no replies or where the senders answered themselves? 
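# COMMAND ----------

# MAGIC %md _Aside, before answering that:_ the `groupByKey` note above deserves a concrete counterpart. Here is a rough sketch of how the per-`(replier, sender)` counts could be computed with `reduceByKey` instead, avoiding the shuffle of full groups. It assumes the `convo` RDD defined earlier and is illustrative only; the next cell returns to the dead-end thread count.

# COMMAND ----------

# illustrative alternative to the groupByKey/itertools approach above
from operator import add

top_convo_alt = convo.map(lambda pair: (pair, 1)) \
    .reduceByKey(add) \
    .map(lambda kv: (kv[1], kv[0])) \
    .sortByKey(0)

print "top convo (alt)", top_convo_alt.take(10)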
239 | 240 | # COMMAND ---------- 241 | 242 | msg.count() - conv.map(lambda x: x[0]).sum() 243 | 244 | # COMMAND ---------- 245 | 246 | # MAGIC %md ### Prepare for Sender/Reply Graph Analysis 247 | # MAGIC 248 | # MAGIC Given the RDDs that we have created to help answer some of the 249 | # MAGIC questions so far, let's persist those data sets using 250 | # MAGIC [Parquet](http://parquet.io) -- 251 | # MAGIC starting with the graph of sender/message/reply: 252 | 253 | # COMMAND ---------- 254 | 255 | dbutils.fs.rm("/mnt/paco/exsto/graph/reply_edge.parquet", True) 256 | dbutils.fs.rm("/mnt/paco/exsto/graph/reply_node.parquet", True) 257 | 258 | # COMMAND ---------- 259 | 260 | edge = top_convo.map(lambda (a, b): (long(b[0]), long(b[1]), a,)) 261 | 262 | fields = [StructField("replier", LongType(), True), StructField("sender", LongType(), True), StructField("count", IntegerType(), True)] 263 | schema = StructType(fields) 264 | 265 | edgeTable = sqlContext.createDataFrame(edge, schema) 266 | edgeTable.saveAsParquetFile("/mnt/paco/exsto/graph/reply_edge.parquet") 267 | 268 | node = who.map(lambda (a, b): (long(b), a)) 269 | 270 | fields = [StructField("id", LongType(), True), StructField("sender", StringType(), True)] 271 | schema = StructType(fields) 272 | 273 | nodeTable = sqlContext.createDataFrame(node, schema) 274 | nodeTable.saveAsParquetFile("/mnt/paco/exsto/graph/reply_node.parquet") 275 | 276 | 277 | # COMMAND ---------- 278 | 279 | node.take(2) 280 | 281 | 282 | # COMMAND ---------- 283 | 284 | # MAGIC %md ### Prepare for TextRank Analysis per paragraph 285 | # MAGIC 286 | # MAGIC We will load this as text, not as JSON, as a convenient way to parse the nested tuples. 287 | 288 | # COMMAND ---------- 289 | 290 | graf = sc.textFile("/mnt/paco/exsto/parsed/").cache() 291 | graf.first() 292 | 293 | # COMMAND ---------- 294 | 295 | import json 296 | 297 | def map_graf_edges (x): 298 | j = json.loads(x) 299 | 300 | for pair in j["tile"]: 301 | n0 = long(pair[0]) 302 | n1 = long(pair[1]) 303 | 304 | if n0 > 0 and n1 > 0: 305 | yield (j["id"], n0, n1,) 306 | yield (j["id"], n1, n0,) 307 | 308 | grafEdge = graf.flatMap(map_graf_edges) 309 | 310 | print "graf edges", grafEdge.count() 311 | 312 | # COMMAND ---------- 313 | 314 | grafEdge.take(5) 315 | 316 | # COMMAND ---------- 317 | 318 | def map_graf_nodes (x): 319 | j = json.loads(x) 320 | 321 | for word in j["graf"]: 322 | yield [j["id"]] + word 323 | 324 | grafNode = graf.flatMap(map_graf_nodes) 325 | 326 | print "graf nodes", grafNode.count() 327 | 328 | # COMMAND ---------- 329 | 330 | grafNode.take(5) 331 | 332 | # COMMAND ---------- 333 | 334 | dbutils.fs.rm("/mnt/paco/exsto/graph/graf_edge.parquet", True) 335 | dbutils.fs.rm("/mnt/paco/exsto/graph/graf_node.parquet", True) 336 | 337 | # COMMAND ---------- 338 | 339 | fields = [StructField("id", StringType(), True), StructField("node0", LongType(), True), StructField("node1", LongType(), True)] 340 | schema = StructType(fields) 341 | 342 | grafEdgeTable = sqlContext.createDataFrame(grafEdge, schema) 343 | grafEdgeTable.saveAsParquetFile("/mnt/paco/exsto/graph/graf_edge.parquet") 344 | 345 | # COMMAND ---------- 346 | 347 | fields = [StructField("id", StringType(), True), StructField("node_id", LongType(), True), StructField("raw", StringType(), True), StructField("root", StringType(), True), StructField("pos", StringType(), True), StructField("keep", IntegerType(), True), StructField("num", IntegerType(), True)] 348 | schema = StructType(fields) 349 | 350 | grafNodeTable = 
sqlContext.createDataFrame(grafNode, schema) 351 | grafNodeTable.saveAsParquetFile("/mnt/paco/exsto/graph/graf_node.parquet") 352 | 353 | # COMMAND ---------- 354 | 355 | 356 | -------------------------------------------------------------------------------- /LICENSE.md: -------------------------------------------------------------------------------- 1 | ## creative commons 2 | 3 | # Attribution-NonCommercial-ShareAlike 4.0 International 4 | 5 | Creative Commons Corporation (“Creative Commons”) is not a law firm and does not provide legal services or legal advice. Distribution of Creative Commons public licenses does not create a lawyer-client or other relationship. Creative Commons makes its licenses and related information available on an “as-is” basis. Creative Commons gives no warranties regarding its licenses, any material licensed under their terms and conditions, or any related information. Creative Commons disclaims all liability for damages resulting from their use to the fullest extent possible. 6 | 7 | ### Using Creative Commons Public Licenses 8 | 9 | Creative Commons public licenses provide a standard set of terms and conditions that creators and other rights holders may use to share original works of authorship and other material subject to copyright and certain other rights specified in the public license below. The following considerations are for informational purposes only, are not exhaustive, and do not form part of our licenses. 10 | 11 | * __Considerations for licensors:__ Our public licenses are intended for use by those authorized to give the public permission to use material in ways otherwise restricted by copyright and certain other rights. Our licenses are irrevocable. Licensors should read and understand the terms and conditions of the license they choose before applying it. Licensors should also secure all rights necessary before applying our licenses so that the public can reuse the material as expected. Licensors should clearly mark any material not subject to the license. This includes other CC-licensed material, or material used under an exception or limitation to copyright. [More considerations for licensors](http://wiki.creativecommons.org/Considerations_for_licensors_and_licensees#Considerations_for_licensors). 12 | 13 | * __Considerations for the public:__ By using one of our public licenses, a licensor grants the public permission to use the licensed material under specified terms and conditions. If the licensor’s permission is not necessary for any reason–for example, because of any applicable exception or limitation to copyright–then that use is not regulated by the license. Our licenses grant only permissions under copyright and certain other rights that a licensor has authority to grant. Use of the licensed material may still be restricted for other reasons, including because others have copyright or other rights in the material. A licensor may make special requests, such as asking that all changes be marked or described. Although not required by our licenses, you are encouraged to respect those requests where reasonable. [More considerations for the public](http://wiki.creativecommons.org/Considerations_for_licensors_and_licensees#Considerations_for_licensees). 
14 | 15 | ## Creative Commons Attribution-NonCommercial-ShareAlike 4.0 International Public License 16 | 17 | By exercising the Licensed Rights (defined below), You accept and agree to be bound by the terms and conditions of this Creative Commons Attribution-NonCommercial-ShareAlike 4.0 International Public License ("Public License"). To the extent this Public License may be interpreted as a contract, You are granted the Licensed Rights in consideration of Your acceptance of these terms and conditions, and the Licensor grants You such rights in consideration of benefits the Licensor receives from making the Licensed Material available under these terms and conditions. 18 | 19 | ### Section 1 – Definitions. 20 | 21 | a. __Adapted Material__ means material subject to Copyright and Similar Rights that is derived from or based upon the Licensed Material and in which the Licensed Material is translated, altered, arranged, transformed, or otherwise modified in a manner requiring permission under the Copyright and Similar Rights held by the Licensor. For purposes of this Public License, where the Licensed Material is a musical work, performance, or sound recording, Adapted Material is always produced where the Licensed Material is synched in timed relation with a moving image. 22 | 23 | b. __Adapter's License__ means the license You apply to Your Copyright and Similar Rights in Your contributions to Adapted Material in accordance with the terms and conditions of this Public License. 24 | 25 | c. __BY-NC-SA Compatible License__ means a license listed at [creativecommons.org/compatiblelicenses](http://creativecommons.org/compatiblelicenses), approved by Creative Commons as essentially the equivalent of this Public License. 26 | 27 | d. __Copyright and Similar Rights__ means copyright and/or similar rights closely related to copyright including, without limitation, performance, broadcast, sound recording, and Sui Generis Database Rights, without regard to how the rights are labeled or categorized. For purposes of this Public License, the rights specified in Section 2(b)(1)-(2) are not Copyright and Similar Rights. 28 | 29 | e. __Effective Technological Measures__ means those measures that, in the absence of proper authority, may not be circumvented under laws fulfilling obligations under Article 11 of the WIPO Copyright Treaty adopted on December 20, 1996, and/or similar international agreements. 30 | 31 | f. __Exceptions and Limitations__ means fair use, fair dealing, and/or any other exception or limitation to Copyright and Similar Rights that applies to Your use of the Licensed Material. 32 | 33 | g. __License Elements__ means the license attributes listed in the name of a Creative Commons Public License. The License Elements of this Public License are Attribution, NonCommercial, and ShareAlike. 34 | 35 | h. __Licensed Material__ means the artistic or literary work, database, or other material to which the Licensor applied this Public License. 36 | 37 | i. __Licensed Rights__ means the rights granted to You subject to the terms and conditions of this Public License, which are limited to all Copyright and Similar Rights that apply to Your use of the Licensed Material and that the Licensor has authority to license. 38 | 39 | h. __Licensor__ means the individual(s) or entity(ies) granting rights under this Public License. 40 | 41 | i. __NonCommercial__ means not primarily intended for or directed towards commercial advantage or monetary compensation. 
For purposes of this Public License, the exchange of the Licensed Material for other material subject to Copyright and Similar Rights by digital file-sharing or similar means is NonCommercial provided there is no payment of monetary compensation in connection with the exchange. 42 | 43 | j. __Share__ means to provide material to the public by any means or process that requires permission under the Licensed Rights, such as reproduction, public display, public performance, distribution, dissemination, communication, or importation, and to make material available to the public including in ways that members of the public may access the material from a place and at a time individually chosen by them. 44 | 45 | k. __Sui Generis Database Rights__ means rights other than copyright resulting from Directive 96/9/EC of the European Parliament and of the Council of 11 March 1996 on the legal protection of databases, as amended and/or succeeded, as well as other essentially equivalent rights anywhere in the world. 46 | 47 | l. __You__ means the individual or entity exercising the Licensed Rights under this Public License. Your has a corresponding meaning. 48 | 49 | ### Section 2 – Scope. 50 | 51 | a. ___License grant.___ 52 | 53 | 1. Subject to the terms and conditions of this Public License, the Licensor hereby grants You a worldwide, royalty-free, non-sublicensable, non-exclusive, irrevocable license to exercise the Licensed Rights in the Licensed Material to: 54 | 55 | A. reproduce and Share the Licensed Material, in whole or in part, for NonCommercial purposes only; and 56 | 57 | B. produce, reproduce, and Share Adapted Material for NonCommercial purposes only. 58 | 59 | 2. __Exceptions and Limitations.__ For the avoidance of doubt, where Exceptions and Limitations apply to Your use, this Public License does not apply, and You do not need to comply with its terms and conditions. 60 | 61 | 3. __Term.__ The term of this Public License is specified in Section 6(a). 62 | 63 | 4. __Media and formats; technical modifications allowed.__ The Licensor authorizes You to exercise the Licensed Rights in all media and formats whether now known or hereafter created, and to make technical modifications necessary to do so. The Licensor waives and/or agrees not to assert any right or authority to forbid You from making technical modifications necessary to exercise the Licensed Rights, including technical modifications necessary to circumvent Effective Technological Measures. For purposes of this Public License, simply making modifications authorized by this Section 2(a)(4) never produces Adapted Material. 64 | 65 | 5. __Downstream recipients.__ 66 | 67 | A. __Offer from the Licensor – Licensed Material.__ Every recipient of the Licensed Material automatically receives an offer from the Licensor to exercise the Licensed Rights under the terms and conditions of this Public License. 68 | 69 | B. __Additional offer from the Licensor – Adapted Material.__ Every recipient of Adapted Material from You automatically receives an offer from the Licensor to exercise the Licensed Rights in the Adapted Material under the conditions of the Adapter’s License You apply. 70 | 71 | C. __No downstream restrictions.__ You may not offer or impose any additional or different terms or conditions on, or apply any Effective Technological Measures to, the Licensed Material if doing so restricts exercise of the Licensed Rights by any recipient of the Licensed Material. 72 | 73 | 6. 
__No endorsement.__ Nothing in this Public License constitutes or may be construed as permission to assert or imply that You are, or that Your use of the Licensed Material is, connected with, or sponsored, endorsed, or granted official status by, the Licensor or others designated to receive attribution as provided in Section 3(a)(1)(A)(i). 74 | 75 | b. ___Other rights.___ 76 | 77 | 1. Moral rights, such as the right of integrity, are not licensed under this Public License, nor are publicity, privacy, and/or other similar personality rights; however, to the extent possible, the Licensor waives and/or agrees not to assert any such rights held by the Licensor to the limited extent necessary to allow You to exercise the Licensed Rights, but not otherwise. 78 | 79 | 2. Patent and trademark rights are not licensed under this Public License. 80 | 81 | 3. To the extent possible, the Licensor waives any right to collect royalties from You for the exercise of the Licensed Rights, whether directly or through a collecting society under any voluntary or waivable statutory or compulsory licensing scheme. In all other cases the Licensor expressly reserves any right to collect such royalties, including when the Licensed Material is used other than for NonCommercial purposes. 82 | 83 | ### Section 3 – License Conditions. 84 | 85 | Your exercise of the Licensed Rights is expressly made subject to the following conditions. 86 | 87 | a. ___Attribution.___ 88 | 89 | 1. If You Share the Licensed Material (including in modified form), You must: 90 | 91 | A. retain the following if it is supplied by the Licensor with the Licensed Material: 92 | 93 | i. identification of the creator(s) of the Licensed Material and any others designated to receive attribution, in any reasonable manner requested by the Licensor (including by pseudonym if designated); 94 | 95 | ii. a copyright notice; 96 | 97 | iii. a notice that refers to this Public License; 98 | 99 | iv. a notice that refers to the disclaimer of warranties; 100 | 101 | v. a URI or hyperlink to the Licensed Material to the extent reasonably practicable; 102 | 103 | B. indicate if You modified the Licensed Material and retain an indication of any previous modifications; and 104 | 105 | C. indicate the Licensed Material is licensed under this Public License, and include the text of, or the URI or hyperlink to, this Public License. 106 | 107 | 2. You may satisfy the conditions in Section 3(a)(1) in any reasonable manner based on the medium, means, and context in which You Share the Licensed Material. For example, it may be reasonable to satisfy the conditions by providing a URI or hyperlink to a resource that includes the required information. 108 | 109 | 3. If requested by the Licensor, You must remove any of the information required by Section 3(a)(1)(A) to the extent reasonably practicable. 110 | 111 | b. ___ShareAlike.___ 112 | 113 | In addition to the conditions in Section 3(a), if You Share Adapted Material You produce, the following conditions also apply. 114 | 115 | 1. The Adapter’s License You apply must be a Creative Commons license with the same License Elements, this version or later, or a BY-NC-SA Compatible License. 116 | 117 | 2. You must include the text of, or the URI or hyperlink to, the Adapter's License You apply. You may satisfy this condition in any reasonable manner based on the medium, means, and context in which You Share Adapted Material. 118 | 119 | 3. 
You may not offer or impose any additional or different terms or conditions on, or apply any Effective Technological Measures to, Adapted Material that restrict exercise of the rights granted under the Adapter's License You apply. 120 | 121 | ### Section 4 – Sui Generis Database Rights. 122 | 123 | Where the Licensed Rights include Sui Generis Database Rights that apply to Your use of the Licensed Material: 124 | 125 | a. for the avoidance of doubt, Section 2(a)(1) grants You the right to extract, reuse, reproduce, and Share all or a substantial portion of the contents of the database for NonCommercial purposes only; 126 | 127 | b. if You include all or a substantial portion of the database contents in a database in which You have Sui Generis Database Rights, then the database in which You have Sui Generis Database Rights (but not its individual contents) is Adapted Material, including for purposes of Section 3(b); and 128 | 129 | c. You must comply with the conditions in Section 3(a) if You Share all or a substantial portion of the contents of the database. 130 | 131 | For the avoidance of doubt, this Section 4 supplements and does not replace Your obligations under this Public License where the Licensed Rights include other Copyright and Similar Rights. 132 | 133 | ### Section 5 – Disclaimer of Warranties and Limitation of Liability. 134 | 135 | a. __Unless otherwise separately undertaken by the Licensor, to the extent possible, the Licensor offers the Licensed Material as-is and as-available, and makes no representations or warranties of any kind concerning the Licensed Material, whether express, implied, statutory, or other. This includes, without limitation, warranties of title, merchantability, fitness for a particular purpose, non-infringement, absence of latent or other defects, accuracy, or the presence or absence of errors, whether or not known or discoverable. Where disclaimers of warranties are not allowed in full or in part, this disclaimer may not apply to You.__ 136 | 137 | b. __To the extent possible, in no event will the Licensor be liable to You on any legal theory (including, without limitation, negligence) or otherwise for any direct, special, indirect, incidental, consequential, punitive, exemplary, or other losses, costs, expenses, or damages arising out of this Public License or use of the Licensed Material, even if the Licensor has been advised of the possibility of such losses, costs, expenses, or damages. Where a limitation of liability is not allowed in full or in part, this limitation may not apply to You.__ 138 | 139 | c. The disclaimer of warranties and limitation of liability provided above shall be interpreted in a manner that, to the extent possible, most closely approximates an absolute disclaimer and waiver of all liability. 140 | 141 | ### Section 6 – Term and Termination. 142 | 143 | a. This Public License applies for the term of the Copyright and Similar Rights licensed here. However, if You fail to comply with this Public License, then Your rights under this Public License terminate automatically. 144 | 145 | b. Where Your right to use the Licensed Material has terminated under Section 6(a), it reinstates: 146 | 147 | 1. automatically as of the date the violation is cured, provided it is cured within 30 days of Your discovery of the violation; or 148 | 149 | 2. upon express reinstatement by the Licensor. 
150 | 151 | For the avoidance of doubt, this Section 6(b) does not affect any right the Licensor may have to seek remedies for Your violations of this Public License. 152 | 153 | c. For the avoidance of doubt, the Licensor may also offer the Licensed Material under separate terms or conditions or stop distributing the Licensed Material at any time; however, doing so will not terminate this Public License. 154 | 155 | d. Sections 1, 5, 6, 7, and 8 survive termination of this Public License. 156 | 157 | ### Section 7 – Other Terms and Conditions. 158 | 159 | a. The Licensor shall not be bound by any additional or different terms or conditions communicated by You unless expressly agreed. 160 | 161 | b. Any arrangements, understandings, or agreements regarding the Licensed Material not stated herein are separate from and independent of the terms and conditions of this Public License. 162 | 163 | ### Section 8 – Interpretation. 164 | 165 | a. For the avoidance of doubt, this Public License does not, and shall not be interpreted to, reduce, limit, restrict, or impose conditions on any use of the Licensed Material that could lawfully be made without permission under this Public License. 166 | 167 | b. To the extent possible, if any provision of this Public License is deemed unenforceable, it shall be automatically reformed to the minimum extent necessary to make it enforceable. If the provision cannot be reformed, it shall be severed from this Public License without affecting the enforceability of the remaining terms and conditions. 168 | 169 | c. No term or condition of this Public License will be waived and no failure to comply consented to unless expressly agreed to by the Licensor. 170 | 171 | d. Nothing in this Public License constitutes or may be interpreted as a limitation upon, or waiver of, any privileges and immunities that apply to the Licensor or You, including from the legal processes of any jurisdiction or authority. 172 | 173 | ``` 174 | Creative Commons is not a party to its public licenses. Notwithstanding, Creative Commons may elect to apply one of its public licenses to material it publishes and in those instances will be considered the “Licensor.” The text of the Creative Commons public licenses is dedicated to the public domain under the [CC0 Public Domain Dedication](http://creativecommons.org/licenses/by-nc-sa/4.0/creativecommons.org/zero/1.0/legalcode). Except for the limited purpose of indicating that material is shared under a Creative Commons public license or as otherwise permitted by the Creative Commons policies published at [creativecommons.org/policies](http://creativecommons.org/policies), Creative Commons does not authorize the use of the trademark “Creative Commons” or any other trademark or logo of Creative Commons without its prior written consent including, without limitation, in connection with any unauthorized modifications to any of its public licenses or any other arrangements, understandings, or agreements concerning use of licensed material. For the avoidance of doubt, this paragraph does not form part of the public licenses. 175 | 176 | Creative Commons may be contacted at [creativecommons.org](http://creativecommons.org/). 177 | ``` 178 | --------------------------------------------------------------------------------