├── .github └── workflows │ └── spark-alchemy-workflow.yml ├── .gitignore ├── .sdkmanrc ├── DEVELOPMENT.md ├── LICENSE ├── NOTICE ├── README.md ├── VERSION ├── alchemy └── src │ ├── main │ └── scala │ │ ├── com │ │ └── swoop │ │ │ └── alchemy │ │ │ ├── spark │ │ │ └── expressions │ │ │ │ ├── FunctionRegistration.scala │ │ │ │ ├── NativeFunctionRegistration.scala │ │ │ │ ├── WithHelper.scala │ │ │ │ └── hll │ │ │ │ ├── BoundHLL.scala │ │ │ │ ├── CardinalityHashFunction.scala │ │ │ │ ├── HLLFunctionRegistration.scala │ │ │ │ ├── HLLFunctions.scala │ │ │ │ ├── Implementation.scala │ │ │ │ └── package.scala │ │ │ └── utils │ │ │ └── AnyExtensions.scala │ │ └── org │ │ └── apache │ │ └── spark │ │ └── sql │ │ └── EncapsulationViolator.scala │ └── test │ ├── resources │ └── log4j.properties │ └── scala │ ├── com │ └── swoop │ │ ├── alchemy │ │ └── spark │ │ │ └── expressions │ │ │ └── hll │ │ │ ├── CardinalityHashFunctionTest.scala │ │ │ ├── HLLFunctionsTest.scala │ │ │ └── PostgresInteropTest.scala │ │ └── test_utils │ │ └── SparkSessionSpec.scala │ └── org │ └── apache │ └── spark │ ├── DebugFilesystem.scala │ ├── SparkFunSuite.scala │ ├── SparkTestUtilsEncapsulationViolator.scala │ └── sql │ ├── catalyst │ └── plans │ │ └── PlanTest.scala │ └── test │ ├── SQLHelper.scala │ ├── SQLTestData.scala │ ├── SQLTestUtils.scala │ ├── SharedSparkSessionBase.scala │ └── TestSparkSession.scala ├── build.sbt ├── codeStyleSettings.xml ├── docker-compose.yml ├── docs ├── docs │ ├── docs.md │ └── index.md └── src │ └── main │ └── resources │ └── site │ ├── images │ ├── favicon.png │ ├── navbar_brand.png │ ├── navbar_brand2x.png │ ├── sidebar_brand.png │ ├── sidebar_brand2x.png │ ├── swoop-icon_130x130.png │ └── swoop-icon_80x80.png │ ├── scripts │ └── automenu.js │ └── styles │ └── overrides.css └── project ├── build.properties └── plugins.sbt /.github/workflows/spark-alchemy-workflow.yml: -------------------------------------------------------------------------------- 1 | name: spark-alchemy workflow 2 | 3 | on: 4 | push: 5 | branches: 6 | - '*' 7 | - '!gh-pages' 8 | 9 | env: 10 | DOCKERIZE_VERSION: v0.6.1 11 | SASS_VERSION: "1.1.1" 12 | 13 | TERM: dumb 14 | AWS_REGION: us-east-1 15 | AWS_DEFAULT_REGION: us-east-1 16 | SWOOP_VERSION_FILE: VERSION 17 | JVM_OPTS: "-Xms2048m -Xmx2048m -Xss128m -XX:+CMSClassUnloadingEnabled -XX:MaxMetaspaceSize=1024m" 18 | 19 | jobs: 20 | test: 21 | name: Test 22 | runs-on: ubuntu-latest 23 | if: "!contains(github.event.head_commit.message, '[ci skip]')" 24 | services: 25 | postgres: 26 | image: swoopinc/postgres-hll:11 27 | ports: 28 | - 5432:5432 29 | steps: 30 | - uses: actions/checkout@v2 31 | 32 | - name: Install dockerize 33 | run: | 34 | cd ${{ runner.temp }} 35 | wget https://github.com/jwilder/dockerize/releases/download/$DOCKERIZE_VERSION/dockerize-linux-amd64-$DOCKERIZE_VERSION.tar.gz 36 | sudo tar -C /usr/local/bin -xzvf dockerize-linux-amd64-$DOCKERIZE_VERSION.tar.gz 37 | rm dockerize-linux-amd64-$DOCKERIZE_VERSION.tar.gz 38 | 39 | - name: Wait for postgres 40 | run: dockerize -wait tcp://localhost:5432 -timeout 1m 41 | 42 | - name: Cache SBT ivy cache 43 | uses: actions/cache@v1 44 | with: 45 | path: ~/.ivy2/cache 46 | key: ${{ runner.os }}-sbt-ivy-cache-${{ hashFiles('**/build.sbt') }} 47 | - name: Cache SBT 48 | uses: actions/cache@v1 49 | with: 50 | path: ~/.sbt 51 | key: ${{ runner.os }}-sbt-${{ hashFiles('**/build.sbt') }} 52 | - name: Cache coursier 53 | uses: actions/cache@v1 54 | with: 55 | path: ~/.cache/coursier 56 | key: ${{ runner.os }}-coursier-${{ 
hashFiles('**/build.sbt') }} 57 | 58 | # "cat /dev/null |" prevents sbt from running in interactive mode. One of many amazing 59 | # hacks to get sbt working in a sane manner. 60 | - name: sbt test 61 | run: cat /dev/null | sbt test 62 | 63 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | *.class 2 | *.log 3 | 4 | # sbt specific 5 | .cache 6 | .history 7 | .lib/ 8 | dist/* 9 | target/ 10 | lib_managed/ 11 | src_managed/ 12 | project/boot/ 13 | project/plugins/project/ 14 | 15 | # Scala-IDE specific 16 | .scala_dependencies 17 | .worksheet 18 | 19 | #Markdown editing 20 | .Ulysses-favorites.plist 21 | 22 | #IntelliJ 23 | .idea 24 | 25 | metastore_db/ 26 | tmp/ 27 | spark-warehouse/ 28 | 29 | .DS_Store 30 | -------------------------------------------------------------------------------- /.sdkmanrc: -------------------------------------------------------------------------------- 1 | # Enable auto-env through the sdkman_auto_env config 2 | # Add key=value pairs of SDKs to use below 3 | java=8.0.345-zulu 4 | sbt=1.6.2 5 | scala=2.12.17 6 | -------------------------------------------------------------------------------- /DEVELOPMENT.md: -------------------------------------------------------------------------------- 1 | # Development 2 | 3 | ## Local tests 4 | 5 | To run the `PostgresInteropTest` you need a working Docker 6 | environment. On macOS, that means having Docker Desktop installed and 7 | running. Then run `docker-compose up` in the repository root to start a 8 | Postgres server. 9 | 10 | ## Release Process 11 | 12 | 1. Develop new code on feature branches. 13 | 14 | 1. After development, testing, and code review, merge changes into the `master` branch. 15 | 16 | 1. When ready to deploy a new release, merge and push changes from `master` to the `release` branch. The CI pipeline will then: 17 | 18 | * Build the project 19 | * Run tests 20 | * Deploy artifacts to Maven 21 | * Publish the microsite to GitHub Pages 22 | * Create a new release on the [GitHub project release page](https://github.com/swoop-inc/spark-alchemy/releases) 23 | 24 | ### Project Version Numbers 25 | 26 | * The `VERSION` file in the root of the project contains the version number that SBT will use for the `spark-alchemy` project. 27 | * The format should follow [Semantic Versioning](https://semver.org/) with the patch number matching the CI build number when deploying new releases. 28 | * During deployment, the CI pipeline will read the MAJOR and MINOR version numbers from the `VERSION` file, but substitute the build number into the PATCH portion. In other words, if project developers wish to change the MAJOR or MINOR version numbers of the `spark-alchemy` project, they can simply change them in the `VERSION` file. 29 | * During local development and when checked into Git, the version number defined in the `VERSION` file should end with the `-SNAPSHOT` string. 30 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Apache License 2 | Version 2.0, January 2004 3 | http://www.apache.org/licenses/ 4 | 5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 6 | 7 | 1. Definitions. 8 | 9 | "License" shall mean the terms and conditions for use, reproduction, 10 | and distribution as defined by Sections 1 through 9 of this document.
11 | 12 | "Licensor" shall mean the copyright owner or entity authorized by 13 | the copyright owner that is granting the License. 14 | 15 | "Legal Entity" shall mean the union of the acting entity and all 16 | other entities that control, are controlled by, or are under common 17 | control with that entity. For the purposes of this definition, 18 | "control" means (i) the power, direct or indirect, to cause the 19 | direction or management of such entity, whether by contract or 20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 21 | outstanding shares, or (iii) beneficial ownership of such entity. 22 | 23 | "You" (or "Your") shall mean an individual or Legal Entity 24 | exercising permissions granted by this License. 25 | 26 | "Source" form shall mean the preferred form for making modifications, 27 | including but not limited to software source code, documentation 28 | source, and configuration files. 29 | 30 | "Object" form shall mean any form resulting from mechanical 31 | transformation or translation of a Source form, including but 32 | not limited to compiled object code, generated documentation, 33 | and conversions to other media types. 34 | 35 | "Work" shall mean the work of authorship, whether in Source or 36 | Object form, made available under the License, as indicated by a 37 | copyright notice that is included in or attached to the work 38 | (an example is provided in the Appendix below). 39 | 40 | "Derivative Works" shall mean any work, whether in Source or Object 41 | form, that is based on (or derived from) the Work and for which the 42 | editorial revisions, annotations, elaborations, or other modifications 43 | represent, as a whole, an original work of authorship. For the purposes 44 | of this License, Derivative Works shall not include works that remain 45 | separable from, or merely link (or bind by name) to the interfaces of, 46 | the Work and Derivative Works thereof. 47 | 48 | "Contribution" shall mean any work of authorship, including 49 | the original version of the Work and any modifications or additions 50 | to that Work or Derivative Works thereof, that is intentionally 51 | submitted to Licensor for inclusion in the Work by the copyright owner 52 | or by an individual or Legal Entity authorized to submit on behalf of 53 | the copyright owner. For the purposes of this definition, "submitted" 54 | means any form of electronic, verbal, or written communication sent 55 | to the Licensor or its representatives, including but not limited to 56 | communication on electronic mailing lists, source code control systems, 57 | and issue tracking systems that are managed by, or on behalf of, the 58 | Licensor for the purpose of discussing and improving the Work, but 59 | excluding communication that is conspicuously marked or otherwise 60 | designated in writing by the copyright owner as "Not a Contribution." 61 | 62 | "Contributor" shall mean Licensor and any individual or Legal Entity 63 | on behalf of whom a Contribution has been received by Licensor and 64 | subsequently incorporated within the Work. 65 | 66 | 2. Grant of Copyright License. Subject to the terms and conditions of 67 | this License, each Contributor hereby grants to You a perpetual, 68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 69 | copyright license to reproduce, prepare Derivative Works of, 70 | publicly display, publicly perform, sublicense, and distribute the 71 | Work and such Derivative Works in Source or Object form. 72 | 73 | 3. Grant of Patent License. 
Subject to the terms and conditions of 74 | this License, each Contributor hereby grants to You a perpetual, 75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 76 | (except as stated in this section) patent license to make, have made, 77 | use, offer to sell, sell, import, and otherwise transfer the Work, 78 | where such license applies only to those patent claims licensable 79 | by such Contributor that are necessarily infringed by their 80 | Contribution(s) alone or by combination of their Contribution(s) 81 | with the Work to which such Contribution(s) was submitted. If You 82 | institute patent litigation against any entity (including a 83 | cross-claim or counterclaim in a lawsuit) alleging that the Work 84 | or a Contribution incorporated within the Work constitutes direct 85 | or contributory patent infringement, then any patent licenses 86 | granted to You under this License for that Work shall terminate 87 | as of the date such litigation is filed. 88 | 89 | 4. Redistribution. You may reproduce and distribute copies of the 90 | Work or Derivative Works thereof in any medium, with or without 91 | modifications, and in Source or Object form, provided that You 92 | meet the following conditions: 93 | 94 | (a) You must give any other recipients of the Work or 95 | Derivative Works a copy of this License; and 96 | 97 | (b) You must cause any modified files to carry prominent notices 98 | stating that You changed the files; and 99 | 100 | (c) You must retain, in the Source form of any Derivative Works 101 | that You distribute, all copyright, patent, trademark, and 102 | attribution notices from the Source form of the Work, 103 | excluding those notices that do not pertain to any part of 104 | the Derivative Works; and 105 | 106 | (d) If the Work includes a "NOTICE" text file as part of its 107 | distribution, then any Derivative Works that You distribute must 108 | include a readable copy of the attribution notices contained 109 | within such NOTICE file, excluding those notices that do not 110 | pertain to any part of the Derivative Works, in at least one 111 | of the following places: within a NOTICE text file distributed 112 | as part of the Derivative Works; within the Source form or 113 | documentation, if provided along with the Derivative Works; or, 114 | within a display generated by the Derivative Works, if and 115 | wherever such third-party notices normally appear. The contents 116 | of the NOTICE file are for informational purposes only and 117 | do not modify the License. You may add Your own attribution 118 | notices within Derivative Works that You distribute, alongside 119 | or as an addendum to the NOTICE text from the Work, provided 120 | that such additional attribution notices cannot be construed 121 | as modifying the License. 122 | 123 | You may add Your own copyright statement to Your modifications and 124 | may provide additional or different license terms and conditions 125 | for use, reproduction, or distribution of Your modifications, or 126 | for any such Derivative Works as a whole, provided Your use, 127 | reproduction, and distribution of the Work otherwise complies with 128 | the conditions stated in this License. 129 | 130 | 5. Submission of Contributions. Unless You explicitly state otherwise, 131 | any Contribution intentionally submitted for inclusion in the Work 132 | by You to the Licensor shall be under the terms and conditions of 133 | this License, without any additional terms or conditions. 
134 | Notwithstanding the above, nothing herein shall supersede or modify 135 | the terms of any separate license agreement you may have executed 136 | with Licensor regarding such Contributions. 137 | 138 | 6. Trademarks. This License does not grant permission to use the trade 139 | names, trademarks, service marks, or product names of the Licensor, 140 | except as required for reasonable and customary use in describing the 141 | origin of the Work and reproducing the content of the NOTICE file. 142 | 143 | 7. Disclaimer of Warranty. Unless required by applicable law or 144 | agreed to in writing, Licensor provides the Work (and each 145 | Contributor provides its Contributions) on an "AS IS" BASIS, 146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 147 | implied, including, without limitation, any warranties or conditions 148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 149 | PARTICULAR PURPOSE. You are solely responsible for determining the 150 | appropriateness of using or redistributing the Work and assume any 151 | risks associated with Your exercise of permissions under this License. 152 | 153 | 8. Limitation of Liability. In no event and under no legal theory, 154 | whether in tort (including negligence), contract, or otherwise, 155 | unless required by applicable law (such as deliberate and grossly 156 | negligent acts) or agreed to in writing, shall any Contributor be 157 | liable to You for damages, including any direct, indirect, special, 158 | incidental, or consequential damages of any character arising as a 159 | result of this License or out of the use or inability to use the 160 | Work (including but not limited to damages for loss of goodwill, 161 | work stoppage, computer failure or malfunction, or any and all 162 | other commercial damages or losses), even if such Contributor 163 | has been advised of the possibility of such damages. 164 | 165 | 9. Accepting Warranty or Additional Liability. While redistributing 166 | the Work or Derivative Works thereof, You may choose to offer, 167 | and charge a fee for, acceptance of support, warranty, indemnity, 168 | or other liability obligations and/or rights consistent with this 169 | License. However, in accepting such obligations, You may act only 170 | on Your own behalf and on Your sole responsibility, not on behalf 171 | of any other Contributor, and only if You agree to indemnify, 172 | defend, and hold each Contributor harmless for any liability 173 | incurred by, or claims asserted against, such Contributor by reason 174 | of your accepting any such warranty or additional liability. 175 | 176 | END OF TERMS AND CONDITIONS 177 | 178 | APPENDIX: How to apply the Apache License to your work. 179 | 180 | To apply the Apache License to your work, attach the following 181 | boilerplate notice, with the fields enclosed by brackets "[]" 182 | replaced with your own identifying information. (Don't include 183 | the brackets!) The text should be enclosed in the appropriate 184 | comment syntax for the file format. We also recommend that a 185 | file or class name and description of purpose be included on the 186 | same "printed page" as the copyright notice for easier 187 | identification within third-party archives. 188 | 189 | Copyright [yyyy] [name of copyright owner] 190 | 191 | Licensed under the Apache License, Version 2.0 (the "License"); 192 | you may not use this file except in compliance with the License. 
193 | You may obtain a copy of the License at 194 | 195 | http://www.apache.org/licenses/LICENSE-2.0 196 | 197 | Unless required by applicable law or agreed to in writing, software 198 | distributed under the License is distributed on an "AS IS" BASIS, 199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 200 | See the License for the specific language governing permissions and 201 | limitations under the License. 202 | -------------------------------------------------------------------------------- /NOTICE: -------------------------------------------------------------------------------- 1 | ------------------------------------------------------------------------------------ 2 | This product bundles various third-party components under other open source licenses. 3 | This section summarizes those components and their licenses. 4 | 5 | 6 | Apache Software Foundation License 2.0 7 | -------------------------------------- 8 | alchemy/src/test/scala/org/apache/spark/* 9 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # spark-alchemy 2 | 3 | Spark Alchemy is a collection of open-source Spark tools & frameworks that have made the data engineering and 4 | data science teams at [Swoop](https://www.swoop.com) highly productive in our demanding petabyte-scale environment 5 | with rich data (thousands of columns). 6 | 7 | ## Supported languages 8 | 9 | While `spark-alchemy`, like Spark itself, is written in Scala, much of its functionality, such as interoperable HyperLogLog functions, can be used from other Spark-supported languages such as SparkSQL and [Python](#for-python-developers). 10 | 11 | ## Installation 12 | 13 | Add the following to your `libraryDependencies` in SBT: 14 | 15 | ```scala 16 | libraryDependencies += "com.swoop" %% "spark-alchemy" % "1.2.1" 17 | ``` 18 | 19 | You can find all released versions [here](https://repo1.maven.org/maven2/com/swoop/spark-alchemy_2.12/). 20 | 21 | Some use cases, such as interoperability with PySpark, may require assembling a fat JAR of `spark-alchemy`. To assemble, run `sbt assembly`. To skip tests during assembly, run `sbt 'set sbt.Keys.test in assembly := {}' assembly` instead. 22 | 23 | ## For Spark users 24 | 25 | - Native [HyperLogLog functions](../../wiki/Spark-HyperLogLog-Functions) that offer fast, reaggregatable approximate distinct counting capabilities far beyond those in OSS Spark, with interoperability with Postgres and even JavaScript. Just like Spark's own native functions, once registered with Spark they can be used from SparkSQL, Python, etc. 26 | 27 | ## For Spark framework developers 28 | 29 | - Helpers for [native function registration](../../wiki/Spark-Native-Functions) 30 | 31 | - Look at [`SparkSessionSpec`](alchemy/src/test/scala/com/swoop/test_utils/SparkSessionSpec.scala) as an example of how to reuse advanced Spark testing functionality from OSS Spark. 32 | 33 | ## For Python developers 34 | 35 | - See [HyperLogLog functions](../../wiki/Spark-HyperLogLog-Functions) for an example of how `spark-alchemy` HLL functions can be registered for use through PySpark.
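
## HLL quick example

A minimal sketch of the end-to-end HLL workflow, combining the registration helper and the SQL functions defined in this repository. The `events` view and its `day` and `user_id` columns are hypothetical, and `spark` is assumed to be the active `SparkSession`:

```scala
import com.swoop.alchemy.spark.expressions.hll.HLLFunctionRegistration

// Make hll_init_agg, hll_merge, hll_cardinality, etc. callable from SQL
HLLFunctionRegistration.registerFunctions(spark)

// Pre-aggregate each day into a reusable binary sketch,
// then reaggregate across days and estimate the distinct count
spark.sql("""
  SELECT hll_cardinality(hll_merge(sketch)) AS approx_distinct_users
  FROM (
    SELECT day, hll_init_agg(user_id, 0.05) AS sketch
    FROM events
    GROUP BY day
  )
""").show()
```

Because the per-group sketches are ordinary binary columns, they can be persisted and merged later, which is what makes the counts reaggregatable.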
36 | 37 | ## What we hope to open source in the future, if we have the bandwidth 38 | 39 | - Configuration Addressable Production (CAP), Automatic Lifecycle Management (ALM) and Just-in-time Dependency Resolution 40 | (JDR) as outlined in our Spark+AI Summit talk [Unafraid of Change: Optimizing ETL, ML, and AI in Fast-Paced Environments](https://databricks.com/session/unafraid-of-change-optimizing-etl-ml-ai-in-fast-paced-environments). 41 | 42 | - Utilities that make [Delta Lake](https://delta.io) development substantially more productive. 43 | 44 | - Hundreds of productivity-enhancing extensions to the core user-level data types: `Column`, `Dataset`, `SparkSession`, etc. 45 | 46 | - Data discovery and cleansing tools we use to ingest and clean up large amounts of dirty data from third parties. 47 | 48 | - Cross-cluster named lock manager, which simplifies data production by removing the need for workflow servers much of the time. 49 | 50 | - `case class` code generation from Spark schema, with easy implementation customization. 51 | 52 | - Tools for deploying Spark ML pipelines to production. 53 | 54 | ## Development 55 | 56 | Build the docs microsite: 57 | 58 | ```sbt "project docs" makeMicrosite``` 59 | 60 | Run the docs microsite locally (from the `docs/target/site` folder): 61 | 62 | ``` 63 | jekyll serve -b /spark-alchemy 64 | ``` 65 | 66 | [More details](https://47degrees.github.io/sbt-microsites/) 67 | 68 | ## More from Swoop 69 | 70 | - [spark-records](https://github.com/swoop-inc/spark-records): bulletproof Spark jobs with fast root cause analysis in the case of failures 71 | 72 | ## Community & contributing 73 | 74 | Contributions and feedback of any kind are welcome. Please create an issue and/or a pull request. 75 | 76 | Spark Alchemy is maintained by the team at [Swoop](https://www.swoop.com). If you'd like to contribute to our open-source efforts, whether by joining our team or from your own company, let us know at `spark-interest at swoop dot com`. 77 | 78 | ## License 79 | 80 | `spark-alchemy` is Copyright © 2018-2020 [Swoop, Inc.](https://www.swoop.com) It is free software, and may be redistributed under the terms of the LICENSE.
81 | -------------------------------------------------------------------------------- /VERSION: -------------------------------------------------------------------------------- 1 | 1.2.1 2 | -------------------------------------------------------------------------------- /alchemy/src/main/scala/com/swoop/alchemy/spark/expressions/FunctionRegistration.scala: -------------------------------------------------------------------------------- 1 | package com.swoop.alchemy.spark.expressions 2 | 3 | import org.apache.spark.sql.SparkSession 4 | 5 | trait FunctionRegistration { 6 | def registerFunctions(spark: SparkSession): Unit 7 | } 8 | -------------------------------------------------------------------------------- /alchemy/src/main/scala/com/swoop/alchemy/spark/expressions/NativeFunctionRegistration.scala: -------------------------------------------------------------------------------- 1 | package com.swoop.alchemy.spark.expressions 2 | 3 | import org.apache.spark.sql.EncapsulationViolator.createAnalysisException 4 | import org.apache.spark.sql.SparkSession 5 | import org.apache.spark.sql.catalyst.FunctionIdentifier 6 | import org.apache.spark.sql.catalyst.analysis.FunctionRegistry 7 | import org.apache.spark.sql.catalyst.expressions.{Expression, ExpressionDescription, ExpressionInfo, RuntimeReplaceable} 8 | 9 | import scala.reflect.ClassTag 10 | import scala.util.{Failure, Success, Try} 11 | 12 | // based on Spark's FunctionRegistry @ossSpark 13 | trait NativeFunctionRegistration extends FunctionRegistration { 14 | 15 | type FunctionBuilder = Seq[Expression] => Expression 16 | 17 | def expressions: Map[String, (ExpressionInfo, FunctionBuilder)] 18 | 19 | 20 | def registerFunctions(fr: FunctionRegistry): Unit = { 21 | expressions.foreach { case (name, (info, builder)) => fr.registerFunction(FunctionIdentifier(name), info, builder) } 22 | } 23 | 24 | def registerFunctions(spark: SparkSession): Unit = { 25 | registerFunctions(spark.sessionState.functionRegistry) 26 | } 27 | 28 | /** See usage above. */ 29 | protected def expression[T <: Expression](name: String) 30 | (implicit tag: ClassTag[T]): (String, (ExpressionInfo, FunctionBuilder)) = { 31 | 32 | // For `RuntimeReplaceable`, skip the constructor with most arguments, which is the main 33 | // constructor and contains non-parameter `child` and should not be used as function builder. 34 | val constructors = if (classOf[RuntimeReplaceable].isAssignableFrom(tag.runtimeClass)) { 35 | val all = tag.runtimeClass.getConstructors 36 | val maxNumArgs = all.map(_.getParameterCount).max 37 | all.filterNot(_.getParameterCount == maxNumArgs) 38 | } else { 39 | tag.runtimeClass.getConstructors 40 | } 41 | // See if we can find a constructor that accepts Seq[Expression] 42 | val varargCtor = constructors.find(_.getParameterTypes.toSeq == Seq(classOf[Seq[_]])) 43 | val builder = (expressions: Seq[Expression]) => { 44 | if (varargCtor.isDefined) { 45 | // If there is an apply method that accepts Seq[Expression], use that one. 46 | Try(varargCtor.get.newInstance(expressions).asInstanceOf[Expression]) match { 47 | case Success(e) => e 48 | case Failure(e) => 49 | // the exception is an invocation exception. To get a meaningful message, we need the 50 | // cause. 51 | throw createAnalysisException(e.getCause.getMessage) 52 | } 53 | } else { 54 | // Otherwise, find a constructor method that matches the number of arguments, and use that. 
55 | val params = Seq.fill(expressions.size)(classOf[Expression]) 56 | val f = constructors.find(_.getParameterTypes.toSeq == params).getOrElse { 57 | throw createAnalysisException(s"Invalid number of arguments for function $name") 58 | } 59 | Try(f.newInstance(expressions: _*).asInstanceOf[Expression]) match { 60 | case Success(e) => e 61 | case Failure(e) => 62 | // the exception is an invocation exception. To get a meaningful message, we need the 63 | // cause. 64 | throw createAnalysisException(e.getCause.getMessage) 65 | } 66 | } 67 | } 68 | 69 | (name, (expressionInfo[T](name), builder)) 70 | } 71 | 72 | /** 73 | * Creates an [[ExpressionInfo]] for the function as defined by expression T using the given name. 74 | */ 75 | protected def expressionInfo[T <: Expression : ClassTag](name: String): ExpressionInfo = { 76 | val clazz = scala.reflect.classTag[T].runtimeClass 77 | val df = clazz.getAnnotation(classOf[ExpressionDescription]) 78 | if (df != null) { 79 | new ExpressionInfo(clazz.getCanonicalName, null, name, df.usage(), df.extended()) 80 | } else { 81 | new ExpressionInfo(clazz.getCanonicalName, name) 82 | } 83 | } 84 | 85 | } 86 | -------------------------------------------------------------------------------- /alchemy/src/main/scala/com/swoop/alchemy/spark/expressions/WithHelper.scala: -------------------------------------------------------------------------------- 1 | package com.swoop.alchemy.spark.expressions 2 | 3 | import org.apache.spark.sql.Column 4 | import org.apache.spark.sql.catalyst.expressions.Expression 5 | import org.apache.spark.sql.catalyst.expressions.aggregate.AggregateFunction 6 | 7 | trait WithHelper { 8 | def withExpr(expr: Expression): Column = new Column(expr) 9 | 10 | def withAggregateFunction( 11 | func: AggregateFunction, 12 | isDistinct: Boolean = false): Column = { 13 | new Column(func.toAggregateExpression(isDistinct)) 14 | } 15 | } 16 | -------------------------------------------------------------------------------- /alchemy/src/main/scala/com/swoop/alchemy/spark/expressions/hll/BoundHLL.scala: -------------------------------------------------------------------------------- 1 | package com.swoop.alchemy.spark.expressions.hll 2 | 3 | import org.apache.spark.sql 4 | import org.apache.spark.sql.Column 5 | 6 | 7 | /** Convenience trait to use HyperLogLog functions with the same error consistently. 8 | * Spark's own [[sql.functions.approx_count_distinct()]] as well as the granular HLL 9 | * [[HLLFunctions.hll_init()]] and [[HLLFunctions.hll_init_collection()]] will be 10 | * automatically parameterized by [[BoundHLL.hllError]]. 
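 *
 * A minimal usage sketch (the DataFrame `df` and its column name are hypothetical):
 * {{{
 *   val hll = BoundHLL(0.02) // bind all HLL functions to a 2% maximum estimation error
 *   import hll._
 *   df.select(hll_init("user_id"))
 * }}}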
11 | */ 12 | trait BoundHLL extends Serializable { 13 | 14 | def hllError: Double 15 | 16 | def functions: HLLFunctions 17 | 18 | def approx_count_distinct(col: Column): Column = 19 | sql.functions.approx_count_distinct(col, hllError) 20 | 21 | def approx_count_distinct(colName: String): Column = 22 | sql.functions.approx_count_distinct(colName, hllError) 23 | 24 | def hll_init(col: Column): Column = 25 | functions.hll_init(col, hllError) 26 | 27 | def hll_init(columnName: String): Column = 28 | functions.hll_init(columnName, hllError) 29 | 30 | def hll_init_collection(col: Column): Column = 31 | functions.hll_init_collection(col, hllError) 32 | 33 | def hll_init_collection(columnName: String): Column = 34 | functions.hll_init_collection(columnName, hllError) 35 | 36 | def hll_init_agg(col: Column): Column = 37 | functions.hll_init_agg(col, hllError) 38 | 39 | def hll_init_agg(columnName: String): Column = 40 | functions.hll_init_agg(columnName, hllError) 41 | 42 | def hll_init_collection_agg(col: Column): Column = 43 | functions.hll_init_collection_agg(col, hllError) 44 | 45 | def hll_init_collection_agg(columnName: String): Column = 46 | functions.hll_init_collection_agg(columnName, hllError) 47 | } 48 | 49 | object BoundHLL { 50 | /** 51 | * @param error maximum estimation error allowed 52 | * @param impl only affects the hll_* functions, not Spark's built-ins 53 | */ 54 | def apply(error: Double)(implicit impl: Implementation = null): BoundHLL = new BoundHLL { 55 | def hllError: Double = error 56 | 57 | val functions = HLLFunctions.withImpl(impl) 58 | } 59 | } 60 | -------------------------------------------------------------------------------- /alchemy/src/main/scala/com/swoop/alchemy/spark/expressions/hll/CardinalityHashFunction.scala: -------------------------------------------------------------------------------- 1 | package com.swoop.alchemy.spark.expressions.hll 2 | 3 | import org.apache.spark.sql.catalyst.InternalRow 4 | import org.apache.spark.sql.catalyst.expressions.{InterpretedHashFunction, XXH64} 5 | import org.apache.spark.sql.catalyst.util.{ArrayData, MapData} 6 | import org.apache.spark.sql.types._ 7 | import org.apache.spark.unsafe.types.UTF8String 8 | 9 | /** 10 | * Hash function for Spark data values that is suitable for cardinality counting. Unlike Spark's built-in hashing, 11 | * it differentiates between different data types and accounts for nulls. 
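 *
 * For example, `Array.empty` and `Array(null)` hash differently because nulls map to a seed-derived
 * value instead of being skipped, and a value hashes differently on its own than as an element of an
 * array, map, or struct, because complex types mix a type tag into the seed.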
12 | */ 13 | abstract class CardinalityHashFunction extends InterpretedHashFunction { 14 | 15 | override def hash(value: Any, dataType: DataType, seed: Long): Long = { 16 | 17 | def hashWithTag(typeTag: Long) = 18 | super.hash(value, dataType, hashLong(typeTag, seed)) 19 | 20 | value match { 21 | // change null handling to differentiate between things like Array.empty and Array(null) 22 | case null => hashLong(seed, seed) 23 | // add type tags to differentiate between values on their own or in complex types 24 | case _: Array[Byte] => hashWithTag(-3698894927619418744L) 25 | case _: UTF8String => hashWithTag(-8468821688391060513L) 26 | case _: ArrayData => hashWithTag(-1666055126678331734L) 27 | case _: MapData => hashWithTag(5587693012926141532L) 28 | case _: InternalRow => hashWithTag(-891294170547231607L) 29 | // pass through everything else (simple types) 30 | case _ => super.hash(value, dataType, seed) 31 | } 32 | } 33 | 34 | } 35 | 36 | 37 | object CardinalityXxHash64Function extends CardinalityHashFunction { 38 | 39 | override protected def hashInt(i: Int, seed: Long): Long = XXH64.hashInt(i, seed) 40 | 41 | override protected def hashLong(l: Long, seed: Long): Long = XXH64.hashLong(l, seed) 42 | 43 | override protected def hashUnsafeBytes(base: AnyRef, offset: Long, len: Int, seed: Long): Long = { 44 | XXH64.hashUnsafeBytes(base, offset, len, seed) 45 | } 46 | 47 | } 48 | -------------------------------------------------------------------------------- /alchemy/src/main/scala/com/swoop/alchemy/spark/expressions/hll/HLLFunctionRegistration.scala: -------------------------------------------------------------------------------- 1 | package com.swoop.alchemy.spark.expressions.hll 2 | 3 | import com.swoop.alchemy.spark.expressions.NativeFunctionRegistration 4 | import org.apache.spark.sql.catalyst.expressions.ExpressionInfo 5 | 6 | object HLLFunctionRegistration extends NativeFunctionRegistration { 7 | 8 | val expressions: Map[String, (ExpressionInfo, FunctionBuilder)] = Map( 9 | expression[HyperLogLogInitSimple]("hll_init"), 10 | expression[HyperLogLogInitCollection]("hll_init_collection"), 11 | expression[HyperLogLogInitSimpleAgg]("hll_init_agg"), 12 | expression[HyperLogLogInitCollectionAgg]("hll_init_collection_agg"), 13 | expression[HyperLogLogMerge]("hll_merge"), 14 | expression[HyperLogLogRowMerge]("hll_row_merge"), 15 | expression[HyperLogLogCardinality]("hll_cardinality"), 16 | expression[HyperLogLogIntersectionCardinality]("hll_intersect_cardinality"), 17 | expression[HyperLogLogConvert]("hll_convert") 18 | ) 19 | } 20 | -------------------------------------------------------------------------------- /alchemy/src/main/scala/com/swoop/alchemy/spark/expressions/hll/HLLFunctions.scala: -------------------------------------------------------------------------------- 1 | package com.swoop.alchemy.spark.expressions.hll 2 | 3 | import com.swoop.alchemy.spark.expressions.WithHelper 4 | import com.swoop.alchemy.spark.expressions.hll.HyperLogLogBase.{nameToImpl, resolveImplementation} 5 | import com.swoop.alchemy.spark.expressions.hll.Implementation.{AGGREGATE_KNOWLEDGE, AGKN, STREAM_LIB, STRM} 6 | import org.apache.spark.sql.EncapsulationViolator.createAnalysisException 7 | import org.apache.spark.sql.catalyst.InternalRow 8 | import org.apache.spark.sql.catalyst.analysis.TypeCheckResult 9 | import org.apache.spark.sql.catalyst.expressions.aggregate.HyperLogLogPlusPlus.validateDoubleLiteral 10 | import org.apache.spark.sql.catalyst.expressions.aggregate.TypedImperativeAggregate 11 | 
import org.apache.spark.sql.catalyst.expressions.codegen.CodegenFallback 12 | import org.apache.spark.sql.catalyst.expressions.{BinaryExpression, ExpectsInputTypes, Expression, ExpressionDescription, Literal, UnaryExpression} 13 | import org.apache.spark.sql.catalyst.trees.UnaryLike 14 | import org.apache.spark.sql.catalyst.util.{ArrayData, MapData} 15 | import org.apache.spark.sql.functions.col 16 | import org.apache.spark.sql.types._ 17 | import org.apache.spark.sql.{Column, SparkSession} 18 | 19 | trait HyperLogLogBase { 20 | def impl: Implementation 21 | } 22 | 23 | object HyperLogLogBase { 24 | def resolveImplementation(exp: Expression): Implementation = exp match { 25 | case null => resolveImplementation 26 | case _ => nameToImpl(exp, "last argument") 27 | } 28 | 29 | def resolveImplementation(exp: String): Implementation = exp match { 30 | case null => resolveImplementation 31 | case s => nameToImpl(s) 32 | } 33 | 34 | def resolveImplementation(implicit impl: Implementation = null): Implementation = 35 | if (impl != null) 36 | impl 37 | else 38 | SparkSession.getActiveSession 39 | .flatMap(_.conf.getOption(IMPLEMENTATION_CONFIG_KEY)) 40 | .map(nameToImpl) 41 | .getOrElse(StreamLib) 42 | 43 | def nameToImpl(name: Expression, argName: String = "argument"): Implementation = name match { 44 | case Literal(s: Any, StringType) => 45 | nameToImpl(s.toString) 46 | case _ => 47 | throw createAnalysisException( 48 | s"The $argName must be a string argument (${Implementation.OPTIONS.mkString("/")}) designating one of the implementation options." 49 | ) 50 | } 51 | 52 | 53 | def nameToImpl(name: String): Implementation = name match { 54 | case STRM => StreamLib 55 | case STREAM_LIB => StreamLib 56 | case AGKN => AgKn 57 | case AGGREGATE_KNOWLEDGE => AgKn 58 | case s => throw createAnalysisException( 59 | s"The HLL implementation choice '$s' is not one of the valid options: ${Implementation.OPTIONS.mkString(", ")}" 60 | ) 61 | } 62 | } 63 | 64 | trait HyperLogLogInit extends Expression with UnaryLike[Expression] with HyperLogLogBase { 65 | def relativeSD: Double 66 | 67 | // This formula for `p` came from org.apache.spark.sql.catalyst.expressions.aggregate.HyperLogLogPlusPlus:93 68 | protected[this] val p: Int = Math.ceil(2.0d * Math.log(1.106d / relativeSD) / Math.log(2.0d)).toInt 69 | 70 | require(p >= 4, "HLL requires at least 4 bits for addressing. Use a lower error, at most 39%.") 71 | 72 | override def dataType: DataType = BinaryType 73 | 74 | def child: Expression 75 | 76 | def offer(value: Any, buffer: Instance): Instance 77 | 78 | def createHll: Instance = impl.createHll(p) 79 | 80 | def hash(value: Any, dataType: DataType, seed: Long): Long = CardinalityXxHash64Function.hash(value, dataType, seed) 81 | 82 | def hash(value: Any, dataType: DataType): Long = { 83 | // Using 0L as the seed results in a hash of 0L for empty arrays, which breaks our cardinality estimation tests due 84 | // to the improbably high number of leading zeros. Instead, use some other arbitrary "normal" long. 
85 | hash(value, dataType, 6705405522910076594L) 86 | } 87 | } 88 | 89 | trait HyperLogLogSimple extends HyperLogLogInit { 90 | def offer(value: Any, buffer: Instance): Instance = { 91 | buffer.offer(hash(value, child.dataType)) 92 | } 93 | } 94 | 95 | trait HyperLogLogCollection extends HyperLogLogInit { 96 | 97 | override def checkInputDataTypes(): TypeCheckResult = 98 | child.dataType match { 99 | case _: ArrayType | _: MapType | _: NullType => TypeCheckResult.TypeCheckSuccess 100 | case _ => TypeCheckResult.TypeCheckFailure(s"$prettyName only supports array and map input.") 101 | } 102 | 103 | def offer(value: Any, buffer: Instance): Instance = { 104 | value match { 105 | case arr: ArrayData => 106 | child.dataType match { 107 | case ArrayType(et, _) => arr.foreach(et, (_, v) => { 108 | if (v != null) buffer.offer(hash(v, et)) 109 | }) 110 | case dt => throw new UnsupportedOperationException(s"Unknown DataType for ArrayData: $dt") 111 | } 112 | case map: MapData => 113 | child.dataType match { 114 | case MapType(kt, vt, _) => map.foreach(kt, vt, (k, v) => { 115 | buffer.offer(hash(v, vt, hash(k, kt))) // chain key and value hash 116 | }) 117 | case dt => throw new UnsupportedOperationException(s"Unknown DataType for MapData: $dt") 118 | } 119 | case _: NullType => // do nothing 120 | case _ => throw new UnsupportedOperationException(s"$prettyName only supports array and map input.") 121 | } 122 | buffer 123 | } 124 | } 125 | 126 | trait HyperLogLogInitSingle extends UnaryExpression with HyperLogLogInit with CodegenFallback { 127 | override def nullable: Boolean = child.nullable 128 | 129 | override def nullSafeEval(value: Any): Any = 130 | offer(value, createHll).serialize 131 | } 132 | 133 | trait HyperLogLogInitAgg extends NullableSketchAggregation with HyperLogLogInit { 134 | 135 | override def update(buffer: Option[Instance], inputRow: InternalRow): Option[Instance] = { 136 | val value = child.eval(inputRow) 137 | if (value != null) { 138 | Some(offer(value, buffer.getOrElse(createHll))) 139 | } else { 140 | buffer 141 | } 142 | } 143 | } 144 | 145 | trait NullableSketchAggregation extends TypedImperativeAggregate[Option[Instance]] with HyperLogLogBase with UnaryLike[Expression] { 146 | 147 | override def createAggregationBuffer(): Option[Instance] = None 148 | 149 | override def merge(buffer: Option[Instance], other: Option[Instance]): Option[Instance] = 150 | (buffer, other) match { 151 | case (Some(a), Some(b)) => 152 | Some(a.merge(b)) 153 | case (a, None) => a 154 | case (None, b) => b 155 | case _ => None 156 | } 157 | 158 | override def eval(buffer: Option[Instance]): Any = 159 | buffer.map(_.serialize).orNull 160 | 161 | def child: Expression 162 | 163 | override def nullable: Boolean = child.nullable 164 | 165 | override def serialize(hll: Option[Instance]): Array[Byte] = 166 | hll.map(_.serialize).orNull 167 | 168 | override def deserialize(bytes: Array[Byte]): Option[Instance] = 169 | if (bytes == null) None else Option(impl.deserialize(bytes)) 170 | } 171 | 172 | 173 | /** 174 | * HyperLogLog (HLL) is a state of the art cardinality estimation algorithm with multiple implementations available. 175 | * 176 | * The underlying [[Implementation]] can be changed by setting a [[IMPLEMENTATION_CONFIG_KEY configuration value]] 177 | * in the [[SparkSession]] to the implementation name, or passing it as an argument. 178 | * 179 | * This function creates a composable "sketch" for each input row. 180 | * All expression values are treated as simple values. 
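 *
 * A SQL usage sketch, assuming the function has been registered with the session (the column name
 * is hypothetical):
 * {{{
 *   SELECT hll_init(user_id, 0.05, 'STRM')
 * }}}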
181 | * 182 | * @param child to estimate the cardinality of. 183 | * @param relativeSD defines the maximum estimation error allowed 184 | * @param impl HLL implementation to use 185 | */ 186 | @ExpressionDescription( 187 | usage = 188 | """ 189 | _FUNC_(expr[, relativeSD[, implName]]) - Returns the composable "sketch" by HyperLogLog++. 190 | `relativeSD` defines the maximum estimation error allowed. 191 | """) 192 | case class HyperLogLogInitSimple( 193 | override val child: Expression, 194 | override val relativeSD: Double = 0.05, 195 | override val impl: Implementation = resolveImplementation) 196 | extends HyperLogLogInitSingle with HyperLogLogSimple { 197 | 198 | def this(child: Expression) = this(child, relativeSD = 0.05) 199 | 200 | def this(child: Expression, relativeSD: Expression) = { 201 | this( 202 | child = child, 203 | relativeSD = validateDoubleLiteral(relativeSD) 204 | ) 205 | } 206 | 207 | def this(child: Expression, relativeSD: Expression, implName: Expression) = { 208 | this( 209 | child = child, 210 | relativeSD = validateDoubleLiteral(relativeSD), 211 | impl = resolveImplementation(implName) 212 | ) 213 | } 214 | 215 | override def prettyName: String = "hll_init" 216 | 217 | override protected def withNewChildInternal(newChild: Expression): Expression = copy(child = newChild) 218 | } 219 | 220 | 221 | /** 222 | * HyperLogLog (HLL) is a state of the art cardinality estimation algorithm with multiple implementations available. 223 | * 224 | * The underlying [[Implementation]] can be changed by setting a [[IMPLEMENTATION_CONFIG_KEY configuration value]] 225 | * in the [[SparkSession]] to the implementation name, or passing it as an argument. 226 | * 227 | * This version combines all input in each aggregate group into a single "sketch". 228 | * All expression values are treated as simple values. 229 | * 230 | * @param child to estimate the cardinality of 231 | * @param relativeSD defines the maximum estimation error allowed 232 | * @param impl HLL implementation to use 233 | */ 234 | @ExpressionDescription( 235 | usage = 236 | """ 237 | _FUNC_(expr[, relativeSD[, implName]]) - Returns the composable "sketch" by HyperLogLog++. 238 | `relativeSD` defines the maximum estimation error allowed.
239 | """) 240 | case class HyperLogLogInitSimpleAgg( 241 | override val child: Expression, 242 | override val relativeSD: Double = 0.05, 243 | override val impl: Implementation = resolveImplementation, 244 | override val mutableAggBufferOffset: Int = 0, 245 | override val inputAggBufferOffset: Int = 0) 246 | extends HyperLogLogInitAgg with HyperLogLogSimple { 247 | 248 | def this(child: Expression) = this(child, relativeSD = 0.05) 249 | 250 | def this(child: Expression, relativeSD: Expression) = { 251 | this( 252 | child = child, 253 | relativeSD = validateDoubleLiteral(relativeSD)) 254 | } 255 | 256 | def this(child: Expression, relativeSD: Expression, implName: Expression) = { 257 | this( 258 | child = child, 259 | relativeSD = validateDoubleLiteral(relativeSD), 260 | impl = resolveImplementation(implName) 261 | ) 262 | } 263 | 264 | override def withNewMutableAggBufferOffset(newOffset: Int): HyperLogLogInitSimpleAgg = 265 | copy(mutableAggBufferOffset = newOffset) 266 | 267 | override def withNewInputAggBufferOffset(newOffset: Int): HyperLogLogInitSimpleAgg = 268 | copy(inputAggBufferOffset = newOffset) 269 | 270 | override def prettyName: String = "hll_init_agg" 271 | 272 | override protected def withNewChildInternal(newChild: Expression): Expression = copy(child = newChild) 273 | } 274 | 275 | /** 276 | * HyperLogLog (HLL) is a state of the art cardinality estimation algorithm with multiple implementations available. 277 | * 278 | * The underlying [[Implementation]] can be changed by setting a [[IMPLEMENTATION_CONFIG_KEY configuration value]] 279 | * in the [[SparkSession]] to the implementation name, or passing it as an argument. 280 | * 281 | * This version creates a composable "sketch" for each input row. 282 | * Expression must be is a collection (Array, Map), and collection elements are treated as individual values. 283 | * 284 | * @param child to estimate the cardinality of. 285 | * @param relativeSD defines the maximum estimation error allowed 286 | * @param impl HLL implementation to use 287 | */ 288 | @ExpressionDescription( 289 | usage = 290 | """ 291 | _FUNC_(expr[, relativeSD[, implName]]) - Returns the composable "sketch" by HyperLogLog++. 292 | `relativeSD` defines the maximum estimation error allowed. 293 | """) 294 | case class HyperLogLogInitCollection( 295 | override val child: Expression, 296 | override val relativeSD: Double = 0.05, 297 | override val impl: Implementation = resolveImplementation) 298 | extends HyperLogLogInitSingle with HyperLogLogCollection { 299 | 300 | def this(child: Expression) = this(child, relativeSD = 0.05) 301 | 302 | def this(child: Expression, relativeSD: Expression) = { 303 | this( 304 | child = child, 305 | relativeSD = validateDoubleLiteral(relativeSD) 306 | ) 307 | } 308 | 309 | def this(child: Expression, relativeSD: Expression, implName: Expression) = { 310 | this( 311 | child = child, 312 | relativeSD = validateDoubleLiteral(relativeSD), 313 | impl = resolveImplementation(implName) 314 | ) 315 | } 316 | 317 | 318 | override def prettyName: String = "hll_init_collection" 319 | 320 | override protected def withNewChildInternal(newChild: Expression): Expression = copy(child = newChild) 321 | } 322 | 323 | 324 | /** 325 | * HyperLogLog (HLL) is a state of the art cardinality estimation algorithm with multiple implementations available. 
326 | * 327 | * The underlying [[Implementation]] can be changed by setting a [[IMPLEMENTATION_CONFIG_KEY configuration value]] 328 | * in the [[SparkSession]] to the implementation name, or passing it as an argument. 329 | * 330 | * This version combines all input in each aggregate group into a single "sketch". 331 | * If `expr` is a collection (Array, Map), collection elements are treated as individual values. 332 | * 333 | * @param child to estimate the cardinality of 334 | * @param relativeSD defines the maximum estimation error allowed 335 | * @param impl HLL implementation to use 336 | */ 337 | @ExpressionDescription( 338 | usage = 339 | """ 340 | _FUNC_(expr[, relativeSD[, implName]]) - Returns the composable "sketch" by HyperLogLog++. 341 | `relativeSD` defines the maximum estimation error allowed. 342 | """) 343 | case class HyperLogLogInitCollectionAgg( 344 | override val child: Expression, 345 | override val relativeSD: Double = 0.05, 346 | override val impl: Implementation = resolveImplementation, 347 | override val mutableAggBufferOffset: Int = 0, 348 | override val inputAggBufferOffset: Int = 0) 349 | extends HyperLogLogInitAgg with HyperLogLogCollection { 350 | 351 | def this(child: Expression) = this(child, relativeSD = 0.05) 352 | 353 | def this(child: Expression, relativeSD: Expression) = { 354 | this( 355 | child, 356 | validateDoubleLiteral(relativeSD) 357 | ) 358 | } 359 | 360 | def this(child: Expression, relativeSD: Expression, implName: Expression) = { 361 | this( 362 | child, 363 | validateDoubleLiteral(relativeSD), 364 | resolveImplementation(implName) 365 | ) 366 | } 367 | 368 | override def withNewMutableAggBufferOffset(newOffset: Int): HyperLogLogInitCollectionAgg = 369 | copy(mutableAggBufferOffset = newOffset) 370 | 371 | override def withNewInputAggBufferOffset(newOffset: Int): HyperLogLogInitCollectionAgg = 372 | copy(inputAggBufferOffset = newOffset) 373 | 374 | override def prettyName: String = "hll_init_collection_agg" 375 | 376 | override protected def withNewChildInternal(newChild: Expression): Expression = copy(child = newChild) 377 | } 378 | 379 | 380 | /** 381 | * HyperLogLog (HLL) is a state of the art cardinality estimation algorithm with multiple implementations available. 382 | * 383 | * The underlying [[Implementation]] can be changed by setting a [[IMPLEMENTATION_CONFIG_KEY configuration value]] 384 | * in the [[SparkSession]] to the implementation name, or passing it as an argument. 385 | * 386 | * This version aggregates the "sketches" into a single merged "sketch" that represents the union of the constituents. 387 | * 388 | * @param child "sketch" to merge 389 | * @param impl HLL implementation to use 390 | */ 391 | @ExpressionDescription( 392 | usage = 393 | """ 394 | _FUNC_(expr[, implName]) - Returns the merged HLL sketch.
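      Example (table and column names hypothetical):
        > SELECT hll_cardinality(_FUNC_(sketch)) FROM daily_sketches;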
395 | """) 396 | case class HyperLogLogMerge( 397 | child: Expression, 398 | override val impl: Implementation = resolveImplementation, 399 | override val mutableAggBufferOffset: Int = 0, 400 | override val inputAggBufferOffset: Int = 0) 401 | extends NullableSketchAggregation { 402 | 403 | def this(child: Expression) = this(child, resolveImplementation) 404 | 405 | def this(child: Expression, implName: Expression) = this(child, resolveImplementation(implName)) 406 | 407 | override def update(buffer: Option[Instance], inputRow: InternalRow): Option[Instance] = { 408 | val value = child.eval(inputRow) 409 | if (value != null) { 410 | val hll = value match { 411 | case b: Array[Byte] => impl.deserialize(b) 412 | case _ => throw new IllegalStateException(s"$prettyName only supports Array[Byte]") 413 | } 414 | buffer.map(_.merge(hll)) 415 | .orElse(Option(hll)) 416 | } else { 417 | buffer 418 | } 419 | } 420 | 421 | override def checkInputDataTypes(): TypeCheckResult = { 422 | child.dataType match { 423 | case BinaryType => TypeCheckResult.TypeCheckSuccess 424 | case _ => TypeCheckResult.TypeCheckFailure(s"$prettyName only supports binary input") 425 | } 426 | } 427 | 428 | override def dataType: DataType = BinaryType 429 | 430 | override def withNewMutableAggBufferOffset(newOffset: Int): HyperLogLogMerge = 431 | copy(mutableAggBufferOffset = newOffset) 432 | 433 | override def withNewInputAggBufferOffset(newOffset: Int): HyperLogLogMerge = 434 | copy(inputAggBufferOffset = newOffset) 435 | 436 | override def prettyName: String = "hll_merge" 437 | 438 | override protected def withNewChildInternal(newChild: Expression): Expression = copy(child = newChild) 439 | } 440 | 441 | /** 442 | * HyperLogLog (HLL) is a state of the art cardinality estimation algorithm with multiple implementations available. 443 | * 444 | * The underlying [[Implementation]] can be changed by setting a [[IMPLEMENTATION_CONFIG_KEY configuration value]] 445 | * in the [[SparkSession]] to the implementation name, or passing it as an argument. 446 | * 447 | * This version merges multiple "sketches" in one row into a single field. 448 | * 449 | * @see [[HyperLogLogMerge]] 450 | * @param children "sketch" row fields to merge 451 | * @param impl HLL implementation to use 452 | */ 453 | @ExpressionDescription( 454 | usage = 455 | """ 456 | _FUNC_(expr[, implName]) - Returns the merged HLL sketch. 
457 | """) 458 | case class HyperLogLogRowMerge( 459 | override val children: Seq[Expression], 460 | override val impl: Implementation = resolveImplementation) 461 | extends Expression with ExpectsInputTypes with CodegenFallback with HyperLogLogBase { 462 | 463 | def this(children: Seq[Expression]) = this({ 464 | assert(children.nonEmpty, s"function requires at least one argument") 465 | children 466 | }.last match { 467 | case Literal(_: Any, StringType) => children.init 468 | case _ => children 469 | }, 470 | children.last match { 471 | case Literal(s: Any, StringType) => resolveImplementation(s.toString) 472 | case _ => resolveImplementation 473 | } 474 | ) 475 | 476 | require(children.nonEmpty, s"$prettyName requires at least one argument.") 477 | 478 | override def inputTypes: Seq[DataType] = Seq.fill(children.size)(BinaryType) 479 | 480 | override def dataType: DataType = BinaryType 481 | 482 | override def nullable: Boolean = children.forall(_.nullable) 483 | 484 | override def foldable: Boolean = children.forall(_.foldable) 485 | 486 | override def eval(input: InternalRow): Any = { 487 | val flatInputs = children.flatMap(_.eval(input) match { 488 | case null => None 489 | case b: Array[Byte] => Some(impl.deserialize(b)) 490 | case _ => throw new IllegalStateException(s"$prettyName only supports Array[Byte]") 491 | }) 492 | 493 | if (flatInputs.isEmpty) null 494 | else { 495 | val acc = flatInputs.head 496 | flatInputs.tail.foreach(acc.merge) 497 | acc.serialize 498 | } 499 | } 500 | 501 | override def prettyName: String = "hll_row_merge" 502 | 503 | override protected def withNewChildrenInternal(newChildren: IndexedSeq[Expression]): Expression = 504 | copy(children = newChildren) 505 | } 506 | 507 | /** 508 | * HyperLogLog (HLL) is a state of the art cardinality estimation algorithm with multiple implementations available. 509 | * 510 | * The underlying [[Implementation]] can be changed by setting a [[IMPLEMENTATION_CONFIG_KEY configuration value]] 511 | * in the [[SparkSession]] to the implementation name, or passing it as an argument. 512 | * 513 | * Returns the estimated cardinality of an HLL "sketch" 514 | * 515 | * @param child HLL "sketch" 516 | * @param impl HLL implementation to use 517 | */ 518 | @ExpressionDescription( 519 | usage = 520 | """ 521 | _FUNC_(sketch[, implName]) - Returns the estimated cardinality of the binary representation produced by HyperLogLog++. 522 | """) 523 | case class HyperLogLogCardinality( 524 | override val child: Expression, 525 | override val impl: Implementation = resolveImplementation 526 | ) extends UnaryExpression with ExpectsInputTypes with CodegenFallback with HyperLogLogBase { 527 | 528 | def this(child: Expression) = this(child, resolveImplementation) 529 | 530 | def this(child: Expression, implName: Expression) = this(child, resolveImplementation(implName)) 531 | 532 | override def inputTypes: Seq[DataType] = Seq(BinaryType) 533 | 534 | override def dataType: DataType = LongType 535 | 536 | override def nullSafeEval(input: Any): Long = { 537 | val data = input.asInstanceOf[Array[Byte]] 538 | impl.deserialize(data).cardinality 539 | } 540 | 541 | override def prettyName: String = "hll_cardinality" 542 | 543 | override protected def withNewChildInternal(newChild: Expression): Expression = copy(child = newChild) 544 | } 545 | 546 | /** 547 | * HyperLogLog (HLL) is a state of the art cardinality estimation algorithm with multiple implementations available. 
548 | * 549 | * The underlying [[Implementation]] can be changed by setting a [[IMPLEMENTATION_CONFIG_KEY configuration value]] 550 | * in the [[SparkSession]] to the implementation name, or passing it as an argument. 551 | * 552 | * Computes a merged (unioned) sketch and uses the fact that |A intersect B| = (|A| + |B|) - |A union B| to estimate 553 | * the intersection cardinality of two HLL "sketches". 554 | * 555 | * @note Error in the cardinality of the intersection is determined by the cardinality of the constituent sketches, not 556 | * the cardinality of the intersection itself (i.e. it may be much larger than naively expected) - 557 | * https://research.neustar.biz/2012/12/17/hll-intersections-2/ 558 | * 559 | * @see HyperLogLogRowMerge 560 | * @see HyperLogLogCardinality 561 | * @param left HLL "sketch" 562 | * @param right HLL "sketch" 563 | * @param impl HLL implementation to use 564 | * @return the estimated intersection cardinality (0 if one sketch is null, but null if both are) 565 | */ 566 | @ExpressionDescription( 567 | usage = 568 | """ 569 | _FUNC_(sketchL, sketchR[, implName]) - Returns the estimated intersection cardinality of the binary representations produced by 570 | HyperLogLog. Computes a merged (unioned) sketch and uses the fact that |A intersect B| = (|A| + |B|) - |A union B|. 571 | Returns null if both sketches are null, but 0 if only one is 572 | """) 573 | case class HyperLogLogIntersectionCardinality( 574 | override val left: Expression, 575 | override val right: Expression, 576 | override val impl: Implementation = resolveImplementation 577 | ) extends BinaryExpression with ExpectsInputTypes with CodegenFallback with HyperLogLogBase { 578 | 579 | def this(left: Expression, right: Expression) = this(left, right, resolveImplementation) 580 | 581 | def this(left: Expression, right: Expression, implName: Expression) = 582 | this(left, right, resolveImplementation(implName)) 583 | 584 | override def inputTypes: Seq[DataType] = Seq(BinaryType, BinaryType) 585 | 586 | override def dataType: DataType = LongType 587 | 588 | override def nullable: Boolean = left.nullable && right.nullable 589 | 590 | override def eval(input: InternalRow): Any = { 591 | val leftValue = left.eval(input) 592 | val rightValue = right.eval(input) 593 | 594 | if (leftValue != null && rightValue != null) { 595 | val leftHLL = impl.deserialize(leftValue.asInstanceOf[Array[Byte]]) 596 | val rightHLL = impl.deserialize(rightValue.asInstanceOf[Array[Byte]]) 597 | 598 | val leftCount = leftHLL.cardinality 599 | val rightCount = rightHLL.cardinality 600 | leftHLL.merge(rightHLL) 601 | val unionCount = leftHLL.cardinality 602 | 603 | // guarantee a non-negative result despite the approximate nature of the counts 604 | math.max((leftCount + rightCount) - unionCount, 0L) 605 | } else { 606 | if (leftValue != null || rightValue != null) { 607 | 0L 608 | } else { 609 | null 610 | } 611 | } 612 | } 613 | 614 | override def prettyName: String = "hll_intersect_cardinality" 615 | 616 | override protected def withNewChildrenInternal(newLeft: Expression, newRight: Expression): Expression = 617 | copy(left = newLeft, right = newRight) 618 | } 619 | 620 | 621 | /** 622 | * HyperLogLog (HLL) is a state of the art cardinality estimation algorithm with multiple implementations available. 623 | * 624 | * This function converts between implementations. 
Currently the only conversion supported is from the StreamLib 625 | * implementation (`"STRM"` or `"STREAM_LIB"`) to the Aggregate Knowledge implementation (`"AGKN"` or 626 | * `"AGGREGATE_KNOWLEDGE"`). 627 | * 628 | * @note Converted values CANNOT be merged with unconverted ("native") values of type that they've been converted to. 629 | * This is because the different implementations use different parts of the hashed valued to construct the HLL 630 | * (effectively equivalent to using different hash functions). 631 | * @param child HLL "sketch" 632 | * @param from string name of implementation type of the given sketch 633 | * @param to string name of implementation type to convert the given sketch to 634 | */ 635 | 636 | @ExpressionDescription( 637 | usage = 638 | """ 639 | _FUNC_(sketch, implNameFrom, implNameTo) - Converts between implementations. 640 | """) 641 | case class HyperLogLogConvert( 642 | override val child: Expression, 643 | from: Implementation, 644 | to: Implementation 645 | ) extends UnaryExpression with CodegenFallback with ExpectsInputTypes { 646 | 647 | def this(hll: Expression, fromName: Expression, toName: Expression) = { 648 | this(hll, nameToImpl(fromName, "second argument"), nameToImpl(toName, "third argument")) 649 | } 650 | 651 | 652 | def this(hll: Expression, fromName: String, toName: String) = { 653 | this(hll, nameToImpl(fromName), nameToImpl(toName)) 654 | } 655 | 656 | override def dataType: DataType = BinaryType 657 | 658 | override def inputTypes: Seq[DataType] = Seq(BinaryType) 659 | 660 | override def nullSafeEval(hll: Any): Any = (from, to) match { 661 | case (StreamLib, AgKn) => strmToAgkn(hll.asInstanceOf[Array[Byte]]) 662 | case _ => throw new IllegalArgumentException( 663 | "HLL conversion is currently only supported from STREAM_LIB to AGGREGATE_KNOWLEDGE" 664 | ) 665 | } 666 | 667 | override def prettyName: String = "hll_convert" 668 | 669 | override protected def withNewChildInternal(newChild: Expression): Expression = copy(child = newChild) 670 | } 671 | 672 | object functions extends HLLFunctions { 673 | val impl: Implementation = null 674 | } 675 | 676 | trait HLLFunctions extends WithHelper { 677 | 678 | implicit def impl: Implementation 679 | 680 | def hll_init(e: Column, relativeSD: Double, implName: String = null): Column = withExpr { 681 | HyperLogLogInitSimple(e.expr, relativeSD, resolveImplementation(implName)) 682 | } 683 | 684 | def hll_init(columnName: String, relativeSD: Double): Column = 685 | hll_init(col(columnName), relativeSD) 686 | 687 | def hll_init(columnName: String, relativeSD: Double, implName: String): Column = 688 | hll_init(col(columnName), relativeSD, implName) 689 | 690 | def hll_init(e: Column): Column = withExpr { 691 | HyperLogLogInitSimple(e.expr, impl = resolveImplementation) 692 | } 693 | 694 | def hll_init(columnName: String): Column = 695 | hll_init(col(columnName)) 696 | 697 | def hll_init_collection(e: Column, relativeSD: Double, implName: String = null): Column = withExpr { 698 | HyperLogLogInitCollection(e.expr, relativeSD, resolveImplementation(implName)) 699 | } 700 | 701 | def hll_init_collection(columnName: String, relativeSD: Double): Column = 702 | hll_init_collection(col(columnName), relativeSD) 703 | 704 | def hll_init_collection(columnName: String, relativeSD: Double, implName: String): Column = 705 | hll_init_collection(col(columnName), relativeSD, implName) 706 | 707 | def hll_init_collection(e: Column): Column = withExpr { 708 | HyperLogLogInitCollection(e.expr, impl = resolveImplementation) 
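    // With no explicit implName, resolveImplementation falls back to the
    // IMPLEMENTATION_CONFIG_KEY session setting and, when that is unset, to the
    // StreamLib ("STRM") implementation (see the "config key unset" cases in HLLFunctionsTest).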
709 | } 710 | 711 | def hll_init_collection(columnName: String): Column = 712 | hll_init_collection(col(columnName)) 713 | 714 | def hll_init_agg(e: Column, relativeSD: Double, implName: String = null): Column = withAggregateFunction { 715 | HyperLogLogInitSimpleAgg(e.expr, relativeSD, resolveImplementation(implName)) 716 | } 717 | 718 | def hll_init_agg(columnName: String, relativeSD: Double): Column = 719 | hll_init_agg(col(columnName), relativeSD) 720 | 721 | def hll_init_agg(columnName: String, relativeSD: Double, implName: String): Column = 722 | hll_init_agg(col(columnName), relativeSD, implName) 723 | 724 | def hll_init_agg(e: Column): Column = withAggregateFunction { 725 | HyperLogLogInitSimpleAgg(e.expr, impl = resolveImplementation) 726 | } 727 | 728 | def hll_init_agg(columnName: String): Column = 729 | hll_init_agg(col(columnName)) 730 | 731 | def hll_init_collection_agg(e: Column, relativeSD: Double, implName: String = null): Column = withAggregateFunction { 732 | HyperLogLogInitCollectionAgg(e.expr, relativeSD, resolveImplementation(implName)) 733 | } 734 | 735 | def hll_init_collection_agg(columnName: String, relativeSD: Double): Column = 736 | hll_init_collection_agg(col(columnName), relativeSD) 737 | 738 | def hll_init_collection_agg(columnName: String, relativeSD: Double, implName: String): Column = 739 | hll_init_collection_agg(col(columnName), relativeSD, implName) 740 | 741 | def hll_init_collection_agg(e: Column): Column = withAggregateFunction { 742 | HyperLogLogInitCollectionAgg(e.expr, impl = resolveImplementation) 743 | } 744 | 745 | def hll_init_collection_agg(columnName: String): Column = 746 | hll_init_collection_agg(col(columnName)) 747 | 748 | def hll_merge(e: Column, implName: String = null): Column = withAggregateFunction { 749 | HyperLogLogMerge(e.expr, resolveImplementation(implName)) 750 | } 751 | 752 | def hll_merge(columnName: String): Column = 753 | hll_merge(col(columnName)) 754 | 755 | def hll_merge(columnName: String, implName: String): Column = 756 | hll_merge(col(columnName), implName) 757 | 758 | def hll_row_merge(es: Column*): Column = withExpr { 759 | HyperLogLogRowMerge(es.map(_.expr), resolveImplementation) 760 | } 761 | 762 | def hll_row_merge(implName: String, es: Column*): Column = withExpr { 763 | HyperLogLogRowMerge(es.map(_.expr), resolveImplementation(implName)) 764 | } 765 | 766 | def hll_cardinality(e: Column, implName: String = null): Column = withExpr { 767 | HyperLogLogCardinality(e.expr, resolveImplementation(implName)) 768 | } 769 | 770 | def hll_cardinality(columnName: String): Column = 771 | hll_cardinality(col(columnName)) 772 | 773 | def hll_cardinality(columnName: String, implName: String): Column = 774 | hll_cardinality(col(columnName), implName) 775 | 776 | def hll_intersect_cardinality(l: Column, r: Column, implName: String = null): Column = withExpr { 777 | HyperLogLogIntersectionCardinality(l.expr, r.expr, resolveImplementation(implName)) 778 | } 779 | 780 | def hll_intersect_cardinality(leftColumnName: String, rightColumnName: String): Column = 781 | hll_intersect_cardinality(col(leftColumnName), col(rightColumnName)) 782 | 783 | def hll_intersect_cardinality(leftColumnName: String, rightColumnName: String, implName: String): Column = 784 | hll_intersect_cardinality(col(leftColumnName), col(rightColumnName), implName) 785 | 786 | def hll_convert(hll: Column, from: String, to: String): Column = withExpr { 787 | HyperLogLogConvert(hll.expr, nameToImpl(from), nameToImpl(to)) 788 | } 789 | 790 | def 
hll_convert(columnName: String, from: String, to: String): Column = 791 | hll_convert(col(columnName), from, to) 792 | } 793 | 794 | object HLLFunctions { 795 | def withImpl(hllImpl: Implementation): HLLFunctions = new HLLFunctions { 796 | override implicit def impl: Implementation = hllImpl 797 | } 798 | } 799 | -------------------------------------------------------------------------------- /alchemy/src/main/scala/com/swoop/alchemy/spark/expressions/hll/Implementation.scala: -------------------------------------------------------------------------------- 1 | package com.swoop.alchemy.spark.expressions.hll 2 | 3 | import com.clearspring.analytics.stream 4 | import com.clearspring.analytics.stream.cardinality.{HyperLogLogPlus, RegisterSet} 5 | import net.agkn.hll.HLL 6 | import net.agkn.hll.util.BitVector 7 | 8 | /** 9 | * Wrapper for instances of different HLL implementations 10 | * 11 | * @note `offer` and `merge` may just mutate and return the same underlying HLL instance 12 | */ 13 | sealed trait Instance { 14 | def offer(hashedValue: Long): Instance 15 | 16 | def merge(other: Instance): Instance 17 | 18 | def serialize: Array[Byte] 19 | 20 | def cardinality: Long 21 | } 22 | 23 | class AgKnInstance(val hll: net.agkn.hll.HLL) extends Instance { 24 | override def offer(hashedValue: Long): Instance = { 25 | hll.addRaw(hashedValue) 26 | this 27 | } 28 | 29 | override def merge(other: Instance): Instance = { 30 | if (other.isInstanceOf[AgKnInstance]) { 31 | hll.union(other.asInstanceOf[AgKnInstance].hll) 32 | this 33 | } else 34 | throw new IllegalArgumentException(s"Type of HLL to merge does not match this HLL (${hll.getClass.getName})") 35 | } 36 | 37 | override def serialize: Array[Byte] = hll.toBytes 38 | 39 | def cardinality: Long = hll.cardinality() 40 | } 41 | 42 | class StreamLibInstance(val hll: stream.cardinality.HyperLogLogPlus) extends Instance { 43 | override def offer(hashedValue: Long): Instance = { 44 | hll.offerHashed(hashedValue) 45 | this 46 | } 47 | 48 | override def merge(other: Instance): Instance = { 49 | if (other.isInstanceOf[StreamLibInstance]) { 50 | hll.addAll(other.asInstanceOf[StreamLibInstance].hll) 51 | this 52 | } else 53 | throw new IllegalArgumentException(s"Type of HLL to merge does not match this HLL (${hll.getClass.getName})") 54 | } 55 | 56 | override def serialize: Array[Byte] = hll.getBytes 57 | 58 | def cardinality: Long = hll.cardinality() 59 | } 60 | 61 | /** 62 | * Option for the underlying HLL implementation used by all functions 63 | */ 64 | trait Implementation { 65 | def createHll(p: Int): Instance 66 | 67 | def deserialize(bytes: Array[Byte]): Instance 68 | } 69 | 70 | object Implementation { 71 | val AGKN = "AGKN" 72 | val STRM = "STRM" 73 | val AGGREGATE_KNOWLEDGE = "AGGREGATE_KNOWLEDGE" 74 | val STREAM_LIB = "STREAM_LIB" 75 | val OPTIONS = Seq(AGKN, STRM, AGGREGATE_KNOWLEDGE, STREAM_LIB) 76 | 77 | 78 | // TODO @peter debugging tools, remove: 79 | def registerSetToSeq(r: RegisterSet): Seq[Int] = 80 | for (i <- 0 until r.count) yield r.get(i) 81 | 82 | def bitVectorToSeq(b: BitVector): Seq[Long] = { 83 | val i = b.registerIterator() 84 | new Iterator[Long] { 85 | def hasNext = i.hasNext 86 | 87 | def next = i.next() 88 | }.toArray 89 | } 90 | } 91 | 92 | case object AgKn extends Implementation { 93 | override def createHll(p: Int) = new AgKnInstance(new HLL(p, 5)) 94 | 95 | override def deserialize(bytes: Array[Byte]) = new AgKnInstance(HLL.fromBytes(bytes)) 96 | } 97 | 98 | case object StreamLib extends Implementation { 99 | override 
def createHll(p: Int) = new StreamLibInstance(new HyperLogLogPlus(p, 0)) 100 | 101 | override def deserialize(bytes: Array[Byte]) = new StreamLibInstance(HyperLogLogPlus.Builder.build(bytes)) 102 | } 103 | 104 | -------------------------------------------------------------------------------- /alchemy/src/main/scala/com/swoop/alchemy/spark/expressions/hll/package.scala: -------------------------------------------------------------------------------- 1 | package com.swoop.alchemy.spark.expressions 2 | 3 | import java.io.{ByteArrayInputStream, DataInputStream} 4 | 5 | import com.clearspring.analytics.stream 6 | import com.clearspring.analytics.stream.cardinality.RegisterSet 7 | import com.clearspring.analytics.util.{Bits, Varint} 8 | import net.agkn.hll.HLL 9 | import net.agkn.hll.serialization.{HLLMetadata, SchemaVersionOne} 10 | import net.agkn.hll.util.BitVector 11 | 12 | package object hll { 13 | val IMPLEMENTATION_CONFIG_KEY = "com.swoop.alchemy.hll.implementation" 14 | 15 | def strmToAgkn(from: stream.cardinality.HyperLogLogPlus): net.agkn.hll.HLL = { 16 | HLL.fromBytes(strmToAgkn(from.getBytes)) 17 | } 18 | 19 | def strmToAgkn(from: Array[Byte]): Array[Byte] = { 20 | var bais = new ByteArrayInputStream(from) 21 | var oi = new DataInputStream(bais) 22 | val version = oi.readInt 23 | // the new encoding scheme includes a version field 24 | // that is always negative. 25 | if (version >= 0) { 26 | throw new UnsupportedOperationException("conversion is only supported for the new style encoding scheme") 27 | } 28 | 29 | val p = Varint.readUnsignedVarInt(oi) 30 | val sp = Varint.readUnsignedVarInt(oi) 31 | val formatType = Varint.readUnsignedVarInt(oi) 32 | if (formatType != 0) { 33 | throw new UnsupportedOperationException("conversion is only supported for non-sparse representation") 34 | } 35 | 36 | val size = Varint.readUnsignedVarInt(oi) 37 | val longArrayBytes = new Array[Byte](size) 38 | oi.readFully(longArrayBytes) 39 | val registerSet = new RegisterSet(Math.pow(2, p).toInt, Bits.getBits(longArrayBytes)) 40 | val bitVector = new BitVector(RegisterSet.REGISTER_SIZE, registerSet.count) 41 | 42 | for (i <- 0 until registerSet.count) bitVector.setRegister(i, registerSet.get(i)) 43 | val schemaVersion = new SchemaVersionOne 44 | val serializer = 45 | schemaVersion.getSerializer(net.agkn.hll.HLLType.FULL, RegisterSet.REGISTER_SIZE, registerSet.count) 46 | bitVector.getRegisterContents(serializer) 47 | var outBytes = serializer.getBytes 48 | 49 | val metadata = new HLLMetadata( 50 | schemaVersion.schemaVersionNumber(), 51 | net.agkn.hll.HLLType.FULL, 52 | p, 53 | RegisterSet.REGISTER_SIZE, 54 | 0, 55 | true, 56 | false, 57 | false 58 | ) 59 | schemaVersion.writeMetadata(outBytes, metadata) 60 | outBytes 61 | } 62 | } 63 | -------------------------------------------------------------------------------- /alchemy/src/main/scala/com/swoop/alchemy/utils/AnyExtensions.scala: -------------------------------------------------------------------------------- 1 | package com.swoop.alchemy.utils 2 | 3 | /** 4 | * Convenience methods for all types 5 | */ 6 | object AnyExtensions { 7 | 8 | /** Sugar for applying functions in a method chain. */ 9 | implicit class TransformOps[A](val underlying: A) extends AnyVal { 10 | 11 | /** Applies a transformer function in a method chain. 12 | * 13 | * @param f function to apply 14 | * @tparam B the return type 15 | * @return the result of applying `f` on `underlying`. 
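 * For example (a sketch with hypothetical values):
 * {{{
 * import com.swoop.alchemy.utils.AnyExtensions._
 *
 * "42".transform(_.toInt).transform(_ + 1) // 43, without breaking the method chain
 * }}}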
16 | */ 17 | @inline def transform[B](f: A => B): B = 18 | f(underlying) 19 | 20 | /** Conditionally applies a transformer function in a method chain. 21 | * Use this instead of [[transformWhen()]] when the predicate requires the value of `underlying`. 22 | * 23 | * @param predicate predicate to evaluate to determine if the function should be applied 24 | * @tparam B the return type of the function 25 | * @return `underlying` if the predicate evaluates to `false` or the result of function application. 26 | */ 27 | @inline def transformIf[B <: A](predicate: A => Boolean)(f: A => B): A = 28 | if (predicate(underlying)) 29 | f(underlying) 30 | else underlying 31 | 32 | /** Conditionally applies a transformer function in a method chain. 33 | * Use this instead of [[transformIf()]] when the condition does not require the value of `underlying`. 34 | * 35 | * @param condition condition to evaluate to determine if the function should be applied 36 | * @tparam B the return type of the function 37 | * @return `underlying` if the expression evaluates to `false` or the result of function application. 38 | */ 39 | @inline def transformWhen[B <: A](condition: => Boolean)(f: A => B): A = 40 | if (condition) 41 | f(underlying) 42 | else underlying 43 | 44 | } 45 | 46 | /** Sugar for creating side-effects in method chains. */ 47 | implicit class TapOps[A](val underlying: A) extends AnyVal { 48 | 49 | /** Applies a function for its side-effect as part of a method chain. 50 | * Inspired by Ruby's `Object#tap`. 51 | * 52 | * @param f side-effect function to call 53 | * @tparam B the return type of the function; ignored 54 | * @return `this` 55 | */ 56 | @inline def tap[B](f: A => B): A = { 57 | f(underlying) 58 | underlying 59 | } 60 | 61 | /** Conditionally applies a function for its side-effect as part of a method chain. 62 | * Use this instead of [[tapWhen()]] when the predicate requires the value of `underlying`. 63 | * 64 | * @param predicate predicate to evaluate to determine if the side-effect should be invoked 65 | * @param f side-effect function to call 66 | * @tparam B the return type of the function; ignored 67 | * @return `this` 68 | */ 69 | @inline def tapIf[B](predicate: A => Boolean)(f: A => B): A = { 70 | if (predicate(underlying)) 71 | f(underlying) 72 | underlying 73 | } 74 | 75 | /** Conditionally applies a function for its side-effect as part of a method chain. 76 | * Use this instead of [[tapIf()]] when the condition does not require the value of `underlying`. 77 | * 78 | * @param condition condition to evaluate to determine if the side-effect should be invoked 79 | * @param f side-effect function to call 80 | * @tparam B the return type of the function; ignored 81 | * @return `this` 82 | */ 83 | @inline def tapWhen[B](condition: => Boolean)(f: A => B): A = { 84 | if (condition) 85 | f(underlying) 86 | underlying 87 | } 88 | } 89 | 90 | /** Sugar for simple debugging/reporting by printing in a method chain. */ 91 | implicit class PrintOps[A](val underlying: A) extends AnyVal { 92 | 93 | /** Taps and prints the object in a method chain. 94 | * Shorthand for `.tap(println)`. 95 | * 96 | * @return `underlying` 97 | */ 98 | def tapp: A = 99 | underlying.tap(println) 100 | 101 | /** Prints a value as a side effect. 102 | * 103 | * @param v the value to print 104 | * @return `underlying` 105 | */ 106 | def print[B](v: B): A = 107 | underlying.tap((_: A) => println(v)) 108 | 109 | /** Conditionally taps and prints the object in a method chain. 
110 | * Use this instead of [[printWhen()]] when the predicate requires the value of `underlying`. 111 | * 112 | * @param predicate predicate to evaluate to determine if the underlying value should be printed 113 | * @return `this` 114 | */ 115 | def printIf(predicate: A => Boolean): A = 116 | underlying.tapIf(predicate)(println) 117 | 118 | /** Conditionally prints a value as a side effect. 119 | * Use this instead of [[printWhen()]] when the predicate requires the value of `underlying`. 120 | * 121 | * @param predicate predicate to evaluate to determine if the value should be printed 122 | * @param v the value to print 123 | * @tparam B the value type; ignored 124 | * @return `this` 125 | */ 126 | def printIf[B](predicate: A => Boolean, v: B): A = 127 | underlying.tapIf(predicate)((_: A) => println(v)) 128 | 129 | /** Conditionally taps and prints the object in a method chain. 130 | * Use this instead of [[printIf()]] when the condition does not require the value of `underlying`. 131 | * 132 | * @param condition condition to evaluate to determine if the underlying value should be printed 133 | * @return `underlying` 134 | */ 135 | def printWhen(condition: => Boolean): A = 136 | underlying.tapWhen(condition)(println) 137 | 138 | /** Conditionally prints a value as a side effect. 139 | * Use this instead of [[printIf()]] when the condition does not require the value of `underlying`. 140 | * 141 | * @param condition condition to evaluate to determine if the value should be printed 142 | * @tparam B the value type; ignored 143 | * @return `underlying` 144 | */ 145 | def printWhen[B](condition: => Boolean, v: B): A = 146 | underlying.tapWhen(condition)((_: A) => println(v)) 147 | 148 | } 149 | 150 | /** Sugar for conditionally raising exceptions as part of a method chain. */ 151 | implicit class ThrowOps[A](val underlying: A) extends AnyVal { 152 | 153 | /** Raises an exception if a predicate is satisfied. 154 | * Use this instead of [[throwWhen()]] when the predicate requires the value of `underlying`. 155 | * 156 | * @param predicate predicate to evaluate to determine if the exception should be thrown 157 | * @param e expression that will return an exception 158 | * @tparam B the exception type 159 | * @return `underlying` if the predicate evaluates to `false`. 160 | * @throws B 161 | */ 162 | def throwIf[B <: Throwable](predicate: A => Boolean)(e: => B): A = { 163 | if (predicate(underlying)) 164 | throw e 165 | underlying 166 | } 167 | 168 | /** Raises an exception if a condition is satisfied. 169 | * Use this instead of [[throwIf()]] when the condition does not require the value of `underlying`. 170 | * 171 | * @param condition condition to evaluate to determine if the exception should be thrown 172 | * @param e expression that will return an exception 173 | * @tparam B the exception type 174 | * @return `underlying` if the condition evaluates to `false`. 
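 * For example (a sketch; `maxRows` is a hypothetical setting checked before using the value):
 * {{{
 * import com.swoop.alchemy.utils.AnyExtensions._
 *
 * rowCount.throwWhen(maxRows <= 0, new IllegalArgumentException("maxRows must be positive"))
 * }}}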
175 | * @throws B 176 | */ 177 | def throwWhen[B <: Throwable](condition: => Boolean, e: => B): A = { 178 | if (condition) 179 | throw e 180 | underlying 181 | } 182 | 183 | } 184 | 185 | } 186 | -------------------------------------------------------------------------------- /alchemy/src/main/scala/org/apache/spark/sql/EncapsulationViolator.scala: -------------------------------------------------------------------------------- 1 | package org.apache.spark.sql 2 | 3 | import org.apache.spark.sql.catalyst.expressions.{GenericRow, NamedExpression} 4 | import org.apache.spark.sql.internal.SessionState 5 | import org.apache.spark.sql.types.{DataType, Metadata, StructType} 6 | import org.json4s.JsonAST.JValue 7 | 8 | object EncapsulationViolator { 9 | 10 | def createAnalysisException(message: String): AnalysisException = 11 | new AnalysisException(message) 12 | 13 | def parseDataType(jv: JValue): DataType = 14 | DataType.parseDataType(jv) 15 | 16 | object implicits { 17 | 18 | implicit class EncapsulationViolationSparkSessionOps(val underlying: SparkSession) extends AnyVal { 19 | def evSessionState: SessionState = underlying.sessionState 20 | } 21 | 22 | implicit class EncapsulationViolationRowOps(val underlying: GenericRow) extends AnyVal { 23 | def evValues: Array[Any] = underlying.values 24 | } 25 | 26 | implicit class EncapsulationViolationColumnOps(val underlying: Column) extends AnyVal { 27 | def evNamed: NamedExpression = underlying.named 28 | 29 | def metadata: Metadata = underlying.expr match { 30 | case ne: NamedExpression => ne.metadata 31 | case other => Metadata.empty 32 | } 33 | } 34 | 35 | implicit class EncapsulationViolationDataTypeOps(val underlying: DataType) extends AnyVal { 36 | def isSameType(other: DataType): Boolean = underlying.sameType(other) 37 | 38 | def jValue: JValue = underlying.jsonValue 39 | 40 | def toNullable: DataType = underlying.asNullable 41 | } 42 | 43 | implicit class EncapsulationViolationStructTypeOps(val underlying: StructType) extends AnyVal { 44 | def evMerge(that: StructType): StructType = underlying.merge(that) 45 | } 46 | 47 | } 48 | 49 | } 50 | -------------------------------------------------------------------------------- /alchemy/src/test/resources/log4j.properties: -------------------------------------------------------------------------------- 1 | # Set everything to be logged to the console 2 | log4j.rootCategory=ERROR, console 3 | log4j.appender.console=org.apache.log4j.ConsoleAppender 4 | log4j.appender.console.target=System.err 5 | log4j.appender.console.layout=org.apache.log4j.PatternLayout 6 | log4j.appender.console.layout.ConversionPattern=%d{yy/MM/dd HH:mm:ss} %p %c{1}: %m%n 7 | 8 | # Settings to quiet third party logs that are too verbose 9 | log4j.logger.org.eclipse.jetty=WARN 10 | log4j.logger.org.eclipse.jetty.util.component.AbstractLifeCycle=ERROR 11 | log4j.logger.org.apache.spark.repl.SparkIMain$exprTyper=WARN 12 | log4j.logger.org.apache.spark.repl.SparkILoop$SparkILoopInterpreter=WARN 13 | -------------------------------------------------------------------------------- /alchemy/src/test/scala/com/swoop/alchemy/spark/expressions/hll/CardinalityHashFunctionTest.scala: -------------------------------------------------------------------------------- 1 | package com.swoop.alchemy.spark.expressions.hll 2 | 3 | import org.apache.spark.sql.catalyst.InternalRow 4 | import org.apache.spark.sql.catalyst.util.{ArrayBasedMapData, ArrayData} 5 | import org.apache.spark.sql.types._ 6 | import org.apache.spark.unsafe.types.UTF8String 7 | 
import org.scalatest.matchers.should.Matchers 8 | import org.scalatest.wordspec.AnyWordSpec 9 | 10 | 11 | class CardinalityHashFunctionTest extends AnyWordSpec with Matchers { 12 | 13 | "Cardinality hash functions" should { 14 | "account for nulls" in { 15 | val a = UTF8String.fromString("a") 16 | 17 | allDistinct(Seq( 18 | null, 19 | Array.empty[Byte], 20 | Array.apply(1.toByte) 21 | ), BinaryType) 22 | 23 | allDistinct(Seq( 24 | null, 25 | UTF8String.fromString(""), 26 | a 27 | ), StringType) 28 | 29 | allDistinct(Seq( 30 | null, 31 | ArrayData.toArrayData(Array.empty), 32 | ArrayData.toArrayData(Array(null)), 33 | ArrayData.toArrayData(Array(null, null)), 34 | ArrayData.toArrayData(Array(a, null)), 35 | ArrayData.toArrayData(Array(null, a)) 36 | ), ArrayType(StringType)) 37 | 38 | 39 | allDistinct(Seq( 40 | null, 41 | ArrayBasedMapData(Map.empty), 42 | ArrayBasedMapData(Map(null.asInstanceOf[String] -> null)) 43 | ), MapType(StringType, StringType)) 44 | 45 | allDistinct(Seq( 46 | null, 47 | InternalRow(null), 48 | InternalRow(a) 49 | ), new StructType().add("foo", StringType)) 50 | 51 | allDistinct(Seq( 52 | InternalRow(null, a), 53 | InternalRow(a, null) 54 | ), new StructType().add("foo", StringType).add("bar", StringType)) 55 | } 56 | } 57 | 58 | def allDistinct(values: Seq[Any], dataType: DataType): Unit = { 59 | val hashed = values.map(x => CardinalityXxHash64Function.hash(x, dataType, 0)) 60 | hashed.distinct.length should be(hashed.length) 61 | } 62 | 63 | } 64 | -------------------------------------------------------------------------------- /alchemy/src/test/scala/com/swoop/alchemy/spark/expressions/hll/HLLFunctionsTest.scala: -------------------------------------------------------------------------------- 1 | package com.swoop.alchemy.spark.expressions.hll 2 | 3 | import com.clearspring.analytics.stream.cardinality.HyperLogLogPlus 4 | import com.swoop.alchemy.spark.expressions.hll.Implementation.{AGKN, STRM} 5 | import com.swoop.alchemy.spark.expressions.hll.functions.{hll_init_collection, hll_init_collection_agg, _} 6 | import com.swoop.test_utils.SparkSessionSpec 7 | import net.agkn.hll.HLL 8 | import net.agkn.hll.HLLType.FULL 9 | import org.apache.spark.sql.DataFrame 10 | import org.apache.spark.sql.catalyst.expressions.XXH64 11 | import org.apache.spark.sql.functions.{array, col, lit, map} 12 | import org.apache.spark.sql.types._ 13 | import org.scalatest.matchers.should.Matchers 14 | import org.scalatest.wordspec.AnyWordSpec 15 | 16 | 17 | object HLLFunctionsTest { 18 | 19 | System.setSecurityManager(null) 20 | 21 | case class Data(c1: Int, c2: String, c3: Array[Int], c4: Map[String, String], c5: Array[String]) 22 | 23 | object Data { 24 | def apply(c1: Int, c2: String): Data = Data(c1, c2, null, null, null) 25 | } 26 | 27 | case class Data2(c1: Array[String], c2: Map[String, String]) 28 | 29 | case class Data3(c1: String, c2: String, c3: String) 30 | 31 | } 32 | 33 | class HLLFunctionsTest extends AnyWordSpec with Matchers with SparkSessionSpec { 34 | 35 | import HLLFunctionsTest._ 36 | import testImplicits._ 37 | 38 | "HyperLogLog functions" when { 39 | "config key unset" should { 40 | behave like hllImplementation(StreamLib, spark.conf.unset(IMPLEMENTATION_CONFIG_KEY)) 41 | } 42 | "config key AGKN" should { 43 | behave like hllImplementation(AgKn, spark.conf.set(IMPLEMENTATION_CONFIG_KEY, "AGKN")) 44 | } 45 | "config key STRM" should { 46 | behave like hllImplementation(StreamLib, spark.conf.set(IMPLEMENTATION_CONFIG_KEY, "STRM")) 47 | } 48 | } 49 | 50 | private def 
hllImplementation(impl: Implementation, setup: => Unit): Unit = { 51 | "use right implementation" in { 52 | setup 53 | hll_init(lit(null), 0.39).expr.asInstanceOf[HyperLogLogInitSimple].impl should be(impl) 54 | } 55 | "not allow relativeSD > 39%" in { 56 | setup 57 | val err = "requirement failed: HLL requires at least 4 bits for addressing. Use a lower error, at most 39%." 58 | val c = lit(null) 59 | 60 | noException should be thrownBy hll_init(c, 0.39) 61 | 62 | the[IllegalArgumentException] thrownBy { 63 | hll_init(c, 0.40) 64 | } should have message err 65 | 66 | noException should be thrownBy hll_init_collection(c, 0.39) 67 | 68 | the[IllegalArgumentException] thrownBy { 69 | hll_init_collection(c, 0.40) 70 | } should have message err 71 | } 72 | "register native org.apache.spark.sql.ext.functions" in { 73 | setup 74 | HLLFunctionRegistration.registerFunctions(spark) 75 | 76 | noException should be thrownBy spark.sql( 77 | """select 78 | | hll_cardinality(hll_merge(hll_init(1))), 79 | | hll_cardinality(hll_merge(hll_init_collection(array(1,2,3)))), 80 | | hll_cardinality(hll_init_agg(1)), 81 | | hll_cardinality(hll_init_collection_agg(array(1,2,3))), 82 | | hll_cardinality(hll_merge(hll_init(1, 0.05))), 83 | | hll_cardinality(hll_merge(hll_init_collection(array(1,2,3), 0.05))), 84 | | hll_cardinality(hll_init_agg(1, 0.05)), 85 | | hll_cardinality(hll_init_collection_agg(array(1,2,3), 0.05)), 86 | | hll_cardinality(hll_row_merge(hll_init(1),hll_init(1))), 87 | | hll_intersect_cardinality(hll_init(1), hll_init(1)), 88 | | hll_cardinality(hll_convert(hll_init(1),"STRM","AGKN")) 89 | """.stripMargin // last line will error if evaluated, but is valid under statical analysis 90 | ) 91 | } 92 | "estimate cardinality of simple types and collections" in { 93 | setup 94 | 95 | val a123 = array(lit(1), lit(2), lit(3)) 96 | 97 | val simpleValues = Seq( 98 | lit(null).cast(IntegerType), 99 | lit(""), 100 | a123 101 | ).map(hll_init) 102 | 103 | val collections = Seq( 104 | lit(null).cast(ArrayType(IntegerType)), 105 | array(), 106 | map(), 107 | a123 108 | ).map(hll_init_collection) 109 | 110 | val results = cardinalities(spark.range(1).select(simpleValues ++ collections: _*)) 111 | 112 | results should be(Seq( 113 | /* simple types */ 0, 1, 1, 114 | /* collections */ 0, 0, 0, 3 115 | )) 116 | } 117 | // @todo merge tests with grouping 118 | "estimate cardinality correctly" in { 119 | setup 120 | 121 | val df = spark.createDataset[Data](Seq[Data]( 122 | Data(1, "a", Array(1, 2, 3), Map("a" -> "A"), Array.empty), 123 | Data(2, "b", Array(2, 3, 1), Map("b" -> "B"), Array(null)), 124 | Data(2, "b", Array(2, 3, 1), Map("b" -> "B"), Array(null, null)), 125 | Data(3, "c", Array(3, 1, 2), Map("a" -> "A", "b" -> "B"), null), 126 | Data(2, "b", Array(1, 1, 1), Map("b" -> "B", "c" -> "C"), null), 127 | Data(3, "c", Array(2, 2, 2), Map("c" -> "C", "a" -> null), null), 128 | Data(4, "d", null, null, null), 129 | Data(4, "d", null, null, null), 130 | Data(5, "e", Array.empty, Map.empty, null), 131 | Data(5, "e", Array.empty, Map.empty, null) 132 | )) 133 | 134 | val results = cardinalities(merge(df.select( 135 | hll_init('c1), 136 | hll_init('c2), 137 | hll_init('c3), 138 | hll_init('c4), 139 | hll_init('c5), 140 | hll_init_collection('c3), 141 | hll_init_collection('c4), 142 | hll_init_collection('c5) 143 | ))) 144 | 145 | results should be(Seq( 146 | 5, // 5 unique simple values 147 | 5, // 5 unique simple values 148 | 6, // 6 unique arrays (treated as simple types, nulls not counted) 149 | 6, // 6 unique 
maps (treated as simple types, nulls not counted) 150 | 3, // 3 unique arrays 151 | 3, // 3 unique values across all arrays 152 | 4, // 4 unique (k, v) pairs across all maps 153 | 0 // 0 unique values across all arrays, nulls not counted 154 | )) 155 | } 156 | "estimate multiples correctly" in { 157 | setup 158 | 159 | val createSampleData = 160 | spark.createDataset(Seq( 161 | Data(1, "a"), 162 | Data(2, "b"), 163 | Data(2, "b"), 164 | Data(3, "c"), 165 | Data(4, "d") 166 | )).select(hll_init('c1), hll_init('c2)) 167 | 168 | val results = cardinalities(merge(createSampleData union createSampleData)) 169 | 170 | results should be(Seq(4, 4)) 171 | } 172 | } 173 | 174 | "HyperLogLog aggregate functions" when { 175 | "config key unset" should { 176 | behave like aggregateFunctions(spark.conf.unset(IMPLEMENTATION_CONFIG_KEY)) 177 | } 178 | 179 | "config key AGKN" should { 180 | behave like aggregateFunctions(spark.conf.set(IMPLEMENTATION_CONFIG_KEY, "AGKN")) 181 | 182 | } 183 | 184 | "config key STRM" should { 185 | behave like aggregateFunctions(spark.conf.set(IMPLEMENTATION_CONFIG_KEY, "STRM")) 186 | } 187 | } 188 | 189 | private def aggregateFunctions(setup: => Unit): Unit = { 190 | // @todo merge tests with grouping 191 | "estimate cardinality correctly" in { 192 | setup 193 | 194 | val df = spark.createDataset[Data](Seq[Data]( 195 | Data(1, "a", Array(1, 2, 3), Map("a" -> "A"), Array.empty), 196 | Data(2, "b", Array(2, 3, 1), Map("b" -> "B"), Array(null)), 197 | Data(2, "b", Array(2, 3, 1), Map("b" -> "B"), Array(null, null)), 198 | Data(3, "c", Array(3, 1, 2), Map("a" -> "A", "b" -> "B"), null), 199 | Data(2, "b", Array(1, 1, 1), Map("b" -> "B", "c" -> "C"), null), 200 | Data(3, "c", Array(2, 2, 2), Map("c" -> "C", "a" -> null), null), 201 | Data(4, "d", null, null, null), 202 | Data(4, "d", null, null, null), 203 | Data(5, "e", Array.empty, Map.empty, null), 204 | Data(5, "e", Array.empty, Map.empty, null) 205 | )) 206 | 207 | val results = cardinalities(df.select( 208 | hll_init_agg('c1), 209 | hll_init_agg('c2), 210 | hll_init_agg('c3), 211 | hll_init_agg('c4), 212 | hll_init_agg('c5), 213 | hll_init_collection_agg('c3), 214 | hll_init_collection_agg('c4), 215 | hll_init_collection_agg('c5) 216 | )) 217 | 218 | results should be(Seq( 219 | 5, // 5 unique simple values 220 | 5, // 5 unique simple values 221 | 6, // 6 unique arrays (treated as simple types, nulls not counted) 222 | 6, // 6 unique maps (treated as simple types, nulls not counted) 223 | 3, // 3 unique arrays 224 | 3, // 3 unique values across all arrays 225 | 4, // 4 unique (k, v) pairs across all maps 226 | 0 // 0 unique values across all arrays, nulls not counted 227 | )) 228 | } 229 | "estimate multiples correctly" in { 230 | setup 231 | 232 | val createSampleData = 233 | spark.createDataset(Seq( 234 | Data(1, "a"), 235 | Data(2, "b"), 236 | Data(2, "b"), 237 | Data(3, "c"), 238 | Data(4, "d") 239 | )).select(hll_init_agg('c1), hll_init_agg('c2)) 240 | 241 | val results = cardinalities(createSampleData union createSampleData) 242 | 243 | results should be(Seq(4, 4)) 244 | } 245 | } 246 | 247 | def merge(df: DataFrame): DataFrame = 248 | df.select( 249 | df.columns.zipWithIndex.map { case (name, idx) => 250 | hll_merge(col(name)).as(s"c$idx") 251 | }: _* 252 | ) 253 | 254 | def cardinalities(df: DataFrame): Seq[Long] = 255 | df.select( 256 | df.columns.zipWithIndex.map { case (name, idx) => 257 | hll_cardinality(col(name)).as(s"c$idx") 258 | }: _* 259 | ).head.toSeq.map(_.asInstanceOf[Long]) 260 | 261 | "HyperLogLog row 
merge function" should { 262 | // @todo merge tests with grouping 263 | "estimate cardinality correctly, with nulls" in { 264 | val df = spark.createDataset[Data3](Seq[Data3]( 265 | Data3("a", "a", "a"), 266 | Data3("a", "b", "c"), 267 | Data3("a", "b", null), 268 | Data3("a", null, null), 269 | Data3(null, null, null) 270 | )) 271 | 272 | val results = df 273 | .select(hll_init('c1).as('c1), hll_init('c2).as('c2), hll_init('c3).as('c3)) 274 | .select(hll_cardinality(hll_row_merge('c1, 'c2, 'c3))) 275 | .na.fill(-1L) 276 | .as[Long] 277 | .head(5) 278 | .toSeq 279 | 280 | results should be(Seq(1, 3, 2, 1, -1)) // nulls skipped 281 | } 282 | } 283 | 284 | "HyperLogLog intersection function" when { 285 | "config key unset" should { 286 | behave like intersectionFunction(spark.conf.unset(IMPLEMENTATION_CONFIG_KEY)) 287 | } 288 | "config key AGKN" should { 289 | behave like intersectionFunction(spark.conf.set(IMPLEMENTATION_CONFIG_KEY, "AGKN")) 290 | } 291 | "config key STRM" should { 292 | behave like intersectionFunction(spark.conf.set(IMPLEMENTATION_CONFIG_KEY, "STRM")) 293 | } 294 | } 295 | 296 | def intersectionFunction(setup: => Unit): Unit = { 297 | // @todo merge tests with grouping 298 | "estimate cardinality correctly" in { 299 | setup 300 | 301 | val df = spark.createDataset[Data3](Seq[Data3]( 302 | Data3("a", "e", "f"), 303 | Data3("b", "d", "g"), 304 | Data3("c", "c", "h"), 305 | Data3("d", "b", "i"), 306 | Data3("e", "a", "j") 307 | )) 308 | 309 | val results = df 310 | .select(hll_init_agg('c1).as('c1), hll_init_agg('c2).as('c2), hll_init_agg('c3).as('c3)) 311 | .select(hll_intersect_cardinality('c1, 'c2), hll_intersect_cardinality('c2, 'c3)) 312 | .as[(Long, Long)] 313 | .head() 314 | 315 | results should be((5, 0)) 316 | } 317 | "handle nulls correctly" in { 318 | setup 319 | 320 | val df = spark.createDataset[Data3](Seq[Data3]( 321 | Data3("a", null, null), 322 | Data3("b", null, null), 323 | Data3("c", null, null), 324 | Data3("d", null, null), 325 | Data3("e", null, null) 326 | )) 327 | 328 | val results = df 329 | .select(hll_init_agg('c1).as('c1), hll_init_agg('c2).as('c2), hll_init_agg('c3).as('c3)) 330 | .select(hll_intersect_cardinality('c1, 'c2), hll_intersect_cardinality('c2, 'c3)) 331 | .na.fill(-1L) 332 | .as[(Long, Long)] 333 | .head() 334 | 335 | println(results) 336 | results should be((0, -1)) 337 | } 338 | } 339 | 340 | "Spark SQL functions" should { 341 | "accept HLL implementation by name in signature" in { 342 | HLLFunctionRegistration.registerFunctions(spark) 343 | noException should be thrownBy spark.sql( 344 | """select 345 | | hll_cardinality(hll_merge(hll_init(1, 0.05, "AGKN"), "AGKN"), "AGKN"), 346 | | hll_cardinality(hll_merge(hll_init_collection(array(1,2,3), 0.05, "STRM"), "STRM"), "STRM"), 347 | | hll_cardinality(hll_init_agg(1, 0.05, "AGKN"), "AGKN"), 348 | | hll_cardinality(hll_init_collection_agg(array(1,2,3), 0.05, "STRM"), "STRM"), 349 | | hll_cardinality(hll_row_merge(hll_init(1, 0.05, "AGKN"),hll_init(1, 0.05, "AGKN"), "AGKN"), "AGKN"), 350 | | hll_intersect_cardinality(hll_init(1, 0.05, "STRM"), hll_init(1, 0.05, "STRM"), "STRM") 351 | """.stripMargin 352 | ) 353 | } 354 | } 355 | 356 | "Conversion function" should { 357 | "estimate similar as original" in { 358 | 359 | def randomize(callable: Long => Unit, n: Int): Unit = { 360 | val rand = new scala.util.Random(42) 361 | for (_ <- 0 until n) { 362 | callable(XXH64.hashInt(rand.nextInt(n), 0)) 363 | } 364 | } 365 | 366 | val p = 20 367 | val strm = new HyperLogLogPlus(p, 0) 368 | val agkn 
= new HLL(p, 5, 0, false, FULL) 369 | 370 | val n = 10000 371 | randomize(strm.offerHashed(_: Long), n) 372 | randomize(agkn.addRaw, n) 373 | 374 | val converted = strmToAgkn(strm) 375 | 376 | converted.cardinality() should be(agkn.cardinality() +- 1) 377 | } 378 | } 379 | 380 | "error on unsupported conversion" in { 381 | the[IllegalArgumentException] thrownBy { 382 | spark.range(1) 383 | .withColumn("foo", hll_convert(hll_init(lit(1)), AGKN, STRM)) 384 | .collect() 385 | } should have message "HLL conversion is currently only supported from STREAM_LIB to AGGREGATE_KNOWLEDGE" 386 | } 387 | 388 | } 389 | -------------------------------------------------------------------------------- /alchemy/src/test/scala/com/swoop/alchemy/spark/expressions/hll/PostgresInteropTest.scala: -------------------------------------------------------------------------------- 1 | package com.swoop.alchemy.spark.expressions.hll 2 | 3 | import java.sql.{DriverManager, ResultSet, Statement} 4 | 5 | import com.swoop.alchemy.spark.expressions.hll.functions._ 6 | import com.swoop.test_utils.SparkSessionSpec 7 | import org.apache.spark.sql.{DataFrame, SparkSession} 8 | import org.scalatest.matchers.should.Matchers 9 | import org.scalatest.wordspec.AnyWordSpec 10 | 11 | 12 | case class Postgres(user: String, database: String, port: Int) { 13 | val con_str = s"jdbc:postgresql://localhost:$port/$database?user=$user" 14 | 15 | def execute[T](query: String, handler: ResultSet => T): T = 16 | execute(stm => handler(stm.executeQuery(query))) 17 | 18 | def update(query: String): Unit = 19 | execute(_.executeUpdate(query)) 20 | 21 | def sparkRead(schema: String, table: String)(implicit spark: SparkSession): DataFrame = 22 | spark.read 23 | .format("jdbc") 24 | .option("url", s"jdbc:postgresql:${database}") 25 | .option("dbtable", s"${schema}.${table}") 26 | .option("user", user) 27 | .load() 28 | 29 | def sparkWrite(schema: String, table: String)(df: DataFrame): Unit = 30 | df.write 31 | .format("jdbc") 32 | .option("url", s"jdbc:postgresql:${database}") 33 | .option("dbtable", s"${schema}.${table}") 34 | .option("user", user) 35 | .save() 36 | 37 | private def execute[T](fn: Statement => T): T = { 38 | val conn = DriverManager.getConnection(con_str) 39 | try { 40 | val stm = conn.createStatement(ResultSet.TYPE_FORWARD_ONLY, ResultSet.CONCUR_READ_ONLY) 41 | fn(stm) 42 | } finally { 43 | conn.close() 44 | } 45 | } 46 | } 47 | 48 | 49 | class PostgresInteropTest extends AnyWordSpec with Matchers with SparkSessionSpec { 50 | 51 | import testImplicits._ 52 | 53 | lazy val pg = Postgres("postgres", "postgres", 5432) 54 | 55 | "Postgres interop" should { 56 | "calculate same results" in { 57 | // use Aggregate Knowledge (Postgres-compatible) HLL implementation 58 | spark.conf.set(IMPLEMENTATION_CONFIG_KEY, "AGKN") 59 | 60 | // init Postgres extension for database 61 | pg.update("CREATE EXTENSION IF NOT EXISTS hll;") 62 | 63 | // create some random not-entirely distinct rows 64 | val rand = new scala.util.Random(42) 65 | val n = 100000 66 | val randomDF = sc.parallelize( 67 | Seq.fill(n) { 68 | (rand.nextInt(24), rand.nextInt(n)) 69 | } 70 | ).toDF("hour", "id").cache 71 | 72 | // create hll aggregates (by hour) 73 | val byHourDF = randomDF.groupBy("hour").agg(hll_init_agg("id", .39).as("hll_id")).cache 74 | 75 | // send hlls to postgres 76 | pg.update("DROP TABLE IF EXISTS spark_hlls CASCADE;") 77 | pg.sparkWrite("public", "spark_hlls")(byHourDF) 78 | 79 | // convert hll column from `bytea` to `hll` type 80 | pg.update( 81 | """ 82 | 
|ALTER TABLE spark_hlls 83 | |ALTER COLUMN hll_id TYPE hll USING CAST (hll_id AS hll); 84 | |""".stripMargin 85 | ) 86 | 87 | // re-aggregate all hours in Spark 88 | val distinctSpark = byHourDF.select(hll_cardinality(hll_merge(byHourDF("hll_id")))).as[Long].first() 89 | // re-aggregate all hours in Postgres 90 | val distinctPostgres = pg.execute( 91 | "SELECT CAST (hll_cardinality(hll_union_agg(hll_id)) as Integer) AS approx FROM spark_hlls", 92 | (rs) => { 93 | rs.next; 94 | rs.getInt("approx") 95 | } 96 | ) 97 | 98 | distinctSpark should be(distinctPostgres) 99 | } 100 | } 101 | 102 | } 103 | -------------------------------------------------------------------------------- /alchemy/src/test/scala/com/swoop/test_utils/SparkSessionSpec.scala: -------------------------------------------------------------------------------- 1 | package com.swoop.test_utils 2 | 3 | import org.apache.spark.sql.SQLContext 4 | import org.apache.spark.sql.test.{SharedSparkSessionBase, TestSparkSession} 5 | import org.apache.spark.{SparkConf, SparkContext} 6 | import org.scalatest.TestSuite 7 | 8 | 9 | trait SparkSessionSpec extends SharedSparkSessionBase { 10 | this: TestSuite => 11 | 12 | override protected def createSparkSession: TestSparkSession = { 13 | val spark = super.createSparkSession 14 | spark 15 | } 16 | 17 | def sparkSession = spark 18 | 19 | def sqlc: SQLContext = sparkSession.sqlContext 20 | 21 | def sc: SparkContext = sparkSession.sparkContext 22 | 23 | override protected def sparkConf: SparkConf = 24 | super.sparkConf 25 | .set("spark.driver.bindAddress", "127.0.0.1") 26 | 27 | } 28 | -------------------------------------------------------------------------------- /alchemy/src/test/scala/org/apache/spark/DebugFilesystem.scala: -------------------------------------------------------------------------------- 1 | package org.apache.spark 2 | 3 | import java.io.{FileDescriptor, InputStream} 4 | import java.lang 5 | import java.nio.ByteBuffer 6 | 7 | import scala.collection.JavaConverters._ 8 | import scala.collection.mutable 9 | 10 | import org.apache.hadoop.fs._ 11 | 12 | import org.apache.spark.internal.Logging 13 | 14 | object DebugFilesystem extends Logging { 15 | // Stores the set of active streams and their creation sites. 16 | private val openStreams = mutable.Map.empty[FSDataInputStream, Throwable] 17 | 18 | def addOpenStream(stream: FSDataInputStream): Unit = openStreams.synchronized { 19 | openStreams.put(stream, new Throwable()) 20 | } 21 | 22 | def clearOpenStreams(): Unit = openStreams.synchronized { 23 | openStreams.clear() 24 | } 25 | 26 | def removeOpenStream(stream: FSDataInputStream): Unit = openStreams.synchronized { 27 | openStreams.remove(stream) 28 | } 29 | 30 | def assertNoOpenStreams(): Unit = openStreams.synchronized { 31 | val numOpen = openStreams.values.size 32 | if (numOpen > 0) { 33 | for (exc <- openStreams.values) { 34 | logWarning("Leaked filesystem connection created at:") 35 | exc.printStackTrace() 36 | } 37 | throw new IllegalStateException(s"There are $numOpen possibly leaked file streams.", 38 | openStreams.values.head) 39 | } 40 | } 41 | } 42 | 43 | /** 44 | * DebugFilesystem wraps file open calls to track all open connections. This can be used in tests 45 | * to check that connections are not leaked. 
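 * A wiring sketch (an assumed setup, mirroring how Spark's shared-session test
 * harness routes the local `file://` scheme through this class):
 * {{{
 * val conf = new SparkConf()
 *   .set("spark.hadoop.fs.file.impl", classOf[DebugFilesystem].getName)
 * // ... run the test against a session built from `conf` ...
 * DebugFilesystem.assertNoOpenStreams()
 * }}}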
46 | */ 47 | // TODO(ekl) we should consider always interposing this to expose num open conns as a metric 48 | class DebugFilesystem extends LocalFileSystem { 49 | import DebugFilesystem._ 50 | 51 | override def open(f: Path, bufferSize: Int): FSDataInputStream = { 52 | val wrapped: FSDataInputStream = super.open(f, bufferSize) 53 | addOpenStream(wrapped) 54 | new FSDataInputStream(wrapped.getWrappedStream) { 55 | override def setDropBehind(dropBehind: lang.Boolean): Unit = wrapped.setDropBehind(dropBehind) 56 | 57 | override def getWrappedStream: InputStream = wrapped.getWrappedStream 58 | 59 | override def getFileDescriptor: FileDescriptor = wrapped.getFileDescriptor 60 | 61 | override def getPos: Long = wrapped.getPos 62 | 63 | override def seekToNewSource(targetPos: Long): Boolean = wrapped.seekToNewSource(targetPos) 64 | 65 | override def seek(desired: Long): Unit = wrapped.seek(desired) 66 | 67 | override def setReadahead(readahead: lang.Long): Unit = wrapped.setReadahead(readahead) 68 | 69 | override def read(position: Long, buffer: Array[Byte], offset: Int, length: Int): Int = 70 | wrapped.read(position, buffer, offset, length) 71 | 72 | override def read(buf: ByteBuffer): Int = wrapped.read(buf) 73 | 74 | override def readFully(position: Long, buffer: Array[Byte], offset: Int, length: Int): Unit = 75 | wrapped.readFully(position, buffer, offset, length) 76 | 77 | override def readFully(position: Long, buffer: Array[Byte]): Unit = 78 | wrapped.readFully(position, buffer) 79 | 80 | override def available(): Int = wrapped.available() 81 | 82 | override def mark(readlimit: Int): Unit = wrapped.mark(readlimit) 83 | 84 | override def skip(n: Long): Long = wrapped.skip(n) 85 | 86 | override def markSupported(): Boolean = wrapped.markSupported() 87 | 88 | override def close(): Unit = { 89 | try { 90 | wrapped.close() 91 | } finally { 92 | removeOpenStream(wrapped) 93 | } 94 | } 95 | 96 | override def read(): Int = wrapped.read() 97 | 98 | override def reset(): Unit = wrapped.reset() 99 | 100 | override def toString: String = wrapped.toString 101 | 102 | override def equals(obj: scala.Any): Boolean = wrapped.equals(obj) 103 | 104 | override def hashCode(): Int = wrapped.hashCode() 105 | } 106 | } 107 | } 108 | -------------------------------------------------------------------------------- /alchemy/src/test/scala/org/apache/spark/SparkFunSuite.scala: -------------------------------------------------------------------------------- 1 | package org.apache.spark 2 | 3 | // scalastyle:off 4 | import java.io.File 5 | 6 | import scala.annotation.tailrec 7 | import org.apache.log4j.{Appender, Level, Logger} 8 | import org.scalatest.{BeforeAndAfter, BeforeAndAfterAll, BeforeAndAfterEach, Outcome, Suite} 9 | import org.apache.spark.internal.Logging 10 | import org.apache.spark.internal.config.Tests.IS_TESTING 11 | import org.apache.spark.util.{AccumulatorContext, Utils} 12 | import org.scalatest.funsuite.AnyFunSuite 13 | 14 | /** 15 | * Base abstract class for all unit tests in Spark for handling common functionality. 16 | * 17 | * Thread audit happens normally here automatically when a new test suite created. 18 | * The only prerequisite for that is that the test class must extend [[SparkFunSuite]]. 19 | * 20 | * It is possible to override the default thread audit behavior by setting enableAutoThreadAudit 21 | * to false and manually calling the audit methods, if desired. 
For example: 22 | * 23 | * class MyTestSuite extends SparkFunSuite { 24 | * 25 | * override val enableAutoThreadAudit = false 26 | * 27 | * protected override def beforeAll(): Unit = { 28 | * doThreadPreAudit() 29 | * super.beforeAll() 30 | * } 31 | * 32 | * protected override def afterAll(): Unit = { 33 | * super.afterAll() 34 | * doThreadPostAudit() 35 | * } 36 | * } 37 | */ 38 | abstract class SparkFunSuite 39 | extends AnyFunSuite 40 | with SparkSuiteBase { 41 | // scalastyle:on 42 | 43 | /** 44 | * Note: this method doesn't support `BeforeAndAfter`. You must use `BeforeAndAfterEach` to 45 | * set up and tear down resources. 46 | */ 47 | def testRetry(s: String, n: Int = 2)(body: => Unit): Unit = { 48 | test(s) { 49 | retry(n) { 50 | body 51 | } 52 | } 53 | } 54 | 55 | /** 56 | * Log the suite name and the test name before and after each test. 57 | * 58 | * Subclasses should never override this method. If they wish to run 59 | * custom code before and after each test, they should mix in the 60 | * {{org.scalatest.BeforeAndAfter}} trait instead. 61 | */ 62 | final protected override def withFixture(test: NoArgTest): Outcome = { 63 | val testName = test.text 64 | val suiteName = this.getClass.getName 65 | val shortSuiteName = suiteName.replaceAll("org.apache.spark", "o.a.s") 66 | try { 67 | logInfo(s"\n\n===== TEST OUTPUT FOR $shortSuiteName: '$testName' =====\n") 68 | test() 69 | } finally { 70 | logInfo(s"\n\n===== FINISHED $shortSuiteName: '$testName' =====\n") 71 | } 72 | } 73 | 74 | } 75 | 76 | 77 | trait SparkSuiteBase 78 | extends BeforeAndAfterAll 79 | with BeforeAndAfterEach 80 | with Logging { 81 | // scalastyle:on 82 | this: Suite => 83 | 84 | protected override def beforeAll(): Unit = { 85 | System.setProperty(IS_TESTING.key, "true") 86 | super.beforeAll() 87 | } 88 | 89 | protected override def afterAll(): Unit = { 90 | try { 91 | // Avoid leaking map entries in tests that use accumulators without SparkContext 92 | AccumulatorContext.clear() 93 | } finally { 94 | super.afterAll() 95 | } 96 | } 97 | 98 | // helper function 99 | protected final def getTestResourceFile(file: String): File = { 100 | new File(getClass.getClassLoader.getResource(file).getFile) 101 | } 102 | 103 | protected final def getTestResourcePath(file: String): String = { 104 | getTestResourceFile(file).getCanonicalPath 105 | } 106 | 107 | /** 108 | * Note: this method doesn't support `BeforeAndAfter`. You must use `BeforeAndAfterEach` to 109 | * set up and tear down resources. 110 | */ 111 | def retry[T](n: Int)(body: => T): T = { 112 | if (this.isInstanceOf[BeforeAndAfter]) { 113 | throw new UnsupportedOperationException( 114 | s"testRetry/retry cannot be used with ${classOf[BeforeAndAfter]}. " + 115 | s"Please use ${classOf[BeforeAndAfterEach]} instead.") 116 | } 117 | retry0(n, n)(body) 118 | } 119 | 120 | @tailrec private final def retry0[T](n: Int, n0: Int)(body: => T): T = { 121 | try body 122 | catch { case e: Throwable => 123 | if (n > 0) { 124 | logWarning(e.getMessage, e) 125 | logInfo(s"\n\n===== RETRY #${n0 - n + 1} =====\n") 126 | // Reset state before re-attempting in order so that tests which use patterns like 127 | // LocalSparkContext to clean up state can work correctly when retried. 128 | afterEach() 129 | beforeEach() 130 | retry0(n-1, n0)(body) 131 | } 132 | else throw e 133 | } 134 | } 135 | 136 | /** 137 | * Creates a temporary directory, which is then passed to `f` and will be deleted after `f` 138 | * returns. 
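 * For example (a sketch with a hypothetical file name):
 * {{{
 * withTempDir { dir =>
 *   val f = new java.io.File(dir, "scratch.txt")
 *   // ... exercise code against f; dir is deleted once this block returns ...
 * }
 * }}}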
139 | */ 140 | protected def withTempDir(f: File => Unit): Unit = { 141 | val dir = Utils.createTempDir() 142 | try f(dir) finally { 143 | Utils.deleteRecursively(dir) 144 | } 145 | } 146 | 147 | /** 148 | * Adds a log appender and optionally sets a log level to the root logger or the logger with 149 | * the specified name, then executes the specified function, and in the end removes the log 150 | * appender and restores the log level if necessary. 151 | */ 152 | protected def withLogAppender( 153 | appender: Appender, 154 | loggerName: Option[String] = None, 155 | level: Option[Level] = None)( 156 | f: => Unit): Unit = { 157 | val logger = loggerName.map(Logger.getLogger).getOrElse(Logger.getRootLogger) 158 | val restoreLevel = logger.getLevel 159 | logger.addAppender(appender) 160 | if (level.isDefined) { 161 | logger.setLevel(level.get) 162 | } 163 | try f finally { 164 | logger.removeAppender(appender) 165 | if (level.isDefined) { 166 | logger.setLevel(restoreLevel) 167 | } 168 | } 169 | } 170 | } 171 | -------------------------------------------------------------------------------- /alchemy/src/test/scala/org/apache/spark/SparkTestUtilsEncapsulationViolator.scala: -------------------------------------------------------------------------------- 1 | package org.apache.spark 2 | 3 | import org.apache.spark.sql.SparkSession 4 | 5 | 6 | object SparkTestUtilsEncapsulationViolator { 7 | 8 | def builderWithSparkContext(builder: SparkSession.Builder, sc: SparkContext): SparkSession.Builder = 9 | builder.sparkContext(sc) 10 | 11 | def cleanupAnyExistingSession(): Unit = 12 | SparkSession.cleanupAnyExistingSession() 13 | 14 | def utils = org.apache.spark.util.Utils 15 | 16 | } 17 | -------------------------------------------------------------------------------- /alchemy/src/test/scala/org/apache/spark/sql/catalyst/plans/PlanTest.scala: -------------------------------------------------------------------------------- 1 | package org.apache.spark.sql.catalyst.plans 2 | 3 | 4 | import org.apache.spark.SparkFunSuite 5 | import org.apache.spark.sql.catalyst.analysis.SimpleAnalyzer 6 | import org.apache.spark.sql.catalyst.expressions._ 7 | import org.apache.spark.sql.catalyst.expressions.aggregate.AggregateExpression 8 | import org.apache.spark.sql.catalyst.plans.logical._ 9 | import org.apache.spark.sql.catalyst.util._ 10 | import org.apache.spark.sql.internal.SQLConf 11 | import org.apache.spark.sql.test.SQLHelper 12 | import org.scalatest.Suite 13 | 14 | /** 15 | * Provides helper methods for comparing plans. 16 | */ 17 | trait PlanTest extends SparkFunSuite with PlanTestBase 18 | 19 | /** 20 | * Provides helper methods for comparing plans, but without the overhead of 21 | * mandating a FunSuite. 22 | */ 23 | trait PlanTestBase extends PredicateHelper with SQLHelper { 24 | self: Suite => 25 | 26 | // TODO(gatorsmile): remove this from PlanTest and all the analyzer rules 27 | protected def conf = SQLConf.get 28 | 29 | /** 30 | * Since attribute references are given globally unique ids during analysis, 31 | * we must normalize them to check if two different queries are identical. 
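 * For example (a sketch; `plan1` and `plan2` are hypothetical analyzed plans of the
 * same query built twice, so only their ExprIds differ):
 * {{{
 * normalizeExprIds(plan1) == normalizeExprIds(plan2) // true once ids are zeroed out
 * }}}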
32 | */ 33 | protected def normalizeExprIds(plan: LogicalPlan) = { 34 | plan transformAllExpressions { 35 | case s: ScalarSubquery => 36 | s.copy(exprId = ExprId(0)) 37 | case e: Exists => 38 | e.copy(exprId = ExprId(0)) 39 | case l: ListQuery => 40 | l.copy(exprId = ExprId(0)) 41 | case a: AttributeReference => 42 | AttributeReference(a.name, a.dataType, a.nullable)(exprId = ExprId(0)) 43 | case a: Alias => 44 | Alias(a.child, a.name)(exprId = ExprId(0)) 45 | case ae: AggregateExpression => 46 | ae.copy(resultId = ExprId(0)) 47 | case lv: NamedLambdaVariable => 48 | lv.copy(exprId = ExprId(0), value = null) 49 | case udf: PythonUDF => 50 | udf.copy(resultId = ExprId(0)) 51 | } 52 | } 53 | 54 | private def rewriteNameFromAttrNullability(plan: LogicalPlan): LogicalPlan = { 55 | plan.transformAllExpressions { 56 | case a@AttributeReference(name, _, false, _) => 57 | a.copy(name = s"*$name")(exprId = a.exprId, qualifier = a.qualifier) 58 | } 59 | } 60 | 61 | /** 62 | * Normalizes plans: 63 | * - Filter the filter conditions that appear in a plan. For instance, 64 | * ((expr 1 && expr 2) && expr 3), (expr 1 && expr 2 && expr 3), (expr 3 && (expr 1 && expr 2) 65 | * etc., will all now be equivalent. 66 | * - Sample the seed will replaced by 0L. 67 | * - Join conditions will be resorted by hashCode. 68 | */ 69 | protected def normalizePlan(plan: LogicalPlan): LogicalPlan = { 70 | plan transform { 71 | case Filter(condition: Expression, child: LogicalPlan) => 72 | Filter(splitConjunctivePredicates(condition).map(rewriteEqual).sortBy(_.hashCode()) 73 | .reduce(And), child) 74 | case sample: Sample => 75 | sample.copy(seed = 0L) 76 | case Join(left, right, joinType, condition, hint) if condition.isDefined => 77 | val newCondition = 78 | splitConjunctivePredicates(condition.get).map(rewriteEqual).sortBy(_.hashCode()) 79 | .reduce(And) 80 | Join(left, right, joinType, Some(newCondition), hint) 81 | } 82 | } 83 | 84 | /** 85 | * Rewrite [[EqualTo]] and [[EqualNullSafe]] operator to keep order. The following cases will be 86 | * equivalent: 87 | * 1. (a = b), (b = a); 88 | * 2. (a <=> b), (b <=> a). 89 | */ 90 | private def rewriteEqual(condition: Expression): Expression = condition match { 91 | case eq@EqualTo(l: Expression, r: Expression) => 92 | Seq(l, r).sortBy(_.hashCode()).reduce(EqualTo) 93 | case eq@EqualNullSafe(l: Expression, r: Expression) => 94 | Seq(l, r).sortBy(_.hashCode()).reduce(EqualNullSafe) 95 | case _ => condition // Don't reorder. 96 | } 97 | 98 | /** Fails the test if the two plans do not match */ 99 | protected def comparePlans( 100 | plan1: LogicalPlan, 101 | plan2: LogicalPlan, 102 | checkAnalysis: Boolean = true): Unit = { 103 | if (checkAnalysis) { 104 | // Make sure both plan pass checkAnalysis. 
105 |       SimpleAnalyzer.checkAnalysis(plan1)
106 |       SimpleAnalyzer.checkAnalysis(plan2)
107 |     }
108 | 
109 |     val normalized1 = normalizePlan(normalizeExprIds(plan1))
110 |     val normalized2 = normalizePlan(normalizeExprIds(plan2))
111 |     if (normalized1 != normalized2) {
112 |       fail(
113 |         s"""
114 |            |== FAIL: Plans do not match ==
115 |            |${
116 |           sideBySide(
117 |             rewriteNameFromAttrNullability(normalized1).treeString,
118 |             rewriteNameFromAttrNullability(normalized2).treeString).mkString("\n")
119 |         }
120 |       """.stripMargin)
121 |     }
122 |   }
123 | 
124 |   /** Fails the test if the two expressions do not match */
125 |   protected def compareExpressions(e1: Expression, e2: Expression): Unit = {
126 |     comparePlans(Filter(e1, OneRowRelation()), Filter(e2, OneRowRelation()), checkAnalysis = false)
127 |   }
128 | 
129 |   /** Fails the test if the join order in the two plans does not match */
130 |   protected def compareJoinOrder(plan1: LogicalPlan, plan2: LogicalPlan): Unit = {
131 |     val normalized1 = normalizePlan(normalizeExprIds(plan1))
132 |     val normalized2 = normalizePlan(normalizeExprIds(plan2))
133 |     if (!sameJoinPlan(normalized1, normalized2)) {
134 |       fail(
135 |         s"""
136 |            |== FAIL: Plans do not match ==
137 |            |${
138 |           sideBySide(
139 |             rewriteNameFromAttrNullability(normalized1).treeString,
140 |             rewriteNameFromAttrNullability(normalized2).treeString).mkString("\n")
141 |         }
142 |       """.stripMargin)
143 |     }
144 |   }
145 | 
146 |   /** Consider symmetry for joins when comparing plans. */
147 |   private def sameJoinPlan(plan1: LogicalPlan, plan2: LogicalPlan): Boolean = {
148 |     (plan1, plan2) match {
149 |       case (j1: Join, j2: Join) =>
150 |         (sameJoinPlan(j1.left, j2.left) && sameJoinPlan(j1.right, j2.right)) ||
151 |           (sameJoinPlan(j1.left, j2.right) && sameJoinPlan(j1.right, j2.left))
152 |       case (p1: Project, p2: Project) =>
153 |         p1.projectList == p2.projectList && sameJoinPlan(p1.child, p2.child)
154 |       case _ =>
155 |         plan1 == plan2
156 |     }
157 |   }
158 | }
159 | 
--------------------------------------------------------------------------------
/alchemy/src/test/scala/org/apache/spark/sql/test/SQLHelper.scala:
--------------------------------------------------------------------------------
1 | package org.apache.spark.sql.test
2 | 
3 | import java.io.File
4 | 
5 | import org.apache.spark.sql.AnalysisException
6 | import org.apache.spark.sql.internal.SQLConf
7 | import org.apache.spark.util.Utils
8 | 
9 | trait SQLHelper {
10 | 
11 |   /**
12 |    * Sets all SQL configurations specified in `pairs`, calls `f`, and then restores all SQL
13 |    * configurations.
14 |    */
15 |   protected def withSQLConf(pairs: (String, String)*)(f: => Unit): Unit = {
16 |     val conf = SQLConf.get
17 |     val (keys, values) = pairs.unzip
18 |     val currentValues = keys.map { key =>
19 |       if (conf.contains(key)) {
20 |         Some(conf.getConfString(key))
21 |       } else {
22 |         None
23 |       }
24 |     }
25 |     (keys, values).zipped.foreach { (k, v) =>
26 |       if (SQLConf.isStaticConfigKey(k)) {
27 |         throw new AnalysisException(s"Cannot modify the value of a static config: $k")
28 |       }
29 |       conf.setConfString(k, v)
30 |     }
31 |     try f finally {
32 |       keys.zip(currentValues).foreach {
33 |         case (key, Some(value)) => conf.setConfString(key, value)
34 |         case (key, None) => conf.unsetConf(key)
35 |       }
36 |     }
37 |   }
38 | 
39 |   /**
40 |    * Generates a temporary path without creating the actual file/directory, then passes it to `f`.
41 |    * If a file/directory is created there by `f`, it will be deleted after `f` returns.
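   *
   * A hypothetical usage sketch (assumes an in-scope SparkSession named `spark`):
   * {{{
   *   withTempPath { path =>
   *     spark.range(10).write.parquet(path.getCanonicalPath)
   *     assert(spark.read.parquet(path.getCanonicalPath).count() == 10)
   *   }
   * }}}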
42 | */ 43 | protected def withTempPath(f: File => Unit): Unit = { 44 | val path = Utils.createTempDir() 45 | path.delete() 46 | try f(path) finally Utils.deleteRecursively(path) 47 | } 48 | } 49 | -------------------------------------------------------------------------------- /alchemy/src/test/scala/org/apache/spark/sql/test/SQLTestData.scala: -------------------------------------------------------------------------------- 1 | package org.apache.spark.sql.test 2 | 3 | import java.nio.charset.StandardCharsets 4 | 5 | import org.apache.spark.rdd.RDD 6 | import org.apache.spark.sql.{DataFrame, SQLContext, SQLImplicits, SparkSession} 7 | 8 | /** 9 | * A collection of sample data used in SQL tests. 10 | */ 11 | private[sql] trait SQLTestData { self => 12 | protected def spark: SparkSession 13 | 14 | // Helper object to import SQL implicits without a concrete SQLContext 15 | private object internalImplicits extends SQLImplicits { 16 | protected override def _sqlContext: SQLContext = self.spark.sqlContext 17 | } 18 | 19 | import SQLTestData._ 20 | import internalImplicits._ 21 | 22 | // Note: all test data should be lazy because the SQLContext is not set up yet. 23 | 24 | protected lazy val emptyTestData: DataFrame = { 25 | val df = spark.sparkContext.parallelize( 26 | Seq.empty[Int].map(i => TestData1(i, i.toString))).toDF() 27 | df.createOrReplaceTempView("emptyTestData") 28 | df 29 | } 30 | 31 | protected lazy val testData1: DataFrame = { 32 | val df = spark.sparkContext.parallelize( 33 | (1 to 100).map(i => TestData1(i, i.toString))).toDF() 34 | df.createOrReplaceTempView("testData") 35 | df 36 | } 37 | 38 | protected lazy val testData2: DataFrame = { 39 | val df = spark.sparkContext.parallelize( 40 | TestData2(1, 1) :: 41 | TestData2(1, 2) :: 42 | TestData2(2, 1) :: 43 | TestData2(2, 2) :: 44 | TestData2(3, 1) :: 45 | TestData2(3, 2) :: Nil, 2).toDF() 46 | df.createOrReplaceTempView("testData2") 47 | df 48 | } 49 | 50 | protected lazy val testData3: DataFrame = { 51 | val df = spark.sparkContext.parallelize( 52 | TestData3(1, None) :: 53 | TestData3(2, Some(2)) :: Nil).toDF() 54 | df.createOrReplaceTempView("testData3") 55 | df 56 | } 57 | 58 | protected lazy val negativeData: DataFrame = { 59 | val df = spark.sparkContext.parallelize( 60 | (1 to 100).map(i => TestData1(-i, (-i).toString))).toDF() 61 | df.createOrReplaceTempView("negativeData") 62 | df 63 | } 64 | 65 | protected lazy val largeAndSmallInts: DataFrame = { 66 | val df = spark.sparkContext.parallelize( 67 | LargeAndSmallInts(2147483644, 1) :: 68 | LargeAndSmallInts(1, 2) :: 69 | LargeAndSmallInts(2147483645, 1) :: 70 | LargeAndSmallInts(2, 2) :: 71 | LargeAndSmallInts(2147483646, 1) :: 72 | LargeAndSmallInts(3, 2) :: Nil).toDF() 73 | df.createOrReplaceTempView("largeAndSmallInts") 74 | df 75 | } 76 | 77 | protected lazy val decimalData: DataFrame = { 78 | val df = spark.sparkContext.parallelize( 79 | DecimalData(1, 1) :: 80 | DecimalData(1, 2) :: 81 | DecimalData(2, 1) :: 82 | DecimalData(2, 2) :: 83 | DecimalData(3, 1) :: 84 | DecimalData(3, 2) :: Nil).toDF() 85 | df.createOrReplaceTempView("decimalData") 86 | df 87 | } 88 | 89 | protected lazy val binaryData: DataFrame = { 90 | val df = spark.sparkContext.parallelize( 91 | BinaryData("12".getBytes(StandardCharsets.UTF_8), 1) :: 92 | BinaryData("22".getBytes(StandardCharsets.UTF_8), 5) :: 93 | BinaryData("122".getBytes(StandardCharsets.UTF_8), 3) :: 94 | BinaryData("121".getBytes(StandardCharsets.UTF_8), 2) :: 95 | BinaryData("123".getBytes(StandardCharsets.UTF_8), 4) :: 
Nil).toDF() 96 | df.createOrReplaceTempView("binaryData") 97 | df 98 | } 99 | 100 | protected lazy val upperCaseData: DataFrame = { 101 | val df = spark.sparkContext.parallelize( 102 | UpperCaseData(1, "A") :: 103 | UpperCaseData(2, "B") :: 104 | UpperCaseData(3, "C") :: 105 | UpperCaseData(4, "D") :: 106 | UpperCaseData(5, "E") :: 107 | UpperCaseData(6, "F") :: Nil).toDF() 108 | df.createOrReplaceTempView("upperCaseData") 109 | df 110 | } 111 | 112 | protected lazy val lowerCaseData: DataFrame = { 113 | val df = spark.sparkContext.parallelize( 114 | LowerCaseData(1, "a") :: 115 | LowerCaseData(2, "b") :: 116 | LowerCaseData(3, "c") :: 117 | LowerCaseData(4, "d") :: Nil).toDF() 118 | df.createOrReplaceTempView("lowerCaseData") 119 | df 120 | } 121 | 122 | protected lazy val lowerCaseDataWithDuplicates: DataFrame = { 123 | val df = spark.sparkContext.parallelize( 124 | LowerCaseData(1, "a") :: 125 | LowerCaseData(2, "b") :: 126 | LowerCaseData(2, "b") :: 127 | LowerCaseData(3, "c") :: 128 | LowerCaseData(3, "c") :: 129 | LowerCaseData(3, "c") :: 130 | LowerCaseData(4, "d") :: Nil).toDF() 131 | df.createOrReplaceTempView("lowerCaseData") 132 | df 133 | } 134 | 135 | protected lazy val arrayData: RDD[ArrayData] = { 136 | val rdd = spark.sparkContext.parallelize( 137 | ArrayData(Seq(1, 2, 3), Seq(Seq(1, 2, 3))) :: 138 | ArrayData(Seq(2, 3, 4), Seq(Seq(2, 3, 4))) :: Nil) 139 | rdd.toDF().createOrReplaceTempView("arrayData") 140 | rdd 141 | } 142 | 143 | protected lazy val mapData: RDD[MapData] = { 144 | val rdd = spark.sparkContext.parallelize( 145 | MapData(Map(1 -> "a1", 2 -> "b1", 3 -> "c1", 4 -> "d1", 5 -> "e1")) :: 146 | MapData(Map(1 -> "a2", 2 -> "b2", 3 -> "c2", 4 -> "d2")) :: 147 | MapData(Map(1 -> "a3", 2 -> "b3", 3 -> "c3")) :: 148 | MapData(Map(1 -> "a4", 2 -> "b4")) :: 149 | MapData(Map(1 -> "a5")) :: Nil) 150 | rdd.toDF().createOrReplaceTempView("mapData") 151 | rdd 152 | } 153 | 154 | protected lazy val repeatedData: RDD[StringData] = { 155 | val rdd = spark.sparkContext.parallelize(List.fill(2)(StringData("test"))) 156 | rdd.toDF().createOrReplaceTempView("repeatedData") 157 | rdd 158 | } 159 | 160 | protected lazy val nullableRepeatedData: RDD[StringData] = { 161 | val rdd = spark.sparkContext.parallelize( 162 | List.fill(2)(StringData(null)) ++ 163 | List.fill(2)(StringData("test"))) 164 | rdd.toDF().createOrReplaceTempView("nullableRepeatedData") 165 | rdd 166 | } 167 | 168 | protected lazy val nullInts: DataFrame = { 169 | val df = spark.sparkContext.parallelize( 170 | NullInts(1) :: 171 | NullInts(2) :: 172 | NullInts(3) :: 173 | NullInts(null) :: Nil).toDF() 174 | df.createOrReplaceTempView("nullInts") 175 | df 176 | } 177 | 178 | protected lazy val allNulls: DataFrame = { 179 | val df = spark.sparkContext.parallelize( 180 | NullInts(null) :: 181 | NullInts(null) :: 182 | NullInts(null) :: 183 | NullInts(null) :: Nil).toDF() 184 | df.createOrReplaceTempView("allNulls") 185 | df 186 | } 187 | 188 | protected lazy val nullStrings: DataFrame = { 189 | val df = spark.sparkContext.parallelize( 190 | NullStrings(1, "abc") :: 191 | NullStrings(2, "ABC") :: 192 | NullStrings(3, null) :: Nil).toDF() 193 | df.createOrReplaceTempView("nullStrings") 194 | df 195 | } 196 | 197 | protected lazy val tableName: DataFrame = { 198 | val df = spark.sparkContext.parallelize(TableName("test") :: Nil).toDF() 199 | df.createOrReplaceTempView("tableName") 200 | df 201 | } 202 | 203 | protected lazy val unparsedStrings: RDD[String] = { 204 | spark.sparkContext.parallelize( 205 | "1, A1, true, 
null" :: 206 | "2, B2, false, null" :: 207 | "3, C3, true, null" :: 208 | "4, D4, true, 2147483644" :: Nil) 209 | } 210 | 211 | // An RDD with 4 elements and 8 partitions 212 | protected lazy val withEmptyParts: RDD[IntField] = { 213 | val rdd = spark.sparkContext.parallelize((1 to 4).map(IntField), 8) 214 | rdd.toDF().createOrReplaceTempView("withEmptyParts") 215 | rdd 216 | } 217 | 218 | protected lazy val person: DataFrame = { 219 | val df = spark.sparkContext.parallelize( 220 | Person(0, "mike", 30) :: 221 | Person(1, "jim", 20) :: Nil).toDF() 222 | df.createOrReplaceTempView("person") 223 | df 224 | } 225 | 226 | protected lazy val salary: DataFrame = { 227 | val df = spark.sparkContext.parallelize( 228 | Salary(0, 2000.0) :: 229 | Salary(1, 1000.0) :: Nil).toDF() 230 | df.createOrReplaceTempView("salary") 231 | df 232 | } 233 | 234 | protected lazy val complexData: DataFrame = { 235 | val df = spark.sparkContext.parallelize( 236 | ComplexData(Map("1" -> 1), TestData1(1, "1"), Seq(1, 1, 1), true) :: 237 | ComplexData(Map("2" -> 2), TestData1(2, "2"), Seq(2, 2, 2), false) :: 238 | Nil).toDF() 239 | df.createOrReplaceTempView("complexData") 240 | df 241 | } 242 | 243 | protected lazy val courseSales: DataFrame = { 244 | val df = spark.sparkContext.parallelize( 245 | CourseSales("dotNET", 2012, 10000) :: 246 | CourseSales("Java", 2012, 20000) :: 247 | CourseSales("dotNET", 2012, 5000) :: 248 | CourseSales("dotNET", 2013, 48000) :: 249 | CourseSales("Java", 2013, 30000) :: Nil).toDF() 250 | df.createOrReplaceTempView("courseSales") 251 | df 252 | } 253 | 254 | protected lazy val trainingSales: DataFrame = { 255 | val df = spark.sparkContext.parallelize( 256 | TrainingSales("Experts", CourseSales("dotNET", 2012, 10000)) :: 257 | TrainingSales("Experts", CourseSales("JAVA", 2012, 20000)) :: 258 | TrainingSales("Dummies", CourseSales("dotNet", 2012, 5000)) :: 259 | TrainingSales("Experts", CourseSales("dotNET", 2013, 48000)) :: 260 | TrainingSales("Dummies", CourseSales("Java", 2013, 30000)) :: Nil).toDF() 261 | df.createOrReplaceTempView("trainingSales") 262 | df 263 | } 264 | 265 | /** 266 | * Initialize all test data such that all temp tables are properly registered. 267 | */ 268 | def loadTestData(): Unit = { 269 | assert(spark != null, "attempted to initialize test data before SparkSession.") 270 | emptyTestData 271 | testData1 272 | testData2 273 | testData3 274 | negativeData 275 | largeAndSmallInts 276 | decimalData 277 | binaryData 278 | upperCaseData 279 | lowerCaseData 280 | arrayData 281 | mapData 282 | repeatedData 283 | nullableRepeatedData 284 | nullInts 285 | allNulls 286 | nullStrings 287 | tableName 288 | unparsedStrings 289 | withEmptyParts 290 | person 291 | salary 292 | complexData 293 | courseSales 294 | } 295 | } 296 | 297 | /** 298 | * Case classes used in test data. 
299 |  */
300 | private[sql] object SQLTestData {
301 |   case class TestData1(key: Int, value: String)
302 |   case class TestData2(a: Int, b: Int)
303 |   case class TestData3(a: Int, b: Option[Int])
304 |   case class LargeAndSmallInts(a: Int, b: Int)
305 |   case class DecimalData(a: BigDecimal, b: BigDecimal)
306 |   case class BinaryData(a: Array[Byte], b: Int)
307 |   case class UpperCaseData(N: Int, L: String)
308 |   case class LowerCaseData(n: Int, l: String)
309 |   case class ArrayData(data: Seq[Int], nestedData: Seq[Seq[Int]])
310 |   case class MapData(data: scala.collection.Map[Int, String])
311 |   case class StringData(s: String)
312 |   case class IntField(i: Int)
313 |   case class NullInts(a: Integer)
314 |   case class NullStrings(n: Int, s: String)
315 |   case class TableName(tableName: String)
316 |   case class Person(id: Int, name: String, age: Int)
317 |   case class Salary(personId: Int, salary: Double)
318 |   case class ComplexData(m: Map[String, Int], s: TestData1, a: Seq[Int], b: Boolean)
319 |   case class CourseSales(course: String, year: Int, earnings: Double)
320 |   case class TrainingSales(training: String, sales: CourseSales)
321 | }
322 | 
--------------------------------------------------------------------------------
/alchemy/src/test/scala/org/apache/spark/sql/test/SQLTestUtils.scala:
--------------------------------------------------------------------------------
1 | package org.apache.spark.sql.test
2 | 
3 | import java.io.File
4 | import java.net.URI
5 | import java.nio.file.Files
6 | import java.util.{Locale, UUID}
7 | 
8 | import org.apache.hadoop.fs.Path
9 | import org.apache.spark.{SparkFunSuite, SparkSuiteBase}
10 | import org.apache.spark.sql._
11 | import org.apache.spark.sql.catalyst.FunctionIdentifier
12 | import org.apache.spark.sql.catalyst.analysis.NoSuchTableException
13 | import org.apache.spark.sql.catalyst.catalog.SessionCatalog.DEFAULT_DATABASE
14 | import org.apache.spark.sql.catalyst.plans.logical.LogicalPlan
15 | import org.apache.spark.sql.catalyst.plans.{PlanTest, PlanTestBase}
16 | import org.apache.spark.sql.catalyst.util._
17 | import org.apache.spark.sql.execution.FilterExec
18 | import org.apache.spark.sql.internal.SQLConf
19 | import org.apache.spark.util.Utils
20 | import org.scalatest.concurrent.Eventually
21 | import org.scalatest.{BeforeAndAfterAll, Suite}
22 | 
23 | import scala.concurrent.duration._
24 | import scala.language.implicitConversions
25 | 
26 | /**
27 |  * Helper trait that should be extended by all SQL test suites within the Spark
28 |  * code base.
29 |  *
30 |  * This allows subclasses to plug in a custom `SQLContext`. It comes with test data
31 |  * prepared in advance as well as all implicit conversions used extensively by DataFrames.
32 |  * To use implicit methods, import `testImplicits._` instead of going through the `SQLContext`.
33 |  *
34 |  * Subclasses should *not* create `SQLContext`s in the test suite constructor, which is
35 |  * prone to leaving multiple overlapping [[org.apache.spark.SparkContext]]s in the same JVM.
36 |  */
37 | trait SQLTestUtils extends SparkSuiteBase with SQLTestUtilsBase with PlanTestBase {
38 |   this: Suite =>
39 | 
40 |   // Whether to materialize all test data before the first test is run
41 |   private var loadTestDataBeforeTests = false
42 | 
43 |   protected override def beforeAll(): Unit = {
44 |     super.beforeAll()
45 |     if (loadTestDataBeforeTests) {
46 |       loadTestData()
47 |     }
48 |   }
49 | 
50 |   /**
51 |    * Creates a temporary directory, which is then passed to `f` and will be deleted after `f`
52 |    * returns.
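   *
   * A hypothetical usage sketch:
   * {{{
   *   withTempDir { dir =>
   *     val file = new java.io.File(dir, "data.txt")
   *     java.nio.file.Files.write(file.toPath, "hello".getBytes("UTF-8"))
   *     // dir and everything in it is deleted recursively after this block
   *   }
   * }}}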
53 |    */
54 |   protected override def withTempDir(f: File => Unit): Unit = {
55 |     super.withTempDir { dir =>
56 |       f(dir)
57 |       waitForTasksToFinish()
58 |     }
59 |   }
60 | 
61 |   /**
62 |    * Materialize the test data immediately after the `SQLContext` is set up.
63 |    * This is necessary if the data is accessed by name but not through direct reference.
64 |    */
65 |   protected def setupTestData(): Unit = {
66 |     loadTestDataBeforeTests = true
67 |   }
68 | 
69 |   /**
70 |    * Copies a file from the jar's resources to a temporary file, then passes it to `f`.
71 |    * This lets `f` use the path of a regular file (e.g. file:/) instead of the path of a
72 |    * resource inside the jar, which starts with 'jar:file:/'.
73 |    */
74 |   protected def withResourceTempPath(resourcePath: String)(f: File => Unit): Unit = {
75 |     val inputStream =
76 |       Thread.currentThread().getContextClassLoader.getResourceAsStream(resourcePath)
77 |     withTempDir { dir =>
78 |       val tmpFile = new File(dir, "tmp")
79 |       Files.copy(inputStream, tmpFile.toPath)
80 |       f(tmpFile)
81 |     }
82 |   }
83 | 
84 |   /**
85 |    * Waits for all tasks on all executors to be finished.
86 |    */
87 |   protected def waitForTasksToFinish(): Unit = {
88 |     eventually(timeout(30.seconds)) {
89 |       assert(spark.sparkContext.statusTracker
90 |         .getExecutorInfos.map(_.numRunningTasks()).sum == 0)
91 |     }
92 |   }
93 | 
94 |   /**
95 |    * Creates the specified number of temporary directories, which are then passed to `f` and will
96 |    * be deleted after `f` returns.
97 |    */
98 |   protected def withTempPaths(numPaths: Int)(f: Seq[File] => Unit): Unit = {
99 |     val files = Array.fill[File](numPaths)(Utils.createTempDir().getCanonicalFile)
100 |     try f(files) finally {
101 |       // wait for all tasks to finish before deleting files
102 |       waitForTasksToFinish()
103 |       files.foreach(Utils.deleteRecursively)
104 |     }
105 |   }
106 | }
107 | 
108 | /**
109 |  * Helper trait that can be extended by all external SQL test suites.
110 |  *
111 |  * This allows subclasses to plug in a custom `SQLContext`.
112 |  * To use implicit methods, import `testImplicits._` instead of going through the `SQLContext`.
113 |  *
114 |  * Subclasses should *not* create `SQLContext`s in the test suite constructor, which is
115 |  * prone to leaving multiple overlapping [[org.apache.spark.SparkContext]]s in the same JVM.
116 |  */
117 | trait SQLTestUtilsBase
118 |   extends Eventually
119 |     with BeforeAndAfterAll
120 |     with SQLTestData
121 |     with PlanTestBase {
122 |   self: Suite =>
123 | 
124 |   protected def sparkContext = spark.sparkContext
125 | 
126 |   // Shorthand for running a query using our SQLContext
127 |   protected lazy val sql = spark.sql _
128 | 
129 |   /**
130 |    * A helper object for importing SQL implicits.
131 |    *
132 |    * Note that the alternative of importing `spark.implicits._` is not possible here.
133 |    * This is because we create the `SQLContext` immediately before the first test is run,
134 |    * but the implicits import is needed in the constructor.
135 |    */
136 |   protected object testImplicits extends SQLImplicits {
137 |     protected override def _sqlContext: SQLContext = self.spark.sqlContext
138 |   }
139 | 
140 |   protected override def withSQLConf(pairs: (String, String)*)(f: => Unit): Unit = {
141 |     SparkSession.setActiveSession(spark)
142 |     super.withSQLConf(pairs: _*)(f)
143 |   }
144 | 
145 |   /**
146 |    * Drops functions after calling `f`. A function is represented by (functionName, isTemporary).
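   *
   * A hypothetical usage sketch (assumes the `spark` and `sql` helpers from this trait):
   * {{{
   *   withUserDefinedFunction("myUpper" -> true) {
   *     spark.udf.register("myUpper", (s: String) => s.toUpperCase)
   *     assert(sql("SELECT myUpper('hello')").head.getString(0) == "HELLO")
   *   } // DROP TEMPORARY FUNCTION IF EXISTS myUpper runs here
   * }}}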
147 |    */
148 |   protected def withUserDefinedFunction(functions: (String, Boolean)*)(f: => Unit): Unit = {
149 |     try {
150 |       f
151 |     } catch {
152 |       case cause: Throwable => throw cause
153 |     } finally {
154 |       // If the test failed part way, we don't want to mask the failure by failing to remove
155 |       // functions that never got created.
156 |       functions.foreach { case (functionName, isTemporary) =>
157 |         val withTemporary = if (isTemporary) "TEMPORARY" else ""
158 |         spark.sql(s"DROP $withTemporary FUNCTION IF EXISTS $functionName")
159 |         assert(
160 |           !spark.sessionState.catalog.functionExists(FunctionIdentifier(functionName)),
161 |           s"Function $functionName should have been dropped. But, it still exists.")
162 |       }
163 |     }
164 |   }
165 | 
166 |   /**
167 |    * Drops the temporary views `viewNames` after calling `f`.
168 |    */
169 |   protected def withTempView(viewNames: String*)(f: => Unit): Unit = {
170 |     Utils.tryWithSafeFinally(f) {
171 |       viewNames.foreach { viewName =>
172 |         try spark.catalog.dropTempView(viewName) catch {
173 |           // If the test failed part way, we don't want to mask the failure by failing to remove
174 |           // temp views that never got created.
175 |           case _: NoSuchTableException =>
176 |         }
177 |       }
178 |     }
179 |   }
180 | 
181 |   /**
182 |    * Drops the global temporary views `viewNames` after calling `f`.
183 |    */
184 |   protected def withGlobalTempView(viewNames: String*)(f: => Unit): Unit = {
185 |     Utils.tryWithSafeFinally(f) {
186 |       viewNames.foreach { viewName =>
187 |         try spark.catalog.dropGlobalTempView(viewName) catch {
188 |           // If the test failed part way, we don't want to mask the failure by failing to remove
189 |           // global temp views that never got created.
190 |           case _: NoSuchTableException =>
191 |         }
192 |       }
193 |     }
194 |   }
195 | 
196 |   /**
197 |    * Drops the tables `tableNames` after calling `f`.
198 |    */
199 |   protected def withTable(tableNames: String*)(f: => Unit): Unit = {
200 |     Utils.tryWithSafeFinally(f) {
201 |       tableNames.foreach { name =>
202 |         spark.sql(s"DROP TABLE IF EXISTS $name")
203 |       }
204 |     }
205 |   }
206 | 
207 |   /**
208 |    * Drops the views `viewNames` after calling `f`.
209 |    */
210 |   protected def withView(viewNames: String*)(f: => Unit): Unit = {
211 |     Utils.tryWithSafeFinally(f)(
212 |       viewNames.foreach { name =>
213 |         spark.sql(s"DROP VIEW IF EXISTS $name")
214 |       }
215 |     )
216 |   }
217 | 
218 |   /**
219 |    * Drops the caches `cacheNames` after calling `f`.
220 |    */
221 |   protected def withCache(cacheNames: String*)(f: => Unit): Unit = {
222 |     Utils.tryWithSafeFinally(f) {
223 |       cacheNames.foreach { cacheName =>
224 |         try uncacheTable(cacheName) catch {
225 |           case _: AnalysisException =>
226 |         }
227 |       }
228 |     }
229 |   }
230 | 
231 |   // Blocking uncache table for tests
232 |   protected def uncacheTable(tableName: String): Unit = {
233 |     val tableIdent = spark.sessionState.sqlParser.parseTableIdentifier(tableName)
234 |     val cascade = !spark.sessionState.catalog.isTempView(tableIdent)
235 |     spark.sharedState.cacheManager.uncacheQuery(
236 |       spark,
237 |       spark.table(tableName).logicalPlan,
238 |       cascade = cascade,
239 |       blocking = true)
240 |   }
241 | 
242 |   /**
243 |    * Creates a temporary database, passes its name to `f`, and drops the database after `f`
244 |    * returns.
245 |    *
246 |    * Note that this method doesn't switch the current database before executing `f`.
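   *
   * A hypothetical usage sketch (table and column names are illustrative only):
   * {{{
   *   withTempDatabase { db =>
   *     sql(s"CREATE TABLE $db.t (id INT) USING parquet")  // qualify names: we stay in the original database
   *     assert(sql(s"SELECT * FROM $db.t").count() == 0)
   *   } // DROP DATABASE ... CASCADE runs here
   * }}}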
247 |    */
248 |   protected def withTempDatabase(f: String => Unit): Unit = {
249 |     val dbName = s"db_${UUID.randomUUID().toString.replace('-', '_')}"
250 | 
251 |     try {
252 |       spark.sql(s"CREATE DATABASE $dbName")
253 |     } catch {
254 |       case cause: Throwable =>
255 |         fail("Failed to create temporary database", cause)
256 |     }
257 | 
258 |     try f(dbName) finally {
259 |       if (spark.catalog.currentDatabase == dbName) {
260 |         spark.sql(s"USE $DEFAULT_DATABASE")
261 |       }
262 |       spark.sql(s"DROP DATABASE $dbName CASCADE")
263 |     }
264 |   }
265 | 
266 |   /**
267 |    * Drops the databases `dbNames` after calling `f`.
268 |    */
269 |   protected def withDatabase(dbNames: String*)(f: => Unit): Unit = {
270 |     Utils.tryWithSafeFinally(f) {
271 |       dbNames.foreach { name =>
272 |         spark.sql(s"DROP DATABASE IF EXISTS $name CASCADE")
273 |       }
274 |       spark.sql(s"USE $DEFAULT_DATABASE")
275 |     }
276 |   }
277 | 
278 |   /**
279 |    * Sets the default Locale to `language` before executing `f`, then switches back to the
280 |    * JVM's default locale after `f` returns.
281 |    */
282 |   protected def withLocale(language: String)(f: => Unit): Unit = {
283 |     val originalLocale = Locale.getDefault
284 |     try {
285 |       // Add Locale setting
286 |       Locale.setDefault(new Locale(language))
287 |       f
288 |     } finally {
289 |       Locale.setDefault(originalLocale)
290 |     }
291 |   }
292 | 
293 |   /**
294 |    * Activates database `db` before executing `f`, then switches back to the `default` database
295 |    * after `f` returns.
296 |    */
297 |   protected def activateDatabase(db: String)(f: => Unit): Unit = {
298 |     spark.sessionState.catalog.setCurrentDatabase(db)
299 |     Utils.tryWithSafeFinally(f)(spark.sessionState.catalog.setCurrentDatabase("default"))
300 |   }
301 | 
302 |   /**
303 |    * Strips Spark-side filtering in order to check if a datasource filters rows correctly.
304 |    */
305 |   protected def stripSparkFilter(df: DataFrame): DataFrame = {
306 |     val schema = df.schema
307 |     val withoutFilters = df.queryExecution.executedPlan.transform {
308 |       case FilterExec(_, child) => child
309 |     }
310 | 
311 |     spark.internalCreateDataFrame(withoutFilters.execute(), schema)
312 |   }
313 | 
314 |   /**
315 |    * Turns a logical plan into a `DataFrame`. This should be removed once we have an easier
316 |    * way to construct a `DataFrame` directly out of local data without relying on implicits.
317 |    */
318 |   protected implicit def logicalPlanToSparkQuery(plan: LogicalPlan): DataFrame = {
319 |     Dataset.ofRows(spark, plan)
320 |   }
321 | 
322 | 
323 |   /**
324 |    * Makes the given path qualified. If the given path does not contain a scheme,
325 |    * qualifying it ensures the resulting path will not change if the default
326 |    * FileSystem changes.
327 |    */
328 |   def makeQualifiedPath(path: String): URI = {
329 |     val hadoopPath = new Path(path)
330 |     val fs = hadoopPath.getFileSystem(spark.sessionState.newHadoopConf())
331 |     fs.makeQualified(hadoopPath).toUri
332 |   }
333 | 
334 |   /**
335 |    * Returns the full path to the given file in the resource folder.
336 |    */
337 |   protected def testFile(fileName: String): String = {
338 |     Thread.currentThread().getContextClassLoader.getResource(fileName).toString
339 |   }
340 | 
341 | }
342 | 
343 | object SQLTestUtils {
344 | 
345 |   def compareAnswers(
346 |     sparkAnswer: Seq[Row],
347 |     expectedAnswer: Seq[Row],
348 |     sort: Boolean): Option[String] = {
349 |     def prepareAnswer(answer: Seq[Row]): Seq[Row] = {
350 |       // Converts data to types for which we can do equality comparison using Scala collections.
351 |       // For BigDecimal type, the Scala type has a better definition of equality test (similar to
352 |       // Java's java.math.BigDecimal.compareTo).
353 |       // For binary arrays, we convert them to Seq to avoid calling java.util.Arrays.equals for
354 |       // the equality test.
355 |       // This function is copied from Catalyst's QueryTest.
356 |       val converted: Seq[Row] = answer.map { s =>
357 |         Row.fromSeq(s.toSeq.map {
358 |           case d: java.math.BigDecimal => BigDecimal(d)
359 |           case b: Array[Byte] => b.toSeq
360 |           case o => o
361 |         })
362 |       }
363 |       if (sort) {
364 |         converted.sortBy(_.toString())
365 |       } else {
366 |         converted
367 |       }
368 |     }
369 | 
370 |     if (prepareAnswer(expectedAnswer) != prepareAnswer(sparkAnswer)) {
371 |       val errorMessage =
372 |         s"""
373 |            | == Results ==
374 |            | ${
375 |           sideBySide(
376 |             s"== Expected Answer - ${expectedAnswer.size} ==" +:
377 |               prepareAnswer(expectedAnswer).map(_.toString()),
378 |             s"== Actual Answer - ${sparkAnswer.size} ==" +:
379 |               prepareAnswer(sparkAnswer).map(_.toString())).mkString("\n")
380 |         }
381 |       """.stripMargin
382 |       Some(errorMessage)
383 |     } else {
384 |       None
385 |     }
386 |   }
387 | }
388 | 
--------------------------------------------------------------------------------
/alchemy/src/test/scala/org/apache/spark/sql/test/SharedSparkSessionBase.scala:
--------------------------------------------------------------------------------
1 | package org.apache.spark.sql.test
2 | 
3 | import org.apache.spark.sql.internal.StaticSQLConf
4 | import org.apache.spark.sql.{SQLContext, SparkSession}
5 | import org.apache.spark.{DebugFilesystem, SparkConf}
6 | import org.scalatest.Suite
7 | import org.scalatest.concurrent.Eventually
8 | 
9 | import scala.concurrent.duration._
10 | 
11 | 
12 | /**
13 |  * Helper trait for SQL test suites where all tests share a single [[TestSparkSession]].
14 |  */
15 | trait SharedSparkSessionBase
16 |   extends SQLTestUtils
17 |     with Eventually {
18 |   self: Suite =>
19 | 
20 |   protected def sparkConf = {
21 |     val conf = new SparkConf()
22 |     conf
23 |       .set(StaticSQLConf.WAREHOUSE_PATH, conf.get(StaticSQLConf.WAREHOUSE_PATH) + "/" + getClass.getCanonicalName)
24 |       .set("spark.hadoop.fs.file.impl", classOf[DebugFilesystem].getName)
25 |   }
26 | 
27 |   /**
28 |    * The [[TestSparkSession]] to use for all tests in this suite.
29 |    *
30 |    * By default, the underlying [[org.apache.spark.SparkContext]] will be run in local
31 |    * mode with the default test configurations.
32 |    */
33 |   private var _spark: TestSparkSession = _
34 | 
35 |   /**
36 |    * The [[TestSparkSession]] to use for all tests in this suite.
37 |    */
38 |   protected implicit def spark: SparkSession = _spark
39 | 
40 |   /**
41 |    * The [[SQLContext]] to use for all tests in this suite.
42 |    */
43 |   protected implicit def sqlContext: SQLContext = _spark.sqlContext
44 | 
45 |   protected def createSparkSession: TestSparkSession = {
46 |     SparkSession.cleanupAnyExistingSession()
47 |     new TestSparkSession(sparkConf)
48 |   }
49 | 
50 |   /**
51 |    * Initializes the [[TestSparkSession]]. Generally, this is just called from
52 |    * beforeAll; however, in tests using styles other than FunSuite, there is
53 |    * often code that relies on the session between test group constructs and
54 |    * the actual tests, which may need this session. It is purely a semantic
55 |    * difference, but it makes more sense to call
56 |    * 'initializeSession' between a 'describe' and an 'it' call than it does to
57 |    * call 'beforeAll'.
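   *
   * A hypothetical sketch in a FunSpec-style suite:
   * {{{
   *   describe("my feature") {
   *     initializeSession()
   *     val df = spark.range(3).toDF("id")  // safe: the session exists before the first 'it'
   *     it("counts rows") { assert(df.count() == 3) }
   *   }
   * }}}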
58 | */ 59 | protected def initializeSession(): Unit = { 60 | if (_spark == null) { 61 | _spark = createSparkSession 62 | } 63 | } 64 | 65 | /** 66 | * Make sure the [[TestSparkSession]] is initialized before any tests are run. 67 | */ 68 | protected override def beforeAll(): Unit = { 69 | initializeSession() 70 | 71 | // Ensure we have initialized the context before calling parent code 72 | super.beforeAll() 73 | } 74 | 75 | /** 76 | * Stop the underlying [[org.apache.spark.SparkContext]], if any. 77 | */ 78 | protected override def afterAll(): Unit = { 79 | try { 80 | super.afterAll() 81 | } finally { 82 | try { 83 | if (_spark != null) { 84 | try { 85 | _spark.sessionState.catalog.reset() 86 | } finally { 87 | try { 88 | waitForTasksToFinish() 89 | } finally { 90 | _spark.stop() 91 | _spark = null 92 | } 93 | } 94 | } 95 | } finally { 96 | SparkSession.clearActiveSession() 97 | SparkSession.clearDefaultSession() 98 | } 99 | } 100 | } 101 | 102 | protected override def beforeEach(): Unit = { 103 | super.beforeEach() 104 | DebugFilesystem.clearOpenStreams() 105 | } 106 | 107 | protected override def afterEach(): Unit = { 108 | super.afterEach() 109 | // Clear all persistent datasets after each test 110 | spark.sharedState.cacheManager.clearCache() 111 | // files can be closed from other threads, so wait a bit 112 | // normally this doesn't take more than 1s 113 | eventually(timeout(30.seconds), interval(2.seconds)) { 114 | DebugFilesystem.assertNoOpenStreams() 115 | } 116 | } 117 | } 118 | -------------------------------------------------------------------------------- /alchemy/src/test/scala/org/apache/spark/sql/test/TestSparkSession.scala: -------------------------------------------------------------------------------- 1 | package org.apache.spark.sql.test 2 | 3 | import org.apache.spark.sql.{SparkSession, SparkSessionExtensions} 4 | import org.apache.spark.sql.internal.{SQLConf, SessionState, SessionStateBuilder, WithTestConf} 5 | import org.apache.spark.{SparkConf, SparkContext} 6 | 7 | /** 8 | * A special `SparkSession` prepared for testing. 9 | */ 10 | class TestSparkSession(sc: SparkContext) extends SparkSession(sc) { self => 11 | def this(sparkConf: SparkConf) { 12 | this(new SparkContext("local[2]", "test-sql-context", 13 | sparkConf.set("spark.sql.testkey", "true"))) 14 | } 15 | 16 | def this() { 17 | this(new SparkConf) 18 | } 19 | 20 | SparkSession.setDefaultSession(this) 21 | SparkSession.setActiveSession(this) 22 | 23 | @transient 24 | override lazy val sessionState: SessionState = { 25 | new TestSQLSessionStateBuilder(this, None).build() 26 | } 27 | 28 | // Needed for extension loading 29 | def injectExtension(extension: SparkSessionExtensions => Unit): Unit = 30 | extension(extensions) 31 | 32 | // Needed for Java tests 33 | def loadTestData(): Unit = { 34 | testData.loadTestData() 35 | } 36 | 37 | private object testData extends SQLTestData { 38 | protected override def spark: SparkSession = self 39 | } 40 | } 41 | 42 | 43 | object TestSQLContext { 44 | 45 | /** 46 | * A map used to store all confs that need to be overridden in sql/core unit tests. 47 | */ 48 | val overrideConfs: Map[String, String] = 49 | Map( 50 | // Fewer shuffle partitions to speed up testing. 
51 | SQLConf.SHUFFLE_PARTITIONS.key -> "3" 52 | ) 53 | } 54 | 55 | private[sql] class TestSQLSessionStateBuilder( 56 | session: SparkSession, 57 | state: Option[SessionState]) 58 | extends SessionStateBuilder(session, state) with WithTestConf { 59 | override def overrideConfs: Map[String, String] = TestSQLContext.overrideConfs 60 | override def newBuilder: NewBuilder = new TestSQLSessionStateBuilder(_, _) 61 | } 62 | -------------------------------------------------------------------------------- /build.sbt: -------------------------------------------------------------------------------- 1 | ThisBuild / organization := "com.swoop" 2 | ThisBuild / version := scala.io.Source.fromFile("VERSION").mkString.stripLineEnd 3 | 4 | ThisBuild / scalaVersion := "2.12.11" 5 | ThisBuild / crossScalaVersions := Seq("2.12.11") 6 | 7 | ThisBuild / javacOptions ++= Seq("-source", "1.8", "-target", "1.8") 8 | 9 | val sparkVersion = "3.2.0" 10 | 11 | lazy val scalaSettings = Seq( 12 | scalaVersion := "2.12.11", 13 | crossScalaVersions := Seq("2.12.11"), 14 | scalacOptions in(Compile, doc) ++= Seq("-doc-root-content", baseDirectory.value + "/docs/root-doc.txt"), 15 | scalacOptions in(Compile, doc) ++= Seq("-groups", "-implicits"), 16 | javacOptions in(Compile, doc) ++= Seq("-notimestamp", "-linksource"), 17 | javacOptions ++= Seq("-source", "1.8", "-target", "1.8") 18 | ) 19 | 20 | lazy val alchemy = (project in file(".")) 21 | .settings( 22 | name := "spark-alchemy", 23 | scalaSource in Compile := baseDirectory.value / "alchemy/src/main/scala", 24 | scalaSource in Test := baseDirectory.value / "alchemy/src/test/scala", 25 | resourceDirectory in Compile := baseDirectory.value / "alchemy/src/main/resources", 26 | resourceDirectory in Test := baseDirectory.value / "alchemy/src/test/resources", 27 | libraryDependencies ++= Seq( 28 | "org.scalatest" %% "scalatest" % "3.2.2" % Test, 29 | "net.agkn" % "hll" % "1.6.0", 30 | "org.postgresql" % "postgresql" % "42.2.8" % Test, 31 | "org.apache.spark" %% "spark-sql" % sparkVersion % "provided" 32 | ), 33 | fork in Test := true, // required for Spark 34 | scalaSettings 35 | ) 36 | .enablePlugins(SiteScaladocPlugin) 37 | .enablePlugins(BuildInfoPlugin) 38 | .enablePlugins(GitVersioning, GitBranchPrompt) 39 | 40 | lazy val docs = project 41 | .in(file("docs")) 42 | .settings( 43 | moduleName := "spark-alchemy-docs", 44 | name := moduleName.value, 45 | scalaSettings, 46 | micrositeSettings 47 | ) 48 | .dependsOn(alchemy) 49 | .enablePlugins(MicrositesPlugin) 50 | 51 | lazy val micrositeSettings = Seq( 52 | micrositeName := "Spark Alchemy", 53 | micrositeDescription := "Useful extensions to Apache Spark", 54 | micrositeAuthor := "Swoop", 55 | micrositeHomepage := "https://www.swoop.com", 56 | micrositeBaseUrl := "/spark-alchemy", 57 | micrositeDocumentationUrl := "/spark-alchemy/docs.html", 58 | micrositeGithubOwner := "swoop-inc", 59 | micrositeGithubRepo := "spark-alchemy", 60 | micrositeHighlightTheme := "tomorrow", 61 | micrositeTheme := "pattern", 62 | micrositePushSiteWith := GitHub4s, 63 | micrositeGithubToken := sys.env.get("GITHUB_TOKEN"), 64 | micrositeImgDirectory := (Compile / resourceDirectory).value / "site" / "images", 65 | micrositeCssDirectory := (Compile / resourceDirectory).value / "site" / "styles", 66 | micrositeJsDirectory := (Compile / resourceDirectory).value / "site" / "scripts" 67 | ) 68 | 69 | 70 | // Speed up dependency resolution (experimental) 71 | // @see https://www.scala-sbt.org/1.0/docs/Cached-Resolution.html 72 | ThisBuild / updateOptions := 
updateOptions.value.withCachedResolution(true) 73 | 74 | // @see https://wiki.scala-lang.org/display/SW/Configuring+SBT+to+Generate+a+Scaladoc+Root+Page 75 | autoAPIMappings := true 76 | 77 | buildInfoPackage := "com.swoop.alchemy" 78 | 79 | // SBT header settings 80 | ThisBuild / organizationName := "Swoop, Inc" 81 | ThisBuild / startYear := Some(2018) 82 | ThisBuild / licenses += ("Apache-2.0", new URL("https://www.apache.org/licenses/LICENSE-2.0.txt")) 83 | 84 | ThisBuild / credentials += Credentials(Path.userHome / ".sbt" / "sonatype_credentials") 85 | ThisBuild / homepage := Some(url("https://swoop-inc.github.io/spark-alchemy/")) 86 | ThisBuild / developers ++= List( 87 | Developer("ssimeonov", "Simeon Simeonov", "@ssimeonov", url("https://github.com/ssimeonov")) 88 | ) 89 | ThisBuild / scmInfo := Some(ScmInfo(url("https://github.com/swoop-inc/spark-alchemy"), "git@github.com:swoop-inc/spark-alchemy.git")) 90 | ThisBuild / updateOptions := updateOptions.value.withLatestSnapshots(false) 91 | ThisBuild / publishMavenStyle := true 92 | ThisBuild / publishTo := sonatypePublishToBundle.value 93 | Global / useGpgPinentry := true 94 | -------------------------------------------------------------------------------- /codeStyleSettings.xml: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 17 | 19 | 20 | -------------------------------------------------------------------------------- /docker-compose.yml: -------------------------------------------------------------------------------- 1 | version: '3' 2 | 3 | volumes: 4 | postgres-data: 5 | driver: local 6 | 7 | services: 8 | postgres: 9 | image: swoopinc/postgres-hll:11 10 | ports: 11 | - 5432:5432 12 | volumes: 13 | - postgres-data:/var/lib/postgresql/data:cached 14 | -------------------------------------------------------------------------------- /docs/docs/docs.md: -------------------------------------------------------------------------------- 1 | --- 2 | layout: docs 3 | --- 4 | 5 | # spark-alchemy by example 6 | 7 | ```scala mdoc 8 | import com.swoop.alchemy.utils.AnyExtensions 9 | ``` 10 | -------------------------------------------------------------------------------- /docs/docs/index.md: -------------------------------------------------------------------------------- 1 | --- 2 | layout: home 3 | title: "Home" 4 | section: "Home" 5 | --- 6 | 7 | ## spark-alchemy 8 | 9 | Spark Alchemy is a collection of open-source Spark tools & frameworks that have made the data engineering and 10 | data science teams at [Swoop](https://www.swoop.com) highly productive in our demanding petabyte-scale environment 11 | with rich data (thousands of columns). 12 | 13 | We are preparing to release `spark-alchemy`. Click Watch above to be notified when we do. 14 | 15 | Here is a preview of what we'd like to include here: 16 | 17 | - Configuration Addressable Production (CAP), Automatic Lifecycle Management (ALM) and Just-in-time Dependency Resolution 18 | (JDR) as outlined in our Spark+AI Summit talk [Unafraid of Change: Optimizing ETL, ML, and AI in Fast-Paced Environments](https://databricks.com/session/unafraid-of-change-optimizing-etl-ml-ai-in-fast-paced-environments). 19 | 20 | - Our extensive set of [HyperLogLog](https://en.wikipedia.org/wiki/HyperLogLog) (HLL) functions that allow the saving of HLL sketches as binary columns for fast 21 | reaggregation as well as HLL interoperability with Postgres. 
(Spark has an HLL implementation but does not expose the binary HLL sketches,
22 |   which makes its usefulness rather limited.) A short usage sketch appears at the end of this page.
23 | 
24 | - Hundreds of productivity-enhancing extensions to the core user-level data types: `Column`, `Dataset`, `SparkSession`, etc.
25 | 
26 | - Data discovery and cleansing tools we use to ingest and clean up large amounts of dirty data from third parties.
27 | 
28 | - Cross-cluster named lock manager, which simplifies data production by removing the need for workflow servers much of the time.
29 | 
30 | - Versioned data source, which allows a new version to be written while the current version is being read.
31 | 
32 | - `case class` code generation from Spark schema, with easy implementation customization.
33 | 
34 | - Tools for deploying Spark ML pipelines to production.
35 | 
36 | - Lots more, as we are constantly building up our internal toolset.
37 | 
38 | All this is code we use on a daily basis at [Swoop](https://www.swoop.com) and [IPM.ai](https://www.ipm.ai). However, making the code
39 | suitable for external use and taking on the responsibility to manage it for the broader Spark community is not a task we take lightly.
40 | We are reviewing/refactoring APIs based on what we've learned from using the code over months and years at Swoop and adjusting it for
41 | Spark 2.4.x. The process we go through is as follows:
42 | 
43 | 1. Code we would like to consider for open-sourcing goes into an internal `spark-magic` library, which has no dependencies on Swoop-related
44 | code, where it begins its "live use test" on multiple Spark clusters.
45 | 
46 | 2. Once we feel the APIs have baked enough, we review candidate code and move it to an internal/private version of `spark-alchemy` to check
47 | for dependencies and interactions with other components.
48 | 
49 | 3. If all looks good, we promote the code to the public `spark-alchemy` (this repository).
50 | 
51 | If you'd like to contribute to our open-source efforts, whether by joining our team or from your own company, let us know at `spark-interest at swoop dot com`.
52 | 
53 | For more Spark OSS work from Swoop, check out [spark-records](https://github.com/swoop-inc/spark-records).
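
To give a taste of the HLL functions mentioned above, here is a minimal, hypothetical sketch (the import
follows the `hll` package in this repository; `df` is assumed to be a `DataFrame` with `date` and `user_id` columns):

```scala
import org.apache.spark.sql.functions.col
import com.swoop.alchemy.spark.expressions.hll.functions._

// Pre-aggregate each day's users into a compact binary HLL sketch column
val dailySketches = df
  .groupBy("date")
  .agg(hll_init_agg("user_id").as("user_id_hll"))

// Later, reaggregate across days without touching the raw data
dailySketches.agg(
  hll_cardinality(hll_merge(col("user_id_hll"))).as("approx_unique_users"))
```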
54 | 
--------------------------------------------------------------------------------
/docs/src/main/resources/site/images/favicon.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/swoop-inc/spark-alchemy/10eadbd66c3970e255e1449b19338e0f2f2ee5ea/docs/src/main/resources/site/images/favicon.png
--------------------------------------------------------------------------------
/docs/src/main/resources/site/images/navbar_brand.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/swoop-inc/spark-alchemy/10eadbd66c3970e255e1449b19338e0f2f2ee5ea/docs/src/main/resources/site/images/navbar_brand.png
--------------------------------------------------------------------------------
/docs/src/main/resources/site/images/navbar_brand2x.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/swoop-inc/spark-alchemy/10eadbd66c3970e255e1449b19338e0f2f2ee5ea/docs/src/main/resources/site/images/navbar_brand2x.png
--------------------------------------------------------------------------------
/docs/src/main/resources/site/images/sidebar_brand.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/swoop-inc/spark-alchemy/10eadbd66c3970e255e1449b19338e0f2f2ee5ea/docs/src/main/resources/site/images/sidebar_brand.png
--------------------------------------------------------------------------------
/docs/src/main/resources/site/images/sidebar_brand2x.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/swoop-inc/spark-alchemy/10eadbd66c3970e255e1449b19338e0f2f2ee5ea/docs/src/main/resources/site/images/sidebar_brand2x.png
--------------------------------------------------------------------------------
/docs/src/main/resources/site/images/swoop-icon_130x130.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/swoop-inc/spark-alchemy/10eadbd66c3970e255e1449b19338e0f2f2ee5ea/docs/src/main/resources/site/images/swoop-icon_130x130.png
--------------------------------------------------------------------------------
/docs/src/main/resources/site/images/swoop-icon_80x80.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/swoop-inc/spark-alchemy/10eadbd66c3970e255e1449b19338e0f2f2ee5ea/docs/src/main/resources/site/images/swoop-icon_80x80.png
--------------------------------------------------------------------------------
/docs/src/main/resources/site/scripts/automenu.js:
--------------------------------------------------------------------------------
1 | 
2 | jQuery(document).ready(function() {
3 |   activeLinks();
4 |   activeToggle();
5 |   organizeContent();
6 | });
7 | 
8 | function organizeContent() {
9 |   var content = $('#content');
10 |   var subcontent = $('<div id="subcontent"></div>');  // container for the regrouped sections
11 |   content.prepend(subcontent);
12 |   content.find('h1').each(function(index) {
13 |     var section = $('<section></section>');
14 |     subcontent.append(section);
15 |     var h1 = $(this);
16 |     var elements = h1.nextUntil('h1');
17 |     var text = h1.text();
18 |     var slug = slugify(text) + '-' + index;
19 |     addSectionToSidebar(text, slug);
20 |     section.append(makeSectionAnchor(h1, text, slug));
21 |     if (elements.length > 0) {
22 |       elements.appendTo(section);
23 |       organizeSubSection(slug, elements);
24 |     }
25 |   });
26 |   removeEmptyList();
27 | }
28 | 
29 | function organizeSubSection(s, children) {
30 |   children.filter('h2').each(function(index, el) {
31 |     var h2 = $(this);
32 |     var text = h2.text();
33 |     var slug = s + '-' + slugify(text) + '-' + index;
34 |     var a = makeSectionAnchor(h2, text, slug);
35 |     addSubSectionToSidebar(text, slug, s);
36 |   });
37 | }
38 | 
39 | function makeSectionAnchor(h, text, slug) {
40 |   var a = $('<a></a>').attr({
41 |     'class': 'anchor',
42 |     'name': slug,
43 |     'href': '#' + slug
44 |   });
45 |   a.append(h.clone());
46 |   h.replaceWith(a);
47 |   return a;
48 | }
49 | 
50 | function addSectionToSidebar(text, slug) {
51 |   var ul = $('<ul></ul>').addClass('sub_section');
52 |   var a = $('<a href="#' + slug + '">' + text + '<i class="fa fa-angle-right"></i></a>');
53 |   a.find('.fa-angle-right').css('padding-top', '0.7em');
54 |   var li = $('<li class="' + slug + '">&nbsp;&nbsp;•&nbsp;</li>');
55 |   li.append(a).append(ul);
56 |   ul.hide();
57 |   $('#sidebar').append(li);
58 |   a.click(function(event) {
59 |     $('#sidebar li').add('#sidebar a').removeClass('active');
60 |     $('#sidebar .sub_section').not(ul).slideUp();
61 |     ul.slideToggle('fast');
62 |     li.add(a).toggleClass('active');
63 |   });
64 | }
65 | 
66 | function addSubSectionToSidebar(text, slug, s) {
67 |   var ul = $('#sidebar li.' + s + ' ul');
68 |   var li = $('<li>&nbsp;&nbsp;•&nbsp;<a href="#' + slug + '">' + text + '</a></li>');
69 |   ul.append(li);
70 | }
71 | 
72 | function removeEmptyList() {
73 |   $('#sidebar>li').not('.sidebar-brand').each(function(index, el) {
74 |     var li = $(this);
75 |     var children = li.find('li');
76 |     if (children.length === 0) {
77 |       li.find('span').remove();
78 |     }
79 |   });
80 | }
81 | 
82 | function slugify(text) {
83 |   return text.toString().toLowerCase()
84 |     .replace(/\s+/g, '-')      // Replace spaces with -
85 |     .replace(/[^\w\-]+/g, '')  // Remove all non-word chars
86 |     .replace(/\-\-+/g, '-')    // Replace multiple - with single -
87 |     .replace(/^-+/, '')        // Trim - from start of text
88 |     .replace(/-+$/, '');       // Trim - from end of text
89 | }
90 | 
91 | function activeToggle() {
92 |   $("#menu-toggle").click(function(e) {
93 |     e.preventDefault();
94 |     $("#wrapper").toggleClass("toggled");
95 |   });
96 | }
97 | 
98 | function activeLinks() {
99 |   $('a[data-href]').each(function(index, el) {
100 |     $(this).attr('href', $(this).attr('data-href'));
101 |   });
102 | }
103 | 
--------------------------------------------------------------------------------
/docs/src/main/resources/site/styles/overrides.css:
--------------------------------------------------------------------------------
1 | @media (min-width: 768px) {
2 |   .container {
3 |     width: 750px; } }
4 | @media (min-width: 992px) {
5 |   .container {
6 |     width: 800px; } }
7 | @media (min-width: 1200px) {
8 |   .container {
9 |     width: 800px; } }
10 | 
11 | body {
12 |   font-size: 16px;
13 | }
14 | 
15 | a > code {
16 |   text-decoration: underline;
17 | }
18 | 
--------------------------------------------------------------------------------
/project/build.properties:
--------------------------------------------------------------------------------
1 | sbt.version=1.3.12
2 | 
--------------------------------------------------------------------------------
/project/plugins.sbt:
--------------------------------------------------------------------------------
1 | scalaVersion := "2.12.11"
2 | 
3 | addSbtPlugin("com.47deg" % "sbt-microsites" % "1.3.4")
4 | addSbtPlugin("com.eed3si9n" % "sbt-buildinfo" % "0.9.0")
5 | addSbtPlugin("com.typesafe.sbt" % "sbt-ghpages" % "0.6.3")
6 | addSbtPlugin("com.typesafe.sbt" % "sbt-git" % "1.0.0")
7 | addSbtPlugin("de.heikoseeberger" % "sbt-header" % "5.0.0")
8 | addSbtPlugin("org.scalameta" % "sbt-mdoc" % "2.2.21")
9 | addSbtPlugin("com.eed3si9n" % "sbt-assembly" % "0.15.0")
10 | addSbtPlugin("org.xerial.sbt" % "sbt-sonatype" % "3.9.4")
11 | addSbtPlugin("com.jsuereth" % "sbt-pgp" % "2.0.1")
12 | 
--------------------------------------------------------------------------------