├── changelog
│   ├── @unreleased
│   │   └── .gitkeep
│   ├── 0.2.8
│   │   └── pr-224.v2.yml
│   ├── 0.2.4
│   │   ├── pr-193.v2.yml
│   │   ├── pr-202.v2.yml
│   │   ├── pr-200.v2.yml
│   │   ├── pr-201.v2.yml
│   │   ├── pr-192.v2.yml
│   │   ├── pr-196.v2.yml
│   │   ├── pr-194.v2.yml
│   │   └── pr-198.v2.yml
│   ├── 0.2.5
│   │   ├── pr-210.v2.yml
│   │   ├── pr-207.v2.yml
│   │   ├── pr-213.v2.yml
│   │   ├── pr-203.v2.yml
│   │   ├── pr-217.v2.yml
│   │   └── pr-216.v2.yml
│   ├── 0.2.10
│   │   └── pr-229.v2.yml
│   ├── 0.2.16
│   │   └── pr-247.v2.yml
│   ├── 0.2.12
│   │   └── pr-236.v2.yml
│   ├── 0.2.3
│   │   ├── pr-190.v2.yml
│   │   ├── pr-189.v2.yml
│   │   └── pr-191.v2.yml
│   ├── 0.2.7
│   │   └── pr-222.v2.yml
│   ├── 0.2.9
│   │   └── pr-225.v2.yml
│   ├── 0.2.2
│   │   ├── pr-183.v2.yml
│   │   ├── pr-185.v2.yml
│   │   ├── pr-187.v2.yml
│   │   └── pr-178.v2.yml
│   ├── 0.2.14
│   │   ├── pr-243.v2.yml
│   │   └── pr-242.v2.yml
│   ├── 0.2.6
│   │   └── pr-221.v2.yml
│   ├── 0.2.1
│   │   └── pr-68.v2.yml
│   ├── 0.2.15
│   │   └── pr-244.v2.yml
│   ├── 0.2.13
│   │   └── pr-241.v2.yml
│   └── 0.2.11
│       └── pr-231.v2.yml
├── gradle
│   ├── wrapper
│   │   ├── .gitignore
│   │   ├── gradle-wrapper.jar
│   │   └── gradle-wrapper.properties
│   └── publish-dist.gradle
├── spark-tpcds-benchmark-runner
│   ├── service
│   │   └── bin
│   │       ├── gensort
│   │       │   ├── gensort_osx
│   │       │   └── gensort-linux-1.5.tar.gz
│   │       └── tpcds
│   │           ├── tpcds_osx.tgz
│   │           └── tpcds_linux.tgz
│   ├── src
│   │   ├── main
│   │   │   ├── resources
│   │   │   │   └── queries.tar
│   │   │   └── java
│   │   │       └── com
│   │   │           └── palantir
│   │   │               └── spark
│   │   │                   └── benchmark
│   │   │                       ├── datagen
│   │   │                       │   ├── SortDataGenerator.java
│   │   │                       │   ├── ParquetTransformer.java
│   │   │                       │   ├── DefaultParquetTransformer.java
│   │   │                       │   ├── TpcdsDataGenerator.java
│   │   │                       │   └── GenSortDataGenerator.java
│   │   │                       ├── queries
│   │   │                       │   ├── Query.java
│   │   │                       │   ├── SortBenchmarkQuery.java
│   │   │                       │   ├── QuerySessionIdentifier.java
│   │   │                       │   └── SqlQuery.java
│   │   │                       ├── config
│   │   │                       │   ├── SourceDataGenerationConfig.java
│   │   │                       │   ├── SourceBenchmarksConfiguration.java
│   │   │                       │   ├── DataGenerationConfiguration.java
│   │   │                       │   ├── BenchmarksConfiguration.java
│   │   │                       │   ├── SimpleFilesystemConfiguration.java
│   │   │                       │   ├── SparkConfiguration.java
│   │   │                       │   ├── FilesystemConfiguration.java
│   │   │                       │   ├── S3Configuration.java
│   │   │                       │   ├── BenchmarkRunnerConfig.java
│   │   │                       │   ├── AzureBlobStoreConfiguration.java
│   │   │                       │   └── HadoopConfiguration.java
│   │   │                       ├── immutables
│   │   │                       │   ├── ImmutablesStyle.java
│   │   │                       │   └── ImmutablesConfigStyle.java
│   │   │                       ├── util
│   │   │                       │   ├── FileSystems.java
│   │   │                       │   ├── MoreFutures.java
│   │   │                       │   └── DataGenUtils.java
│   │   │                       ├── constants
│   │   │                       │   └── TpcdsTable.java
│   │   │                       ├── registration
│   │   │                       │   └── TableRegistration.java
│   │   │                       ├── paths
│   │   │                       │   └── BenchmarkPaths.java
│   │   │                       ├── schemas
│   │   │                       │   └── Schemas.java
│   │   │                       ├── metrics
│   │   │                       │   ├── BenchmarkMetric.java
│   │   │                       │   └── BenchmarkMetrics.java
│   │   │                       ├── BenchmarkRunner.java
│   │   │                       └── correctness
│   │   │                           └── TpcdsQueryCorrectnessChecks.java
│   │   └── test
│   │       └── java
│   │           └── com
│   │               └── palantir
│   │                   └── spark
│   │                       └── benchmark
│   │                           ├── config
│   │                           │   └── ConfigDeserializationTest.java
│   │                           ├── TestIdentifiers.java
│   │                           ├── AbstractLocalSparkTest.java
│   │                           └── datagen
│   │                               ├── TpcdsDataGeneratorIntegrationTest.java
│   │                               ├── BenchmarkMetricsTest.java
│   │                               └── GenSortTest.java
│   ├── build.gradle
│   └── var
│       └── conf
│           └── config.yml
├── .circleci
│   ├── template.sh
│   └── config.yml
├── jars
│   └── org
│       └── apache
│           └── spark
│               └── spark-hadoop-cloud_2.12
│                   └── 3.0.0
│                       └── spark-hadoop-cloud_2.12-3.0.0.jar
├── .changelog.yml
├── .palantir
│   ├── excavator.yml
│   ├── policy.yml
│   └── bulldozer.yml
├── .github
│   ├── ISSUE_TEMPLATE.md
│   └── PULL_REQUEST_TEMPLATE.md
├── .excavator.yml
├── .bulldozer.yml
├── .baseline
│   ├── copyright
│   │   └── 001_apache-2.0.txt
│   ├── eclipse
│   │   ├── dynamic
│   │   │   └── dotfile.checkstyle
│   │   └── static
│   │       └── dotfile.settings
│   │           └── org.eclipse.jdt.ui.prefs
│   └── checkstyle
│       └── checkstyle-suppressions.xml
├── versions.props
├── scripts
│   ├── aws_mount_ephemeral_disk.sh
│   └── azure_mount_and_encrypt_ephemeral_disk.sh
├── settings.gradle
├── gradle.properties
├── .gitignore
├── .policy.yml
├── gradlew.bat
├── readme.md
├── gradlew
└── LICENSE
/changelog/@unreleased/.gitkeep:
--------------------------------------------------------------------------------
1 | 
--------------------------------------------------------------------------------
/gradle/wrapper/.gitignore:
--------------------------------------------------------------------------------
1 | !*.jar
2 | 
--------------------------------------------------------------------------------
/gradle/wrapper/gradle-wrapper.jar:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/palantir/spark-tpcds-benchmark/HEAD/gradle/wrapper/gradle-wrapper.jar
--------------------------------------------------------------------------------
/changelog/0.2.8/pr-224.v2.yml:
--------------------------------------------------------------------------------
1 | type: fix
2 | fix:
3 |   description: Fix metrics schema
4 |   links:
5 |   - https://github.com/palantir/spark-tpcds-benchmark/pull/224
6 | 
--------------------------------------------------------------------------------
/changelog/0.2.4/pr-193.v2.yml:
--------------------------------------------------------------------------------
1 | type: fix
2 | fix:
3 |   description: Rename clashing config fields
4 |   links:
5 |   - https://github.com/palantir/spark-tpcds-benchmark/pull/193
6 | 
--------------------------------------------------------------------------------
/changelog/0.2.4/pr-202.v2.yml:
--------------------------------------------------------------------------------
1 | type: fix
2 | fix:
3 |   description: Use azure container name from config
4 |   links:
5 |   - https://github.com/palantir/spark-tpcds-benchmark/pull/202
6 | 
--------------------------------------------------------------------------------
/spark-tpcds-benchmark-runner/service/bin/gensort/gensort_osx:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/palantir/spark-tpcds-benchmark/HEAD/spark-tpcds-benchmark-runner/service/bin/gensort/gensort_osx
--------------------------------------------------------------------------------
/spark-tpcds-benchmark-runner/service/bin/tpcds/tpcds_osx.tgz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/palantir/spark-tpcds-benchmark/HEAD/spark-tpcds-benchmark-runner/service/bin/tpcds/tpcds_osx.tgz
--------------------------------------------------------------------------------
/spark-tpcds-benchmark-runner/src/main/resources/queries.tar:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/palantir/spark-tpcds-benchmark/HEAD/spark-tpcds-benchmark-runner/src/main/resources/queries.tar
--------------------------------------------------------------------------------
/changelog/0.2.4/pr-200.v2.yml:
--------------------------------------------------------------------------------
1 | type: fix
2 | fix:
3 |   description: Check existence of gensort file correctly
4 |   links:
5 |   - https://github.com/palantir/spark-tpcds-benchmark/pull/200
6 | 
--------------------------------------------------------------------------------
/changelog/0.2.4/pr-201.v2.yml:
--------------------------------------------------------------------------------
1 | type: fix
2 | fix:
3 |   description: Register gensort table at benchmark time
4 |   links:
5 |   - https://github.com/palantir/spark-tpcds-benchmark/pull/201
6 | 
--------------------------------------------------------------------------------
/changelog/0.2.5/pr-210.v2.yml:
--------------------------------------------------------------------------------
1 | type: improvement
2 | improvement:
3 |   description: upgrade to spark 3.0.0
4 |   links:
5 |   - https://github.com/palantir/spark-tpcds-benchmark/pull/210
6 | 
--------------------------------------------------------------------------------
/spark-tpcds-benchmark-runner/service/bin/tpcds/tpcds_linux.tgz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/palantir/spark-tpcds-benchmark/HEAD/spark-tpcds-benchmark-runner/service/bin/tpcds/tpcds_linux.tgz
--------------------------------------------------------------------------------
/.circleci/template.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 | export CIRCLECI_TEMPLATE=java-library-oss
3 | export DOCKER_TESTS=true
4 | export JAVA_11=true
5 | export ONLY_11=true
6 | export UNIT_TEST_14=false
7 | 
--------------------------------------------------------------------------------
/changelog/0.2.10/pr-229.v2.yml:
--------------------------------------------------------------------------------
1 | type: improvement
2 | improvement:
3 |   description: Use local buffer for metrics
4 |   links:
5 |   - https://github.com/palantir/spark-tpcds-benchmark/pull/229
6 | 
--------------------------------------------------------------------------------
/changelog/0.2.16/pr-247.v2.yml:
--------------------------------------------------------------------------------
1 | type: fix
2 | fix:
3 |   description: pass in separate file system for metrics
4 |   links:
5 |   - https://github.com/palantir/spark-tpcds-benchmark/pull/247
6 | 
--------------------------------------------------------------------------------
/changelog/0.2.12/pr-236.v2.yml:
--------------------------------------------------------------------------------
1 | type: fix
2 | fix:
3 |   description: Generate new QueryIdentifier for each iteration
4 |   links:
5 |   - https://github.com/palantir/spark-tpcds-benchmark/pull/236
6 | 
--------------------------------------------------------------------------------
/changelog/0.2.3/pr-190.v2.yml:
--------------------------------------------------------------------------------
1 | type: feature
2 | feature:
3 |   description: Generate gensort data in 1GB partitions
4 |   links:
5 |   - https://github.com/palantir/spark-tpcds-benchmark/pull/190
6 | 
--------------------------------------------------------------------------------
/changelog/0.2.4/pr-192.v2.yml:
--------------------------------------------------------------------------------
1 | type: fix
2 | fix:
3 |   description: Rename package and fix mainClass in distribution
4 |   links:
5 |   - https://github.com/palantir/spark-tpcds-benchmark/pull/192
6 | 
--------------------------------------------------------------------------------
/changelog/0.2.7/pr-222.v2.yml:
--------------------------------------------------------------------------------
1 | type: improvement
2 | improvement:
3 |   description: Include spark built with hadoop 3.x
4 |   links:
5 |   - https://github.com/palantir/spark-tpcds-benchmark/pull/222
6 | 
--------------------------------------------------------------------------------
/changelog/0.2.9/pr-225.v2.yml:
--------------------------------------------------------------------------------
1 | type: improvement
2 | improvement:
3 |   description: Do not bundle spark sql jar anymore
4 |   links:
5 |   - https://github.com/palantir/spark-tpcds-benchmark/pull/225
6 | 
--------------------------------------------------------------------------------
/changelog/0.2.2/pr-183.v2.yml:
--------------------------------------------------------------------------------
1 | type: feature
2 | feature:
3 |   description: upload and transform gensort data to parquet
4 |   links:
5 |   - https://github.com/palantir/spark-tpcds-benchmark/pull/183
6 | 
--------------------------------------------------------------------------------
/changelog/0.2.5/pr-207.v2.yml:
--------------------------------------------------------------------------------
1 | type: improvement
2 | improvement:
3 |   description: Write experiment name to metrics file
4 |   links:
5 |   - https://github.com/palantir/spark-tpcds-benchmark/pull/207
6 | 
--------------------------------------------------------------------------------
/changelog/0.2.2/pr-185.v2.yml:
--------------------------------------------------------------------------------
1 | type: improvement
2 | improvement:
3 |   description: Use gensort data to perform sort benchmark
4 |   links:
5 |   - https://github.com/palantir/spark-tpcds-benchmark/pull/185
6 | 
--------------------------------------------------------------------------------
/spark-tpcds-benchmark-runner/service/bin/gensort/gensort-linux-1.5.tar.gz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/palantir/spark-tpcds-benchmark/HEAD/spark-tpcds-benchmark-runner/service/bin/gensort/gensort-linux-1.5.tar.gz
--------------------------------------------------------------------------------
/changelog/0.2.5/pr-213.v2.yml:
--------------------------------------------------------------------------------
1 | type: improvement
2 | improvement:
3 |   description: Rename some paths to be consistent with each other
4 |   links:
5 |   - https://github.com/palantir/spark-tpcds-benchmark/pull/213
6 | 
--------------------------------------------------------------------------------
/changelog/0.2.14/pr-243.v2.yml:
--------------------------------------------------------------------------------
1 | type: fix
2 | fix:
3 |   description: Make iteration optional to fix backcompat of QuerySessionIdentifier
4 |   links:
5 |   - https://github.com/palantir/spark-tpcds-benchmark/pull/243
6 | 
--------------------------------------------------------------------------------
/changelog/0.2.5/pr-203.v2.yml:
--------------------------------------------------------------------------------
1 | type: improvement
2 | improvement:
3 |   description: Save results of each experiment in a separate folder
4 |   links:
5 |   - https://github.com/palantir/spark-tpcds-benchmark/pull/203
6 | 
--------------------------------------------------------------------------------
/changelog/0.2.6/pr-221.v2.yml:
--------------------------------------------------------------------------------
1 | type: improvement
2 | improvement:
3 |   description: Add hadoop-cloud jars to get the right s3 committers
4 |   links:
5 |   - https://github.com/palantir/spark-tpcds-benchmark/pull/221
6 | 
--------------------------------------------------------------------------------
/changelog/0.2.5/pr-217.v2.yml:
--------------------------------------------------------------------------------
1 | type: improvement
2 | improvement:
3 |   description: Run gensort first so that we fail fast on committer issues
4 |   links:
5 |   - https://github.com/palantir/spark-tpcds-benchmark/pull/217
6 | 
--------------------------------------------------------------------------------
/changelog/0.2.2/pr-187.v2.yml:
--------------------------------------------------------------------------------
1 | type: improvement
2 | improvement:
3 |   description: Support passing in a scale for gensort and wire to benchmark runner
4 |   links:
5 |   - https://github.com/palantir/spark-tpcds-benchmark/pull/187
6 | 
--------------------------------------------------------------------------------
/changelog/0.2.4/pr-196.v2.yml:
--------------------------------------------------------------------------------
1 | type: improvement
2 | improvement:
3 |   description: Allow resuming parquet transformation stage in tpcds data generation
4 |   links:
5 |   - https://github.com/palantir/spark-tpcds-benchmark/pull/196
6 | 
--------------------------------------------------------------------------------
/changelog/0.2.5/pr-216.v2.yml:
--------------------------------------------------------------------------------
1 | type: fix
2 | fix:
3 |   description: Bundle spark-hadoop-cloud jar to get access to new output committer
4 |     classes
5 |   links:
6 |   - https://github.com/palantir/spark-tpcds-benchmark/pull/216
7 | 
--------------------------------------------------------------------------------
/jars/org/apache/spark/spark-hadoop-cloud_2.12/3.0.0/spark-hadoop-cloud_2.12-3.0.0.jar:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/palantir/spark-tpcds-benchmark/HEAD/jars/org/apache/spark/spark-hadoop-cloud_2.12/3.0.0/spark-hadoop-cloud_2.12-3.0.0.jar
--------------------------------------------------------------------------------
/changelog/0.2.4/pr-194.v2.yml:
--------------------------------------------------------------------------------
1 | type: fix
2 | fix:
3 |   description: IgnoreUnknownProperties in FileSystemConfiguration to get json subtypes
4 |     to work
5 |   links:
6 |   - https://github.com/palantir/spark-tpcds-benchmark/pull/194
7 | 
--------------------------------------------------------------------------------
/.changelog.yml:
--------------------------------------------------------------------------------
1 | # Excavator auto-updates this file. Please contribute improvements to the central template.
2 | 
3 | # This file is intentionally empty. The file's existence enables changelog-app and is empty to use the default configuration.
4 | 
--------------------------------------------------------------------------------
/.palantir/excavator.yml:
--------------------------------------------------------------------------------
1 | auto-label:
2 |   names:
3 |     versions-props/upgrade-all: [ "merge when ready" ]
4 |     excavator/manage-circleci: [ "merge when ready" ]
5 |   tags:
6 |     roomba: [ "merge when ready" ]
7 |     swiffer: [ "merge when ready" ]
8 | 
--------------------------------------------------------------------------------
/changelog/0.2.2/pr-178.v2.yml:
--------------------------------------------------------------------------------
1 | type: feature
2 | feature:
3 |   description: |-
4 |     Add ability to generate data from the gensort program used in http://sortbenchmark.org/.
5 |   links:
6 |   - https://github.com/palantir/spark-tpcds-benchmark/pull/178
7 | 
--------------------------------------------------------------------------------
/gradle/wrapper/gradle-wrapper.properties:
--------------------------------------------------------------------------------
1 | distributionBase=GRADLE_USER_HOME
2 | distributionPath=wrapper/dists
3 | distributionUrl=https\://services.gradle.org/distributions/gradle-5.6.4-bin.zip
4 | zipStoreBase=GRADLE_USER_HOME
5 | zipStorePath=wrapper/dists
6 | 
--------------------------------------------------------------------------------
/changelog/0.2.4/pr-198.v2.yml:
--------------------------------------------------------------------------------
1 | type: improvement
2 | improvement:
3 |   description: |-
4 |     Refactor config structure to support selective data generation, as well as general consolidation
5 |   links:
6 |   - https://github.com/palantir/spark-tpcds-benchmark/pull/198
7 | 
--------------------------------------------------------------------------------
/changelog/0.2.1/pr-68.v2.yml:
--------------------------------------------------------------------------------
1 | type: improvement
2 | improvement:
3 |   description: Add a custom benchmark that explicitly forces a high volume of data
4 |     shuffling (and is not part of standard TPC-DS).
5 |   links:
6 |   - https://github.com/palantir/spark-tpcds-benchmark/pull/68
7 | 
--------------------------------------------------------------------------------
/changelog/0.2.14/pr-242.v2.yml:
--------------------------------------------------------------------------------
1 | type: improvement
2 | improvement:
3 |   description: increase YARN max executor failures in default config, so benchmarks
4 |     with many iterations are less likely to fail
5 |   links:
6 |   - https://github.com/palantir/spark-tpcds-benchmark/pull/242
7 | 
--------------------------------------------------------------------------------
/.palantir/policy.yml:
--------------------------------------------------------------------------------
1 | # Excavator auto-updates this file. Please contribute improvements to the central template:
2 | # https://github.palantir.build/foundry/policies/blob/master/foundry/default.policy.yml
3 | 
4 | remote: foundry/policies
5 | path: foundry/default.policy.yml
6 | ref: master
7 | 
--------------------------------------------------------------------------------
/changelog/0.2.3/pr-189.v2.yml:
--------------------------------------------------------------------------------
1 | type: improvement
2 | improvement:
3 |   description: Adds support for Azure & AWS's respective blob stores. Relatedly, upgrade
4 |     to Java 11 to avoid the many non-performant ciphers on Java 8.
5 |   links:
6 |   - https://github.com/palantir/spark-tpcds-benchmark/pull/189
7 | 
--------------------------------------------------------------------------------
/.palantir/bulldozer.yml:
--------------------------------------------------------------------------------
1 | version: 1
2 | merge:
3 |   whitelist:
4 |     labels: ["merge when ready"]
5 |   blacklist:
6 |     labels: ["do not merge"]
7 |   method: squash
8 |   options:
9 |     squash:
10 |       body: pull_request_body
11 |       message_delimiter: ==COMMIT_MSG==
12 |   delete_after_merge: true
13 | 
--------------------------------------------------------------------------------
/changelog/0.2.15/pr-244.v2.yml:
--------------------------------------------------------------------------------
1 | type: improvement
2 | improvement:
3 |   description: The data filesystem can be explicitly different from the default filesystem
4 |     (so that, e.g., results/metrics may be written to default fs instead of data fs).
5 |   links:
6 |   - https://github.com/palantir/spark-tpcds-benchmark/pull/244
7 | 
--------------------------------------------------------------------------------
/changelog/0.2.13/pr-241.v2.yml:
--------------------------------------------------------------------------------
1 | type: improvement
2 | improvement:
3 |   description: Results metrics will always be written to a unique path, since they
4 |     are hive partitioned by sessionId (a random UUID), as well as iteration & attempt
5 |     numbers.
6 |   links:
7 |   - https://github.com/palantir/spark-tpcds-benchmark/pull/241
8 | 
--------------------------------------------------------------------------------
/.github/ISSUE_TEMPLATE.md:
--------------------------------------------------------------------------------
1 | ## What happened?
2 | 
3 | 
7 | 
8 | ## What did you want to happen?
9 | 
10 | 
13 | 
--------------------------------------------------------------------------------
/.excavator.yml:
--------------------------------------------------------------------------------
1 | # Excavator auto-updates this file. Please contribute improvements to the central template.
2 | 
3 | auto-label:
4 |   names:
5 |     versions-props/upgrade-all: [ "merge when ready" ]
6 |     circleci/manage-circleci: [ "merge when ready" ]
7 |   tags:
8 |     roomba: [ "merge when ready" ]
9 |     automerge: [ "merge when ready" ]
10 | 
--------------------------------------------------------------------------------
/.github/PULL_REQUEST_TEMPLATE.md:
--------------------------------------------------------------------------------
1 | ## Before this PR
2 | 
3 | 
4 | ## After this PR
5 | 
6 | ==COMMIT_MSG==
7 | ==COMMIT_MSG==
8 | 
9 | ## Possible downsides?
10 | 
11 | 
12 | 
--------------------------------------------------------------------------------
/changelog/0.2.11/pr-231.v2.yml:
--------------------------------------------------------------------------------
1 | type: improvement
2 | improvement:
3 |   description: 'Attempt to record as much useful information as possible from runs,
4 |     and reduce transient errors. We accomplish this by: using a unique filesystem
5 |     paths for all outputs, recording verification as a column rather than throwing
6 |     on failure, and retrying failed queries.'
7 |   links:
8 |   - https://github.com/palantir/spark-tpcds-benchmark/pull/231
9 | 
--------------------------------------------------------------------------------
/.bulldozer.yml:
--------------------------------------------------------------------------------
1 | # Excavator auto-updates this file. Please contribute improvements to the central template.
2 | 
3 | version: 1
4 | merge:
5 |   trigger:
6 |     labels: ["merge when ready"]
7 |   ignore:
8 |     labels: ["do not merge"]
9 |   method: squash
10 |   options:
11 |     squash:
12 |       body: pull_request_body
13 |       message_delimiter: ==COMMIT_MSG==
14 |   delete_after_merge: true
15 | update:
16 |   trigger:
17 |     labels: ["update me"]
18 | 
--------------------------------------------------------------------------------
/changelog/0.2.3/pr-191.v2.yml:
--------------------------------------------------------------------------------
1 | type: improvement
2 | improvement:
3 |   description: |-
4 |     Turn on AQE for better partitioning
5 | 
6 |     Downside is that this will also apply to the queries - I can refactor the code to take different spark configs for those if needed, but if we run the queries enough times, we should be able to account for any differences because of the non determinism?
7 |   links:
8 |   - https://github.com/palantir/spark-tpcds-benchmark/pull/191
9 | 
--------------------------------------------------------------------------------
/.baseline/copyright/001_apache-2.0.txt:
--------------------------------------------------------------------------------
1 | (c) Copyright ${today.year} Palantir Technologies Inc. All rights reserved.
2 | 
3 | Licensed under the Apache License, Version 2.0 (the "License");
4 | you may not use this file except in compliance with the License.
5 | You may obtain a copy of the License at
6 | 
7 |     http://www.apache.org/licenses/LICENSE-2.0
8 | 
9 | Unless required by applicable law or agreed to in writing, software
10 | distributed under the License is distributed on an "AS IS" BASIS,
11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | See the License for the specific language governing permissions and
13 | limitations under the License.
14 | 
--------------------------------------------------------------------------------
/gradle/publish-dist.gradle:
--------------------------------------------------------------------------------
1 | apply plugin: 'com.jfrog.bintray'
2 | apply plugin: 'maven-publish'
3 | 
4 | bintray {
5 |     user = System.env.BINTRAY_USERNAME
6 |     key = System.env.BINTRAY_PASSWORD
7 |     publish = true
8 |     pkg {
9 |         repo = 'releases'
10 |         name = 'spark-tpcds-benchmark'
11 |         userOrg = 'palantir'
12 |         licenses = ['Apache-2.0']
13 |         publications = ['dist']
14 |     }
15 | }
16 | 
17 | publish.dependsOn bintrayUpload
18 | bintrayUpload.onlyIf {
19 |     versionDetails().isCleanTag
20 | }
21 | 
22 | publishing {
23 |     publications {
24 |         dist(MavenPublication) {
25 |             artifact distTar
26 |         }
27 |     }
28 | }
29 | 
--------------------------------------------------------------------------------
/versions.props:
--------------------------------------------------------------------------------
1 | com.amazonaws:aws-java-sdk-* = 1.11.903
2 | com.fasterxml.jackson.*:* = 2.11.3
3 | com.github.rholder:guava-retrying = 2.0.0
4 | com.google.guava:guava = 30.0-jre
5 | com.microsoft.azure:azure-storage = 8.4.0
6 | com.palantir.common:streams = 1.9.1
7 | com.palantir.conjure.java.runtime:* = 6.8.0
8 | com.palantir.safe-logging:preconditions = 1.12.2
9 | io.dropwizard.metrics:* = 3.2.6
10 | io.netty:netty-all = 4.1.54.Final
11 | javax.servlet.jsp:jsp-api = 2.2
12 | org.assertj:* = 3.18.1
13 | org.apache.parquet:* = 1.12.0-palantir.7
14 | org.apache.hadoop:* = 3.3.0
15 | org.apache.spark:* = 3.0.0
16 | org.immutables:* = 2.8.8
17 | org.junit.jupiter:* = 5.7.0
18 | org.mapdb:* = 3.0.8
19 | org.mockito:* = 3.6.0
20 | 
--------------------------------------------------------------------------------
/scripts/aws_mount_ephemeral_disk.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | set -eu
3 | 
4 | export NVME_DEVICE_MD_PATH=/dev/md/ephemeral_drive
5 | export NVME_DEVICE_MAPPER_PATH=/dev/mapper/nvme_ephemeral_drive
6 | export NVME_EPHEMERAL_MOUNT_PATH=/scratch
7 | export NVME_EPHEMERAL_DEVICES=$(ls /dev/disk/by-id/nvme-Amazon_EC2_NVMe_Instance_Storage_*)
8 | mkdir $NVME_EPHEMERAL_MOUNT_PATH
9 | 
10 | chown root: $NVME_EPHEMERAL_MOUNT_PATH
11 | chmod 1777 $NVME_EPHEMERAL_MOUNT_PATH
12 | mdadm --create $NVME_DEVICE_MD_PATH --level=0 --run --raid-devices=2 $NVME_EPHEMERAL_DEVICES --verbose
13 | 
14 | udevadm control --start-exec-queue
15 | udevadm settle
16 | mkfs.xfs $NVME_DEVICE_MD_PATH
17 | mount -o "defaults,nosuid,noatime,nodev" $NVME_DEVICE_MD_PATH $NVME_EPHEMERAL_MOUNT_PATH
18 | chmod 1777 $NVME_EPHEMERAL_MOUNT_PATH
19 | 
--------------------------------------------------------------------------------
/settings.gradle:
--------------------------------------------------------------------------------
1 | /*
2 |  * (c) Copyright 2019 Palantir Technologies Inc. All rights reserved.
3 |  *
4 |  * Licensed under the Apache License, Version 2.0 (the "License");
5 |  * you may not use this file except in compliance with the License.
6 |  * You may obtain a copy of the License at
7 |  *
8 |  *     http://www.apache.org/licenses/LICENSE-2.0
9 |  *
10 |  * Unless required by applicable law or agreed to in writing, software
11 |  * distributed under the License is distributed on an "AS IS" BASIS,
12 |  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 |  * See the License for the specific language governing permissions and
14 |  * limitations under the License.
15 |  */
16 | 
17 | rootProject.name = 'spark-tpcds-benchmark'
18 | 
19 | include 'spark-tpcds-benchmark-runner'
20 | 
--------------------------------------------------------------------------------
/gradle.properties:
--------------------------------------------------------------------------------
1 | #
2 | # (c) Copyright 2019 Palantir Technologies Inc. All rights reserved.
3 | #
4 | # Licensed under the Apache License, Version 2.0 (the "License");
5 | # you may not use this file except in compliance with the License.
6 | # You may obtain a copy of the License at
7 | #
8 | #     http://www.apache.org/licenses/LICENSE-2.0
9 | #
10 | # Unless required by applicable law or agreed to in writing, software
11 | # distributed under the License is distributed on an "AS IS" BASIS,
12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | # See the License for the specific language governing permissions and
14 | # limitations under the License.
15 | #
16 | 
17 | systemProp.org.gradle.internal.http.socketTimeout=600000
18 | systemProp.org.gradle.internal.http.connectionTimeout=600000
19 | org.gradle.parallel=true
20 | 
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | # Eclipse
2 | *.class
3 | .project
4 | .gradle
5 | .classpath
6 | .checkstyle
7 | .settings
8 | .node
9 | /build/
10 | */build
11 | */*/build/
12 | examples/*/build/
13 | extras/*/build/
14 | bin
15 | .factorypath
16 | 
17 | # SLSv2
18 | !*/service/bin
19 | **/var/log
20 | **/var/data
21 | 
22 | **/var/security
23 | 
24 | # Mobile Tools for Java (J2ME)
25 | .mtj.tmp/
26 | 
27 | # Package Files #
28 | *.jar
29 | !jars/**/*.jar
30 | *.war
31 | *.ear
32 | 
33 | # virtual machine crash logs, see http://www.java.com/en/download/help/error_hotspot.xml
34 | hs_err_pid*
35 | 
36 | # Windows git artifact
37 | fileeditor
38 | 
39 | # Mac specific files
40 | .DS_Store
41 | 
42 | # IntelliJ
43 | *.iml
44 | *.ipr
45 | *.iws
46 | .idea/
47 | out/
48 | 
49 | # Eclipse/IntelliJ APT
50 | generated_src/
51 | generated_testSrc/
52 | generated/
53 | 
54 | # Blueprint theme
55 | __init__.pyc
56 | 
--------------------------------------------------------------------------------
/spark-tpcds-benchmark-runner/src/main/java/com/palantir/spark/benchmark/datagen/SortDataGenerator.java:
--------------------------------------------------------------------------------
1 | /*
2 |  * (c) Copyright 2020 Palantir Technologies Inc. All rights reserved.
3 |  *
4 |  * Licensed under the Apache License, Version 2.0 (the "License");
5 |  * you may not use this file except in compliance with the License.
6 |  * You may obtain a copy of the License at
7 |  *
8 |  *     http://www.apache.org/licenses/LICENSE-2.0
9 |  *
10 |  * Unless required by applicable law or agreed to in writing, software
11 |  * distributed under the License is distributed on an "AS IS" BASIS,
12 |  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 |  * See the License for the specific language governing permissions and
14 |  * limitations under the License.
15 |  */
16 | 
17 | package com.palantir.spark.benchmark.datagen;
18 | 
19 | public interface SortDataGenerator {
20 |     void generate();
21 | }
22 | 
--------------------------------------------------------------------------------
/spark-tpcds-benchmark-runner/src/main/java/com/palantir/spark/benchmark/queries/Query.java:
--------------------------------------------------------------------------------
1 | /*
2 |  * (c) Copyright 2020 Palantir Technologies Inc. All rights reserved.
3 |  *
4 |  * Licensed under the Apache License, Version 2.0 (the "License");
5 |  * you may not use this file except in compliance with the License.
6 |  * You may obtain a copy of the License at
7 |  *
8 |  *     http://www.apache.org/licenses/LICENSE-2.0
9 |  *
10 |  * Unless required by applicable law or agreed to in writing, software
11 |  * distributed under the License is distributed on an "AS IS" BASIS,
12 |  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 |  * See the License for the specific language governing permissions and
14 |  * limitations under the License.
15 |  */
16 | 
17 | package com.palantir.spark.benchmark.queries;
18 | 
19 | import java.util.Optional;
20 | import org.apache.spark.sql.types.StructType;
21 | 
22 | public interface Query {
23 |     String getName();
24 | 
25 |     Optional<String> getSqlStatement();
26 | 
27 |     StructType getSchema();
28 | 
29 |     void save(String resultLocation);
30 | }
31 | 
--------------------------------------------------------------------------------
/spark-tpcds-benchmark-runner/src/test/java/com/palantir/spark/benchmark/config/ConfigDeserializationTest.java:
--------------------------------------------------------------------------------
1 | /*
2 |  * (c) Copyright 2020 Palantir Technologies Inc. All rights reserved.
3 |  *
4 |  * Licensed under the Apache License, Version 2.0 (the "License");
5 |  * you may not use this file except in compliance with the License.
6 |  * You may obtain a copy of the License at
7 |  *
8 |  *     http://www.apache.org/licenses/LICENSE-2.0
9 |  *
10 |  * Unless required by applicable law or agreed to in writing, software
11 |  * distributed under the License is distributed on an "AS IS" BASIS,
12 |  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 |  * See the License for the specific language governing permissions and
14 |  * limitations under the License.
15 |  */
16 | 
17 | package com.palantir.spark.benchmark.config;
18 | 
19 | import java.io.IOException;
20 | import java.nio.file.Paths;
21 | import org.junit.jupiter.api.Test;
22 | 
23 | public final class ConfigDeserializationTest {
24 |     @Test
25 |     public void testDeserializesConfig() throws IOException {
26 |         // should not throw.
27 |         BenchmarkRunnerConfig.parse(Paths.get("var/conf/config.yml"));
28 |     }
29 | }
30 | 
--------------------------------------------------------------------------------
/spark-tpcds-benchmark-runner/src/main/java/com/palantir/spark/benchmark/config/SourceDataGenerationConfig.java:
--------------------------------------------------------------------------------
1 | /*
2 |  * (c) Copyright 2020 Palantir Technologies Inc. All rights reserved.
3 |  *
4 |  * Licensed under the Apache License, Version 2.0 (the "License");
5 |  * you may not use this file except in compliance with the License.
6 |  * You may obtain a copy of the License at
7 |  *
8 |  *     http://www.apache.org/licenses/LICENSE-2.0
9 |  *
10 |  * Unless required by applicable law or agreed to in writing, software
11 |  * distributed under the License is distributed on an "AS IS" BASIS,
12 |  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 |  * See the License for the specific language governing permissions and
14 |  * limitations under the License.
15 |  */
16 | 
17 | package com.palantir.spark.benchmark.config;
18 | 
19 | import com.fasterxml.jackson.databind.annotation.JsonDeserialize;
20 | import com.palantir.spark.benchmark.immutables.ImmutablesConfigStyle;
21 | import org.immutables.value.Value;
22 | 
23 | @Value.Immutable
24 | @ImmutablesConfigStyle
25 | @JsonDeserialize(as = ImmutableSourceDataGenerationConfig.class)
26 | public interface SourceDataGenerationConfig {
27 |     boolean enabled();
28 | }
29 | 
--------------------------------------------------------------------------------
/spark-tpcds-benchmark-runner/src/main/java/com/palantir/spark/benchmark/datagen/ParquetTransformer.java:
--------------------------------------------------------------------------------
1 | /*
2 |  * (c) Copyright 2020 Palantir Technologies Inc. All rights reserved.
3 |  *
4 |  * Licensed under the Apache License, Version 2.0 (the "License");
5 |  * you may not use this file except in compliance with the License.
6 |  * You may obtain a copy of the License at
7 |  *
8 |  *     http://www.apache.org/licenses/LICENSE-2.0
9 |  *
10 |  * Unless required by applicable law or agreed to in writing, software
11 |  * distributed under the License is distributed on an "AS IS" BASIS,
12 |  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 |  * See the License for the specific language governing permissions and
14 |  * limitations under the License.
15 |  */
16 | 
17 | package com.palantir.spark.benchmark.datagen;
18 | 
19 | import java.util.Set;
20 | import org.apache.spark.sql.SparkSession;
21 | import org.apache.spark.sql.types.StructType;
22 | 
23 | public interface ParquetTransformer {
24 |     void transform(
25 |             SparkSession sparkSession,
26 |             StructType schema,
27 |             Set<String> sourcePaths,
28 |             String destinationPath,
29 |             String delimiter);
30 | }
31 | 
--------------------------------------------------------------------------------
/spark-tpcds-benchmark-runner/src/main/java/com/palantir/spark/benchmark/config/SourceBenchmarksConfiguration.java:
--------------------------------------------------------------------------------
1 | /*
2 |  * (c) Copyright 2020 Palantir Technologies Inc. All rights reserved.
3 |  *
4 |  * Licensed under the Apache License, Version 2.0 (the "License");
5 |  * you may not use this file except in compliance with the License.
6 |  * You may obtain a copy of the License at
7 |  *
8 |  *     http://www.apache.org/licenses/LICENSE-2.0
9 |  *
10 |  * Unless required by applicable law or agreed to in writing, software
11 |  * distributed under the License is distributed on an "AS IS" BASIS,
12 |  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 |  * See the License for the specific language governing permissions and
14 |  * limitations under the License.
15 |  */
16 | 
17 | package com.palantir.spark.benchmark.config;
18 | 
19 | import com.fasterxml.jackson.databind.annotation.JsonDeserialize;
20 | import com.palantir.spark.benchmark.immutables.ImmutablesConfigStyle;
21 | import org.immutables.value.Value;
22 | 
23 | @Value.Immutable
24 | @ImmutablesConfigStyle
25 | @JsonDeserialize(as = ImmutableSourceBenchmarksConfiguration.class)
26 | public interface SourceBenchmarksConfiguration {
27 |     boolean enabled();
28 | }
29 | 
--------------------------------------------------------------------------------
/spark-tpcds-benchmark-runner/src/test/java/com/palantir/spark/benchmark/TestIdentifiers.java:
--------------------------------------------------------------------------------
1 | /*
2 |  * (c) Copyright 2020 Palantir Technologies Inc. All rights reserved.
3 |  *
4 |  * Licensed under the Apache License, Version 2.0 (the "License");
5 |  * you may not use this file except in compliance with the License.
6 |  * You may obtain a copy of the License at
7 |  *
8 |  *     http://www.apache.org/licenses/LICENSE-2.0
9 |  *
10 |  * Unless required by applicable law or agreed to in writing, software
11 |  * distributed under the License is distributed on an "AS IS" BASIS,
12 |  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 |  * See the License for the specific language governing permissions and
14 |  * limitations under the License.
15 |  */
16 | 
17 | package com.palantir.spark.benchmark;
18 | 
19 | import com.palantir.spark.benchmark.queries.QuerySessionIdentifier;
20 | import java.util.UUID;
21 | 
22 | public final class TestIdentifiers {
23 |     private TestIdentifiers() {}
24 | 
25 |     public static QuerySessionIdentifier create(String queryName, int scale) {
26 |         return QuerySessionIdentifier.builder()
27 |                 .queryName(queryName)
28 |                 .scale(scale)
29 |                 .iteration(0)
30 |                 .session(UUID.randomUUID().toString())
31 |                 .build();
32 |     }
33 | }
34 | 
--------------------------------------------------------------------------------
/spark-tpcds-benchmark-runner/src/main/java/com/palantir/spark/benchmark/immutables/ImmutablesStyle.java:
--------------------------------------------------------------------------------
1 | /*
2 |  * (c) Copyright 2019 Palantir Technologies Inc. All rights reserved.
3 |  *
4 |  * Licensed under the Apache License, Version 2.0 (the "License");
5 |  * you may not use this file except in compliance with the License.
6 |  * You may obtain a copy of the License at
7 |  *
8 |  *     http://www.apache.org/licenses/LICENSE-2.0
9 |  *
10 |  * Unless required by applicable law or agreed to in writing, software
11 |  * distributed under the License is distributed on an "AS IS" BASIS,
12 |  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 |  * See the License for the specific language governing permissions and
14 |  * limitations under the License.
15 |  */
16 | 
17 | package com.palantir.spark.benchmark.immutables;
18 | 
19 | import java.lang.annotation.ElementType;
20 | import java.lang.annotation.Retention;
21 | import java.lang.annotation.RetentionPolicy;
22 | import java.lang.annotation.Target;
23 | import org.immutables.value.Value;
24 | 
25 | @Target({ElementType.PACKAGE, ElementType.TYPE})
26 | @Retention(RetentionPolicy.CLASS)
27 | @Value.Style(
28 |         visibility = Value.Style.ImplementationVisibility.PACKAGE,
29 |         overshadowImplementation = true,
30 |         jdkOnly = true,
31 |         get = {"get*", "is*"})
32 | public @interface ImmutablesStyle {}
33 | 
--------------------------------------------------------------------------------
/spark-tpcds-benchmark-runner/src/main/java/com/palantir/spark/benchmark/util/FileSystems.java:
--------------------------------------------------------------------------------
1 | /*
2 |  * (c) Copyright 2020 Palantir Technologies Inc. All rights reserved.
3 |  *
4 |  * Licensed under the Apache License, Version 2.0 (the "License");
5 |  * you may not use this file except in compliance with the License.
6 |  * You may obtain a copy of the License at
7 |  *
8 |  *     http://www.apache.org/licenses/LICENSE-2.0
9 |  *
10 |  * Unless required by applicable law or agreed to in writing, software
11 |  * distributed under the License is distributed on an "AS IS" BASIS,
12 |  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 |  * See the License for the specific language governing permissions and
14 |  * limitations under the License.
15 |  */
16 | 
17 | package com.palantir.spark.benchmark.util;
18 | 
19 | import java.io.IOException;
20 | import java.net.URI;
21 | import org.apache.hadoop.conf.Configuration;
22 | import org.apache.hadoop.fs.FileSystem;
23 | 
24 | public final class FileSystems {
25 |     private FileSystems() {}
26 | 
27 |     public static FileSystem createFileSystem(URI baseUri, Configuration hadoopConfiguration) throws IOException {
28 |         FileSystem fileSystem = FileSystem.get(baseUri, hadoopConfiguration);
29 |         fileSystem.setWorkingDirectory(new org.apache.hadoop.fs.Path(baseUri.getPath()));
30 |         return fileSystem;
31 |     }
32 | }
33 | 
--------------------------------------------------------------------------------
/spark-tpcds-benchmark-runner/src/main/java/com/palantir/spark/benchmark/immutables/ImmutablesConfigStyle.java:
--------------------------------------------------------------------------------
1 | /*
2 |  * (c) Copyright 2019 Palantir Technologies Inc. All rights reserved.
3 |  *
4 |  * Licensed under the Apache License, Version 2.0 (the "License");
5 |  * you may not use this file except in compliance with the License.
6 |  * You may obtain a copy of the License at
7 |  *
8 |  *     http://www.apache.org/licenses/LICENSE-2.0
9 |  *
10 |  * Unless required by applicable law or agreed to in writing, software
11 |  * distributed under the License is distributed on an "AS IS" BASIS,
12 |  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 |  * See the License for the specific language governing permissions and
14 |  * limitations under the License.
15 |  */
16 | 
17 | package com.palantir.spark.benchmark.immutables;
18 | 
19 | import java.lang.annotation.ElementType;
20 | import java.lang.annotation.Retention;
21 | import java.lang.annotation.RetentionPolicy;
22 | import java.lang.annotation.Target;
23 | import org.immutables.value.Value;
24 | 
25 | @Target({ElementType.PACKAGE, ElementType.TYPE})
26 | @Retention(RetentionPolicy.CLASS)
27 | @Value.Style(
28 |         visibility = Value.Style.ImplementationVisibility.PACKAGE,
29 |         overshadowImplementation = true,
30 |         jdkOnly = true,
31 |         get = {"get*", "is*"},
32 |         forceJacksonPropertyNames = false)
33 | public @interface ImmutablesConfigStyle {}
34 | 
--------------------------------------------------------------------------------
/spark-tpcds-benchmark-runner/src/main/java/com/palantir/spark/benchmark/util/MoreFutures.java:
--------------------------------------------------------------------------------
1 | /*
2 |  * (c) Copyright 2020 Palantir Technologies Inc. All rights reserved.
3 |  *
4 |  * Licensed under the Apache License, Version 2.0 (the "License");
5 |  * you may not use this file except in compliance with the License.
6 |  * You may obtain a copy of the License at
7 |  *
8 |  *     http://www.apache.org/licenses/LICENSE-2.0
9 |  *
10 |  * Unless required by applicable law or agreed to in writing, software
11 |  * distributed under the License is distributed on an "AS IS" BASIS,
12 |  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 |  * See the License for the specific language governing permissions and
14 |  * limitations under the License.
15 |  */
16 | 
17 | package com.palantir.spark.benchmark.util;
18 | 
19 | import com.palantir.logsafe.exceptions.SafeRuntimeException;
20 | import java.util.concurrent.ExecutionException;
21 | import java.util.concurrent.Future;
22 | 
23 | public final class MoreFutures {
24 |     private MoreFutures() {
25 |         // utility class.
26 |     }
27 | 
28 |     public static <T> T join(Future<T> future) {
29 |         try {
30 |             return future.get();
31 |         } catch (InterruptedException e) {
32 |             Thread.currentThread().interrupt();
33 |             throw new SafeRuntimeException(e);
34 |         } catch (ExecutionException e) {
35 |             throw new SafeRuntimeException("Encountered an ExecutionException", e.getCause());
36 |         }
37 |     }
38 | }
39 | 
--------------------------------------------------------------------------------
/.baseline/eclipse/dynamic/dotfile.checkstyle:
--------------------------------------------------------------------------------
1 | 
2 | 
3 | 
4 | 
5 | 
6 | 
7 | 
8 | 
9 | 
10 | 
11 | 
12 | 
13 | 
14 | 
15 | 
16 | 
17 | 
18 | 
19 | 
20 | 
21 | 
22 | 
23 | 
24 | 
--------------------------------------------------------------------------------
/spark-tpcds-benchmark-runner/src/main/java/com/palantir/spark/benchmark/config/DataGenerationConfiguration.java:
--------------------------------------------------------------------------------
1 | /*
2 |  * (c) Copyright 2020 Palantir Technologies Inc. All rights reserved.
3 |  *
4 |  * Licensed under the Apache License, Version 2.0 (the "License");
5 |  * you may not use this file except in compliance with the License.
6 |  * You may obtain a copy of the License at
7 |  *
8 |  *     http://www.apache.org/licenses/LICENSE-2.0
9 |  *
10 |  * Unless required by applicable law or agreed to in writing, software
11 |  * distributed under the License is distributed on an "AS IS" BASIS,
12 |  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 |  * See the License for the specific language governing permissions and
14 |  * limitations under the License.
15 |  */
16 | 
17 | package com.palantir.spark.benchmark.config;
18 | 
19 | import static com.palantir.logsafe.Preconditions.checkArgument;
20 | 
21 | import com.fasterxml.jackson.databind.annotation.JsonDeserialize;
22 | import com.palantir.spark.benchmark.immutables.ImmutablesConfigStyle;
23 | import org.immutables.value.Value;
24 | 
25 | @Value.Immutable
26 | @ImmutablesConfigStyle
27 | @JsonDeserialize(as = ImmutableDataGenerationConfiguration.class)
28 | public interface DataGenerationConfiguration {
29 |     int parallelism();
30 | 
31 |     String tempWorkingDir();
32 | 
33 |     SourceDataGenerationConfig tpcds();
34 | 
35 |     SourceDataGenerationConfig gensort();
36 | 
37 |     boolean overwriteData();
38 | 
39 |     @Value.Check
40 |     default void check() {
41 |         checkArgument(parallelism() > 0, "Data generation parallelism must be positive.");
42 |     }
43 | }
44 | 
--------------------------------------------------------------------------------
/spark-tpcds-benchmark-runner/src/main/java/com/palantir/spark/benchmark/config/BenchmarksConfiguration.java:
--------------------------------------------------------------------------------
1 | /*
2 |  * (c) Copyright 2020 Palantir Technologies Inc. All rights reserved.
3 |  *
4 |  * Licensed under the Apache License, Version 2.0 (the "License");
5 |  * you may not use this file except in compliance with the License.
6 |  * You may obtain a copy of the License at
7 |  *
8 |  *     http://www.apache.org/licenses/LICENSE-2.0
9 |  *
10 |  * Unless required by applicable law or agreed to in writing, software
11 |  * distributed under the License is distributed on an "AS IS" BASIS,
12 |  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 |  * See the License for the specific language governing permissions and
14 |  * limitations under the License.
15 |  */
16 | 
17 | package com.palantir.spark.benchmark.config;
18 | 
19 | import static com.palantir.logsafe.Preconditions.checkArgument;
20 | 
21 | import com.fasterxml.jackson.databind.annotation.JsonDeserialize;
22 | import com.palantir.spark.benchmark.immutables.ImmutablesConfigStyle;
23 | import java.util.Optional;
24 | import org.immutables.value.Value;
25 | 
26 | @Value.Immutable
27 | @ImmutablesConfigStyle
28 | @JsonDeserialize(as = ImmutableBenchmarksConfiguration.class)
29 | public interface BenchmarksConfiguration {
30 |     Optional<String> experimentName();
31 | 
32 |     int iterations();
33 | 
34 |     @Value.Default
35 |     default int attemptsPerQuery() {
36 |         return 3;
37 |     }
38 | 
39 |     SourceBenchmarksConfiguration tpcds();
40 | 
41 |     SourceBenchmarksConfiguration gensort();
42 | 
43 |     @Value.Check
44 |     default void check() {
45 |         checkArgument(iterations() >= 0, "Iterations must be non-negative.");
46 |     }
47 | }
48 | 
--------------------------------------------------------------------------------
/spark-tpcds-benchmark-runner/src/main/java/com/palantir/spark/benchmark/config/SimpleFilesystemConfiguration.java:
--------------------------------------------------------------------------------
1 | /*
2 |  * (c) Copyright 2020 Palantir Technologies Inc. All rights reserved.
3 |  *
4 |  * Licensed under the Apache License, Version 2.0 (the "License");
5 |  * you may not use this file except in compliance with the License.
6 |  * You may obtain a copy of the License at
7 |  *
8 |  *     http://www.apache.org/licenses/LICENSE-2.0
9 |  *
10 |  * Unless required by applicable law or agreed to in writing, software
11 |  * distributed under the License is distributed on an "AS IS" BASIS,
12 |  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 |  * See the License for the specific language governing permissions and
14 |  * limitations under the License.
15 |  */
16 | 
17 | package com.palantir.spark.benchmark.config;
18 | 
19 | import com.fasterxml.jackson.databind.annotation.JsonDeserialize;
20 | import com.google.common.collect.ImmutableMap;
21 | import com.palantir.spark.benchmark.immutables.ImmutablesConfigStyle;
22 | import java.util.Map;
23 | import org.immutables.value.Value;
24 | 
25 | @Value.Immutable
26 | @ImmutablesConfigStyle
27 | @JsonDeserialize(as = ImmutableSimpleFilesystemConfiguration.class)
28 | public abstract class SimpleFilesystemConfiguration extends FilesystemConfiguration {
29 |     @Override
30 |     public final String type() {
31 |         return FilesystemConfiguration.SIMPLE_TYPE;
32 |     }
33 | 
34 |     @Override
35 |     public final Map<String, String> toHadoopConf() {
36 |         return ImmutableMap.of();
37 |     }
38 | 
39 |     public static SimpleFilesystemConfiguration of(String baseUri) {
40 |         return ImmutableSimpleFilesystemConfiguration.builder().baseUri(baseUri).build();
41 |     }
42 | }
43 | 
--------------------------------------------------------------------------------
/.baseline/checkstyle/checkstyle-suppressions.xml:
--------------------------------------------------------------------------------
1 | 
2 | 
5 | 
6 | 
8 | 
9 | 
10 | 
11 | 
12 | 
13 | 
14 | 
15 | 
16 | 
17 | 
18 | 
19 | 
20 | 
21 | 
22 | 
23 | 
24 | 
25 | 
26 | 
27 | 
28 | 
29 | 
30 | 
--------------------------------------------------------------------------------
/spark-tpcds-benchmark-runner/src/main/java/com/palantir/spark/benchmark/config/SparkConfiguration.java:
--------------------------------------------------------------------------------
1 | /*
2 |  * (c) Copyright 2019 Palantir Technologies Inc. All rights reserved.
3 |  *
4 |  * Licensed under the Apache License, Version 2.0 (the "License");
5 |  * you may not use this file except in compliance with the License.
6 |  * You may obtain a copy of the License at
7 |  *
8 |  *     http://www.apache.org/licenses/LICENSE-2.0
9 |  *
10 |  * Unless required by applicable law or agreed to in writing, software
11 |  * distributed under the License is distributed on an "AS IS" BASIS,
12 |  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 |  * See the License for the specific language governing permissions and
14 |  * limitations under the License.
15 |  */
16 | 
17 | package com.palantir.spark.benchmark.config;
18 | 
19 | import com.fasterxml.jackson.databind.annotation.JsonDeserialize;
20 | import com.palantir.spark.benchmark.immutables.ImmutablesConfigStyle;
21 | import java.util.Map;
22 | import org.immutables.value.Value;
23 | 
24 | @Value.Immutable
25 | @ImmutablesConfigStyle
26 | @JsonDeserialize(as = ImmutableSparkConfiguration.class)
27 | public interface SparkConfiguration {
28 |     @Value.Default
29 |     default String master() {
30 |         return "yarn";
31 |     }
32 | 
33 |     @Value.Default
34 |     default int executorInstances() {
35 |         return 8;
36 |     }
37 | 
38 |     @Value.Default
39 |     default int executorCores() {
40 |         return 1;
41 |     }
42 | 
43 |     @Value.Default
44 |     default String executorMemory() {
45 |         return "1g";
46 |     }
47 | 
48 |     Map<String, String> sparkConf();
49 | 
50 |     static ImmutableSparkConfiguration.Builder builder() {
51 |         return ImmutableSparkConfiguration.builder();
52 |     }
53 | }
54 | 
--------------------------------------------------------------------------------
/spark-tpcds-benchmark-runner/src/main/java/com/palantir/spark/benchmark/datagen/DefaultParquetTransformer.java:
--------------------------------------------------------------------------------
1 | /*
2 |  * (c) Copyright 2020 Palantir Technologies Inc. All rights reserved.
3 |  *
4 |  * Licensed under the Apache License, Version 2.0 (the "License");
5 |  * you may not use this file except in compliance with the License.
6 |  * You may obtain a copy of the License at
7 |  *
8 |  *     http://www.apache.org/licenses/LICENSE-2.0
9 |  *
10 |  * Unless required by applicable law or agreed to in writing, software
11 |  * distributed under the License is distributed on an "AS IS" BASIS,
12 |  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 |  * See the License for the specific language governing permissions and
14 |  * limitations under the License.
15 |  */
16 | 
17 | package com.palantir.spark.benchmark.datagen;
18 | 
19 | import java.util.Optional;
20 | import java.util.Set;
21 | import org.apache.spark.sql.Dataset;
22 | import org.apache.spark.sql.Row;
23 | import org.apache.spark.sql.SparkSession;
24 | import org.apache.spark.sql.types.StructType;
25 | 
26 | public final class DefaultParquetTransformer implements ParquetTransformer {
27 |     @Override
28 |     public void transform(
29 |             SparkSession sparkSession,
30 |             StructType schema,
31 |             Set<String> sourcePaths,
32 |             String destinationPath,
33 |             String delimiter) {
34 |         Optional<Dataset<Row>> unioned = sourcePaths.stream()
35 |                 .map(path -> sparkSession
36 |                         .read()
37 |                         .format("csv")
38 |                         .option("delimiter", delimiter)
39 |                         .schema(schema)
40 |                         .load(path))
41 |                 .reduce(Dataset::union);
42 |         if (!unioned.isPresent()) {
43 |             return;
44 |         }
45 |         unioned.get().write().format("parquet").save(destinationPath);
46 |     }
47 | }
48 | 
--------------------------------------------------------------------------------
/spark-tpcds-benchmark-runner/src/main/java/com/palantir/spark/benchmark/config/FilesystemConfiguration.java:
--------------------------------------------------------------------------------
1 | /*
2 |  * (c) Copyright 2020 Palantir Technologies Inc. All rights reserved.
3 |  *
4 |  * Licensed under the Apache License, Version 2.0 (the "License");
5 |  * you may not use this file except in compliance with the License.
6 | * You may obtain a copy of the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | * 10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | * See the License for the specific language governing permissions and 14 | * limitations under the License. 15 | */ 16 | 17 | package com.palantir.spark.benchmark.config; 18 | 19 | import com.fasterxml.jackson.annotation.JsonIgnoreProperties; 20 | import com.fasterxml.jackson.annotation.JsonProperty; 21 | import com.fasterxml.jackson.annotation.JsonSubTypes; 22 | import com.fasterxml.jackson.annotation.JsonTypeInfo; 23 | import java.util.Map; 24 | 25 | @JsonTypeInfo(use = JsonTypeInfo.Id.NAME, property = "type", visible = true) 26 | @JsonSubTypes({ 27 | @JsonSubTypes.Type(value = SimpleFilesystemConfiguration.class, name = FilesystemConfiguration.SIMPLE_TYPE), 28 | @JsonSubTypes.Type(value = S3Configuration.class, name = FilesystemConfiguration.AMAZON_S3_TYPE), 29 | @JsonSubTypes.Type(value = AzureBlobStoreConfiguration.class, name = FilesystemConfiguration.AZURE_BLOB_STORE) 30 | }) 31 | @JsonIgnoreProperties(ignoreUnknown = true) 32 | public abstract class FilesystemConfiguration { 33 | public static final String SIMPLE_TYPE = "simple"; 34 | public static final String AMAZON_S3_TYPE = "s3a"; 35 | public static final String AZURE_BLOB_STORE = "azure"; 36 | 37 | @JsonProperty("type") 38 | public abstract String type(); 39 | 40 | public abstract String baseUri(); 41 | 42 | public abstract Map<String, String> toHadoopConf(); 43 | } 44 |
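The type constants above drive Jackson's polymorphic deserialization: the `type` field of each YAML `filesystems` entry selects the matching subtype. A hypothetical illustration (the entry names and URIs are invented; the repo's var/conf/config.yml shows the full documented form):

filesystems:
  local:
    type: simple      # -> SimpleFilesystemConfiguration
    baseUri: 'file:///tmp/benchmark'
  storage:
    type: s3a         # -> S3Configuration
    baseUri: 's3a://my-bucket/'

-------------------------------------------------------------------------------- /spark-tpcds-benchmark-runner/src/main/java/com/palantir/spark/benchmark/queries/SortBenchmarkQuery.java: -------------------------------------------------------------------------------- 1 | /* 2 | * (c) Copyright 2020 Palantir Technologies Inc. All rights reserved. 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License. 6 | * You may obtain a copy of the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | * 10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | * See the License for the specific language governing permissions and 14 | * limitations under the License.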
15 | */ 16 | 17 | package com.palantir.spark.benchmark.queries; 18 | 19 | import java.util.Optional; 20 | import java.util.function.Supplier; 21 | import org.apache.spark.sql.Dataset; 22 | import org.apache.spark.sql.Row; 23 | import org.apache.spark.sql.SparkSession; 24 | import org.apache.spark.sql.types.StructType; 25 | 26 | public final class SortBenchmarkQuery implements Query { 27 | public static final String TABLE_NAME = "gensort_data"; 28 | 29 | private final SparkSession spark; 30 | private final Supplier<Dataset<Row>> datasetSupplier; 31 | 32 | public SortBenchmarkQuery(SparkSession spark) { 33 | this.spark = spark; 34 | this.datasetSupplier = this::buildDataset; 35 | } 36 | 37 | @Override 38 | public String getName() { 39 | return "gen-sort-benchmark"; 40 | } 41 | 42 | @Override 43 | public Optional<String> getSqlStatement() { 44 | return Optional.empty(); 45 | } 46 | 47 | @Override 48 | public StructType getSchema() { 49 | return null; 50 | } 51 | 52 | @Override 53 | public void save(String resultLocation) { 54 | this.datasetSupplier.get().write().format("parquet").save(resultLocation); 55 | } 56 | 57 | private Dataset<Row> buildDataset() { 58 | return spark.table(TABLE_NAME).sort("record"); 59 | } 60 | } 61 |
-------------------------------------------------------------------------------- /spark-tpcds-benchmark-runner/src/main/java/com/palantir/spark/benchmark/queries/QuerySessionIdentifier.java: -------------------------------------------------------------------------------- 1 | /* 2 | * (c) Copyright 2020 Palantir Technologies Inc. All rights reserved. 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License. 6 | * You may obtain a copy of the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | * 10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | * See the License for the specific language governing permissions and 14 | * limitations under the License.
15 | */ 16 | 17 | package com.palantir.spark.benchmark.queries; 18 | 19 | import com.fasterxml.jackson.databind.annotation.JsonDeserialize; 20 | import com.fasterxml.jackson.databind.annotation.JsonSerialize; 21 | import com.palantir.spark.benchmark.immutables.ImmutablesStyle; 22 | import java.util.UUID; 23 | import org.immutables.value.Value; 24 | 25 | @Value.Immutable 26 | @ImmutablesStyle 27 | @JsonSerialize(as = ImmutableQuerySessionIdentifier.class) 28 | @JsonDeserialize(as = ImmutableQuerySessionIdentifier.class) 29 | public interface QuerySessionIdentifier { 30 | String NO_SESSION = "NO_SESSION"; 31 | String SESSION_ID = UUID.randomUUID().toString(); 32 | 33 | @Value.Parameter 34 | String queryName(); 35 | 36 | @Value.Parameter 37 | int scale(); 38 | 39 | @Value.Default 40 | default int iteration() { 41 | return -1; 42 | } 43 | 44 | @Value.Default 45 | default String session() { 46 | return SESSION_ID; 47 | } 48 | 49 | final class Builder extends ImmutableQuerySessionIdentifier.Builder {} 50 | 51 | static Builder builder() { 52 | return new Builder(); 53 | } 54 | 55 | static QuerySessionIdentifier createDefault(String queryName, int scale, int iteration) { 56 | return builder().queryName(queryName).scale(scale).iteration(iteration).build(); 57 | } 58 | } 59 | -------------------------------------------------------------------------------- /spark-tpcds-benchmark-runner/src/main/java/com/palantir/spark/benchmark/config/S3Configuration.java: -------------------------------------------------------------------------------- 1 | /* 2 | * (c) Copyright 2019 Palantir Technologies Inc. All rights reserved. 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License. 6 | * You may obtain a copy of the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | * 10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | * See the License for the specific language governing permissions and 14 | * limitations under the License. 
15 | */ 16 | 17 | package com.palantir.spark.benchmark.config; 18 | 19 | import com.fasterxml.jackson.databind.annotation.JsonDeserialize; 20 | import com.google.common.collect.ImmutableMap; 21 | import com.palantir.spark.benchmark.immutables.ImmutablesConfigStyle; 22 | import java.util.Map; 23 | import java.util.Optional; 24 | import org.apache.hadoop.fs.s3a.Constants; 25 | import org.immutables.value.Value; 26 | 27 | @Value.Immutable 28 | @ImmutablesConfigStyle 29 | @JsonDeserialize(as = ImmutableS3Configuration.class) 30 | public abstract class S3Configuration extends FilesystemConfiguration { 31 | @Override 32 | public final String type() { 33 | return FilesystemConfiguration.AMAZON_S3_TYPE; 34 | } 35 | 36 | public abstract Optional<String> accessKey(); 37 | 38 | public abstract Optional<String> secretKey(); 39 | 40 | @Override 41 | public final Map<String, String> toHadoopConf() { 42 | ImmutableMap.Builder<String, String> builder = 43 | ImmutableMap.<String, String>builder().put(Constants.FAST_UPLOAD, "true"); 44 | accessKey().ifPresent(accessKey -> builder.put(Constants.ACCESS_KEY, accessKey)); 45 | secretKey().ifPresent(secretKey -> builder.put(Constants.SECRET_KEY, secretKey)); 46 | return builder.build(); 47 | } 48 | 49 | public static Builder builder() { 50 | return new Builder(); 51 | } 52 | 53 | public static class Builder extends ImmutableS3Configuration.Builder {} 54 | } 55 |
-------------------------------------------------------------------------------- /spark-tpcds-benchmark-runner/build.gradle: -------------------------------------------------------------------------------- 1 | /* 2 | * (c) Copyright 2019 Palantir Technologies Inc. All rights reserved. 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License. 6 | * You may obtain a copy of the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | * 10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | * See the License for the specific language governing permissions and 14 | * limitations under the License.
15 | */ 16 | apply plugin: 'com.palantir.sls-java-service-distribution' 17 | apply from: "${rootDir}/gradle/publish-dist.gradle" 18 | 19 | dependencies { 20 | implementation 'com.fasterxml.jackson.dataformat:jackson-dataformat-yaml' 21 | implementation 'com.fasterxml.jackson.datatype:jackson-datatype-jdk8' 22 | implementation 'com.fasterxml.jackson.datatype:jackson-datatype-guava' 23 | implementation 'com.github.rholder:guava-retrying' 24 | implementation 'com.google.guava:guava' 25 | implementation 'com.palantir.common:streams' 26 | implementation 'com.palantir.safe-logging:preconditions' 27 | implementation 'com.palantir.conjure.java.runtime:client-config' 28 | implementation 'org.apache.spark:spark-sql_2.12' 29 | implementation 'org.apache.spark:spark-hadoop-cloud_2.12' 30 | implementation('org.mapdb:mapdb') { 31 | exclude group: 'net.jpountz.lz4', module: 'lz4' 32 | } 33 | 34 | implementation "org.apache.hadoop:hadoop-aws" 35 | implementation "org.apache.hadoop:hadoop-azure" 36 | runtimeOnly 'org.apache.spark:spark-yarn_2.12' 37 | 38 | testImplementation 'org.assertj:assertj-core' 39 | testImplementation 'org.junit.jupiter:junit-jupiter' 40 | testImplementation 'org.mockito:mockito-core' 41 | testRuntimeOnly 'org.junit.jupiter:junit-jupiter-engine' 42 | 43 | annotationProcessor 'org.immutables:value' 44 | compileOnly 'org.immutables:value::annotations' 45 | } 46 | 47 | distribution { 48 | serviceName rootProject.name 49 | mainClass 'com.palantir.spark.benchmark.BenchmarkRunner' 50 | manifestExtensions 'upgrade-strategy': 'shutdown' 51 | manifestExtensions 'public-proxy-endpoints': [] 52 | } 53 | -------------------------------------------------------------------------------- /spark-tpcds-benchmark-runner/src/main/java/com/palantir/spark/benchmark/constants/TpcdsTable.java: -------------------------------------------------------------------------------- 1 | /* 2 | * (c) Copyright 2019 Palantir Technologies Inc. All rights reserved. 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License. 6 | * You may obtain a copy of the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | * 10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | * See the License for the specific language governing permissions and 14 | * limitations under the License. 
15 | */ 16 | 17 | package com.palantir.spark.benchmark.constants; 18 | 19 | import java.util.stream.Stream; 20 | 21 | public enum TpcdsTable { 22 | CALL_CENTER("call_center"), 23 | CATALOG_PAGE("catalog_page"), 24 | CATALOG_SALES("catalog_sales"), 25 | CATALOG_RETURNS("catalog_returns"), 26 | CUSTOMER("customer"), 27 | CUSTOMER_ADDRESS("customer_address"), 28 | CUSTOMER_DEMOGRAPHICS("customer_demographics"), 29 | DATE_DIM("date_dim"), 30 | HOUSEHOLD_DEMOGRAPHICS("household_demographics"), 31 | INCOME_BAND("income_band"), 32 | INVENTORY("inventory"), 33 | ITEM("item"), 34 | PROMOTION("promotion"), 35 | REASON("reason"), 36 | SHIP_MODE("ship_mode"), 37 | STORE("store"), 38 | STORE_RETURNS("store_returns"), 39 | STORE_SALES("store_sales"), 40 | TIME_DIM("time_dim"), 41 | WAREHOUSE("warehouse"), 42 | WEB_PAGE("web_page"), 43 | WEB_RETURNS("web_returns"), 44 | WEB_SALES("web_sales"), 45 | WEB_SITE("web_site"); 46 | 47 | private final String tableName; 48 | 49 | TpcdsTable(String tableName) { 50 | this.tableName = tableName; 51 | } 52 | 53 | @Override 54 | public String toString() { 55 | return tableName; 56 | } 57 | 58 | public String tableName() { 59 | return tableName; 60 | } 61 | 62 | public static TpcdsTable of(String stringValue) { 63 | return Stream.of(TpcdsTable.values()) 64 | .filter(table -> table.tableName().equalsIgnoreCase(stringValue)) 65 | .findFirst() 66 | .orElseThrow(() -> new IllegalArgumentException(String.format("No table named %s", stringValue))); 67 | } 68 | } 69 | -------------------------------------------------------------------------------- /spark-tpcds-benchmark-runner/src/main/java/com/palantir/spark/benchmark/config/BenchmarkRunnerConfig.java: -------------------------------------------------------------------------------- 1 | /* 2 | * (c) Copyright 2019 Palantir Technologies Inc. All rights reserved. 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License. 6 | * You may obtain a copy of the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | * 10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | * See the License for the specific language governing permissions and 14 | * limitations under the License. 
15 | */ 16 | 17 | package com.palantir.spark.benchmark.config; 18 | 19 | import static com.palantir.logsafe.Preconditions.checkArgument; 20 | 21 | import com.fasterxml.jackson.databind.ObjectMapper; 22 | import com.fasterxml.jackson.databind.annotation.JsonDeserialize; 23 | import com.fasterxml.jackson.dataformat.yaml.YAMLFactory; 24 | import com.fasterxml.jackson.datatype.guava.GuavaModule; 25 | import com.fasterxml.jackson.datatype.jdk8.Jdk8Module; 26 | import com.palantir.spark.benchmark.immutables.ImmutablesConfigStyle; 27 | import java.io.IOException; 28 | import java.nio.file.Path; 29 | import java.util.List; 30 | import org.immutables.value.Value; 31 | 32 | @Value.Immutable 33 | @ImmutablesConfigStyle 34 | @JsonDeserialize(as = ImmutableBenchmarkRunnerConfig.class) 35 | public interface BenchmarkRunnerConfig { 36 | HadoopConfiguration hadoop(); 37 | 38 | SparkConfiguration spark(); 39 | 40 | List<Integer> dataScalesGb(); 41 | 42 | DataGenerationConfiguration dataGeneration(); 43 | 44 | BenchmarksConfiguration benchmarks(); 45 | 46 | @Value.Check 47 | default void check() { 48 | checkArgument(!dataScalesGb().isEmpty(), "Must specify at least one data scale to run benchmarks against."); 49 | dataScalesGb().forEach(scale -> checkArgument(scale > 0, "All data scales must be positive.")); 50 | } 51 | 52 | static BenchmarkRunnerConfig parse(Path configFile) throws IOException { 53 | ObjectMapper objectMapper = 54 | new ObjectMapper(new YAMLFactory()).registerModules(new Jdk8Module(), new GuavaModule()); 55 | return objectMapper.readValue(configFile.toFile(), BenchmarkRunnerConfig.class); 56 | } 57 | 58 | class Builder extends ImmutableBenchmarkRunnerConfig.Builder {} 59 | 60 | static Builder builder() { 61 | return new Builder(); 62 | } 63 | } 64 |
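For illustration, a minimal sketch of loading this config at startup (the file path is invented; `BenchmarkRunner` presumably does something similar with var/conf/config.yml):

// A hypothetical sketch (not part of the repo): parse and inspect the YAML config.
import java.nio.file.Paths;

public final class ConfigSmokeTest {
    public static void main(String[] args) throws Exception {
        BenchmarkRunnerConfig config = BenchmarkRunnerConfig.parse(Paths.get("var/conf/config.yml"));
        // The @Value.Check above has already validated dataScalesGb() by the time parse returns.
        config.dataScalesGb().forEach(scale ->
                System.out.println("Will run benchmarks at scale " + scale + " GB"));
    }
}

-------------------------------------------------------------------------------- /spark-tpcds-benchmark-runner/src/main/java/com/palantir/spark/benchmark/config/AzureBlobStoreConfiguration.java: -------------------------------------------------------------------------------- 1 | /* 2 | * (c) Copyright 2019 Palantir Technologies Inc. All rights reserved. 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License. 6 | * You may obtain a copy of the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | * 10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | * See the License for the specific language governing permissions and 14 | * limitations under the License.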
15 | */ 16 | 17 | package com.palantir.spark.benchmark.config; 18 | 19 | import com.fasterxml.jackson.databind.annotation.JsonDeserialize; 20 | import com.google.common.collect.ImmutableMap; 21 | import com.palantir.spark.benchmark.immutables.ImmutablesConfigStyle; 22 | import java.util.Map; 23 | import java.util.Optional; 24 | import org.apache.commons.lang3.StringUtils; 25 | import org.immutables.value.Value; 26 | 27 | @Value.Immutable 28 | @ImmutablesConfigStyle 29 | @JsonDeserialize(as = ImmutableAzureBlobStoreConfiguration.class) 30 | public abstract class AzureBlobStoreConfiguration extends FilesystemConfiguration { 31 | @Override 32 | public final String type() { 33 | return FilesystemConfiguration.AZURE_BLOB_STORE; 34 | } 35 | 36 | public abstract String accountName(); 37 | 38 | public abstract String accessKey(); 39 | 40 | public abstract String containerName(); 41 | 42 | public abstract Optional<String> workingDirectory(); 43 | 44 | private String realWorkingDirectory() { 45 | return StringUtils.strip(workingDirectory().orElse("/"), "/"); 46 | } 47 | 48 | public final String accessKeyPropertyName() { 49 | return "fs.azure.account.key." + accountName() + ".blob.core.windows.net"; 50 | } 51 | 52 | @Override 53 | public final String baseUri() { 54 | return String.format( 55 | "wasbs://%s@%s.blob.core.windows.net/%s", containerName(), accountName(), realWorkingDirectory()); 56 | } 57 | 58 | @Override 59 | public final Map<String, String> toHadoopConf() { 60 | return ImmutableMap.<String, String>builder() 61 | .put(accessKeyPropertyName(), accessKey()) 62 | .build(); 63 | } 64 | 65 | public static Builder builder() { 66 | return new Builder(); 67 | } 68 | 69 | public static class Builder extends ImmutableAzureBlobStoreConfiguration.Builder {} 70 | } 71 |
-------------------------------------------------------------------------------- /spark-tpcds-benchmark-runner/src/main/java/com/palantir/spark/benchmark/queries/SqlQuery.java: -------------------------------------------------------------------------------- 1 | /* 2 | * (c) Copyright 2020 Palantir Technologies Inc. All rights reserved. 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License. 6 | * You may obtain a copy of the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | * 10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | * See the License for the specific language governing permissions and 14 | * limitations under the License.
15 | */ 16 | 17 | package com.palantir.spark.benchmark.queries; 18 | 19 | import com.google.common.base.Suppliers; 20 | import java.util.Optional; 21 | import java.util.function.Supplier; 22 | import org.apache.spark.sql.Dataset; 23 | import org.apache.spark.sql.Row; 24 | import org.apache.spark.sql.SparkSession; 25 | import org.apache.spark.sql.types.StructType; 26 | 27 | public final class SqlQuery implements Query { 28 | private final String queryName; 29 | private final String sqlStatement; 30 | private final Supplier<Dataset<Row>> datasetSupplier; 31 | 32 | public SqlQuery(SparkSession spark, String queryName, String sqlStatement) { 33 | this.queryName = queryName; 34 | this.sqlStatement = sqlStatement; 35 | this.datasetSupplier = Suppliers.memoize(() -> sanitizeColumnNames(spark.sql(this.sqlStatement))); 36 | } 37 | 38 | @Override 39 | public String getName() { 40 | return queryName; 41 | } 42 | 43 | @Override 44 | public Optional<String> getSqlStatement() { 45 | return Optional.of(sqlStatement); 46 | } 47 | 48 | @Override 49 | public StructType getSchema() { 50 | return datasetSupplier.get().schema(); 51 | } 52 | 53 | @Override 54 | public void save(String resultLocation) { 55 | datasetSupplier.get().write().format("parquet").save(resultLocation); 56 | } 57 | 58 | private Dataset<Row> sanitizeColumnNames(Dataset<Row> sqlOutput) { 59 | Dataset<Row> sanitizedSqlOutput = sqlOutput; 60 | for (String columnName : sqlOutput.columns()) { 61 | String sanitized = sanitize(columnName); 62 | if (!sanitized.equals(columnName)) { 63 | sanitizedSqlOutput = sanitizedSqlOutput.withColumnRenamed(columnName, sanitized); 64 | } 65 | } 66 | return sanitizedSqlOutput; 67 | } 68 | 69 | private static String sanitize(String name) { 70 | return name.replaceAll("[ ,;{}()]", "X"); 71 | } 72 | } 73 |
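To make `sanitize` concrete: characters that are problematic in Parquet column names (spaces, commas, semicolons, braces, and parentheses) are each replaced with 'X' before results are saved. A hypothetical illustration (these column names are invented):

// "sum(ss_quantity)"   -> "sumXss_quantityX"
// "avg(qty) per store" -> "avgXqtyXXperXstore"

-------------------------------------------------------------------------------- /spark-tpcds-benchmark-runner/src/test/java/com/palantir/spark/benchmark/AbstractLocalSparkTest.java: -------------------------------------------------------------------------------- 1 | /* 2 | * (c) Copyright 2020 Palantir Technologies Inc. All rights reserved. 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License. 6 | * You may obtain a copy of the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | * 10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | * See the License for the specific language governing permissions and 14 | * limitations under the License.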
15 | */ 16 | 17 | package com.palantir.spark.benchmark; 18 | 19 | import com.palantir.spark.benchmark.config.HadoopConfiguration; 20 | import com.palantir.spark.benchmark.config.SimpleFilesystemConfiguration; 21 | import java.io.IOException; 22 | import java.nio.file.Files; 23 | import java.nio.file.Path; 24 | import java.nio.file.Paths; 25 | import java.util.UUID; 26 | import org.apache.spark.sql.SparkSession; 27 | import org.junit.jupiter.api.BeforeEach; 28 | 29 | public abstract class AbstractLocalSparkTest { 30 | protected SparkSession sparkSession; 31 | 32 | @BeforeEach 33 | public void beforeEach() { 34 | sparkSession = SparkSession.builder() 35 | .appName("tests") 36 | .master("local") 37 | .config("spark.ui.enabled", false) 38 | .config("spark.ui.showConsoleProgress", false) 39 | // Adding these here so that we can make sure that these jars are in the classpath. 40 | .config( 41 | "spark.sql.sources.commitProtocolClass", 42 | "org.apache.spark.internal.io.cloud.PathOutputCommitProtocol") 43 | .config( 44 | "spark.sql.parquet.output.committer.class", 45 | "org.apache.spark.internal.io.cloud.BindingParquetOutputCommitter") 46 | .getOrCreate(); 47 | } 48 | 49 | protected final Path createTemporaryWorkingDir(String prefix) throws IOException { 50 | Path directory = Files.createDirectory(Paths.get("/tmp", prefix + "_" + UUID.randomUUID())); 51 | directory.toFile().deleteOnExit(); 52 | return directory; 53 | } 54 | 55 | protected final HadoopConfiguration getHadoopConfiguration(Path destinationDataDirectory) { 56 | String fullyQualifiedDestinationDir = 57 | "file://" + destinationDataDirectory.toFile().getAbsolutePath(); 58 | return HadoopConfiguration.builder() 59 | .defaultFilesystem("local") 60 | .putFilesystems("local", SimpleFilesystemConfiguration.of(fullyQualifiedDestinationDir)) 61 | .build(); 62 | } 63 | } 64 | -------------------------------------------------------------------------------- /spark-tpcds-benchmark-runner/src/main/java/com/palantir/spark/benchmark/registration/TableRegistration.java: -------------------------------------------------------------------------------- 1 | /* 2 | * (c) Copyright 2019 Palantir Technologies Inc. All rights reserved. 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License. 6 | * You may obtain a copy of the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | * 10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | * See the License for the specific language governing permissions and 14 | * limitations under the License. 
15 | */ 16 | 17 | package com.palantir.spark.benchmark.registration; 18 | 19 | import com.palantir.spark.benchmark.constants.TpcdsTable; 20 | import com.palantir.spark.benchmark.paths.BenchmarkPaths; 21 | import com.palantir.spark.benchmark.queries.SortBenchmarkQuery; 22 | import com.palantir.spark.benchmark.schemas.Schemas; 23 | import java.io.IOException; 24 | import java.util.stream.Stream; 25 | import org.apache.hadoop.fs.FileSystem; 26 | import org.apache.hadoop.fs.Path; 27 | import org.apache.spark.sql.SparkSession; 28 | import org.apache.spark.sql.types.StructType; 29 | 30 | public final class TableRegistration { 31 | private final BenchmarkPaths paths; 32 | private final FileSystem dataFileSystem; 33 | private final SparkSession spark; 34 | private final Schemas schemas; 35 | 36 | public TableRegistration(BenchmarkPaths paths, FileSystem dataFileSystem, SparkSession spark, Schemas schemas) { 37 | this.paths = paths; 38 | this.dataFileSystem = dataFileSystem; 39 | this.spark = spark; 40 | this.schemas = schemas; 41 | } 42 | 43 | public void registerTpcdsTables(int scale) { 44 | Stream.of(TpcdsTable.values()).forEach(table -> { 45 | registerTable(table.tableName(), schemas.getSchema(table), scale); 46 | }); 47 | } 48 | 49 | public void registerGensortTable(int scale) { 50 | registerTable(SortBenchmarkQuery.TABLE_NAME, schemas.getGensortSchema(), scale); 51 | } 52 | 53 | private void registerTable(String tableName, StructType schema, int scale) { 54 | String tableLocation = paths.tableParquetLocation(scale, tableName); 55 | Path tablePath = new Path(tableLocation); 56 | try { 57 | if (!dataFileSystem.isDirectory(tablePath)) { 58 | throw new IllegalArgumentException(String.format( 59 | "Table %s not found in Parquet format at %s; was the data generated accordingly?", 60 | tableName, tableLocation)); 61 | } 62 | } catch (IOException e) { 63 | throw new RuntimeException(e); 64 | } 65 | spark.read().format("parquet").schema(schema).load(tableLocation).createOrReplaceTempView(tableName); 66 | } 67 | } 68 | -------------------------------------------------------------------------------- /spark-tpcds-benchmark-runner/src/test/java/com/palantir/spark/benchmark/datagen/TpcdsDataGeneratorIntegrationTest.java: -------------------------------------------------------------------------------- 1 | /* 2 | * (c) Copyright 2020 Palantir Technologies Inc. All rights reserved. 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License. 6 | * You may obtain a copy of the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | * 10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | * See the License for the specific language governing permissions and 14 | * limitations under the License. 
15 | */ 16 | 17 | package com.palantir.spark.benchmark.datagen; 18 | 19 | import static org.assertj.core.api.Assertions.assertThat; 20 | import static org.mockito.Mockito.mock; 21 | 22 | import com.google.common.collect.ImmutableList; 23 | import com.google.common.util.concurrent.MoreExecutors; 24 | import com.palantir.spark.benchmark.AbstractLocalSparkTest; 25 | import com.palantir.spark.benchmark.config.HadoopConfiguration; 26 | import com.palantir.spark.benchmark.paths.BenchmarkPaths; 27 | import com.palantir.spark.benchmark.schemas.Schemas; 28 | import com.palantir.spark.benchmark.util.FileSystems; 29 | import java.nio.file.Files; 30 | import java.nio.file.Path; 31 | import java.nio.file.Paths; 32 | import java.util.stream.Stream; 33 | import org.apache.hadoop.fs.FileSystem; 34 | import org.junit.jupiter.api.Test; 35 | 36 | public final class TpcdsDataGeneratorIntegrationTest extends AbstractLocalSparkTest { 37 | @Test 38 | public void testGeneratesAndUploadsData() throws Exception { 39 | Path workingDir = createTemporaryWorkingDir("working_dir"); 40 | Path destinationDataDirectory = createTemporaryWorkingDir("data"); 41 | HadoopConfiguration hadoopConfiguration = getHadoopConfiguration(destinationDataDirectory); 42 | FileSystem dataFileSystem = FileSystems.createFileSystem( 43 | hadoopConfiguration.defaultFsBaseUri(), hadoopConfiguration.toHadoopConf()); 44 | 45 | BenchmarkPaths paths = new BenchmarkPaths("foo"); 46 | int scale = 1; 47 | TpcdsDataGenerator generator = new TpcdsDataGenerator( 48 | workingDir, 49 | ImmutableList.of(scale), 50 | false, 51 | dataFileSystem, 52 | mock(ParquetTransformer.class), 53 | sparkSession, 54 | paths, 55 | new Schemas(), 56 | MoreExecutors.newDirectExecutorService()); 57 | generator.generateData(); 58 | try (Stream<Path> generatedCsvFiles = Files.list( 59 | Paths.get(hadoopConfiguration.defaultFsBaseUri().getPath(), paths.csvDir(scale))) 60 | .filter(path -> path.toString().endsWith(".csv"))) { 61 | assertThat(generatedCsvFiles.count()).isEqualTo(25); 62 | } 63 | } 64 | } 65 |
-------------------------------------------------------------------------------- /.policy.yml: -------------------------------------------------------------------------------- 1 | # Excavator auto-updates this file. Please contribute improvements to the central template.
2 | 3 | policy: 4 | approval: 5 | - or: 6 | - one admin has approved (PR contributors not allowed) 7 | - two admins have approved 8 | - changelog only and contributor approval 9 | - fixing excavator 10 | - excavator only touched baseline, circle, gradle files, godel files, docker-compose-rule config or versions.props 11 | - excavator only touched config files 12 | - bots updated package.json and lock files 13 | disapproval: 14 | requires: 15 | organizations: [ "palantir" ] 16 | 17 | approval_rules: 18 | - name: one admin has approved (PR contributors not allowed) 19 | options: 20 | allow_contributor: false 21 | requires: 22 | count: 1 23 | admins: true 24 | 25 | - name: two admins have approved 26 | options: 27 | allow_contributor: true 28 | requires: 29 | count: 2 30 | admins: true 31 | 32 | - name: changelog only and contributor approval 33 | options: 34 | allow_contributor: true 35 | requires: 36 | count: 1 37 | admins: true 38 | if: 39 | only_changed_files: 40 | paths: 41 | - "changelog/@unreleased/.*\\.yml" 42 | 43 | - name: fixing excavator 44 | options: 45 | allow_contributor: true 46 | requires: 47 | count: 1 48 | admins: true 49 | if: 50 | has_author_in: 51 | users: [ "svc-excavator-bot" ] 52 | 53 | - name: excavator only touched baseline, circle, gradle files, godel files, docker-compose-rule config or versions.props 54 | requires: 55 | count: 0 56 | if: 57 | has_author_in: 58 | users: [ "svc-excavator-bot" ] 59 | only_changed_files: 60 | # product-dependencies.lock should never go here, to force review of all product (SLS) dependency changes 61 | # this way excavator cannot change the deployability of a service or product via auto-merge 62 | paths: 63 | - "changelog/@unreleased/.*\\.yml" 64 | - "^\\.baseline/.*$" 65 | - "^\\.circleci/.*$" 66 | - "^\\.docker-compose-rule\\.yml$" 67 | - "^.*gradle$" 68 | - "^gradle/wrapper/.*" 69 | - "^gradlew$" 70 | - "^gradlew.bat$" 71 | - "^gradle.properties$" 72 | - "^settings.gradle$" 73 | - "^godelw$" 74 | - "^godel/config/godel.properties$" 75 | - "^versions.props$" 76 | - "^versions.lock$" 77 | 78 | - name: excavator only touched config files 79 | requires: 80 | count: 0 81 | if: 82 | has_author_in: 83 | users: [ "svc-excavator-bot" ] 84 | only_changed_files: 85 | paths: 86 | - "^\\..*.yml$" 87 | - "^\\.github/.*$" 88 | 89 | - name: bots updated package.json and lock files 90 | requires: 91 | count: 0 92 | if: 93 | has_author_in: 94 | users: 95 | - "svc-excavator-bot" 96 | - "dependabot[bot]" 97 | only_changed_files: 98 | paths: 99 | - "^.*yarn.lock$" 100 | - "^.*package.json$" 101 | -------------------------------------------------------------------------------- /gradlew.bat: -------------------------------------------------------------------------------- 1 | @rem 2 | @rem Copyright 2015 the original author or authors. 3 | @rem 4 | @rem Licensed under the Apache License, Version 2.0 (the "License"); 5 | @rem you may not use this file except in compliance with the License. 6 | @rem You may obtain a copy of the License at 7 | @rem 8 | @rem https://www.apache.org/licenses/LICENSE-2.0 9 | @rem 10 | @rem Unless required by applicable law or agreed to in writing, software 11 | @rem distributed under the License is distributed on an "AS IS" BASIS, 12 | @rem WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | @rem See the License for the specific language governing permissions and 14 | @rem limitations under the License. 
15 | @rem 16 | 17 | @if "%DEBUG%" == "" @echo off 18 | @rem ########################################################################## 19 | @rem 20 | @rem Gradle startup script for Windows 21 | @rem 22 | @rem ########################################################################## 23 | 24 | @rem Set local scope for the variables with windows NT shell 25 | if "%OS%"=="Windows_NT" setlocal 26 | 27 | set DIRNAME=%~dp0 28 | if "%DIRNAME%" == "" set DIRNAME=. 29 | set APP_BASE_NAME=%~n0 30 | set APP_HOME=%DIRNAME% 31 | 32 | @rem Add default JVM options here. You can also use JAVA_OPTS and GRADLE_OPTS to pass JVM options to this script. 33 | set DEFAULT_JVM_OPTS="-Xmx64m" "-Xms64m" 34 | 35 | @rem Find java.exe 36 | if defined JAVA_HOME goto findJavaFromJavaHome 37 | 38 | set JAVA_EXE=java.exe 39 | %JAVA_EXE% -version >NUL 2>&1 40 | if "%ERRORLEVEL%" == "0" goto init 41 | 42 | echo. 43 | echo ERROR: JAVA_HOME is not set and no 'java' command could be found in your PATH. 44 | echo. 45 | echo Please set the JAVA_HOME variable in your environment to match the 46 | echo location of your Java installation. 47 | 48 | goto fail 49 | 50 | :findJavaFromJavaHome 51 | set JAVA_HOME=%JAVA_HOME:"=% 52 | set JAVA_EXE=%JAVA_HOME%/bin/java.exe 53 | 54 | if exist "%JAVA_EXE%" goto init 55 | 56 | echo. 57 | echo ERROR: JAVA_HOME is set to an invalid directory: %JAVA_HOME% 58 | echo. 59 | echo Please set the JAVA_HOME variable in your environment to match the 60 | echo location of your Java installation. 61 | 62 | goto fail 63 | 64 | :init 65 | @rem Get command-line arguments, handling Windows variants 66 | 67 | if not "%OS%" == "Windows_NT" goto win9xME_args 68 | 69 | :win9xME_args 70 | @rem Slurp the command line arguments. 71 | set CMD_LINE_ARGS= 72 | set _SKIP=2 73 | 74 | :win9xME_args_slurp 75 | if "x%~1" == "x" goto execute 76 | 77 | set CMD_LINE_ARGS=%* 78 | 79 | :execute 80 | @rem Setup the command line 81 | 82 | set CLASSPATH=%APP_HOME%\gradle\wrapper\gradle-wrapper.jar 83 | 84 | @rem Execute Gradle 85 | "%JAVA_EXE%" %DEFAULT_JVM_OPTS% %JAVA_OPTS% %GRADLE_OPTS% "-Dorg.gradle.appname=%APP_BASE_NAME%" -classpath "%CLASSPATH%" org.gradle.wrapper.GradleWrapperMain %CMD_LINE_ARGS% 86 | 87 | :end 88 | @rem End local scope for the variables with windows NT shell 89 | if "%ERRORLEVEL%"=="0" goto mainEnd 90 | 91 | :fail 92 | rem Set variable GRADLE_EXIT_CONSOLE if you need the _script_ return code instead of 93 | rem the _cmd.exe /c_ return code! 94 | if not "" == "%GRADLE_EXIT_CONSOLE%" exit 1 95 | exit /b 1 96 | 97 | :mainEnd 98 | if "%OS%"=="Windows_NT" endlocal 99 | 100 | :omega 101 | -------------------------------------------------------------------------------- /scripts/azure_mount_and_encrypt_ephemeral_disk.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -euo pipefail 4 | 5 | function die() { 6 | local MSG="${1}" 7 | echo "${MSG}" 8 | exit 1 9 | } 10 | 11 | function create_mount_path() { 12 | local MOUNT_PATH="${1}" 13 | mkdir $MOUNT_PATH 14 | chown root: $MOUNT_PATH 15 | chmod 1777 $MOUNT_PATH 16 | } 17 | 18 | function create_filesystem_and_mount() { 19 | local DEVICE="${1}" 20 | local MOUNT_PATH="${2}" 21 | [[ ! -d $MOUNT_PATH ]] && create_mount_path $MOUNT_PATH 22 | mkfs.xfs "${DEVICE}" 23 | mount -o "defaults,nosuid,noatime,nodev" "${DEVICE}" ${MOUNT_PATH} 24 | chmod 1777 "${MOUNT_PATH}" 25 | } 26 | 27 | function unmount_drive_if_mounted() { 28 | local DEVICE="${1}" 29 | set +e 30 | mount | grep "${DEVICE}" &>/dev/null 31 | [[ $? 
-ne 0 ]] && { set -e; return; } 32 | set -e 33 | umount "${DEVICE}" 34 | } 35 | 36 | function luks_encrypt_drive() { 37 | local DEVICE="${1}" 38 | local KEY="${2}" 39 | local DEVICE_MAPPER_PATH="${3}" 40 | local DEVICE_MAPPER_BASENAME=$(basename $DEVICE_MAPPER_PATH) 41 | [[ -h ${DEVICE_MAPPER_PATH} ]] && cryptsetup -q luksClose "${DEVICE_MAPPER_PATH}" 42 | echo -n $KEY | cryptsetup -q luksFormat "${DEVICE}" - 43 | echo -n $KEY | cryptsetup -q luksOpen "${DEVICE}" "${DEVICE_MAPPER_BASENAME}" - 44 | } 45 | 46 | function ensure_drive_in_fstab() { # no-op placeholder; fstab persistence is not implemented 47 | local DEVICE_MAPPER_PATH="${1}" 48 | local EPHEMERAL_MOUNT_PATH="${2}" 49 | } 50 | 51 | function stop_raid() { 52 | local DEVICE="${1}" 53 | [[ -e "${DEVICE}" ]] && mdadm --stop "${DEVICE}" 54 | [[ -e "${DEVICE}_0" ]] && mdadm --stop "${DEVICE}_0" 55 | } 56 | 57 | function create_raid() { 58 | local MD_DEVICE="$1" 59 | shift 1 60 | local DISK_COUNT="$#" 61 | local DISKS="$@" 62 | udevadm settle 63 | udevadm control --stop-exec-queue 64 | mdadm --create $MD_DEVICE --level=0 --run --raid-devices=$DISK_COUNT $DISKS --verbose 65 | udevadm control --start-exec-queue 66 | udevadm settle 67 | } 68 | 69 | EPHEMERAL_MOUNT_PATH=/scratch 70 | LUKS_KEY=$(dd status=none bs=1 count=1024 if=/dev/urandom | tr -dc 'a-zA-Z0-9' | fold -w 32 | head -1) 71 | DEVICE_MAPPER_PATH=/dev/mapper/ephemeral_drive 72 | EPHEMERAL_DEVICE=$(readlink -f /dev/disk/azure/resource-part1) 73 | 74 | [[ -z ${EPHEMERAL_DEVICE} ]] && die "Unable to locate ephemeral device!" 75 | 76 | unmount_drive_if_mounted $EPHEMERAL_DEVICE 77 | luks_encrypt_drive $EPHEMERAL_DEVICE $LUKS_KEY $DEVICE_MAPPER_PATH 78 | 79 | create_filesystem_and_mount $DEVICE_MAPPER_PATH $EPHEMERAL_MOUNT_PATH 80 | ensure_drive_in_fstab $DEVICE_MAPPER_PATH $EPHEMERAL_MOUNT_PATH 81 | 82 | NVME_EPHEMERAL_DEVICES=$(ls /dev/disk/by-id/nvme-Microsoft_NVMe_Direct_Disk_* 2>/dev/null || true) # tolerate machines without NVMe disks under set -e 83 | 84 | if [[ -z ${NVME_EPHEMERAL_DEVICES} ]]; then 85 | echo "No NVMe devices found." 86 | else 87 | NVME_DEVICE_MD_PATH=/dev/md/ephemeral_drive 88 | NVME_DEVICE_MAPPER_PATH=/dev/mapper/nvme_ephemeral_drive 89 | NVME_EPHEMERAL_MOUNT_PATH=/nvme_scratch 90 | 91 | unmount_drive_if_mounted $NVME_DEVICE_MAPPER_PATH 92 | stop_raid $NVME_DEVICE_MD_PATH 93 | create_raid $NVME_DEVICE_MD_PATH $NVME_EPHEMERAL_DEVICES 94 | 95 | RESOLVED_DEVICE_MD_PATH="$(readlink -f $NVME_DEVICE_MD_PATH)" 96 | luks_encrypt_drive $RESOLVED_DEVICE_MD_PATH $LUKS_KEY $NVME_DEVICE_MAPPER_PATH 97 | 98 | create_filesystem_and_mount $NVME_DEVICE_MAPPER_PATH $NVME_EPHEMERAL_MOUNT_PATH 99 | ensure_drive_in_fstab $NVME_DEVICE_MAPPER_PATH $NVME_EPHEMERAL_MOUNT_PATH 100 | fi 101 | 102 | echo "Done." 103 |
-------------------------------------------------------------------------------- /spark-tpcds-benchmark-runner/src/main/java/com/palantir/spark/benchmark/paths/BenchmarkPaths.java: -------------------------------------------------------------------------------- 1 | /* 2 | * (c) Copyright 2019 Palantir Technologies Inc. All rights reserved. 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License. 6 | * You may obtain a copy of the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | * 10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | * See the License for the specific language governing permissions and 14 | * limitations under the License. 15 | */ 16 | 17 | package com.palantir.spark.benchmark.paths; 18 | 19 | import com.palantir.spark.benchmark.constants.TpcdsTable; 20 | import com.palantir.spark.benchmark.queries.QuerySessionIdentifier; 21 | import java.io.File; 22 | import org.apache.commons.lang3.StringUtils; 23 | 24 | public final class BenchmarkPaths { 25 | private final String experimentName; 26 | 27 | public BenchmarkPaths(String experimentName) { 28 | this.experimentName = experimentName; 29 | } 30 | 31 | public String rootDataDir(int scale) { 32 | return String.join(File.separator, "benchmark_data", String.format("scale=%d", scale)); 33 | } 34 | 35 | public String tableCsvFile(int scale, TpcdsTable table) { 36 | return tableCsvFile(scale, table.tableName()); 37 | } 38 | 39 | public String tableCsvFile(int scale, String tableName) { 40 | return String.join(File.separator, csvDir(scale), String.format("%s.csv", tableName)); 41 | } 42 | 43 | public String tableCsvFile(int scale, String tableName, long partitionIndex) { 44 | return String.join(File.separator, csvDir(scale), String.format("%s_%s.csv", tableName, partitionIndex)); 45 | } 46 | 47 | public String tableParquetLocation(int scale, TpcdsTable table) { 48 | return tableParquetLocation(scale, table.tableName()); 49 | } 50 | 51 | public String tableParquetLocation(int scale, String tableName) { 52 | return String.join(File.separator, parquetDir(scale), tableName); 53 | } 54 | 55 | public String csvDir(int scale) { 56 | return String.join(File.separator, rootDataDir(scale), "raw_csv"); 57 | } 58 | 59 | public String parquetDir(int scale) { 60 | return String.join(File.separator, rootDataDir(scale), "raw_parquet"); 61 | } 62 | 63 | public String experimentResultLocation(QuerySessionIdentifier identifier, int attempt) { 64 | return String.join( 65 | File.separator, 66 | "experiments_query_results", 67 | experimentName, 68 | String.format("scale=%d", identifier.scale()), 69 | StringUtils.removeEnd(identifier.queryName(), ".sql"), 70 | String.format("session=%s", identifier.session()), 71 | String.format("attempt=%d", attempt)); 72 | } 73 | 74 | public String experimentCorrectnessHashesRoot(int scale) { 75 | return String.join( 76 | File.separator, "experiments_correctness_hashes", experimentName, String.format("scale=%d", scale)); 77 | } 78 | 79 | public String experimentCorrectnessHashesLocation(int scale, String queryName) { 80 | return String.join( 81 | File.separator, experimentCorrectnessHashesRoot(scale), StringUtils.removeEnd(queryName, ".sql")); 82 | } 83 | 84 | public String metricsDir() { 85 | return String.join(File.separator, "benchmark_results", experimentName); 86 | } 87 | } 88 | -------------------------------------------------------------------------------- /spark-tpcds-benchmark-runner/src/test/java/com/palantir/spark/benchmark/datagen/BenchmarkMetricsTest.java: -------------------------------------------------------------------------------- 1 | /* 2 | * (c) Copyright 2020 Palantir Technologies Inc. All rights reserved. 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License. 
6 | * You may obtain a copy of the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | * 10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | * See the License for the specific language governing permissions and 14 | * limitations under the License. 15 | */ 16 | 17 | package com.palantir.spark.benchmark.datagen; 18 | 19 | import static org.assertj.core.api.Assertions.assertThat; 20 | 21 | import com.palantir.spark.benchmark.AbstractLocalSparkTest; 22 | import com.palantir.spark.benchmark.TestIdentifiers; 23 | import com.palantir.spark.benchmark.config.SparkConfiguration; 24 | import com.palantir.spark.benchmark.metrics.BenchmarkMetric; 25 | import com.palantir.spark.benchmark.metrics.BenchmarkMetrics; 26 | import com.palantir.spark.benchmark.paths.BenchmarkPaths; 27 | import com.palantir.spark.benchmark.queries.QuerySessionIdentifier; 28 | import java.nio.file.Path; 29 | import java.nio.file.Paths; 30 | import java.util.List; 31 | import java.util.UUID; 32 | import org.apache.spark.sql.Row; 33 | import org.assertj.core.util.Files; 34 | import org.junit.jupiter.api.Test; 35 | 36 | public final class BenchmarkMetricsTest extends AbstractLocalSparkTest { 37 | @Test 38 | public void testMetrics() throws Exception { 39 | String experimentName = "test-experiment-" + UUID.randomUUID().toString(); 40 | BenchmarkPaths paths = new BenchmarkPaths(experimentName); 41 | Path metricsDir = createTemporaryWorkingDir("metrics_dir"); 42 | String metricsBaseUri = "file://" + metricsDir.toAbsolutePath(); 43 | BenchmarkMetrics metrics = new BenchmarkMetrics( 44 | SparkConfiguration.builder().build(), metricsBaseUri, experimentName, paths, sparkSession); 45 | QuerySessionIdentifier identifier1 = TestIdentifiers.create("q1", 10); 46 | metrics.startBenchmark(identifier1, 0); 47 | metrics.stopBenchmark(identifier1, 0); 48 | metrics.markVerificationFailed(identifier1); 49 | 50 | QuerySessionIdentifier identifier2 = TestIdentifiers.create("q2", 10); 51 | metrics.startBenchmark(identifier2, 0); 52 | metrics.stopBenchmark(identifier2, 0); 53 | 54 | // drop sparkConf for legibility on test failures 55 | List<Row> metricsRows = metrics.getMetricsDataset().drop("sparkConf").collectAsList(); 56 | assertThat(metricsRows).hasSize(2); 57 | assertThat(metrics.getMetricsDataset().selectExpr("failedVerification").collectAsList().stream() 58 | .map(row -> row.getBoolean(0))) 59 | .containsExactlyInAnyOrder(true, false); 60 | assertThat(metrics.getMetricsDataset().selectExpr("sessionId").collectAsList().stream() 61 | .map(row -> row.getString(0)) 62 | .map(UUID::fromString) 63 | .distinct()) 64 | .hasSize(2); 65 | 66 | metrics.flushMetrics(); 67 | assertThat(metrics.getMetricsDataset().collectAsList()).isEmpty(); 68 | assertThat(sparkSession 69 | .read() 70 | .schema(BenchmarkMetric.schema()) 71 | .json(Paths.get(metricsBaseUri, paths.metricsDir()).toString()) 72 | .drop("sparkConf") 73 | .collectAsList()) 74 | .containsExactlyInAnyOrderElementsOf(metricsRows); 75 | 76 | // clean up 77 | Files.delete(Paths.get(metricsBaseUri, paths.metricsDir()).toFile()); 78 | } 79 | } 80 |
-------------------------------------------------------------------------------- /spark-tpcds-benchmark-runner/var/conf/config.yml: -------------------------------------------------------------------------------- 1 | dataGeneration: 2 | gensort: 3 | # Whether to
generate data for the sort benchmark. 4 | enabled: true 5 | tpcds: 6 | # Whether to generate data for the TPC-DS benchmark. 7 | enabled: true 8 | tempWorkingDir: /tmp/spark-benchmark/datagen-working-dir 9 | 10 | # Whether to overwrite any previously generated data. If set to false, the program will fail with an exception. 11 | overwriteData: false 12 | 13 | # The number of threads used for data generation. 14 | parallelism: 5 15 | 16 | benchmarks: 17 | # Optional. Defaults to current timestamp. 18 | experimentName: experiment-1 19 | 20 | gensort: 21 | # Whether to run the sort benchmark. 22 | enabled: false 23 | tpcds: 24 | # Whether to run the TPC-DS benchmark suite. 25 | enabled: false 26 | 27 | # The number of iterations to run each benchmark. 28 | iterations: 10 29 | 30 | # A list of sizes in Gigabytes to generate the data and run the benchmarks. 31 | dataScalesGb: 32 | - 10 33 | - 1024 34 | 35 | hadoop: 36 | # A map of a filesystem name to its settings. The names are used to reference the filesystem in the settings below. 37 | # Supported types are "azure", "s3a" and "simple". Use "simple" for HDFS and local file systems. 38 | filesystems: 39 | azure: 40 | accessKey: __PLACEHOLDER__ 41 | accountName: __PLACEHOLDER__ 42 | containerName: spark-benchmark 43 | type: azure 44 | local: 45 | baseUri: 'file:///home/palantir/spark-benchmark' 46 | type: simple 47 | s3a: 48 | baseUri: 's3a://spark-benchmark/' 49 | accessKey: __PLACEHOLDER__ # Optional 50 | secretKey: __PLACEHOLDER__ # Optional 51 | type: s3a 52 | 53 | # The filesystem to be used to generate data, read the inputs from and write the query results to. 54 | defaultFilesystem: s3a 55 | 56 | # The filesystem to be used to write the query duration related metrics to. We recommend using a "simple" filesystem for this. 57 | # This is optional and will default to "defaultFilesystem" if empty. 58 | metricsFileSystem: local 59 | 60 | hadoopConf: 61 | hadoop.tmp.dir: /scratch/hadoop-tmp 62 | 63 | # S3 specific settings to commit data consistently. 64 | fs.s3a.committer.name: directory 65 | fs.s3a.committer.staging.conflict-mode: append 66 | 67 | hadoopConfDirs: 68 | # Directory containing core-site.xml, hdfs-site.xml and yarn-site.xml if necessary. 69 | - /path/to/hadoop/conf 70 | spark: 71 | # Run local spark by default. 72 | master: local[4] 73 | 74 | # If running a distributed benchmark: 75 | # executorCores: 2 76 | # executorInstances: 48 77 | # executorMemory: 8G 78 | 79 | sparkConf: 80 | # # If running a distributed benchmark on YARN: 81 | # master: yarn 82 | # spark.yarn.executor.memoryOverhead: 2G 83 | # spark.executorEnv.JAVA_HOME: /path/to/java/home 84 | # spark.yarn.jars: service/lib/* 85 | # spark.yarn.stagingDir: 'hdfs://my-hdfs/user/palantir' 86 | 87 | spark.hadoop.hadoop.tmp.dir: /scratch/hadoop-tmp 88 | 89 | # Use a more reliable committer settings. 90 | spark.hadoop.mapreduce.fileoutputcommitter.algorithm.version: 2 91 | spark.sql.parquet.output.committer.class: org.apache.spark.internal.io.cloud.BindingParquetOutputCommitter 92 | spark.sql.sources.commitProtocolClass: org.apache.spark.internal.io.cloud.PathOutputCommitProtocol 93 | 94 | # S3 specific settings to commit data consistently. 95 | spark.hadoop.mapreduce.outputcommitter.factory.scheme.s3a: org.apache.hadoop.fs.s3a.commit.S3ACommitterFactory 96 | 97 | # Use adaptive query execution for partitioning generated data better, as well as performing joins more efficiently. 
98 | spark.sql.adaptive.enabled: true 99 | 100 | # Set a very high number, so that the benchmark can continue even if certain queries fail. 101 | spark.yarn.max.executor.failures: 1000 102 |
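For a quick local smoke test, a pared-down configuration along these lines should suffice (an illustrative sketch assembled from the options documented above; the defaults for omitted optional fields are not verified here):

dataGeneration:
  gensort: {enabled: true}
  tpcds: {enabled: true}
  tempWorkingDir: /tmp/spark-benchmark/datagen-working-dir
  overwriteData: true
  parallelism: 2
benchmarks:
  gensort: {enabled: true}
  tpcds: {enabled: false}
  iterations: 1
dataScalesGb: [1]
hadoop:
  filesystems:
    local: {type: simple, baseUri: 'file:///tmp/spark-benchmark'}
  defaultFilesystem: local
spark:
  master: local[2]

-------------------------------------------------------------------------------- /readme.md: --------------------------------------------------------------------------------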
6 | 7 | Spark Benchmark Runner 8 | ====================== 9 | 10 | This repo contains tools to run two industry-standard benchmark suites: 11 | 12 | 1. [TPC-DS](http://www.tpc.org/tpcds/) is an open benchmark suite for structured data systems. This utility aims to make it easy to generate TPC-DS data, and to run TPC-DS benchmarks against different versions of Spark. The main use case for this utility is to test performance at scale when evaluating changes to Spark or to its underlying infrastructure. The benchmarks run SQL queries against structured datasets, so this utility is not useful for testing streaming workflows. 13 | 14 | 2. [Sort Benchmark](http://sortbenchmark.org/): This is a single benchmark that sorts a large amount of data generated by the [gensort](http://www.ordinal.com/gensort.html) program. 15 | 16 | The benchmark suite can be run on macOS or CentOS 6+. It does not currently support running on Windows. 17 | 18 | # Usage 19 | 20 | The benchmark suite requires a storage layer, either distributed (such as HDFS, S3, or Azure Blob Storage) or local, to store the generated test data as well as the computation results. This tool supports running the benchmarks either in local Spark mode on a single JVM, or with a cluster manager such as YARN when running distributed benchmarks across several machines. 21 | 22 | - Download the latest version of the distribution from https://bintray.com/palantir/releases/spark-tpcds-benchmark. 23 | - Upload the distribution to a node in the cluster and unpack it. 24 | - In the distribution, edit `var/conf/config.yml` to match the benchmarking environment you will run with. **Documentation for the various configurable options is provided in the [config.yml](https://github.com/palantir/spark-tpcds-benchmark/blob/develop/spark-tpcds-benchmark-runner/var/conf/config.yml) file.** 25 | - Storage Layer: 26 | - This tool supports any Hadoop-compatible storage layer (e.g. S3, ABS, or HDFS). Once that is set up, the credentials and account details can be updated in the `hadoop` configuration section. Placeholder configuration blocks are provided for S3, ABS and HDFS. 27 | - Compute Layer: 28 | - When running with local Spark, the `spark` configuration section in config.yml should work out of the box. 29 | - When running on a cluster manager, the cluster first needs to be installed and configured. If you use YARN, [this](https://hadoop.apache.org/docs/current/hadoop-yarn/hadoop-yarn-site/YARN.html#:~:text=core%2Ddefault.xml-,Apache%20Hadoop%20YARN,or%20a%20DAG%20of%20jobs.) and [this](http://spark.apache.org/docs/latest/running-on-yarn.html) are good places to start. Once that is done, the `spark` and `hadoop` configuration sections need to be changed to point to the cluster manager. 30 | - Ephemeral Disks: 31 | - We recommend setting `hadoop.tmp.dir` to a fast SSD drive for each machine. It is set to a subfolder in `/scratch` by default. 32 | - On AWS, we typically use m5d/r5d instance types, which come with NVMe SSD ephemeral disks that are not mounted anywhere by default. We use this [script](https://github.com/palantir/spark-tpcds-benchmark/blob/develop/scripts/aws_mount_ephemeral_disk.sh) to mount them at `/scratch`. These disks already have hardware-level encryption, so no LUKS encryption is necessary. 33 | - On Azure, we typically use hc44rs or d48ds_v4 instance types. These come with SSD ephemeral disks that likewise are not mounted by default, and they do not have hardware-level encryption as of the time of writing (July 2020).
We use this [script](https://github.com/palantir/spark-tpcds-benchmark/blob/develop/scripts/azure_mount_and_encrypt_ephemeral_disk.sh) to mount and LUKS-encrypt them. 34 | - Set the JAVA_HOME environment variable to point to Java 11. 35 | - Run `service/bin/init.sh start`. The benchmarks will begin running in the background. The driver exits upon 36 | completing the benchmark suite. 37 | 38 | The performance results of running the benchmarks can be found in JSON files located under `benchmark_results/` in the specified metrics filesystem. 39 | You may use a Spark shell to load these JSON files into DataFrames for additional analysis (a short sketch follows at the end of this README), or download these JSON files 40 | for processing by other tools. Results are grouped by data scale, as defined by the configuration's `dataScalesGb` (documented in config.yml). 41 | 42 | For TPC-DS, the correctness of the computation is checked against the results of previous executions of the benchmark against the 43 | same set of data. If the source data is regenerated and the previous source data is overwritten, the computation results 44 | from previous runs are also invalidated. 45 |
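As one hypothetical sketch of that analysis step (the metrics URI below is invented; `BenchmarkMetric.schema()` is the same schema helper the repo's tests use when reading flushed metrics):

// Hypothetical: load flushed benchmark metrics back into a DataFrame for ad-hoc analysis.
Dataset<Row> metrics = sparkSession
        .read()
        .schema(BenchmarkMetric.schema())
        .json("file:///home/palantir/spark-benchmark/benchmark_results/experiment-1");
metrics.drop("sparkConf").show(false); // drop the verbose Spark conf column for readability

-------------------------------------------------------------------------------- /spark-tpcds-benchmark-runner/src/test/java/com/palantir/spark/benchmark/datagen/GenSortTest.java: -------------------------------------------------------------------------------- 1 | /* 2 | * (c) Copyright 2020 Palantir Technologies Inc. All rights reserved. 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License. 6 | * You may obtain a copy of the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | * 10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | * See the License for the specific language governing permissions and 14 | * limitations under the License.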
-------------------------------------------------------------------------------- /spark-tpcds-benchmark-runner/src/test/java/com/palantir/spark/benchmark/datagen/GenSortTest.java: -------------------------------------------------------------------------------- 1 | /* 2 | * (c) Copyright 2020 Palantir Technologies Inc. All rights reserved. 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License. 6 | * You may obtain a copy of the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | * 10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | * See the License for the specific language governing permissions and 14 | * limitations under the License. 15 | */ 16 | 17 | package com.palantir.spark.benchmark.datagen; 18 | 19 | import static org.assertj.core.api.Assertions.assertThat; 20 | 21 | import com.google.common.collect.ImmutableList; 22 | import com.google.common.util.concurrent.MoreExecutors; 23 | import com.palantir.spark.benchmark.AbstractLocalSparkTest; 24 | import com.palantir.spark.benchmark.TestIdentifiers; 25 | import com.palantir.spark.benchmark.config.HadoopConfiguration; 26 | import com.palantir.spark.benchmark.datagen.GenSortDataGenerator.ScaleAndRecords; 27 | import com.palantir.spark.benchmark.paths.BenchmarkPaths; 28 | import com.palantir.spark.benchmark.queries.SortBenchmarkQuery; 29 | import com.palantir.spark.benchmark.registration.TableRegistration; 30 | import com.palantir.spark.benchmark.schemas.Schemas; 31 | import com.palantir.spark.benchmark.util.FileSystems; 32 | import java.nio.file.Path; 33 | import java.nio.file.Paths; 34 | import java.util.List; 35 | import java.util.stream.Collectors; 36 | import org.apache.hadoop.fs.FileSystem; 37 | import org.junit.jupiter.api.Test; 38 | 39 | public final class GenSortTest extends AbstractLocalSparkTest { 40 | @Test 41 | public void testGeneratesData() throws Exception { 42 | Path workingDir = createTemporaryWorkingDir("working_dir"); 43 | Path destinationDataDirectory = createTemporaryWorkingDir("data"); 44 | HadoopConfiguration hadoopConfiguration = getHadoopConfiguration(destinationDataDirectory); 45 | FileSystem dataFileSystem = FileSystems.createFileSystem( 46 | hadoopConfiguration.defaultFsBaseUri(), hadoopConfiguration.toHadoopConf()); 47 | 48 | BenchmarkPaths paths = new BenchmarkPaths("foo"); 49 | int scale = 1; 50 | int numRecords = 100; 51 | Schemas schemas = new Schemas(); 52 | TableRegistration tableRegistration = new TableRegistration(paths, dataFileSystem, sparkSession, schemas); 53 | 54 | GenSortDataGenerator genSortDataGenerator = new GenSortDataGenerator( 55 | ImmutableList.of(ScaleAndRecords.builder() 56 | .scale(scale) 57 | .numRecords(numRecords) 58 | .build()), 59 | sparkSession, 60 | dataFileSystem, 61 | new DefaultParquetTransformer(), // test that our schema works by copying for real. 62 | paths, 63 | schemas, 64 | workingDir, 65 | true, 66 | MoreExecutors.newDirectExecutorService()); 67 | genSortDataGenerator.generate(); 68 | tableRegistration.registerGensortTable(scale); 69 | 70 | List<String> generatedLines = read( 71 | Paths.get( 72 | hadoopConfiguration.defaultFsBaseUri().getPath(), paths.tableCsvFile(scale, "gensort_data", 0)), 73 | "csv"); 74 | assertThat(generatedLines).hasSize(numRecords); 75 | 76 | List<String> copiedParquet = read( 77 | Paths.get( 78 | hadoopConfiguration.defaultFsBaseUri().getPath(), 79 | paths.tableParquetLocation(scale, "gensort_data")), 80 | "parquet"); 81 | assertThat(copiedParquet).hasSameElementsAs(generatedLines); 82 | 83 | SortBenchmarkQuery query = new SortBenchmarkQuery(sparkSession); 84 | // Should not throw. We can't assert sortedness since the data could be saved in multiple partitions.
85 | query.save(paths.experimentResultLocation(TestIdentifiers.create("gensort", scale), 0)); 86 | } 87 | 88 | private List<String> read(Path path, String format) { 89 | return sparkSession 90 | .read() 91 | .option("delimiter", "\n") 92 | .format(format) 93 | .load(path.toString()) 94 | .collectAsList() 95 | .stream() 96 | .map(row -> row.getString(0)) 97 | .collect(Collectors.toList()); 98 | } 99 | } 100 | -------------------------------------------------------------------------------- /spark-tpcds-benchmark-runner/src/main/java/com/palantir/spark/benchmark/config/HadoopConfiguration.java: -------------------------------------------------------------------------------- 1 | /* 2 | * (c) Copyright 2019 Palantir Technologies Inc. All rights reserved. 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License. 6 | * You may obtain a copy of the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | * 10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | * See the License for the specific language governing permissions and 14 | * limitations under the License. 15 | */ 16 | 17 | package com.palantir.spark.benchmark.config; 18 | 19 | import com.fasterxml.jackson.databind.annotation.JsonDeserialize; 20 | import com.palantir.common.streams.KeyedStream; 21 | import com.palantir.logsafe.SafeArg; 22 | import com.palantir.logsafe.exceptions.SafeIllegalArgumentException; 23 | import com.palantir.logsafe.exceptions.SafeRuntimeException; 24 | import com.palantir.spark.benchmark.immutables.ImmutablesConfigStyle; 25 | import java.io.File; 26 | import java.net.MalformedURLException; 27 | import java.net.URI; 28 | import java.nio.file.Path; 29 | import java.util.List; 30 | import java.util.Map; 31 | import java.util.Optional; 32 | import org.apache.hadoop.conf.Configuration; 33 | import org.apache.hadoop.fs.CommonConfigurationKeys; 34 | import org.immutables.value.Value; 35 | 36 | @Value.Immutable 37 | @ImmutablesConfigStyle 38 | @JsonDeserialize(as = ImmutableHadoopConfiguration.class) 39 | public interface HadoopConfiguration { 40 | List<Path> hadoopConfDirs(); 41 | 42 | Map<String, String> hadoopConf(); 43 | 44 | String defaultFilesystem(); 45 | 46 | Optional<String> metricsFileSystem(); 47 | 48 | Map<String, FilesystemConfiguration> filesystems(); 49 | 50 | @Value.Derived 51 | default String defaultFsBaseUriString() { 52 | return getFilesystemBaseUriOrThrow(defaultFilesystem()); 53 | } 54 | 55 | @Value.Derived 56 | default String defaultMetricsBaseUriString() { 57 | return getFilesystemBaseUriOrThrow(metricsFileSystem().orElseGet(this::defaultFilesystem)); 58 | } 59 | 60 | default String getFilesystemBaseUriOrThrow(String filesystemName) { 61 | return Optional.ofNullable(filesystems().get(filesystemName)) 62 | .orElseThrow(() -> new SafeIllegalArgumentException( 63 | "Specified filesystem is not configured", SafeArg.of("filesystem", filesystemName))) 64 | .baseUri(); 65 | } 66 | 67 | @Value.Derived 68 | default URI defaultFsBaseUri() { 69 | return new org.apache.hadoop.fs.Path(defaultFsBaseUriString()).toUri(); 70 | } 71 | 72 | @Value.Derived 73 | default Configuration toHadoopConf() { 74 | Configuration hadoopConf = new Configuration(); 75 | 76 | // first load the values from xml in the provided directories 77 | for (Path hadoopConfDir : hadoopConfDirs()) { 78 | try { 79 | hadoopConf =
loadConfFromFile(hadoopConf, hadoopConfDir.toFile()); 80 | } catch (MalformedURLException e) { 81 | throw new SafeRuntimeException("Malformed URL when parsing Hadoop config", e); 82 | } 83 | } 84 | 85 | // then load the free-form config overrides 86 | hadoopConf().forEach(hadoopConf::set); 87 | 88 | // finally, apply the filesystem settings 89 | KeyedStream.ofEntries( 90 | filesystems().values().stream().flatMap(fsConf -> fsConf.toHadoopConf().entrySet().stream())) 91 | .collectToMap() 92 | .forEach(hadoopConf::set); 93 | hadoopConf.set(CommonConfigurationKeys.FS_DEFAULT_NAME_KEY, defaultFsBaseUriString()); 94 | return hadoopConf; 95 | } 96 | 97 | static Configuration loadConfFromFile(Configuration conf, File confFile) throws MalformedURLException { 98 | Configuration resolvedConfiguration = conf; 99 | if (confFile.isDirectory()) { 100 | for (File child : Optional.ofNullable(confFile.listFiles()).orElse(new File[0])) { 101 | resolvedConfiguration = loadConfFromFile(resolvedConfiguration, child); 102 | } 103 | } else if (confFile.isFile() && confFile.getName().endsWith(".xml")) { 104 | resolvedConfiguration.addResource(confFile.toURI().toURL()); 105 | } 106 | return resolvedConfiguration; 107 | } 108 | 109 | class Builder extends ImmutableHadoopConfiguration.Builder {} 110 | 111 | static Builder builder() { 112 | return new Builder(); 113 | } 114 | } 115 | -------------------------------------------------------------------------------- /spark-tpcds-benchmark-runner/src/main/java/com/palantir/spark/benchmark/schemas/Schemas.java: -------------------------------------------------------------------------------- 1 | /* 2 | * (c) Copyright 2019 Palantir Technologies Inc. All rights reserved. 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License. 6 | * You may obtain a copy of the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | * 10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | * See the License for the specific language governing permissions and 14 | * limitations under the License. 
15 | */ 16 | 17 | package com.palantir.spark.benchmark.schemas; 18 | 19 | import com.google.common.base.CharMatcher; 20 | import com.google.common.base.Splitter; 21 | import com.google.common.base.Suppliers; 22 | import com.google.common.collect.ImmutableList; 23 | import com.google.common.io.CharStreams; 24 | import com.palantir.logsafe.Preconditions; 25 | import com.palantir.spark.benchmark.constants.TpcdsTable; 26 | import com.palantir.spark.benchmark.datagen.TpcdsDataGenerator; 27 | import java.io.IOException; 28 | import java.io.InputStream; 29 | import java.io.InputStreamReader; 30 | import java.nio.charset.StandardCharsets; 31 | import java.util.List; 32 | import java.util.Map; 33 | import java.util.concurrent.ConcurrentHashMap; 34 | import java.util.function.Supplier; 35 | import java.util.regex.Matcher; 36 | import java.util.regex.Pattern; 37 | import java.util.stream.Collectors; 38 | import org.apache.spark.sql.types.DataType; 39 | import org.apache.spark.sql.types.DataTypes; 40 | import org.apache.spark.sql.types.StructField; 41 | import org.apache.spark.sql.types.StructType; 42 | 43 | public final class Schemas { 44 | private static final Pattern DECIMAL_PATTERN = Pattern.compile("decimal\\((\\d+),(\\d+)\\)"); 45 | 46 | private final Map<TpcdsTable, StructType> schemas = new ConcurrentHashMap<>(); 47 | private final Supplier<String> cachedSqlSchemaDefinition = Suppliers.memoize(Schemas::getSqlSchemaDefinition); 48 | 49 | public StructType getSchema(TpcdsTable table) { 50 | return schemas.computeIfAbsent(table, this::doGetSchema); 51 | } 52 | 53 | public StructType getGensortSchema() { 54 | return DataTypes.createStructType( 55 | ImmutableList.of(DataTypes.createStructField("record", DataTypes.StringType, false))); 56 | } 57 | 58 | private StructType doGetSchema(TpcdsTable table) { 59 | String sqlSchemaDefinition = cachedSqlSchemaDefinition.get(); 60 | 61 | Pattern pattern = Pattern.compile(String.format("create table %s\\n\\((.*?)\\);", table), Pattern.DOTALL); 62 | Matcher matcher = pattern.matcher(sqlSchemaDefinition); 63 | Preconditions.checkArgument(matcher.find(), "SQL schema definition is ill-formatted"); 64 | String group = matcher.group(1); 65 | 66 | List<String> lines = Splitter.on('\n').splitToList(group); 67 | List<StructField> structFields = lines.stream() 68 | .filter(line -> !line.contains("primary key")) 69 | .filter(line -> !line.isEmpty()) 70 | .map(line -> 71 | Splitter.on(CharMatcher.whitespace()).omitEmptyStrings().splitToList(line)) 72 | .map(groups -> DataTypes.createStructField(groups.get(0), toSparkType(groups.get(1)), true)) 73 | .collect(Collectors.toList()); 74 | 75 | return DataTypes.createStructType(structFields); 76 | } 77 | 78 | private static DataType toSparkType(String sqlType) { 79 | if (sqlType.equals("integer")) { 80 | return DataTypes.IntegerType; 81 | } else if (sqlType.equals("date")) { 82 | return DataTypes.DateType; 83 | } else if (sqlType.equals("time")) { 84 | return DataTypes.TimestampType; 85 | } else if (sqlType.contains("char")) { 86 | return DataTypes.StringType; 87 | } 88 | 89 | Matcher matcher = DECIMAL_PATTERN.matcher(sqlType); 90 | if (matcher.find()) { 91 | return DataTypes.createDecimalType(Integer.valueOf(matcher.group(1)), Integer.valueOf(matcher.group(2))); 92 | } 93 | throw new RuntimeException("Unknown sqlType: " + sqlType); 94 | } 95 | 96 | private static String getSqlSchemaDefinition() { 97 | try (InputStream schemaSqlDefinition = 98 | TpcdsDataGenerator.class.getClassLoader().getResourceAsStream("tpcds.sql"); 99 | InputStreamReader schemaSqlReader = 100 | new
InputStreamReader(schemaSqlDefinition, StandardCharsets.UTF_8)) { 101 | return CharStreams.toString(schemaSqlReader); 102 | } catch (IOException e) { 103 | throw new RuntimeException(e); 104 | } 105 | } 106 | } 107 | -------------------------------------------------------------------------------- /spark-tpcds-benchmark-runner/src/main/java/com/palantir/spark/benchmark/metrics/BenchmarkMetric.java: -------------------------------------------------------------------------------- 1 | /* 2 | * (c) Copyright 2019 Palantir Technologies Inc. All rights reserved. 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License. 6 | * You may obtain a copy of the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | * 10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | * See the License for the specific language governing permissions and 14 | * limitations under the License. 15 | */ 16 | 17 | package com.palantir.spark.benchmark.metrics; 18 | 19 | import com.fasterxml.jackson.databind.annotation.JsonDeserialize; 20 | import com.fasterxml.jackson.databind.annotation.JsonSerialize; 21 | import com.google.common.collect.ImmutableList; 22 | import com.palantir.spark.benchmark.immutables.ImmutablesStyle; 23 | import com.palantir.spark.benchmark.queries.QuerySessionIdentifier; 24 | import java.io.Serializable; 25 | import java.util.Map; 26 | import java.util.Optional; 27 | import java.util.stream.Stream; 28 | import org.apache.spark.sql.Row; 29 | import org.apache.spark.sql.Row$; 30 | import org.apache.spark.sql.types.DataTypes; 31 | import org.apache.spark.sql.types.Metadata; 32 | import org.apache.spark.sql.types.StructField; 33 | import org.apache.spark.sql.types.StructType; 34 | import org.immutables.value.Value; 35 | import scala.collection.JavaConverters; 36 | 37 | @Value.Immutable 38 | @ImmutablesStyle 39 | @JsonSerialize(as = ImmutableBenchmarkMetric.class) 40 | @JsonDeserialize(as = ImmutableBenchmarkMetric.class) 41 | public abstract class BenchmarkMetric implements Serializable { 42 | 43 | public abstract String experimentName(); 44 | 45 | public abstract String queryName(); 46 | 47 | public abstract int scale(); 48 | 49 | public abstract String sparkVersion(); 50 | 51 | public abstract int executorInstances(); 52 | 53 | public abstract long executorMemoryMb(); 54 | 55 | public abstract int executorCores(); 56 | 57 | public abstract String applicationId(); 58 | 59 | public abstract long durationMillis(); 60 | 61 | public abstract Map<String, String> sparkConf(); 62 | 63 | public abstract long experimentStartTimestampMillis(); 64 | 65 | public abstract long experimentEndTimestampMillis(); 66 | 67 | public abstract Optional<Boolean> failedVerification(); 68 | 69 | public abstract Optional<String> sessionId(); 70 | 71 | public abstract Optional<Integer> iteration(); 72 | 73 | public abstract Optional<Integer> attempt(); 74 | 75 | public static StructType schema() { 76 | return new StructType(Stream.of( 77 | new StructField("experimentName", DataTypes.StringType, false, Metadata.empty()), 78 | new StructField("queryName", DataTypes.StringType, false, Metadata.empty()), 79 | new StructField("scale", DataTypes.IntegerType, false, Metadata.empty()), 80 | new StructField("sparkVersion", DataTypes.StringType, false, Metadata.empty()), 81 | new StructField("executorInstances",
DataTypes.IntegerType, true, Metadata.empty()), 82 | new StructField("executorMemoryMb", DataTypes.LongType, false, Metadata.empty()), 83 | new StructField("executorCores", DataTypes.IntegerType, false, Metadata.empty()), 84 | new StructField("applicationId", DataTypes.StringType, false, Metadata.empty()), 85 | new StructField("durationMillis", DataTypes.LongType, false, Metadata.empty()), 86 | new StructField( 87 | "sparkConf", 88 | DataTypes.createMapType(DataTypes.StringType, DataTypes.StringType), 89 | true, 90 | Metadata.empty()), 91 | new StructField("experimentStartTimestamp", DataTypes.TimestampType, false, Metadata.empty()), 92 | new StructField("experimentEndTimestamp", DataTypes.TimestampType, false, Metadata.empty()), 93 | new StructField("failedVerification", DataTypes.BooleanType, true, Metadata.empty()), 94 | new StructField("sessionId", DataTypes.StringType, true, Metadata.empty()), 95 | new StructField("iteration", DataTypes.IntegerType, true, Metadata.empty()), 96 | new StructField("attempt", DataTypes.IntegerType, true, Metadata.empty())) 97 | .toArray(StructField[]::new)); 98 | } 99 | 100 | public final Row toRow() { 101 | return Row$.MODULE$.apply(JavaConverters.asScalaBufferConverter(ImmutableList.of( 102 | experimentName(), 103 | queryName(), 104 | scale(), 105 | sparkVersion(), 106 | executorInstances(), 107 | executorMemoryMb(), 108 | executorCores(), 109 | applicationId(), 110 | durationMillis(), 111 | JavaConverters.mapAsScalaMapConverter(sparkConf()).asScala(), 112 | new java.sql.Timestamp(experimentStartTimestampMillis()), 113 | new java.sql.Timestamp(experimentEndTimestampMillis()), 114 | failedVerification().orElse(false), 115 | sessionId().orElse(QuerySessionIdentifier.NO_SESSION), 116 | iteration().orElse(-1), 117 | attempt().orElse(-1))) 118 | .asScala()); 119 | } 120 | 121 | public static Builder builder() { 122 | return new Builder(); 123 | } 124 | 125 | public static final class Builder extends ImmutableBenchmarkMetric.Builder {} 126 | } 127 | -------------------------------------------------------------------------------- /spark-tpcds-benchmark-runner/src/main/java/com/palantir/spark/benchmark/BenchmarkRunner.java: -------------------------------------------------------------------------------- 1 | /* 2 | * (c) Copyright 2019 Palantir Technologies Inc. All rights reserved. 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License. 6 | * You may obtain a copy of the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | * 10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | * See the License for the specific language governing permissions and 14 | * limitations under the License. 
15 | */ 16 | 17 | package com.palantir.spark.benchmark; 18 | 19 | import com.google.common.util.concurrent.ThreadFactoryBuilder; 20 | import com.palantir.spark.benchmark.config.BenchmarkRunnerConfig; 21 | import com.palantir.spark.benchmark.correctness.TpcdsQueryCorrectnessChecks; 22 | import com.palantir.spark.benchmark.datagen.DefaultParquetTransformer; 23 | import com.palantir.spark.benchmark.datagen.GenSortDataGenerator; 24 | import com.palantir.spark.benchmark.datagen.SortDataGenerator; 25 | import com.palantir.spark.benchmark.datagen.TpcdsDataGenerator; 26 | import com.palantir.spark.benchmark.metrics.BenchmarkMetrics; 27 | import com.palantir.spark.benchmark.paths.BenchmarkPaths; 28 | import com.palantir.spark.benchmark.registration.TableRegistration; 29 | import com.palantir.spark.benchmark.schemas.Schemas; 30 | import com.palantir.spark.benchmark.util.FileSystems; 31 | import java.nio.file.Path; 32 | import java.nio.file.Paths; 33 | import java.time.Instant; 34 | import java.util.concurrent.ExecutorService; 35 | import java.util.concurrent.Executors; 36 | import org.apache.hadoop.conf.Configuration; 37 | import org.apache.hadoop.fs.FileSystem; 38 | import org.apache.spark.SparkConf; 39 | import org.apache.spark.sql.SparkSession; 40 | 41 | /** The main entry point for the application. */ 42 | public final class BenchmarkRunner { 43 | 44 | private static final Path DEFAULT_CONFIG_FILE = Paths.get("var", "conf", "config.yml"); 45 | 46 | private BenchmarkRunner() {} 47 | 48 | public static void main(String[] args) throws Exception { 49 | Path configFile; 50 | if (args.length == 0) { 51 | configFile = DEFAULT_CONFIG_FILE; 52 | } else { 53 | configFile = Paths.get(args[0]); 54 | } 55 | BenchmarkRunnerConfig config = BenchmarkRunnerConfig.parse(configFile); 56 | Configuration hadoopConf = config.hadoop().toHadoopConf(); 57 | try (FileSystem dataFileSystem = 58 | FileSystems.createFileSystem(config.hadoop().defaultFsBaseUri(), hadoopConf)) { 59 | SparkConf sparkConf = new SparkConf().setMaster(config.spark().master()); 60 | config.spark().sparkConf().forEach(sparkConf::set); 61 | hadoopConf.forEach(confEntry -> 62 | sparkConf.set(String.format("spark.hadoop.%s", confEntry.getKey()), confEntry.getValue())); 63 | 64 | // Force turn off dynamic allocation for consistent results 65 | if (!config.spark().master().startsWith("local")) { 66 | sparkConf.set("spark.dynamicAllocation.enabled", "false"); 67 | sparkConf.set( 68 | "spark.executor.instances", 69 | Integer.toString(config.spark().executorInstances())); 70 | sparkConf.set( 71 | "spark.executor.cores", Integer.toString(config.spark().executorCores())); 72 | sparkConf.set("spark.executor.memory", config.spark().executorMemory()); 73 | } 74 | 75 | SparkSession spark = SparkSession.builder().config(sparkConf).getOrCreate(); 76 | String experimentName = config.benchmarks().experimentName().orElseGet(() -> Instant.now() 77 | .toString()); 78 | BenchmarkPaths paths = new BenchmarkPaths(experimentName); 79 | Schemas schemas = new Schemas(); 80 | TableRegistration registration = new TableRegistration(paths, dataFileSystem, spark, schemas); 81 | ExecutorService dataGeneratorThreadPool = Executors.newFixedThreadPool( 82 | config.dataGeneration().parallelism(), 83 | new ThreadFactoryBuilder() 84 | .setDaemon(true) 85 | .setNameFormat("data-generator-%d") 86 | .build()); 87 | DefaultParquetTransformer parquetTransformer = new DefaultParquetTransformer(); 88 | TpcdsDataGenerator dataGenerator = new TpcdsDataGenerator( 89 | 
Paths.get(config.dataGeneration().tempWorkingDir()), 90 | config.dataScalesGb(), 91 | config.dataGeneration().overwriteData(), 92 | dataFileSystem, 93 | parquetTransformer, 94 | spark, 95 | paths, 96 | schemas, 97 | dataGeneratorThreadPool); 98 | SortDataGenerator sortDataGenerator = new GenSortDataGenerator( 99 | spark, 100 | dataFileSystem, 101 | parquetTransformer, 102 | paths, 103 | schemas, 104 | Paths.get(config.dataGeneration().tempWorkingDir()), 105 | config.dataScalesGb(), 106 | config.dataGeneration().overwriteData(), 107 | dataGeneratorThreadPool); 108 | TpcdsQueryCorrectnessChecks correctness = new TpcdsQueryCorrectnessChecks(paths, dataFileSystem, spark); 109 | BenchmarkMetrics metrics = new BenchmarkMetrics( 110 | config.spark(), config.hadoop().defaultMetricsBaseUriString(), experimentName, paths, spark); 111 | new Benchmark( 112 | config, 113 | dataGenerator, 114 | sortDataGenerator, 115 | registration, 116 | paths, 117 | correctness, 118 | metrics, 119 | spark, 120 | dataFileSystem) 121 | .run(); 122 | } 123 | } 124 | } 125 | -------------------------------------------------------------------------------- /gradlew: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env sh 2 | 3 | # 4 | # Copyright 2015 the original author or authors. 5 | # 6 | # Licensed under the Apache License, Version 2.0 (the "License"); 7 | # you may not use this file except in compliance with the License. 8 | # You may obtain a copy of the License at 9 | # 10 | # https://www.apache.org/licenses/LICENSE-2.0 11 | # 12 | # Unless required by applicable law or agreed to in writing, software 13 | # distributed under the License is distributed on an "AS IS" BASIS, 14 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 15 | # See the License for the specific language governing permissions and 16 | # limitations under the License. 17 | # 18 | 19 | ############################################################################## 20 | ## 21 | ## Gradle start up script for UN*X 22 | ## 23 | ############################################################################## 24 | 25 | # Attempt to set APP_HOME 26 | # Resolve links: $0 may be a link 27 | PRG="$0" 28 | # Need this for relative symlinks. 29 | while [ -h "$PRG" ] ; do 30 | ls=`ls -ld "$PRG"` 31 | link=`expr "$ls" : '.*-> \(.*\)$'` 32 | if expr "$link" : '/.*' > /dev/null; then 33 | PRG="$link" 34 | else 35 | PRG=`dirname "$PRG"`"/$link" 36 | fi 37 | done 38 | SAVED="`pwd`" 39 | cd "`dirname \"$PRG\"`/" >/dev/null 40 | APP_HOME="`pwd -P`" 41 | cd "$SAVED" >/dev/null 42 | 43 | APP_NAME="Gradle" 44 | APP_BASE_NAME=`basename "$0"` 45 | 46 | # Add default JVM options here. You can also use JAVA_OPTS and GRADLE_OPTS to pass JVM options to this script. 47 | DEFAULT_JVM_OPTS='"-Xmx64m" "-Xms64m"' 48 | 49 | # Use the maximum available, or set MAX_FD != -1 to use that value. 50 | MAX_FD="maximum" 51 | 52 | warn () { 53 | echo "$*" 54 | } 55 | 56 | die () { 57 | echo 58 | echo "$*" 59 | echo 60 | exit 1 61 | } 62 | 63 | # OS specific support (must be 'true' or 'false'). 64 | cygwin=false 65 | msys=false 66 | darwin=false 67 | nonstop=false 68 | case "`uname`" in 69 | CYGWIN* ) 70 | cygwin=true 71 | ;; 72 | Darwin* ) 73 | darwin=true 74 | ;; 75 | MINGW* ) 76 | msys=true 77 | ;; 78 | NONSTOP* ) 79 | nonstop=true 80 | ;; 81 | esac 82 | 83 | CLASSPATH=$APP_HOME/gradle/wrapper/gradle-wrapper.jar 84 | 85 | # Determine the Java command to use to start the JVM. 
86 | if [ -n "$JAVA_HOME" ] ; then 87 | if [ -x "$JAVA_HOME/jre/sh/java" ] ; then 88 | # IBM's JDK on AIX uses strange locations for the executables 89 | JAVACMD="$JAVA_HOME/jre/sh/java" 90 | else 91 | JAVACMD="$JAVA_HOME/bin/java" 92 | fi 93 | if [ ! -x "$JAVACMD" ] ; then 94 | die "ERROR: JAVA_HOME is set to an invalid directory: $JAVA_HOME 95 | 96 | Please set the JAVA_HOME variable in your environment to match the 97 | location of your Java installation." 98 | fi 99 | else 100 | JAVACMD="java" 101 | which java >/dev/null 2>&1 || die "ERROR: JAVA_HOME is not set and no 'java' command could be found in your PATH. 102 | 103 | Please set the JAVA_HOME variable in your environment to match the 104 | location of your Java installation." 105 | fi 106 | 107 | # Increase the maximum file descriptors if we can. 108 | if [ "$cygwin" = "false" -a "$darwin" = "false" -a "$nonstop" = "false" ] ; then 109 | MAX_FD_LIMIT=`ulimit -H -n` 110 | if [ $? -eq 0 ] ; then 111 | if [ "$MAX_FD" = "maximum" -o "$MAX_FD" = "max" ] ; then 112 | MAX_FD="$MAX_FD_LIMIT" 113 | fi 114 | ulimit -n $MAX_FD 115 | if [ $? -ne 0 ] ; then 116 | warn "Could not set maximum file descriptor limit: $MAX_FD" 117 | fi 118 | else 119 | warn "Could not query maximum file descriptor limit: $MAX_FD_LIMIT" 120 | fi 121 | fi 122 | 123 | # For Darwin, add options to specify how the application appears in the dock 124 | if $darwin; then 125 | GRADLE_OPTS="$GRADLE_OPTS \"-Xdock:name=$APP_NAME\" \"-Xdock:icon=$APP_HOME/media/gradle.icns\"" 126 | fi 127 | 128 | # For Cygwin or MSYS, switch paths to Windows format before running java 129 | if [ "$cygwin" = "true" -o "$msys" = "true" ] ; then 130 | APP_HOME=`cygpath --path --mixed "$APP_HOME"` 131 | CLASSPATH=`cygpath --path --mixed "$CLASSPATH"` 132 | JAVACMD=`cygpath --unix "$JAVACMD"` 133 | 134 | # We build the pattern for arguments to be converted via cygpath 135 | ROOTDIRSRAW=`find -L / -maxdepth 1 -mindepth 1 -type d 2>/dev/null` 136 | SEP="" 137 | for dir in $ROOTDIRSRAW ; do 138 | ROOTDIRS="$ROOTDIRS$SEP$dir" 139 | SEP="|" 140 | done 141 | OURCYGPATTERN="(^($ROOTDIRS))" 142 | # Add a user-defined pattern to the cygpath arguments 143 | if [ "$GRADLE_CYGPATTERN" != "" ] ; then 144 | OURCYGPATTERN="$OURCYGPATTERN|($GRADLE_CYGPATTERN)" 145 | fi 146 | # Now convert the arguments - kludge to limit ourselves to /bin/sh 147 | i=0 148 | for arg in "$@" ; do 149 | CHECK=`echo "$arg"|egrep -c "$OURCYGPATTERN" -` 150 | CHECK2=`echo "$arg"|egrep -c "^-"` ### Determine if an option 151 | 152 | if [ $CHECK -ne 0 ] && [ $CHECK2 -eq 0 ] ; then ### Added a condition 153 | eval `echo args$i`=`cygpath --path --ignore --mixed "$arg"` 154 | else 155 | eval `echo args$i`="\"$arg\"" 156 | fi 157 | i=$((i+1)) 158 | done 159 | case $i in 160 | (0) set -- ;; 161 | (1) set -- "$args0" ;; 162 | (2) set -- "$args0" "$args1" ;; 163 | (3) set -- "$args0" "$args1" "$args2" ;; 164 | (4) set -- "$args0" "$args1" "$args2" "$args3" ;; 165 | (5) set -- "$args0" "$args1" "$args2" "$args3" "$args4" ;; 166 | (6) set -- "$args0" "$args1" "$args2" "$args3" "$args4" "$args5" ;; 167 | (7) set -- "$args0" "$args1" "$args2" "$args3" "$args4" "$args5" "$args6" ;; 168 | (8) set -- "$args0" "$args1" "$args2" "$args3" "$args4" "$args5" "$args6" "$args7" ;; 169 | (9) set -- "$args0" "$args1" "$args2" "$args3" "$args4" "$args5" "$args6" "$args7" "$args8" ;; 170 | esac 171 | fi 172 | 173 | # Escape application args 174 | save () { 175 | for i do printf %s\\n "$i" | sed "s/'/'\\\\''/g;1s/^/'/;\$s/\$/' \\\\/" ; done 176 | echo " " 177 | } 
178 | APP_ARGS=$(save "$@") 179 | 180 | # Collect all arguments for the java command, following the shell quoting and substitution rules 181 | eval set -- $DEFAULT_JVM_OPTS $JAVA_OPTS $GRADLE_OPTS "\"-Dorg.gradle.appname=$APP_BASE_NAME\"" -classpath "\"$CLASSPATH\"" org.gradle.wrapper.GradleWrapperMain "$APP_ARGS" 182 | 183 | # by default we should be in the correct project dir, but when run from Finder on Mac, the cwd is wrong 184 | if [ "$(uname)" = "Darwin" ] && [ "$HOME" = "$PWD" ]; then 185 | cd "$(dirname "$0")" 186 | fi 187 | 188 | exec "$JAVACMD" "$@" 189 | -------------------------------------------------------------------------------- /spark-tpcds-benchmark-runner/src/main/java/com/palantir/spark/benchmark/correctness/TpcdsQueryCorrectnessChecks.java: -------------------------------------------------------------------------------- 1 | /* 2 | * (c) Copyright 2019 Palantir Technologies Inc. All rights reserved. 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License. 6 | * You may obtain a copy of the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | * 10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | * See the License for the specific language governing permissions and 14 | * limitations under the License. 15 | */ 16 | 17 | package com.palantir.spark.benchmark.correctness; 18 | 19 | import com.google.common.hash.HashCode; 20 | import com.google.common.hash.Hashing; 21 | import com.google.common.io.ByteStreams; 22 | import com.palantir.spark.benchmark.paths.BenchmarkPaths; 23 | import java.io.IOException; 24 | import java.io.InputStream; 25 | import java.io.ObjectInputStream; 26 | import java.io.ObjectOutputStream; 27 | import java.io.OutputStream; 28 | import java.io.Serializable; 29 | import java.util.Arrays; 30 | import java.util.Optional; 31 | import java.util.stream.Stream; 32 | import org.apache.hadoop.fs.FileSystem; 33 | import org.apache.hadoop.fs.Path; 34 | import org.apache.spark.api.java.function.Function; 35 | import org.apache.spark.api.java.function.Function2; 36 | import org.apache.spark.sql.Dataset; 37 | import org.apache.spark.sql.Row; 38 | import org.apache.spark.sql.SparkSession; 39 | import org.apache.spark.sql.types.StructType; 40 | import org.slf4j.Logger; 41 | import org.slf4j.LoggerFactory; 42 | 43 | public final class TpcdsQueryCorrectnessChecks { 44 | 45 | private static final Logger log = LoggerFactory.getLogger(TpcdsQueryCorrectnessChecks.class); 46 | 47 | private final BenchmarkPaths paths; 48 | private final FileSystem dataFileSystem; 49 | private final SparkSession spark; 50 | 51 | public TpcdsQueryCorrectnessChecks(BenchmarkPaths paths, FileSystem dataFileSystem, SparkSession spark) { 52 | this.paths = paths; 53 | this.dataFileSystem = dataFileSystem; 54 | this.spark = spark; 55 | } 56 | 57 | public void verifyCorrectness( 58 | int scale, String queryName, String sqlStatement, StructType resultSchema, String resultsPath) 59 | throws IOException { 60 | spark.sparkContext().setJobDescription(String.format("%s-table-hash-correctness", queryName)); 61 | Dataset<Row> writtenResult = 62 | spark.read().format("parquet").schema(resultSchema).load(resultsPath); 63 | byte[] resultHash = writtenResult 64 | .javaRDD() 65 | .map(SingleHashFunction.INSTANCE) 66 |
.map(SerializableOptional::of) 67 | .fold(SerializableOptional.empty(), CombineHashFunction.INSTANCE) 68 | .optional 69 | .map(HashCode::asBytes) 70 | .orElse(new byte[] {}); 71 | Path hashCodePath = new Path(paths.experimentCorrectnessHashesLocation(scale, queryName)); 72 | if (dataFileSystem.isFile(hashCodePath)) { 73 | try (InputStream previousHashCodeInput = dataFileSystem.open(hashCodePath)) { 74 | byte[] previousHashCodeBytes = ByteStreams.toByteArray(previousHashCodeInput); 75 | if (!Arrays.equals(resultHash, previousHashCodeBytes)) { 76 | throw new ExperimentResultsIncorrectException(String.format( 77 | "Experiment results were incorrect.\n" 78 | + "Experiment name: %s\n" 79 | + "Experiment scale: %d\n" 80 | + "Experiment results path: %s\n" 81 | + "Experiment sql:\n\n%s", 82 | queryName, scale, resultsPath, sqlStatement)); 83 | } 84 | } 85 | } else { 86 | try (OutputStream currentHashCodeOutput = dataFileSystem.create(hashCodePath, true)) { 87 | currentHashCodeOutput.write(resultHash); 88 | } 89 | } 90 | } 91 | 92 | private static final class SingleHashFunction implements Function<Row, HashCode>, Serializable { 93 | 94 | static final Function<Row, HashCode> INSTANCE = new SingleHashFunction(); 95 | 96 | @Override 97 | public HashCode call(Row row) { 98 | return HashCode.fromLong(row.hashCode()); 99 | } 100 | } 101 | 102 | private static final class CombineHashFunction 103 | implements Function2< 104 | SerializableOptional<HashCode>, 105 | SerializableOptional<HashCode>, 106 | SerializableOptional<HashCode>>, 107 | Serializable { 108 | 109 | static final CombineHashFunction INSTANCE = new CombineHashFunction(); 110 | 111 | @Override 112 | public SerializableOptional<HashCode> call( 113 | SerializableOptional<HashCode> first, SerializableOptional<HashCode> second) { 114 | if (first.optional.isPresent() && second.optional.isPresent()) { 115 | return SerializableOptional.of(Hashing.combineUnordered(() -> 116 | Stream.of(first.optional.get(), second.optional.get()).iterator())); 117 | } else if (!first.optional.isPresent() && !second.optional.isPresent()) { 118 | return SerializableOptional.empty(); 119 | } else if (first.optional.isPresent()) { 120 | return first; 121 | } else { 122 | return second; 123 | } 124 | } 125 | } 126 | 127 | private static final class ExperimentResultsIncorrectException extends RuntimeException { 128 | 129 | ExperimentResultsIncorrectException(String message) { 130 | super(message); 131 | } 132 | } 133 | 134 | private static final class SerializableOptional<T> implements Serializable { 135 | private Optional<T> optional; 136 | 137 | private SerializableOptional(T value) { 138 | this.optional = Optional.ofNullable(value); 139 | } 140 | 141 | private SerializableOptional(Optional<T> value) { 142 | this.optional = value; 143 | } 144 | 145 | static <T> SerializableOptional<T> empty() { 146 | return of(Optional.empty()); 147 | } 148 | 149 | static <T> SerializableOptional<T> of(T value) { 150 | return new SerializableOptional<>(value); 151 | } 152 | 153 | static <T> SerializableOptional<T> of(Optional<T> value) { 154 | return new SerializableOptional<>(value); 155 | } 156 | 157 | private void readObject(ObjectInputStream input) throws IOException, ClassNotFoundException { 158 | boolean isPresent = input.readBoolean(); 159 | if (isPresent) { 160 | optional = Optional.of((T) input.readObject()); 161 | } else { 162 | optional = Optional.empty(); 163 | } 164 | } 165 | 166 | private void writeObject(ObjectOutputStream output) throws IOException { 167 | if (optional.isPresent()) { 168 | output.writeBoolean(true); 169 | output.writeObject(optional.get()); 170 | } else { 171 |
output.writeBoolean(false); 172 | } 173 | } 174 | } 175 | } 176 | -------------------------------------------------------------------------------- /spark-tpcds-benchmark-runner/src/main/java/com/palantir/spark/benchmark/util/DataGenUtils.java: -------------------------------------------------------------------------------- 1 | /* 2 | * (c) Copyright 2020 Palantir Technologies Inc. All rights reserved. 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License. 6 | * You may obtain a copy of the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | * 10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | * See the License for the specific language governing permissions and 14 | * limitations under the License. 15 | */ 16 | 17 | package com.palantir.spark.benchmark.util; 18 | 19 | import com.google.common.util.concurrent.ListenableFuture; 20 | import com.google.common.util.concurrent.ListeningExecutorService; 21 | import com.palantir.logsafe.SafeArg; 22 | import com.palantir.logsafe.exceptions.SafeRuntimeException; 23 | import java.io.BufferedInputStream; 24 | import java.io.File; 25 | import java.io.FileInputStream; 26 | import java.io.FileNotFoundException; 27 | import java.io.FileOutputStream; 28 | import java.io.IOException; 29 | import java.nio.file.Files; 30 | import java.nio.file.Path; 31 | import java.util.Optional; 32 | import java.util.stream.Collectors; 33 | import java.util.stream.Stream; 34 | import java.util.zip.GZIPInputStream; 35 | import org.apache.commons.compress.archivers.tar.TarArchiveEntry; 36 | import org.apache.commons.compress.archivers.tar.TarArchiveInputStream; 37 | import org.apache.commons.io.IOUtils; 38 | import org.apache.hadoop.fs.FileSystem; 39 | import org.apache.hadoop.fs.FileUtil; 40 | import org.slf4j.Logger; 41 | import org.slf4j.LoggerFactory; 42 | 43 | public final class DataGenUtils { 44 | private static final Logger log = LoggerFactory.getLogger(DataGenUtils.class); 45 | 46 | private DataGenUtils() {} 47 | 48 | public static void makeFileExecutable(Path genSortFile) { 49 | if (!genSortFile.toFile().canExecute() && !genSortFile.toFile().setExecutable(true, true)) { 50 | throw new IllegalStateException( 51 | String.format("Could not make the gensort binary at %s executable.", genSortFile)); 52 | } 53 | } 54 | 55 | /** 56 | * Extracts the tar ball at tgzPath into binDir. Returns the file inside the tgz 57 | * that is equal to binaryName. If no file exists that matches binaryName, FileNotFoundException is thrown. 
58 | */ 59 | public static Path extractBinary(Path tgzPath, String binaryName, Path binDir) throws IOException { 60 | Path dsdgenFile = null; 61 | try (FileInputStream rawTarInput = new FileInputStream(tgzPath.toFile()); 62 | BufferedInputStream bufferingInput = new BufferedInputStream(rawTarInput); 63 | GZIPInputStream decompressingInput = new GZIPInputStream(bufferingInput); 64 | TarArchiveInputStream untarringInput = new TarArchiveInputStream(decompressingInput)) { 65 | TarArchiveEntry entry; 66 | while ((entry = untarringInput.getNextTarEntry()) != null) { 67 | Path outputPath = binDir.resolve(entry.getName()); 68 | if (entry.isDirectory()) { 69 | Files.createDirectory(outputPath); 70 | } else { 71 | try (FileOutputStream output = new FileOutputStream(outputPath.toFile())) { 72 | IOUtils.copy(untarringInput, output); 73 | } 74 | } 75 | if (outputPath.toFile().getName().equals(binaryName)) { 76 | dsdgenFile = outputPath; 77 | } 78 | } 79 | } catch (Exception e) { 80 | throw new RuntimeException( 81 | String.format( 82 | "Failed to extract tpcds tar at %s", 83 | tgzPath.toFile().getAbsolutePath()), 84 | e); 85 | } 86 | if (dsdgenFile == null) { 87 | throw new FileNotFoundException( 88 | "Dsdgen binary was not found in the tarball; was this benchmark runner packaged correctly?"); 89 | } 90 | return dsdgenFile; 91 | } 92 | 93 | public static boolean shouldWriteData( 94 | FileSystem destinationFileSystem, 95 | int scale, 96 | org.apache.hadoop.fs.Path destinationPath, 97 | boolean shouldOverwriteData) { 98 | try { 99 | if (!destinationFileSystem.exists(destinationPath) || shouldOverwriteData) { 100 | deleteDestinationPathIfNecessary(destinationFileSystem, destinationPath); 101 | log.info( 102 | "Writing data at path {} for the given scale of {}.", 103 | SafeArg.of("dataPath", destinationPath), 104 | SafeArg.of("dataScale", scale)); 105 | return true; 106 | } else { 107 | log.info( 108 | "Not overwriting data at path {} for the given scale of {}.", 109 | SafeArg.of("dataPath", destinationPath), 110 | SafeArg.of("dataScale", scale)); 111 | return false; 112 | } 113 | } catch (IOException e) { 114 | throw new SafeRuntimeException(e); 115 | } 116 | } 117 | 118 | private static void deleteDestinationPathIfNecessary( 119 | FileSystem destinationFileSystem, org.apache.hadoop.fs.Path destinationPath) throws IOException { 120 | if (destinationFileSystem.isDirectory(destinationPath) 121 | && !destinationFileSystem.delete(destinationPath, true)) { 122 | throw new IllegalStateException( 123 | String.format("Failed to clear data file directory at %s.", destinationPath)); 124 | } 125 | } 126 | 127 | public static void uploadFiles( 128 | FileSystem destinationFileSystem, 129 | String rootDestinationPath, 130 | File sourceDir, 131 | ListeningExecutorService uploaderThreadPool) { 132 | Optional.ofNullable(sourceDir.listFiles()) 133 | .map(Stream::of) 134 | .orElseGet(Stream::empty) 135 | .map(file -> { 136 | ListenableFuture<?> uploadTask = uploaderThreadPool.submit(() -> { 137 | try { 138 | FileUtil.copy( 139 | file, 140 | destinationFileSystem, 141 | new org.apache.hadoop.fs.Path(rootDestinationPath, file.getName()), 142 | true, 143 | destinationFileSystem.getConf()); 144 | } catch (IOException e) { 145 | throw new RuntimeException(e); 146 | } 147 | }); 148 | uploadTask.addListener( 149 | () -> log.info( 150 | "Finished uploading file to the Hadoop File System: {}, {}", 151 | SafeArg.of("localFilePath", file), 152 | SafeArg.of( 153 | "destination", 154 | new org.apache.hadoop.fs.Path(rootDestinationPath,
file.getName()))), 155 | uploaderThreadPool); 156 | return uploadTask; 157 | }) 158 | .collect(Collectors.toList()) 159 | .forEach(MoreFutures::join); 160 | } 161 | } 162 | -------------------------------------------------------------------------------- /.baseline/eclipse/static/dotfile.settings/org.eclipse.jdt.ui.prefs: -------------------------------------------------------------------------------- 1 | eclipse.preferences.version=1 2 | editor_save_participant_org.eclipse.jdt.ui.postsavelistener.cleanup=true 3 | formatter_profile=_Baseline Profile 4 | formatter_settings_version=12 5 | org.eclipse.jdt.ui.exception.name=e 6 | org.eclipse.jdt.ui.gettersetter.use.is=false 7 | org.eclipse.jdt.ui.ignorelowercasenames=true 8 | org.eclipse.jdt.ui.importorder=; 9 | org.eclipse.jdt.ui.javadoc=false 10 | org.eclipse.jdt.ui.keywordthis=false 11 | org.eclipse.jdt.ui.ondemandthreshold=999 12 | org.eclipse.jdt.ui.overrideannotation=true 13 | org.eclipse.jdt.ui.staticondemandthreshold=999 14 | org.eclipse.jdt.ui.text.custom_code_templates=