├── .travis.yml
├── project
│   ├── plugins.sbt
│   └── build.properties
├── .gitignore
├── src
│   ├── test
│   │   ├── resources
│   │   │   └── log4j.properties
│   │   └── scala
│   │       └── com
│   │           └── granturing
│   │               └── spark
│   │                   └── powerbi
│   │                       ├── ClientSuite.scala
│   │                       └── PowerBISuite.scala
│   └── main
│       └── scala
│           └── com
│               └── granturing
│                   └── spark
│                       └── powerbi
│                           ├── OAuthToken.scala
│                           ├── DefaultSource.scala
│                           ├── ClientConf.scala
│                           ├── package.scala
│                           └── Client.scala
├── README.md
├── scalastyle-config.xml
└── LICENSE

--------------------------------------------------------------------------------
/.travis.yml:
--------------------------------------------------------------------------------
1 | language: scala
2 | scala:
3 | - 2.10.4
--------------------------------------------------------------------------------
/project/plugins.sbt:
--------------------------------------------------------------------------------
1 | resolvers += "Spark Package Main Repo" at "https://dl.bintray.com/spark-packages/maven"
2 | 
3 | addSbtPlugin("org.scalastyle" %% "scalastyle-sbt-plugin" % "0.6.0")
4 | 
5 | addSbtPlugin("org.spark-packages" % "sbt-spark-package" % "0.2.0")
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | *.class
2 | *.log
3 | 
4 | # sbt specific
5 | .cache/
6 | .history/
7 | .lib/
8 | dist/*
9 | target/
10 | lib_managed/
11 | src_managed/
12 | project/boot/
13 | project/plugins/project/
14 | 
15 | # Scala-IDE specific
16 | .scala_dependencies
17 | .worksheet
--------------------------------------------------------------------------------
/project/build.properties:
--------------------------------------------------------------------------------
1 | # Licensed under the Apache License, Version 2.0 (the "License");
2 | # you may not use this file except in compliance with the License.
3 | # You may obtain a copy of the License at
4 | #
5 | #    http://www.apache.org/licenses/LICENSE-2.0
6 | #
7 | # Unless required by applicable law or agreed to in writing, software
8 | # distributed under the License is distributed on an "AS IS" BASIS,
9 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
10 | # See the License for the specific language governing permissions and
11 | # limitations under the License.
12 | # 13 | sbt.version = 0.13.6 14 | -------------------------------------------------------------------------------- /src/test/resources/log4j.properties: -------------------------------------------------------------------------------- 1 | # Set everything to be logged to the console 2 | log4j.rootCategory=WARN, console 3 | log4j.appender.console=org.apache.log4j.ConsoleAppender 4 | log4j.appender.console.target=System.err 5 | log4j.appender.console.layout=org.apache.log4j.PatternLayout 6 | log4j.appender.console.layout.ConversionPattern=%d{yy/MM/dd HH:mm:ss} %p %c{1}: %m%n 7 | 8 | # Settings to quiet third party logs that are too verbose 9 | log4j.logger.org.eclipse.jetty=WARN 10 | log4j.logger.org.eclipse.jetty.util.component.AbstractLifeCycle=ERROR 11 | log4j.logger.org.apache.spark.repl.SparkIMain$exprTyper=INFO 12 | log4j.logger.org.apache.spark.repl.SparkILoop$SparkILoopInterpreter=INFO 13 | 14 | log4j.logger.com.granturing.spark.powerbi=INFO 15 | -------------------------------------------------------------------------------- /src/test/scala/com/granturing/spark/powerbi/ClientSuite.scala: -------------------------------------------------------------------------------- 1 | /* 2 | * Licensed under the Apache License, Version 2.0 (the "License"); 3 | * you may not use this file except in compliance with the License. 4 | * You may obtain a copy of the License at 5 | * 6 | * http://www.apache.org/licenses/LICENSE-2.0 7 | * 8 | * Unless required by applicable law or agreed to in writing, software 9 | * distributed under the License is distributed on an "AS IS" BASIS, 10 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 11 | * See the License for the specific language governing permissions and 12 | * limitations under the License. 
13 | */ 14 | package com.granturing.spark.powerbi 15 | 16 | import org.apache.spark.SparkConf 17 | import org.scalatest.{BeforeAndAfterAll, Matchers, FunSuite} 18 | import scala.concurrent.Await 19 | 20 | class ClientSuite extends FunSuite with Matchers with BeforeAndAfterAll { 21 | 22 | val clientConf = ClientConf.fromSparkConf(new SparkConf()) 23 | val client = new Client(clientConf) 24 | 25 | val dataset = "PowerBI Spark Test" 26 | var datasetId: String = _ 27 | val group = sys.env.get("POWERBI_GROUP") 28 | var groupId: Option[String] = None 29 | val table = "People" 30 | val tableSchema = Table( 31 | table, Seq( 32 | Column("name", "string"), 33 | Column("age", "Int64"), 34 | Column("birthday", "Datetime"), 35 | Column("timestamp", "Datetime") 36 | )) 37 | 38 | override def beforeAll = { 39 | groupId = group match { 40 | case Some(grp) => { 41 | val grpOpt = Await.result(client.getGroups, clientConf.timeout).filter(g => grp.equals(g.name)).map(_.id).headOption 42 | 43 | grpOpt match { 44 | case Some(g) => Some(g) 45 | case None => sys.error(s"group $grp not found") 46 | } 47 | } 48 | case None => None 49 | } 50 | } 51 | 52 | test("client can list groups") { 53 | val groups = Await.result(client.getGroups, clientConf.timeout) 54 | 55 | groups should not be null 56 | } 57 | 58 | test("client can list datasets") { 59 | val ds = Await.result(client.getDatasets(groupId), clientConf.timeout) 60 | 61 | ds should not be null 62 | } 63 | 64 | } 65 | -------------------------------------------------------------------------------- /src/main/scala/com/granturing/spark/powerbi/OAuthToken.scala: -------------------------------------------------------------------------------- 1 | /* 2 | * Licensed under the Apache License, Version 2.0 (the "License"); 3 | * you may not use this file except in compliance with the License. 4 | * You may obtain a copy of the License at 5 | * 6 | * http://www.apache.org/licenses/LICENSE-2.0 7 | * 8 | * Unless required by applicable law or agreed to in writing, software 9 | * distributed under the License is distributed on an "AS IS" BASIS, 10 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 11 | * See the License for the specific language governing permissions and 12 | * limitations under the License. 
13 | */ 14 | package com.granturing.spark.powerbi 15 | 16 | import java.util.concurrent.{ExecutionException, TimeUnit, Executors} 17 | import com.microsoft.aad.adal4j.{AuthenticationResult, AuthenticationCallback, AuthenticationContext} 18 | import dispatch._ 19 | import org.apache.spark.Logging 20 | import scala.concurrent.{Await, promise} 21 | import scala.util.{Try, Failure, Success} 22 | 23 | private class OAuthReq(token: OAuthTokenHandler) extends (Req => Req) { 24 | 25 | override def apply(req: Req): Req = { 26 | req <:< Map("Authorization" -> s"Bearer ${token()}") 27 | } 28 | 29 | } 30 | 31 | private class OAuthTokenHandler(authConf: ClientConf, initialToken: Option[String] = None) extends Logging { 32 | 33 | private var _token: Option[String] = initialToken 34 | 35 | def apply(refresh: Boolean = false): String = { 36 | _token match { 37 | case Some(s) if !refresh => s 38 | case _ => { 39 | refreshToken match { 40 | case Success(s) => { 41 | _token = Some(s) 42 | s 43 | } 44 | case Failure(e) => throw e 45 | } 46 | } 47 | } 48 | } 49 | 50 | private def refreshToken: Try[String] = { 51 | log.info("refreshing OAuth token") 52 | 53 | val service = Executors.newFixedThreadPool(1); 54 | val context = new AuthenticationContext(authConf.token_uri, true, service) 55 | 56 | val p = promise[AuthenticationResult] 57 | val future = p.future 58 | 59 | context.acquireToken(authConf.resource, authConf.clientid, authConf.username, authConf.password, new AuthenticationCallback { 60 | def onSuccess(result: AuthenticationResult): Unit = { 61 | p.success(result) 62 | } 63 | 64 | def onFailure(ex: Throwable): Unit = { 65 | p.failure(ex) 66 | } 67 | }) 68 | 69 | try { 70 | val result = Await.result(future, authConf.timeout) 71 | 72 | log.info("OAuth token refresh successful") 73 | 74 | Success(result.getAccessToken) 75 | } catch { 76 | case e: ExecutionException => Failure(e.getCause) 77 | case t: Throwable => Failure(t) 78 | } finally { 79 | service.shutdown() 80 | } 81 | 82 | } 83 | 84 | } 85 | 86 | -------------------------------------------------------------------------------- /src/main/scala/com/granturing/spark/powerbi/DefaultSource.scala: -------------------------------------------------------------------------------- 1 | /* 2 | * Licensed under the Apache License, Version 2.0 (the "License"); 3 | * you may not use this file except in compliance with the License. 4 | * You may obtain a copy of the License at 5 | * 6 | * http://www.apache.org/licenses/LICENSE-2.0 7 | * 8 | * Unless required by applicable law or agreed to in writing, software 9 | * distributed under the License is distributed on an "AS IS" BASIS, 10 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 11 | * See the License for the specific language governing permissions and 12 | * limitations under the License. 
13 | */ 14 | package com.granturing.spark.powerbi 15 | 16 | import org.apache.spark.sql.{DataFrame, SaveMode, SQLContext} 17 | import org.apache.spark.sql.sources.{BaseRelation, CreatableRelationProvider} 18 | import scala.concurrent._ 19 | import scala.concurrent.ExecutionContext.Implicits._ 20 | import scala.concurrent.duration.Duration 21 | 22 | class DefaultSource extends CreatableRelationProvider with PowerBISink { 23 | 24 | override def createRelation( 25 | sqlContext: SQLContext, 26 | mode: SaveMode, 27 | parameters: Map[String, String], 28 | data: DataFrame): BaseRelation = { 29 | 30 | val conf = ClientConf.fromSparkConf(sqlContext.sparkContext.getConf) 31 | implicit val client = new Client(conf) 32 | 33 | val dataset = parameters.getOrElse("dataset", sys.error("'dataset' must be specified")) 34 | val table = parameters.getOrElse("table", sys.error("'table' must be specified")) 35 | val batchSize = parameters.getOrElse("batchSize", conf.batchSize.toString).toInt 36 | val group = parameters.get("group") 37 | 38 | val step = for { 39 | groupId <- getGroupId(group) 40 | ds <- getOrCreateDataset(mode, groupId, dataset, table, data.schema) 41 | } yield (groupId, ds) 42 | 43 | val result = step map { case (groupId, ds) => 44 | val fields = data.schema.fieldNames.zipWithIndex 45 | val _conf = conf 46 | val _token = Some(client.currentToken) 47 | val _table = table 48 | val _batchSize = batchSize 49 | 50 | val coalesced = data.rdd.partitions.size > _conf.maxPartitions match { 51 | case true => data.coalesce(_conf.maxPartitions) 52 | case false => data 53 | } 54 | 55 | coalesced foreachPartition { p => 56 | val rows = p map { r => 57 | fields map { case(name, index) => (name -> r(index)) } toMap 58 | } toSeq 59 | 60 | val _client = new Client(_conf, _token) 61 | 62 | val submit = rows. 63 | sliding(_batchSize, _batchSize). 64 | foldLeft(future()) { (fAccum, batch) => 65 | fAccum flatMap { _ => _client.addRows(ds.id, _table, batch, groupId) } } 66 | 67 | submit.onComplete { _ => _client.shutdown() } 68 | 69 | Await.result(submit, _conf.timeout) 70 | } 71 | } 72 | 73 | result.onComplete { _ => client.shutdown() } 74 | 75 | Await.result(result, Duration.Inf) 76 | 77 | new BaseRelation { 78 | val sqlContext = data.sqlContext 79 | 80 | val schema = data.schema 81 | } 82 | } 83 | 84 | } 85 | -------------------------------------------------------------------------------- /src/main/scala/com/granturing/spark/powerbi/ClientConf.scala: -------------------------------------------------------------------------------- 1 | /* 2 | * Licensed under the Apache License, Version 2.0 (the "License"); 3 | * you may not use this file except in compliance with the License. 4 | * You may obtain a copy of the License at 5 | * 6 | * http://www.apache.org/licenses/LICENSE-2.0 7 | * 8 | * Unless required by applicable law or agreed to in writing, software 9 | * distributed under the License is distributed on an "AS IS" BASIS, 10 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 11 | * See the License for the specific language governing permissions and 12 | * limitations under the License. 13 | */ 14 | package com.granturing.spark.powerbi 15 | 16 | import org.apache.spark.SparkConf 17 | import scala.concurrent.duration._ 18 | 19 | /** 20 | * Specifies the client configuration options for connecting to the PowerBI service. 21 | * See [[com.granturing.spark.powerbi.ClientConf.fromSparkConf]] for generating 22 | * from Spark configuration. 
23 | * 24 | * @param token_uri the OAuth token URI to authenticate against 25 | * @param resource the OAuth resource to authenticate for 26 | * @param uri the PowerBI API URI 27 | * @param username the Azure Active Directory account name to authenticate with 28 | * @param password the Azure Active Directory account password to authenticate with 29 | * @param clientid the OAuth client id 30 | * @param timeout the response timeout in seconds for API calls 31 | * @param maxPartitions max number of partitions when saving 32 | */ 33 | case class ClientConf( 34 | token_uri: String, 35 | resource: String, 36 | uri: String, 37 | username: String, 38 | password: String, 39 | clientid: String, 40 | timeout: Duration, 41 | maxPartitions: Int, 42 | batchSize: Int 43 | ) 44 | 45 | object ClientConf { 46 | 47 | val TOKEN_URI_DEFAULT = "https://login.windows.net/common/oauth2/token" 48 | val TOKEN_RESOURCE_DEFAULT = "https://analysis.windows.net/powerbi/api" 49 | val API_URI_DEFAULT = "https://api.powerbi.com/v1.0/myorg" 50 | val BATCH_SIZE = 10000 51 | val MAX_PARTITIONS = 5 52 | 53 | private val POWERBI_USERNAME = "POWERBI_USERNAME" 54 | private val POWERBI_PASSWORD = "POWERBI_PASSWORD" 55 | private val POWERBI_CLIENTID = "POWERBI_CLIENTID" 56 | 57 | /** 58 | * Generates a PowerBI client configuration for credentials, URIs, and OAuth client id. 59 | * 60 | * Settings are: 61 | * 62 | * spark.powerbi.token.uri - The OAuth token URI to authenticate against (default: https://login.windows.net/common/oauth2/token) 63 | * 64 | * spark.powerbi.token.resource - The OAuth resource to authenticate for (default: https://analysis.windows.net/powerbi/api) 65 | * 66 | * spark.powerbi.uri - The PowerBI API URI (default: https://api.powerbi.com/beta/myorg) 67 | * 68 | * spark.powerbi.username - The Azure Active Directory account name to authenticate with 69 | * 70 | * spark.powerbi.password - The Azure Active Directory account password to authenticate with 71 | * 72 | * spark.pwerbi.clientid - The OAuth client id 73 | * 74 | * spark.powerbi.timeout - The response timeout in seconds for API calls (default: 30 seconds) 75 | * 76 | * spark.powerbi.max_partitions - The max number of partitions when saving (default: 5) 77 | * 78 | * @param conf a Spark configuration object with the application settings 79 | * @return a PowerBI client configuration 80 | */ 81 | def fromSparkConf(conf: SparkConf): ClientConf = { 82 | val token = conf.get("spark.powerbi.token.uri", TOKEN_URI_DEFAULT) 83 | val resource = conf.get("spark.powerbi.token.resource", TOKEN_RESOURCE_DEFAULT) 84 | val api = conf.get("spark.powerbi.uri", API_URI_DEFAULT) 85 | val username = sys.env.getOrElse(POWERBI_USERNAME, conf.get("spark.powerbi.username")) 86 | val password = sys.env.getOrElse(POWERBI_PASSWORD, conf.get("spark.powerbi.password")) 87 | val clientid = sys.env.getOrElse(POWERBI_CLIENTID, conf.get("spark.powerbi.clientid")) 88 | val timeout = Duration(conf.get("spark.powerbi.timeout", "30").toInt, SECONDS) 89 | val maxPartitions = conf.get("spark.powerbi.max_partitions", MAX_PARTITIONS.toString).toInt 90 | val batchSize = conf.get("spark.powerbi.batch_size", BATCH_SIZE.toString).toInt 91 | 92 | ClientConf(token, resource, api, username, password, clientid, timeout, maxPartitions, batchSize) 93 | } 94 | } 95 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | ![build](https://travis-ci.org/granturing/spark-power-bi.svg) 2 | 3 
## Scaladoc
Scaladoc is available [here](http://granturing.github.io/spark-power-bi/docs).

## Configuration
A few of the key properties are related to OAuth2. These depend on your application's registration in Azure AD.
```
spark.powerbi.username
spark.powerbi.password
spark.powerbi.clientid
```
These values may also be supplied through the `POWERBI_USERNAME`, `POWERBI_PASSWORD`, and `POWERBI_CLIENTID` environment variables, which take precedence over the corresponding Spark properties.

Rather than using your personal AD credentials for publishing data, you may want to create a service account instead. You can then log on to Power BI with that account and share its data sets and dashboards with other users in your organization. Unfortunately, there's currently no other way of authenticating to Power BI; hopefully in the future there'll be an organization-level API token that can publish shared data sets without requiring an actual AD account. You can also use a Power BI group when publishing data, as in the example below.
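For example, a driver using a dedicated service account and publishing to a group might be configured as follows (a sketch; the account name, client id, group name, and environment variable are placeholders, and the client id would come from the Azure AD setup described next):
```
import com.granturing.spark.powerbi._
import org.apache.spark.{SparkConf, SparkContext}

val conf = new SparkConf().
  setAppName("powerbi-publisher").
  set("spark.powerbi.username", "svc-powerbi@example.onmicrosoft.com").  // placeholder service account
  set("spark.powerbi.password", sys.env("SVC_POWERBI_PASSWORD")).        // read from the environment rather than hard-coding
  set("spark.powerbi.clientid", "00000000-0000-0000-0000-000000000000")  // placeholder client id of your AD app

val sc = new SparkContext(conf)

case class Person(name: String, age: Int)

// publishing to a group makes the dataset visible to everyone in that group
sc.parallelize(Seq(Person("Joe", 24))).saveToPowerBI("Test", "People", group = Some("BI Team"))
```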
### Setting Up Azure Active Directory
You'll need to create an application within your Azure AD in order to have a client id with which to publish data sets.

1. Using the Azure management portal, open up your directory and add a new Application (under the Apps tab)
2. Select "Add an application my organization is developing"
![step0](http://granturing.github.io/spark-power-bi/images/AD_Setup_0.png)
3. Enter any name you want and select "Native Client Application"
![step1](http://granturing.github.io/spark-power-bi/images/AD_Setup_1.png)
4. Enter a redirect URI; this can be anything since it won't be used
![step2](http://granturing.github.io/spark-power-bi/images/AD_Setup_2.png)
5. Once the app has been added, grant it permissions by clicking "Add application"
![step3](http://granturing.github.io/spark-power-bi/images/AD_Setup_3.png)
6. Select the "Power BI Service"
![step4](http://granturing.github.io/spark-power-bi/images/AD_Setup_4.png)
7. Add all 3 of the delegated permissions
![step5](http://granturing.github.io/spark-power-bi/images/AD_Setup_5.png)
8. Save your changes and use the newly assigned client id for the `spark.powerbi.clientid` property
![step6](http://granturing.github.io/spark-power-bi/images/AD_Setup_6.png)

## Spark Core
```
import com.granturing.spark.powerbi._

case class Person(name: String, age: Int)

val input = sc.textFile("examples/src/main/resources/people.txt")
val people = input.map(_.split(",")).map(l => Person(l(0), l(1).trim.toInt))

people.saveToPowerBI("Test", "People")
```

## SparkSQL
```
import com.granturing.spark.powerbi._
import org.apache.spark.sql._

val sqlCtx = new SQLContext(sc)
val people = sqlCtx.jsonFile("examples/src/main/resources/people.json")

people.write.format("com.granturing.spark.powerbi").options(Map("dataset" -> "Test", "table" -> "People")).save
```

## Spark Streaming
```
import com.granturing.spark.powerbi._
import org.apache.spark.{SparkConf, SparkContext}
import org.apache.spark.streaming.{Seconds, StreamingContext}
import org.apache.spark.streaming.twitter.TwitterUtils

// example row types for the tweet and hashtag streams
case class Tweet(id: Long, createdAt: java.util.Date, text: String, user: String)
case class HashTag(id: Long, tag: String, user: String)

val dataset = "Test" // target Power BI dataset name

val sc = new SparkContext(new SparkConf())
val ssc = new StreamingContext(sc, Seconds(5))

val filters = args

val input = TwitterUtils.createStream(ssc, None, filters)

val tweets = input.map(t => Tweet(t.getId, t.getCreatedAt, t.getText, t.getUser.getScreenName))
val hashTags = input.flatMap(t => t.getHashtagEntities.map(h => HashTag(t.getId, h.getText, t.getUser.getScreenName)))

tweets.saveToPowerBI(dataset, "Tweets")
hashTags.saveToPowerBI(dataset, "HashTags")

ssc.start()
ssc.awaitTermination()
```

## Referencing As A Dependency
You can also easily reference the library using the `--packages` argument:
```bash
spark-shell --packages com.granturing:spark-power-bi_2.10:1.5.0_0.0.7
```

## Building From Source
The library uses SBT and can be built by running ```sbt package```.
--------------------------------------------------------------------------------
/src/test/scala/com/granturing/spark/powerbi/PowerBISuite.scala:
--------------------------------------------------------------------------------
1 | /*
2 |  * Licensed under the Apache License, Version 2.0 (the "License");
3 |  * you may not use this file except in compliance with the License.
4 |  * You may obtain a copy of the License at
5 |  *
6 |  *    http://www.apache.org/licenses/LICENSE-2.0
7 |  *
8 |  * Unless required by applicable law or agreed to in writing, software
9 |  * distributed under the License is distributed on an "AS IS" BASIS,
10 |  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11 |  * See the License for the specific language governing permissions and
12 |  * limitations under the License.
13 | */ 14 | package com.granturing.spark.powerbi 15 | 16 | import org.apache.spark.sql.{SaveMode, SQLContext} 17 | import org.apache.spark.{SparkContext, SparkConf} 18 | import org.scalatest.{BeforeAndAfterAll, FunSuite} 19 | import scala.concurrent.Await 20 | 21 | case class Person(name: String, age: Int, birthday: java.sql.Date, timestamp: java.sql.Timestamp) 22 | 23 | class PowerBISuite extends FunSuite with BeforeAndAfterAll { 24 | 25 | val dataset = "PowerBI Spark Test" 26 | var datasetId: String = _ 27 | 28 | val table = "People" 29 | val tableSchema = Table( 30 | table, Seq( 31 | Column("name", "string"), 32 | Column("age", "Int64"), 33 | Column("birthday", "Datetime"), 34 | Column("timestamp", "Datetime") 35 | )) 36 | 37 | val group = sys.env.get("POWERBI_GROUP") 38 | val opts = { 39 | val _opts = Map("dataset" -> dataset, "table" -> table) 40 | 41 | group match { 42 | case Some(g) => _opts ++ Map("group" -> g) 43 | case None => _opts 44 | } 45 | } 46 | 47 | val conf = new SparkConf(). 48 | setAppName("spark-powerbi-test"). 49 | setMaster("local[1]"). 50 | set("spark.task.maxFailures", "1") 51 | val clientConf = ClientConf.fromSparkConf(new SparkConf()) 52 | val client: Client = new Client(clientConf) 53 | val sc: SparkContext = new SparkContext(conf) 54 | val sqlContext = new SQLContext(sc) 55 | 56 | import sqlContext.implicits._ 57 | 58 | override def beforeAll: Unit = { 59 | val groupId = group match { 60 | case Some(grp) => { 61 | val grpOpt = Await.result(client.getGroups, clientConf.timeout).filter(g => grp.equals(g.name)).map(_.id).headOption 62 | 63 | grpOpt match { 64 | case Some(g) => Some(g) 65 | case None => sys.error(s"group $grp not found") 66 | } 67 | } 68 | case None => None 69 | } 70 | 71 | val ds = Await.result(client.getDatasets(groupId), clientConf.timeout) 72 | 73 | datasetId = ds.filter(_.name == dataset).headOption match { 74 | case Some(d) => { 75 | Await.result(client.clearTable(d.id, table, groupId), clientConf.timeout) 76 | Await.result(client.updateTableSchema(d.id, table, tableSchema, groupId), clientConf.timeout) 77 | d.id 78 | } 79 | case None => { 80 | val result = Await.result(client.createDataset(Schema(dataset, Seq(tableSchema)), groupId), clientConf.timeout) 81 | result.id 82 | } 83 | } 84 | } 85 | 86 | override def afterAll: Unit = { 87 | sc.stop() 88 | } 89 | 90 | test("RDD saves to PowerBI") { 91 | val data = sc.parallelize(Seq(Person("Joe", 24, new java.sql.Date(1420088400000L), new java.sql.Timestamp(new java.util.Date().getTime)))) 92 | 93 | data.saveToPowerBI(dataset, table, group = group) 94 | } 95 | 96 | test(s"RDD with over ${clientConf.maxPartitions} partitions saves to PowerBI") { 97 | val data = sc.parallelize( 98 | 0 to clientConf.maxPartitions map { i => 99 | Person(s"Person$i", i, new java.sql.Date(1420088400000L), new java.sql.Timestamp(new java.util.Date().getTime)) 100 | }, 101 | clientConf.maxPartitions+1) 102 | 103 | data.saveToPowerBI(dataset, table, group = group) 104 | } 105 | 106 | test("RDD over batch size saves to PowerBI") { 107 | val data = sc.parallelize( 108 | 1 to clientConf.batchSize + 1 map { i => 109 | Person(s"Person$i", i, new java.sql.Date(1420088400000L), new java.sql.Timestamp(new java.util.Date().getTime)) 110 | } 111 | , 1) 112 | 113 | data.saveToPowerBI(dataset, table, group = group) 114 | } 115 | 116 | test("DataFrame saves with overwrite to PowerBI") { 117 | val data = sc.parallelize(Seq(Person("Joe", 24, new java.sql.Date(1420088400000L), new java.sql.Timestamp(new java.util.Date().getTime)))).toDF 
118 | 
119 |     data.
120 |       write.
121 |       format("com.granturing.spark.powerbi").
122 |       options(opts).
123 |       mode(SaveMode.Overwrite).save
124 |   }
125 | 
126 |   test("DataFrame saves with append to PowerBI") {
127 |     val data = sc.parallelize(Seq(Person("Joe", 24, new java.sql.Date(1420088400000L), new java.sql.Timestamp(new java.util.Date().getTime)))).toDF
128 | 
129 |     data.
130 |       write.
131 |       format("com.granturing.spark.powerbi").
132 |       options(opts).
133 |       mode(SaveMode.Append).save
134 |   }
135 | 
136 |   test("DataFrame save fails if exists") {
137 |     val data = sc.parallelize(Seq(Person("Joe", 24, new java.sql.Date(1420088400000L), new java.sql.Timestamp(new java.util.Date().getTime)))).toDF
138 | 
139 |     val ex = intercept[RuntimeException] {
140 |       data.
141 |         write.
142 |         format("com.granturing.spark.powerbi").
143 |         options(opts).
144 |         mode(SaveMode.ErrorIfExists).save
145 |     }
146 | 
147 |     assertResult(s"table $table already exists")(ex.getMessage)
148 |   }
149 | }
150 | 
--------------------------------------------------------------------------------
/scalastyle-config.xml:
--------------------------------------------------------------------------------
[XML rules stripped during extraction; only the description "Scalastyle standard configuration" survives from this file.]
--------------------------------------------------------------------------------
/src/main/scala/com/granturing/spark/powerbi/package.scala:
--------------------------------------------------------------------------------
1 | /*
2 |  * Licensed under the Apache License, Version 2.0 (the "License");
3 |  * you may not use this file except in compliance with the License.
4 |  * You may obtain a copy of the License at
5 |  *
6 |  *    http://www.apache.org/licenses/LICENSE-2.0
7 |  *
8 |  * Unless required by applicable law or agreed to in writing, software
9 |  * distributed under the License is distributed on an "AS IS" BASIS,
10 |  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11 |  * See the License for the specific language governing permissions and
12 |  * limitations under the License.
13 | */ 14 | package com.granturing.spark 15 | 16 | import java.util.Date 17 | import org.apache.spark.rdd.RDD 18 | import org.apache.spark.sql.SaveMode 19 | import org.apache.spark.sql.types._ 20 | import org.apache.spark.sql.types.BooleanType 21 | import org.apache.spark.sql.types.DateType 22 | import org.apache.spark.sql.types.StringType 23 | import org.apache.spark.sql.types.TimestampType 24 | import org.apache.spark.sql.types.{DataType, StructType} 25 | import org.apache.spark.streaming.dstream.DStream 26 | import scala.concurrent.{Future, Await, future} 27 | import scala.concurrent.ExecutionContext.Implicits._ 28 | import scala.concurrent.duration._ 29 | import scala.reflect.runtime.universe._ 30 | 31 | package object powerbi { 32 | 33 | private[powerbi] trait PowerBISink extends Serializable { 34 | 35 | protected def getGroupId(group: Option[String])(implicit client: Client): Future[Option[String]] = group match { 36 | case Some(grp) => { 37 | client.getGroups map { list => 38 | val grpOpt = list.filter(g => grp.equals(g.name)).map(_.id).headOption 39 | 40 | grpOpt match { 41 | case Some(g) => Some(g) 42 | case None => sys.error(s"group $grp not found") 43 | } 44 | } 45 | } 46 | case None => future(None) 47 | } 48 | 49 | protected def getOrCreateDataset[A <: Product: TypeTag]( 50 | groupId: Option[String], 51 | dataset: String, 52 | table: String, 53 | tag: TypeTag[A])(implicit client: Client): Future[Dataset] = { 54 | 55 | client.getDatasets(groupId) flatMap { list => 56 | val dsOpt = list.filter(d => dataset.equals(d.name)).headOption 57 | 58 | dsOpt match { 59 | case Some(d) => future(d) 60 | case None => client.createDataset(Schema(dataset, Seq(Table(table, schemaFor))), groupId, RetentionPolicy.BasicFIFO) 61 | } 62 | } 63 | 64 | } 65 | 66 | protected def getOrCreateDataset( 67 | mode: SaveMode, 68 | groupId: Option[String], 69 | dataset: String, 70 | table: String, 71 | schema: StructType)(implicit client: Client): Future[Dataset] = { 72 | 73 | client.getDatasets(groupId) flatMap { list => 74 | val dsOpt = list.filter(d => dataset.equals(d.name)).headOption 75 | 76 | (dsOpt, mode) match { 77 | case (Some(d), SaveMode.ErrorIfExists) => sys.error(s"table $table already exists") 78 | case (Some(d), SaveMode.Overwrite) => client.clearTable(d.id, table, groupId) map { _ => d } 79 | case (Some(d), _) => future(d) 80 | case (None, _) => client.createDataset(Schema(dataset, Seq(Table(table, schemaFor(schema)))), groupId, RetentionPolicy.BasicFIFO) 81 | } 82 | } 83 | 84 | } 85 | 86 | protected def schemaFor[A <: Product: TypeTag](implicit tag: TypeTag[A]) = { 87 | val values = tag.tpe.members.collect { case m:MethodSymbol if m.isCaseAccessor => m } 88 | val columns = values.map(v => Column(v.name.toString, typeToBIType(v.typeSignature.baseType(v.typeSignature.typeSymbol)))).toSeq 89 | 90 | columns 91 | } 92 | 93 | protected def schemaFor(schema: StructType) = { 94 | val columns = schema.fields.map(f => Column(f.name, sparkTypeToBIType(f.dataType))) 95 | 96 | columns 97 | } 98 | 99 | protected def sparkTypeToBIType(myType: DataType) = myType match { 100 | case ByteType | ShortType | IntegerType | LongType => "Int64" 101 | case FloatType | DoubleType => "Double" 102 | case _: DecimalType => "Double" 103 | case StringType => "String" 104 | case BooleanType => "Boolean" 105 | case TimestampType | DateType => "Datetime" 106 | case _ => throw new Exception(s"Unsupported type $myType") 107 | } 108 | 109 | // scalastyle:off cyclomatic.complexity 110 | protected def typeToBIType(myType: Type) = 
myType match { 111 | case t if t =:= typeOf[Byte] => "Int64" 112 | case t if t =:= typeOf[Short] => "Int64" 113 | case t if t =:= typeOf[Int] => "Int64" 114 | case t if t =:= typeOf[Integer] => "Int64" 115 | case t if t =:= typeOf[Long] => "Int64" 116 | case t if t =:= typeOf[Float] => "Double" 117 | case t if t =:= typeOf[Double] => "Double" 118 | case t if t =:= typeOf[String] => "String" 119 | case t if t =:= typeOf[Boolean] => "Boolean" 120 | case t if t =:= typeOf[Date] => "Datetime" 121 | case _ => throw new Exception(s"Unsupported type $myType") 122 | } 123 | 124 | } 125 | 126 | implicit class PowerBIDStream[A <: Product: TypeTag](stream: DStream[A]) extends PowerBISink { 127 | 128 | private val conf = ClientConf.fromSparkConf(stream.context.sparkContext.getConf) 129 | 130 | private implicit val client = new Client(conf) 131 | 132 | /** 133 | * Inserts data into a PowerBI table. If the dataset does not already exist it will be created 134 | * along with the specified table and schema based on the incoming data. Optionally clears existing 135 | * data in the table. 136 | * 137 | * @param dataset The dataset name in PowerBI 138 | * @param table The target table name 139 | * @param append Whether to append data or clear the table before inserting (default: true) 140 | * @param group Power BI group to use when performing operations (default: None) 141 | */ 142 | def saveToPowerBI(dataset: String, table: String, append: Boolean = true, group: Option[String] = None): Unit = { 143 | 144 | val step = for { 145 | groupId <- getGroupId(group) 146 | ds <- getOrCreateDataset(groupId, dataset, table, typeTag[A]) flatMap { d => 147 | append match { 148 | case true => future(d) 149 | case false => client.clearTable(d.id, table, groupId) map { _ => d} 150 | } 151 | } 152 | } yield (groupId, ds) 153 | 154 | val (groupId, ds) = Await.result(step, conf.timeout) // have to await here otherwise Spark won't see the foreachRDD below 155 | 156 | // we need local copies, workaround for TypeTag serialization issue (see: https://issues.scala-lang.org/browse/SI-5919) 157 | val _conf = conf 158 | val _token = Some(client.currentToken) 159 | val _table = table 160 | 161 | stream foreachRDD { rdd => 162 | 163 | val coalesced = rdd.partitions.size > _conf.maxPartitions match { 164 | case true => rdd.coalesce(_conf.maxPartitions) 165 | case false => rdd 166 | } 167 | 168 | coalesced foreachPartition { p => 169 | val _client = new Client(_conf, _token) 170 | val rows = p.toSeq.sliding(_conf.batchSize, _conf.batchSize) 171 | 172 | val submit = rows. 173 | foldLeft(future()) { (fAccum, batch) => 174 | fAccum flatMap { _ => _client.addRows(ds.id, _table, batch, groupId) } } 175 | 176 | submit.onComplete { _ => _client.shutdown() } 177 | 178 | Await.result(submit, _conf.timeout) 179 | } 180 | } 181 | } 182 | } 183 | 184 | implicit class PowerBIRDD[A <: Product : TypeTag](rdd: RDD[A]) extends PowerBISink { 185 | 186 | private val conf = ClientConf.fromSparkConf(rdd.sparkContext.getConf) 187 | 188 | private implicit val client = new Client(conf) 189 | 190 | /** 191 | * Inserts data into a PowerBI table. If the dataset does not already exist it will be created 192 | * along with the specified table and schema based on the incoming data. Optionally clears existing 193 | * data in the table. 
194 | * 195 | * @param dataset The dataset name in PowerBI 196 | * @param table The target table name 197 | * @param append Whether to append data or clear the table before inserting (default: true) 198 | * @param group Power BI group to use when performing operations (default: None) 199 | */ 200 | def saveToPowerBI(dataset: String, table: String, append: Boolean = true, group: Option[String] = None): Unit = { 201 | 202 | val step = for { 203 | groupId <- getGroupId(group) 204 | ds <- getOrCreateDataset(groupId, dataset, table, typeTag[A]) flatMap { d => 205 | append match { 206 | case true => future(d) 207 | case false => client.clearTable(d.id, table, groupId) map { _ => d} 208 | } 209 | } 210 | } yield (groupId, ds) 211 | 212 | val result = step map { case (groupId, ds) => 213 | // we need local copies, workaround for TypeTag serialization issue (see: https://issues.scala-lang.org/browse/SI-5919) 214 | val _conf = conf 215 | val _token = Some(client.currentToken) 216 | val _table = table 217 | 218 | val coalesced = rdd.partitions.size > _conf.maxPartitions match { 219 | case true => rdd.coalesce(_conf.maxPartitions) 220 | case false => rdd 221 | } 222 | 223 | coalesced foreachPartition { p => 224 | val _client = new Client(_conf, _token) 225 | val rows = p.toSeq.sliding(_conf.batchSize, _conf.batchSize) 226 | 227 | val submit = rows. 228 | foldLeft(future()) { (fAccum, batch) => 229 | fAccum flatMap { _ => _client.addRows(ds.id, _table, batch, groupId) } 230 | } 231 | 232 | submit.onComplete { _ => _client.shutdown() } 233 | 234 | Await.result(submit, _conf.timeout) 235 | } 236 | } 237 | 238 | Await.result(result, Duration.Inf) 239 | } 240 | 241 | } 242 | 243 | } 244 | -------------------------------------------------------------------------------- /src/main/scala/com/granturing/spark/powerbi/Client.scala: -------------------------------------------------------------------------------- 1 | /* 2 | * Licensed under the Apache License, Version 2.0 (the "License"); 3 | * you may not use this file except in compliance with the License. 4 | * You may obtain a copy of the License at 5 | * 6 | * http://www.apache.org/licenses/LICENSE-2.0 7 | * 8 | * Unless required by applicable law or agreed to in writing, software 9 | * distributed under the License is distributed on an "AS IS" BASIS, 10 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 11 | * See the License for the specific language governing permissions and 12 | * limitations under the License. 
13 | */ 14 | package com.granturing.spark.powerbi 15 | 16 | import java.net.URLEncoder 17 | import java.text.SimpleDateFormat 18 | import java.util.concurrent.Executors 19 | import com.ning.http.client.{AsyncHttpClient, AsyncHttpClientConfig, Response} 20 | import dispatch._, Defaults._ 21 | import org.apache.spark.Logging 22 | import org.json4s.JsonAST.{JString, JNull} 23 | import org.json4s.jackson.JsonMethods._ 24 | import org.json4s.{NoTypeHints, CustomSerializer, JValue, DefaultFormats} 25 | import org.json4s.jackson.Serialization._ 26 | import scala.concurrent.Future 27 | 28 | object RetentionPolicy { 29 | sealed trait EnumVal 30 | 31 | case object None extends EnumVal { 32 | override def toString: String = "none" 33 | } 34 | 35 | case object BasicFIFO extends EnumVal { 36 | override def toString: String = "basicFIFO" 37 | } 38 | } 39 | 40 | case class Group(id: String, name: String) 41 | 42 | case class Dataset(id: String, name: String) 43 | 44 | case class Column(name: String, dataType: String) 45 | 46 | case class Table(name: String, columns: Seq[Column]) 47 | 48 | case class Schema(name: String, tables: Seq[Table]) 49 | 50 | private[powerbi] object PowerBIResult extends (Response => JValue) { 51 | 52 | implicit private val formats = DefaultFormats 53 | 54 | override def apply(response: Response): JValue = response.getStatusCode match { 55 | case x if (200 until 300 contains x) && response.hasResponseBody => parse(response.getResponseBody) 56 | case x if 200 until 300 contains x => JNull 57 | case _ if s"${response.getContentType}".startsWith("application/json") && response.hasResponseBody => { 58 | val json = parse(response.getResponseBody) 59 | val error = (json \ "error" \ "message").extract[String] 60 | 61 | throw new Exception(s"$error") 62 | } 63 | case _ if response.getResponseBody.size == 0 => throw new Exception(response.getStatusText) 64 | case _ => throw new Exception(response.getResponseBody) 65 | } 66 | } 67 | 68 | private class JavaSqlDateSerializer extends CustomSerializer[java.sql.Date](format => ( 69 | { 70 | case x: JString => new java.sql.Date(format.dateFormat.parse(x.values).get.getTime) 71 | }, 72 | { 73 | case x: java.sql.Date => JString(format.dateFormat.format(x)) 74 | } 75 | )) 76 | 77 | private class JavaSqlTimestampSerializer extends CustomSerializer[java.sql.Timestamp](format => ( 78 | { 79 | case x: JString => new java.sql.Timestamp(format.dateFormat.parse(x.values).get.getTime) 80 | }, 81 | { 82 | case x: java.sql.Timestamp => JString(format.dateFormat.format(x)) 83 | } 84 | )) 85 | 86 | /** 87 | * A very basic PowerBI client using the Scala Dispatch HTTP library. Requires that an app be registered 88 | * in your Azure Active Directory to allow access to your PowerBI service. 
89 | * 90 | * @param conf a client configuration 91 | * @see [[com.granturing.spark.powerbi.ClientConf]] 92 | */ 93 | class Client(conf: ClientConf, initialToken: Option[String] = None) extends Logging { 94 | 95 | implicit private val formats = new DefaultFormats { 96 | override def dateFormatter: SimpleDateFormat = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss") 97 | 98 | override val typeHints = NoTypeHints 99 | } ++ Seq(new JavaSqlDateSerializer, new JavaSqlTimestampSerializer) 100 | 101 | private val threadPool = Executors.newCachedThreadPool() 102 | 103 | private val httpConfig = new AsyncHttpClientConfig.Builder() 104 | .setExecutorService(threadPool) 105 | .setConnectionTimeoutInMs(conf.timeout.toMillis.toInt) 106 | .setRequestTimeoutInMs(conf.timeout.toMillis.toInt) 107 | .setCompressionEnabled(true) 108 | .build() 109 | 110 | @transient lazy private val http = new Http(new AsyncHttpClient(httpConfig)) 111 | 112 | private val token = new OAuthTokenHandler(conf, initialToken) 113 | 114 | private val oauth = new OAuthReq(token) 115 | 116 | private def getBaseUri(group: Option[String]) = group match { 117 | case Some(g) => s"${conf.uri}/groups/${URLEncoder.encode(g, "UTF-8")}" 118 | case None => conf.uri 119 | } 120 | 121 | /** 122 | * Gets the current OAuth token being used for authentication. 123 | * 124 | * @return an OAuth authorization token 125 | */ 126 | def currentToken: String = token() 127 | 128 | def getGroups: Future[List[Group]] = { 129 | val groups_req = url(conf.uri + "/groups") 130 | 131 | val request = http(oauth(groups_req) > PowerBIResult) 132 | 133 | val response = request map { json => (json \ "value").extract[List[Group]] } 134 | 135 | response 136 | } 137 | 138 | /** 139 | * Gets a list of datasets for the current account. 140 | * 141 | * @param group optional id of group 142 | * @return a list of datasets 143 | * @see [[com.granturing.spark.powerbi.Dataset]] 144 | */ 145 | def getDatasets(group: Option[String] = None): Future[List[Dataset]] = { 146 | val base_uri = getBaseUri(group) 147 | 148 | val datasets_req = url(s"$base_uri/datasets") 149 | 150 | val request = http(oauth(datasets_req) > PowerBIResult) 151 | 152 | val response = request map { json => (json \ "value").extract[List[Dataset]] } 153 | 154 | response 155 | } 156 | 157 | /** 158 | * Creates a new dataset with the specified schema 159 | * 160 | * @param schema a schema for the new dataset 161 | * @param group optional id of group 162 | * @param retentionPolicy data retention policy to use for dataset 163 | * @return a dataset object for the newly created dataset 164 | * @see [[com.granturing.spark.powerbi.Schema]] 165 | * @see [[com.granturing.spark.powerbi.Dataset]] 166 | */ 167 | def createDataset(schema: Schema, 168 | group: Option[String] = None, 169 | retentionPolicy: RetentionPolicy.EnumVal = RetentionPolicy.None): Future[Dataset] = { 170 | val body = write(schema) 171 | 172 | val base_uri = getBaseUri(group) 173 | 174 | val create_req = url(s"$base_uri/datasets?retentionPolicy=$retentionPolicy") 175 | .POST 176 | .setContentType("application/json", "UTF-8") << 177 | body 178 | 179 | val request = http(oauth(create_req) > PowerBIResult) 180 | 181 | val response = request map { json => json.extract[Dataset] } 182 | 183 | response 184 | } 185 | 186 | /** 187 | * Gets a list of tables for the specified dataset. 
188 | 
189 |    * @param dataset a dataset GUID
190 |    * @param group optional id of group
191 |    * @return a list of tables
192 |    */
193 |   def getTables(dataset: String, group: Option[String] = None): Future[Seq[String]] = {
194 |     val base_uri = getBaseUri(group)
195 | 
196 |     val tables_req = url(s"$base_uri/datasets/${URLEncoder.encode(dataset, "UTF-8")}/tables")
197 | 
198 |     val request = http(oauth(tables_req) > PowerBIResult)
199 | 
200 |     val response = request map { json => (json \ "value" \ "name").extract[Seq[String]] }
201 | 
202 |     response
203 |   }
204 | 
205 |   /**
206 |    * Updates the schema of an existing table.
207 |    *
208 |    * @param dataset a dataset GUID
209 |    * @param table the table name to update
210 |    * @param schema the new schema for the table
211 |    * @param group optional id of group
212 |    * @return a success or failure result
213 |    */
214 |   def updateTableSchema(dataset: String, table: String, schema: Table, group: Option[String] = None): Future[Unit] = {
215 |     val body = write(schema)
216 | 
217 |     val base_uri = getBaseUri(group)
218 | 
219 |     val add_req = url(s"$base_uri/datasets/${URLEncoder.encode(dataset, "UTF-8")}/tables/${URLEncoder.encode(table, "UTF-8")}")
220 |       .PUT
221 |       .setContentType("application/json", "UTF-8") <<
222 |       body
223 | 
224 |     val request = http(oauth(add_req) > PowerBIResult)
225 | 
226 |     request.map(_ => ())
227 |   }
228 | 
229 |   /**
230 |    * Adds a collection of rows to the specified dataset and table. If the dataset or table
231 |    * does not exist, an error is returned.
232 |    *
233 |    * @param dataset a dataset GUID
234 |    * @param table a table name within the dataset
235 |    * @param rows a sequence of JSON serializable objects with property names matching the schema
236 |    * @param group optional id of group
237 |    * @return a success or failure result
238 |    */
239 |   def addRows(dataset: String, table: String, rows: Seq[_], group: Option[String] = None): Future[Unit] = {
240 |     val body = write("rows" -> rows)
241 | 
242 |     val base_uri = getBaseUri(group)
243 | 
244 |     val add_req = url(s"$base_uri/datasets/${URLEncoder.encode(dataset, "UTF-8")}/tables/${URLEncoder.encode(table, "UTF-8")}/rows")
245 |       .POST
246 |       .setContentType("application/json", "UTF-8") <<
247 |       body
248 | 
249 |     val request = http(oauth(add_req) > PowerBIResult)
250 | 
251 |     request.map(_ => ())
252 |   }
253 | 
254 |   /**
255 |    * Clears all rows in the specified table.
256 |    *
257 |    * @param dataset a dataset GUID
258 |    * @param table a table name within the dataset
259 |    * @param group optional id of group
260 |    * @return a success or failure result
261 |    */
262 |   def clearTable(dataset: String, table: String, group: Option[String] = None): Future[Unit] = {
263 |     val base_uri = getBaseUri(group)
264 | 
265 |     val add_req = url(s"${base_uri}/datasets/${URLEncoder.encode(dataset, "UTF-8")}/tables/${URLEncoder.encode(table, "UTF-8")}/rows")
266 |       .DELETE
267 | 
268 |     val request = http(oauth(add_req) > PowerBIResult)
269 | 
270 |     request.map(_ => ())
271 |   }
272 | 
273 |   def shutdown(): Unit = {
274 |     http.shutdown()
275 |     threadPool.shutdown()
276 |   }
277 | }
278 | 
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | Apache License
2 | Version 2.0, January 2004
3 | http://www.apache.org/licenses/
4 | 
5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
6 | 
7 | 1. Definitions.
8 | 9 | "License" shall mean the terms and conditions for use, reproduction, 10 | and distribution as defined by Sections 1 through 9 of this document. 11 | 12 | "Licensor" shall mean the copyright owner or entity authorized by 13 | the copyright owner that is granting the License. 14 | 15 | "Legal Entity" shall mean the union of the acting entity and all 16 | other entities that control, are controlled by, or are under common 17 | control with that entity. For the purposes of this definition, 18 | "control" means (i) the power, direct or indirect, to cause the 19 | direction or management of such entity, whether by contract or 20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 21 | outstanding shares, or (iii) beneficial ownership of such entity. 22 | 23 | "You" (or "Your") shall mean an individual or Legal Entity 24 | exercising permissions granted by this License. 25 | 26 | "Source" form shall mean the preferred form for making modifications, 27 | including but not limited to software source code, documentation 28 | source, and configuration files. 29 | 30 | "Object" form shall mean any form resulting from mechanical 31 | transformation or translation of a Source form, including but 32 | not limited to compiled object code, generated documentation, 33 | and conversions to other media types. 34 | 35 | "Work" shall mean the work of authorship, whether in Source or 36 | Object form, made available under the License, as indicated by a 37 | copyright notice that is included in or attached to the work 38 | (an example is provided in the Appendix below). 39 | 40 | "Derivative Works" shall mean any work, whether in Source or Object 41 | form, that is based on (or derived from) the Work and for which the 42 | editorial revisions, annotations, elaborations, or other modifications 43 | represent, as a whole, an original work of authorship. For the purposes 44 | of this License, Derivative Works shall not include works that remain 45 | separable from, or merely link (or bind by name) to the interfaces of, 46 | the Work and Derivative Works thereof. 47 | 48 | "Contribution" shall mean any work of authorship, including 49 | the original version of the Work and any modifications or additions 50 | to that Work or Derivative Works thereof, that is intentionally 51 | submitted to Licensor for inclusion in the Work by the copyright owner 52 | or by an individual or Legal Entity authorized to submit on behalf of 53 | the copyright owner. For the purposes of this definition, "submitted" 54 | means any form of electronic, verbal, or written communication sent 55 | to the Licensor or its representatives, including but not limited to 56 | communication on electronic mailing lists, source code control systems, 57 | and issue tracking systems that are managed by, or on behalf of, the 58 | Licensor for the purpose of discussing and improving the Work, but 59 | excluding communication that is conspicuously marked or otherwise 60 | designated in writing by the copyright owner as "Not a Contribution." 61 | 62 | "Contributor" shall mean Licensor and any individual or Legal Entity 63 | on behalf of whom a Contribution has been received by Licensor and 64 | subsequently incorporated within the Work. 65 | 66 | 2. Grant of Copyright License. 
Subject to the terms and conditions of 67 | this License, each Contributor hereby grants to You a perpetual, 68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 69 | copyright license to reproduce, prepare Derivative Works of, 70 | publicly display, publicly perform, sublicense, and distribute the 71 | Work and such Derivative Works in Source or Object form. 72 | 73 | 3. Grant of Patent License. Subject to the terms and conditions of 74 | this License, each Contributor hereby grants to You a perpetual, 75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 76 | (except as stated in this section) patent license to make, have made, 77 | use, offer to sell, sell, import, and otherwise transfer the Work, 78 | where such license applies only to those patent claims licensable 79 | by such Contributor that are necessarily infringed by their 80 | Contribution(s) alone or by combination of their Contribution(s) 81 | with the Work to which such Contribution(s) was submitted. If You 82 | institute patent litigation against any entity (including a 83 | cross-claim or counterclaim in a lawsuit) alleging that the Work 84 | or a Contribution incorporated within the Work constitutes direct 85 | or contributory patent infringement, then any patent licenses 86 | granted to You under this License for that Work shall terminate 87 | as of the date such litigation is filed. 88 | 89 | 4. Redistribution. You may reproduce and distribute copies of the 90 | Work or Derivative Works thereof in any medium, with or without 91 | modifications, and in Source or Object form, provided that You 92 | meet the following conditions: 93 | 94 | (a) You must give any other recipients of the Work or 95 | Derivative Works a copy of this License; and 96 | 97 | (b) You must cause any modified files to carry prominent notices 98 | stating that You changed the files; and 99 | 100 | (c) You must retain, in the Source form of any Derivative Works 101 | that You distribute, all copyright, patent, trademark, and 102 | attribution notices from the Source form of the Work, 103 | excluding those notices that do not pertain to any part of 104 | the Derivative Works; and 105 | 106 | (d) If the Work includes a "NOTICE" text file as part of its 107 | distribution, then any Derivative Works that You distribute must 108 | include a readable copy of the attribution notices contained 109 | within such NOTICE file, excluding those notices that do not 110 | pertain to any part of the Derivative Works, in at least one 111 | of the following places: within a NOTICE text file distributed 112 | as part of the Derivative Works; within the Source form or 113 | documentation, if provided along with the Derivative Works; or, 114 | within a display generated by the Derivative Works, if and 115 | wherever such third-party notices normally appear. The contents 116 | of the NOTICE file are for informational purposes only and 117 | do not modify the License. You may add Your own attribution 118 | notices within Derivative Works that You distribute, alongside 119 | or as an addendum to the NOTICE text from the Work, provided 120 | that such additional attribution notices cannot be construed 121 | as modifying the License. 
122 | 123 | You may add Your own copyright statement to Your modifications and 124 | may provide additional or different license terms and conditions 125 | for use, reproduction, or distribution of Your modifications, or 126 | for any such Derivative Works as a whole, provided Your use, 127 | reproduction, and distribution of the Work otherwise complies with 128 | the conditions stated in this License. 129 | 130 | 5. Submission of Contributions. Unless You explicitly state otherwise, 131 | any Contribution intentionally submitted for inclusion in the Work 132 | by You to the Licensor shall be under the terms and conditions of 133 | this License, without any additional terms or conditions. 134 | Notwithstanding the above, nothing herein shall supersede or modify 135 | the terms of any separate license agreement you may have executed 136 | with Licensor regarding such Contributions. 137 | 138 | 6. Trademarks. This License does not grant permission to use the trade 139 | names, trademarks, service marks, or product names of the Licensor, 140 | except as required for reasonable and customary use in describing the 141 | origin of the Work and reproducing the content of the NOTICE file. 142 | 143 | 7. Disclaimer of Warranty. Unless required by applicable law or 144 | agreed to in writing, Licensor provides the Work (and each 145 | Contributor provides its Contributions) on an "AS IS" BASIS, 146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 147 | implied, including, without limitation, any warranties or conditions 148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 149 | PARTICULAR PURPOSE. You are solely responsible for determining the 150 | appropriateness of using or redistributing the Work and assume any 151 | risks associated with Your exercise of permissions under this License. 152 | 153 | 8. Limitation of Liability. In no event and under no legal theory, 154 | whether in tort (including negligence), contract, or otherwise, 155 | unless required by applicable law (such as deliberate and grossly 156 | negligent acts) or agreed to in writing, shall any Contributor be 157 | liable to You for damages, including any direct, indirect, special, 158 | incidental, or consequential damages of any character arising as a 159 | result of this License or out of the use or inability to use the 160 | Work (including but not limited to damages for loss of goodwill, 161 | work stoppage, computer failure or malfunction, or any and all 162 | other commercial damages or losses), even if such Contributor 163 | has been advised of the possibility of such damages. 164 | 165 | 9. Accepting Warranty or Additional Liability. While redistributing 166 | the Work or Derivative Works thereof, You may choose to offer, 167 | and charge a fee for, acceptance of support, warranty, indemnity, 168 | or other liability obligations and/or rights consistent with this 169 | License. However, in accepting such obligations, You may act only 170 | on Your own behalf and on Your sole responsibility, not on behalf 171 | of any other Contributor, and only if You agree to indemnify, 172 | defend, and hold each Contributor harmless for any liability 173 | incurred by, or claims asserted against, such Contributor by reason 174 | of your accepting any such warranty or additional liability. 175 | 176 | END OF TERMS AND CONDITIONS 177 | 178 | APPENDIX: How to apply the Apache License to your work. 
179 | 180 | To apply the Apache License to your work, attach the following 181 | boilerplate notice, with the fields enclosed by brackets "{}" 182 | replaced with your own identifying information. (Don't include 183 | the brackets!) The text should be enclosed in the appropriate 184 | comment syntax for the file format. We also recommend that a 185 | file or class name and description of purpose be included on the 186 | same "printed page" as the copyright notice for easier 187 | identification within third-party archives. 188 | 189 | Copyright {yyyy} {name of copyright owner} 190 | 191 | Licensed under the Apache License, Version 2.0 (the "License"); 192 | you may not use this file except in compliance with the License. 193 | You may obtain a copy of the License at 194 | 195 | http://www.apache.org/licenses/LICENSE-2.0 196 | 197 | Unless required by applicable law or agreed to in writing, software 198 | distributed under the License is distributed on an "AS IS" BASIS, 199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 200 | See the License for the specific language governing permissions and 201 | limitations under the License. 202 | 203 | --------------------------------------------------------------------------------