├── LICENSE
├── README.md
├── examples
│   └── src
│       ├── main
│       │   └── java
│       │       └── com
│       │           └── zoho
│       │               └── ml
│       │                   └── explainer
│       │                       └── client
│       │                           └── Client.java
│       └── resources
│           └── Iris.csv
├── pom.xml
└── src
    └── main
        └── java
            └── com
                └── zoho
                    └── ml
                        └── explainer
                            ├── .gitignore
                            ├── Explainer.java
                            ├── ExplainerParameters.java
                            ├── ExplainerResults.java
                            ├── ExplainerUtils.java
                            └── SparkUtils.java
/LICENSE: -------------------------------------------------------------------------------- 1 | Apache License 2 | Version 2.0, January 2004 3 | http://www.apache.org/licenses/ 4 | 5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 6 | 7 | 1. Definitions. 8 | 9 | "License" shall mean the terms and conditions for use, reproduction, 10 | and distribution as defined by Sections 1 through 9 of this document. 11 | 12 | "Licensor" shall mean the copyright owner or entity authorized by 13 | the copyright owner that is granting the License. 14 | 15 | "Legal Entity" shall mean the union of the acting entity and all 16 | other entities that control, are controlled by, or are under common 17 | control with that entity. For the purposes of this definition, 18 | "control" means (i) the power, direct or indirect, to cause the 19 | direction or management of such entity, whether by contract or 20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 21 | outstanding shares, or (iii) beneficial ownership of such entity. 22 | 23 | "You" (or "Your") shall mean an individual or Legal Entity 24 | exercising permissions granted by this License. 25 | 26 | "Source" form shall mean the preferred form for making modifications, 27 | including but not limited to software source code, documentation 28 | source, and configuration files. 29 | 30 | "Object" form shall mean any form resulting from mechanical 31 | transformation or translation of a Source form, including but 32 | not limited to compiled object code, generated documentation, 33 | and conversions to other media types. 34 | 35 | "Work" shall mean the work of authorship, whether in Source or 36 | Object form, made available under the License, as indicated by a 37 | copyright notice that is included in or attached to the work 38 | (an example is provided in the Appendix below). 39 | 40 | "Derivative Works" shall mean any work, whether in Source or Object 41 | form, that is based on (or derived from) the Work and for which the 42 | editorial revisions, annotations, elaborations, or other modifications 43 | represent, as a whole, an original work of authorship. For the purposes 44 | of this License, Derivative Works shall not include works that remain 45 | separable from, or merely link (or bind by name) to the interfaces of, 46 | the Work and Derivative Works thereof. 47 | 48 | "Contribution" shall mean any work of authorship, including 49 | the original version of the Work and any modifications or additions 50 | to that Work or Derivative Works thereof, that is intentionally 51 | submitted to Licensor for inclusion in the Work by the copyright owner 52 | or by an individual or Legal Entity authorized to submit on behalf of 53 | the copyright owner.
For the purposes of this definition, "submitted" 54 | means any form of electronic, verbal, or written communication sent 55 | to the Licensor or its representatives, including but not limited to 56 | communication on electronic mailing lists, source code control systems, 57 | and issue tracking systems that are managed by, or on behalf of, the 58 | Licensor for the purpose of discussing and improving the Work, but 59 | excluding communication that is conspicuously marked or otherwise 60 | designated in writing by the copyright owner as "Not a Contribution." 61 | 62 | "Contributor" shall mean Licensor and any individual or Legal Entity 63 | on behalf of whom a Contribution has been received by Licensor and 64 | subsequently incorporated within the Work. 65 | 66 | 2. Grant of Copyright License. Subject to the terms and conditions of 67 | this License, each Contributor hereby grants to You a perpetual, 68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 69 | copyright license to reproduce, prepare Derivative Works of, 70 | publicly display, publicly perform, sublicense, and distribute the 71 | Work and such Derivative Works in Source or Object form. 72 | 73 | 3. Grant of Patent License. Subject to the terms and conditions of 74 | this License, each Contributor hereby grants to You a perpetual, 75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 76 | (except as stated in this section) patent license to make, have made, 77 | use, offer to sell, sell, import, and otherwise transfer the Work, 78 | where such license applies only to those patent claims licensable 79 | by such Contributor that are necessarily infringed by their 80 | Contribution(s) alone or by combination of their Contribution(s) 81 | with the Work to which such Contribution(s) was submitted. If You 82 | institute patent litigation against any entity (including a 83 | cross-claim or counterclaim in a lawsuit) alleging that the Work 84 | or a Contribution incorporated within the Work constitutes direct 85 | or contributory patent infringement, then any patent licenses 86 | granted to You under this License for that Work shall terminate 87 | as of the date such litigation is filed. 88 | 89 | 4. Redistribution. 
You may reproduce and distribute copies of the 90 | Work or Derivative Works thereof in any medium, with or without 91 | modifications, and in Source or Object form, provided that You 92 | meet the following conditions: 93 | 94 | (a) You must give any other recipients of the Work or 95 | Derivative Works a copy of this License; and 96 | 97 | (b) You must cause any modified files to carry prominent notices 98 | stating that You changed the files; and 99 | 100 | (c) You must retain, in the Source form of any Derivative Works 101 | that You distribute, all copyright, patent, trademark, and 102 | attribution notices from the Source form of the Work, 103 | excluding those notices that do not pertain to any part of 104 | the Derivative Works; and 105 | 106 | (d) If the Work includes a "NOTICE" text file as part of its 107 | distribution, then any Derivative Works that You distribute must 108 | include a readable copy of the attribution notices contained 109 | within such NOTICE file, excluding those notices that do not 110 | pertain to any part of the Derivative Works, in at least one 111 | of the following places: within a NOTICE text file distributed 112 | as part of the Derivative Works; within the Source form or 113 | documentation, if provided along with the Derivative Works; or, 114 | within a display generated by the Derivative Works, if and 115 | wherever such third-party notices normally appear. The contents 116 | of the NOTICE file are for informational purposes only and 117 | do not modify the License. You may add Your own attribution 118 | notices within Derivative Works that You distribute, alongside 119 | or as an addendum to the NOTICE text from the Work, provided 120 | that such additional attribution notices cannot be construed 121 | as modifying the License. 122 | 123 | You may add Your own copyright statement to Your modifications and 124 | may provide additional or different license terms and conditions 125 | for use, reproduction, or distribution of Your modifications, or 126 | for any such Derivative Works as a whole, provided Your use, 127 | reproduction, and distribution of the Work otherwise complies with 128 | the conditions stated in this License. 129 | 130 | 5. Submission of Contributions. Unless You explicitly state otherwise, 131 | any Contribution intentionally submitted for inclusion in the Work 132 | by You to the Licensor shall be under the terms and conditions of 133 | this License, without any additional terms or conditions. 134 | Notwithstanding the above, nothing herein shall supersede or modify 135 | the terms of any separate license agreement you may have executed 136 | with Licensor regarding such Contributions. 137 | 138 | 6. Trademarks. This License does not grant permission to use the trade 139 | names, trademarks, service marks, or product names of the Licensor, 140 | except as required for reasonable and customary use in describing the 141 | origin of the Work and reproducing the content of the NOTICE file. 142 | 143 | 7. Disclaimer of Warranty. Unless required by applicable law or 144 | agreed to in writing, Licensor provides the Work (and each 145 | Contributor provides its Contributions) on an "AS IS" BASIS, 146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 147 | implied, including, without limitation, any warranties or conditions 148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 149 | PARTICULAR PURPOSE. 
You are solely responsible for determining the 150 | appropriateness of using or redistributing the Work and assume any 151 | risks associated with Your exercise of permissions under this License. 152 | 153 | 8. Limitation of Liability. In no event and under no legal theory, 154 | whether in tort (including negligence), contract, or otherwise, 155 | unless required by applicable law (such as deliberate and grossly 156 | negligent acts) or agreed to in writing, shall any Contributor be 157 | liable to You for damages, including any direct, indirect, special, 158 | incidental, or consequential damages of any character arising as a 159 | result of this License or out of the use or inability to use the 160 | Work (including but not limited to damages for loss of goodwill, 161 | work stoppage, computer failure or malfunction, or any and all 162 | other commercial damages or losses), even if such Contributor 163 | has been advised of the possibility of such damages. 164 | 165 | 9. Accepting Warranty or Additional Liability. While redistributing 166 | the Work or Derivative Works thereof, You may choose to offer, 167 | and charge a fee for, acceptance of support, warranty, indemnity, 168 | or other liability obligations and/or rights consistent with this 169 | License. However, in accepting such obligations, You may act only 170 | on Your own behalf and on Your sole responsibility, not on behalf 171 | of any other Contributor, and only if You agree to indemnify, 172 | defend, and hold each Contributor harmless for any liability 173 | incurred by, or claims asserted against, such Contributor by reason 174 | of your accepting any such warranty or additional liability. 175 | 176 | END OF TERMS AND CONDITIONS 177 | 178 | APPENDIX: How to apply the Apache License to your work. 179 | 180 | To apply the Apache License to your work, attach the following 181 | boilerplate notice, with the fields enclosed by brackets "{}" 182 | replaced with your own identifying information. (Don't include 183 | the brackets!) The text should be enclosed in the appropriate 184 | comment syntax for the file format. We also recommend that a 185 | file or class name and description of purpose be included on the 186 | same "printed page" as the copyright notice for easier 187 | identification within third-party archives. 188 | 189 | Copyright {yyyy} {name of copyright owner} 190 | 191 | Licensed under the Apache License, Version 2.0 (the "License"); 192 | you may not use this file except in compliance with the License. 193 | You may obtain a copy of the License at 194 | 195 | http://www.apache.org/licenses/LICENSE-2.0 196 | 197 | Unless required by applicable law or agreed to in writing, software 198 | distributed under the License is distributed on an "AS IS" BASIS, 199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 200 | See the License for the specific language governing permissions and 201 | limitations under the License. 202 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Explainer 2 | 3 | One-liner: A machine learning model explainer that works on top of Apache Spark! 4 | 5 | This project is inspired by the Python version hosted here: https://github.com/marcotcr/lime , which is based on the paper described here: https://arxiv.org/abs/1602.04938 6 | 7 | Today, ML/AI is used in mission-critical applications.
However, it is still difficult for a human being to trust a black-box ML algorithm. Wouldn’t it be cool if an algorithm could explain why it predicted a particular result and strengthen its case? Well, that is exactly what this project sets out to do. 8 | 9 | We at ZOHOCorp heavily use Apache Spark for our machine learning activities. Since the explainer should work very close to the actual ML engine, we rewrote it to make the best use of the Apache Spark APIs and to sit right next to that engine. We are using a forked version of this in production now. 10 | 11 | This project would be very useful for any ML practitioner using Apache Spark. Contributions welcome! 12 | -------------------------------------------------------------------------------- /examples/src/main/java/com/zoho/ml/explainer/client/Client.java: -------------------------------------------------------------------------------- 1 | package com.zoho.ml.explainer.client; 2 | 3 | import com.zoho.ml.explainer.Explainer; 4 | import com.zoho.ml.explainer.ExplainerParameters; 5 | import com.zoho.ml.explainer.ExplainerResults; 6 | 7 | 8 | 9 | public class Client { 10 | 11 | public static void main(String[] args) throws Exception { 12 | ExplainerParameters expParams = 13 | new ExplainerParameters().setDataPath("/data/Iris.csv").setColumnNameSpecified(true) 14 | .setNumClasses(3); 15 | Explainer exp = new Explainer(expParams); 16 | ExplainerResults expResult = exp.explain("5.4,3.4,1.7,0.2"); 17 | System.out.println(expResult.toString()); 18 | System.out.println(expResult.getPredictionProbabilities()); 19 | System.out.println(expResult.getFeatureWeights()); 20 | System.out.println(expResult.getFeatureValues()); 21 | } 22 | 23 | } 24 | -------------------------------------------------------------------------------- /examples/src/resources/Iris.csv: -------------------------------------------------------------------------------- 1 | Species,SepalLengthCm,SepalWidthCm,PetalLengthCm,PetalWidthCm 2 | 0,5.1,3.5,1.4,0.2 3 | 0,4.9,3,1.4,0.2 4 | 0,4.7,3.2,1.3,0.2 5 | 0,4.6,3.1,1.5,0.2 6 | 0,5,3.6,1.4,0.2 7 | 0,5.4,3.9,1.7,0.4 8 | 0,4.6,3.4,1.4,0.3 9 | 0,5,3.4,1.5,0.2 10 | 0,4.4,2.9,1.4,0.2 11 | 0,4.9,3.1,1.5,0.1 12 | 0,5.4,3.7,1.5,0.2 13 | 0,4.8,3.4,1.6,0.2 14 | 0,4.8,3,1.4,0.1 15 | 0,4.3,3,1.1,0.1 16 | 0,5.8,4,1.2,0.2 17 | 0,5.7,4.4,1.5,0.4 18 | 0,5.4,3.9,1.3,0.4 19 | 0,5.1,3.5,1.4,0.3 20 | 0,5.7,3.8,1.7,0.3 21 | 0,5.1,3.8,1.5,0.3 22 | 0,5.4,3.4,1.7,0.2 23 | 0,5.1,3.7,1.5,0.4 24 | 0,4.6,3.6,1,0.2 25 | 0,5.1,3.3,1.7,0.5 26 | 0,4.8,3.4,1.9,0.2 27 | 0,5,3,1.6,0.2 28 | 0,5,3.4,1.6,0.4 29 | 0,5.2,3.5,1.5,0.2 30 | 0,5.2,3.4,1.4,0.2 31 | 0,4.7,3.2,1.6,0.2 32 | 0,4.8,3.1,1.6,0.2 33 | 0,5.4,3.4,1.5,0.4 34 | 0,5.2,4.1,1.5,0.1 35 | 0,5.5,4.2,1.4,0.2 36 | 0,4.9,3.1,1.5,0.1 37 | 0,5,3.2,1.2,0.2 38 | 0,5.5,3.5,1.3,0.2 39 | 0,4.9,3.1,1.5,0.1 40 | 0,4.4,3,1.3,0.2 41 | 0,5.1,3.4,1.5,0.2 42 | 0,5,3.5,1.3,0.3 43 | 0,4.5,2.3,1.3,0.3 44 | 0,4.4,3.2,1.3,0.2 45 | 0,5,3.5,1.6,0.6 46 | 0,5.1,3.8,1.9,0.4 47 | 0,4.8,3,1.4,0.3 48 | 0,5.1,3.8,1.6,0.2 49 | 0,4.6,3.2,1.4,0.2 50 | 0,5.3,3.7,1.5,0.2 51 | 0,5,3.3,1.4,0.2 52 | 1,7,3.2,4.7,1.4 53 | 1,6.4,3.2,4.5,1.5 54 | 1,6.9,3.1,4.9,1.5 55 | 1,5.5,2.3,4,1.3 56 | 1,6.5,2.8,4.6,1.5 57 | 1,5.7,2.8,4.5,1.3 58 | 1,6.3,3.3,4.7,1.6 59 | 1,4.9,2.4,3.3,1 60 | 1,6.6,2.9,4.6,1.3 61 | 1,5.2,2.7,3.9,1.4 62 | 1,5,2,3.5,1 63 | 1,5.9,3,4.2,1.5 64 | 1,6,2.2,4,1 65 | 1,6.1,2.9,4.7,1.4 66 | 1,5.6,2.9,3.6,1.3 67 | 1,6.7,3.1,4.4,1.4 68 | 1,5.6,3,4.5,1.5 69 | 1,5.8,2.7,4.1,1 70 | 1,6.2,2.2,4.5,1.5 71
| 1,5.6,2.5,3.9,1.1 72 | 1,5.9,3.2,4.8,1.8 73 | 1,6.1,2.8,4,1.3 74 | 1,6.3,2.5,4.9,1.5 75 | 1,6.1,2.8,4.7,1.2 76 | 1,6.4,2.9,4.3,1.3 77 | 1,6.6,3,4.4,1.4 78 | 1,6.8,2.8,4.8,1.4 79 | 1,6.7,3,5,1.7 80 | 1,6,2.9,4.5,1.5 81 | 1,5.7,2.6,3.5,1 82 | 1,5.5,2.4,3.8,1.1 83 | 1,5.5,2.4,3.7,1 84 | 1,5.8,2.7,3.9,1.2 85 | 1,6,2.7,5.1,1.6 86 | 1,5.4,3,4.5,1.5 87 | 1,6,3.4,4.5,1.6 88 | 1,6.7,3.1,4.7,1.5 89 | 1,6.3,2.3,4.4,1.3 90 | 1,5.6,3,4.1,1.3 91 | 1,5.5,2.5,4,1.3 92 | 1,5.5,2.6,4.4,1.2 93 | 1,6.1,3,4.6,1.4 94 | 1,5.8,2.6,4,1.2 95 | 1,5,2.3,3.3,1 96 | 1,5.6,2.7,4.2,1.3 97 | 1,5.7,3,4.2,1.2 98 | 1,5.7,2.9,4.2,1.3 99 | 1,6.2,2.9,4.3,1.3 100 | 1,5.1,2.5,3,1.1 101 | 1,5.7,2.8,4.1,1.3 102 | 2,6.3,3.3,6,2.5 103 | 2,5.8,2.7,5.1,1.9 104 | 2,7.1,3,5.9,2.1 105 | 2,6.3,2.9,5.6,1.8 106 | 2,6.5,3,5.8,2.2 107 | 2,7.6,3,6.6,2.1 108 | 2,4.9,2.5,4.5,1.7 109 | 2,7.3,2.9,6.3,1.8 110 | 2,6.7,2.5,5.8,1.8 111 | 2,7.2,3.6,6.1,2.5 112 | 2,6.5,3.2,5.1,2 113 | 2,6.4,2.7,5.3,1.9 114 | 2,6.8,3,5.5,2.1 115 | 2,5.7,2.5,5,2 116 | 2,5.8,2.8,5.1,2.4 117 | 2,6.4,3.2,5.3,2.3 118 | 2,6.5,3,5.5,1.8 119 | 2,7.7,3.8,6.7,2.2 120 | 2,7.7,2.6,6.9,2.3 121 | 2,6,2.2,5,1.5 122 | 2,6.9,3.2,5.7,2.3 123 | 2,5.6,2.8,4.9,2 124 | 2,7.7,2.8,6.7,2 125 | 2,6.3,2.7,4.9,1.8 126 | 2,6.7,3.3,5.7,2.1 127 | 2,7.2,3.2,6,1.8 128 | 2,6.2,2.8,4.8,1.8 129 | 2,6.1,3,4.9,1.8 130 | 2,6.4,2.8,5.6,2.1 131 | 2,7.2,3,5.8,1.6 132 | 2,7.4,2.8,6.1,1.9 133 | 2,7.9,3.8,6.4,2 134 | 2,6.4,2.8,5.6,2.2 135 | 2,6.3,2.8,5.1,1.5 136 | 2,6.1,2.6,5.6,1.4 137 | 2,7.7,3,6.1,2.3 138 | 2,6.3,3.4,5.6,2.4 139 | 2,6.4,3.1,5.5,1.8 140 | 2,6,3,4.8,1.8 141 | 2,6.9,3.1,5.4,2.1 142 | 2,6.7,3.1,5.6,2.4 143 | 2,6.9,3.1,5.1,2.3 144 | 2,5.8,2.7,5.1,1.9 145 | 2,6.8,3.2,5.9,2.3 146 | 2,6.7,3.3,5.7,2.5 147 | 2,6.7,3,5.2,2.3 148 | 2,6.3,2.5,5,1.9 149 | 2,6.5,3,5.2,2 150 | 2,6.2,3.4,5.4,2.3 151 | 2,5.9,3,5.1,1.8 152 | -------------------------------------------------------------------------------- /pom.xml: -------------------------------------------------------------------------------- 1 | 3 | 4.0.0 4 | ZOHO-Labs 5 | Explainer 6 | 0.0.1-SNAPSHOT 7 | Explainer 8 | A machine learning model explainer that works on top of Apache Spark 9 | 10 | src 11 | install 12 | 13 | 14 | maven-compiler-plugin 15 | 3.1 16 | 17 | 1.8 18 | 1.8 19 | 20 | 21 | 22 | 23 | 24 | 25 | 26 | org.apache.spark 27 | spark-core_2.10 28 | 1.5.2 29 | 30 | 31 | org.apache.spark 32 | spark-sql_2.10 33 | 1.5.2 34 | 35 | 36 | 37 | org.apache.spark 38 | spark-mllib_2.10 39 | 1.5.2 40 | provided 41 | 42 | 43 | 44 | 45 | 46 | 47 | org.apache.spark 48 | spark-core_2.10 49 | 50 | 51 | org.apache.spark 52 | spark-sql_2.10 53 | 54 | 55 | org.apache.spark 56 | spark-mllib_2.10 57 | 58 | 59 | 60 | -------------------------------------------------------------------------------- /src/main/java/com/zoho/ml/explainer/.gitignore: -------------------------------------------------------------------------------- 1 | /Explainer.class 2 | /ExplainerParameters.class 3 | /ExplainerResults.class 4 | /ExplainerUtils.class 5 | /SparkUtils.class 6 | -------------------------------------------------------------------------------- /src/main/java/com/zoho/ml/explainer/Explainer.java: -------------------------------------------------------------------------------- 1 | package com.zoho.ml.explainer; 2 | 3 | import java.io.Serializable; 4 | import java.text.DecimalFormat; 5 | import java.util.ArrayList; 6 | import java.util.Arrays; 7 | import java.util.Collections; 8 | import java.util.HashMap; 9 | import java.util.LinkedHashMap; 10 | import java.util.List; 11 | import 
java.util.Map; 12 | import java.util.regex.Pattern; 13 | 14 | import org.apache.spark.api.java.JavaRDD; 15 | import org.apache.spark.api.java.function.Function; 16 | import org.apache.spark.mllib.linalg.Vector; 17 | import org.apache.spark.mllib.linalg.Vectors; 18 | import org.apache.spark.mllib.regression.LabeledPoint; 19 | import org.apache.spark.mllib.regression.RidgeRegressionModel; 20 | import org.apache.spark.mllib.regression.RidgeRegressionWithSGD; 21 | import org.apache.spark.mllib.tree.RandomForest; 22 | import org.apache.spark.mllib.tree.model.DecisionTreeModel; 23 | import org.apache.spark.mllib.tree.model.RandomForestModel; 24 | import org.apache.spark.sql.DataFrame; 25 | import org.apache.spark.sql.Row; 26 | 27 | public class Explainer implements Serializable { 28 | 29 | private static final long serialVersionUID = 1L; 30 | 31 | private final ExplainerParameters expParams; 32 | 33 | public Explainer(ExplainerParameters expParams) throws Exception { 34 | if (expParams == null) { 35 | throw new Exception("ExplainerParameters cannot be null. Specify the necessary parameters."); 36 | } 37 | this.expParams = expParams; 38 | } 39 | 40 | public ExplainerResults explain(String stringQuery) throws Exception { 41 | DataFrame inputData = 42 | SparkUtils.getInstance().getSQLContext().read().format("com.databricks.spark.csv") 43 | .option("inferSchema", "true").option("delimiter", this.expParams.getDelimiter()) 44 | .option("header", String.valueOf(this.expParams.isColumnNameSpecified())) 45 | .load(this.expParams.getDataPath()); 46 | 47 | final String impurity = this.expParams.getImpurity(); 48 | final int maxDepth = this.expParams.getMaxDepth(); 49 | final int maxBins = this.expParams.getMaxBins(); 50 | final int numTrees = this.expParams.getNumTrees(); 51 | final int seed = this.expParams.getSeed(); 52 | final int numClasses = this.expParams.getNumClasses(); 53 | final String featureSubsetStrategy = this.expParams.getFeatureSubsetStrategy(); 54 | final String delimiter = Pattern.quote(this.expParams.getDelimiter()); 55 | 56 | Map categoricalFeaturesInfo = new HashMap(); 57 | 58 | JavaRDD javardd = inputData.toJavaRDD().map(new Function() { 59 | private static final long serialVersionUID = 1L; 60 | 61 | public String call(Row row) { 62 | return row.mkString(expParams.getDelimiter()); 63 | } 64 | }); 65 | 66 | JavaRDD data = ExplainerUtils.convertRDDStringToLabeledPoint(javardd, delimiter); 67 | 68 | final RandomForestModel model = 69 | RandomForest.trainClassifier(data, numClasses, categoricalFeaturesInfo, numTrees, 70 | featureSubsetStrategy, impurity, maxDepth, maxBins, seed); 71 | 72 | String splitter[] = stringQuery.split(delimiter); 73 | double[] features = new double[splitter.length]; 74 | for (int i = 0; i < splitter.length; i++) { 75 | splitter[i] = splitter[i].trim(); 76 | if (splitter[i].isEmpty()) { 77 | throw new Exception(this.getClass() + " : Value missing in " + i 78 | + " column in the given query \"" + stringQuery + "\""); 79 | } 80 | features[i] = Double.parseDouble(splitter[i].trim()); 81 | } 82 | Vector featureVector = Vectors.dense(features); 83 | double labelToBeExplained = model.predict(featureVector); 84 | 85 | ExplainerResults expResult = explainerImpl(model, stringQuery, labelToBeExplained, inputData); 86 | return expResult; 87 | } 88 | 89 | private ExplainerResults explainerImpl(RandomForestModel model, String stringQuery, 90 | double labelToBeExplained, DataFrame inputData) { 91 | 92 | // Input Parameters 93 | // Number of samples 94 | final int numSamples = 
this.expParams.getNumSamples(); 95 | // Number of iterations used in ridge reg when 'discretize = false' 96 | final int numIterations = this.expParams.getNumberOfIterations(); 97 | // Step size used in ridge reg when 'discretize = false 98 | final double stepSize = this.expParams.getStepSize(); 99 | // Percentiles for discretization 100 | final int[] percentileValues = this.expParams.getPercentileValues(); 101 | final String delimiter = Pattern.quote(this.expParams.getDelimiter()); 102 | 103 | // Removing first column if column name is specified 104 | String[] actualColNames = inputData.columns(); 105 | StringBuilder builder; 106 | if (this.expParams.isColumnNameSpecified()) { 107 | for (int i = 0; i < actualColNames.length; i++) { 108 | builder = new StringBuilder("C").append(i); 109 | inputData = inputData.withColumnRenamed(actualColNames[i], builder.toString()); 110 | } 111 | } 112 | 113 | // Categorical columns 114 | List categoricalColumns = this.expParams.getCategoricalColumns(); 115 | List categoricalList = new ArrayList(); 116 | if (!(categoricalColumns == null || categoricalColumns.isEmpty())) { 117 | categoricalList.addAll(categoricalColumns); 118 | } 119 | Collections.sort(categoricalList); 120 | 121 | // Removing Label 122 | String[] colNames = inputData.columns(); 123 | String label = colNames[0]; 124 | DataFrame featuresDataFrame = inputData.drop(label); 125 | String[] columns = featuresDataFrame.columns(); 126 | int numColumns = columns.length; 127 | List> featuresList = ExplainerUtils.dataframeToList(featuresDataFrame); 128 | 129 | // Number of features needed for explanation 130 | int numFeatures = this.expParams.getNumberOfFeatures(); 131 | if (numFeatures <= 0 || numFeatures > numColumns) { 132 | numFeatures = numColumns; 133 | } 134 | 135 | // Preparing Query 136 | List> listQuery; 137 | DataFrame dataFrameQuery; 138 | 139 | List queryList = new ArrayList<>(); 140 | String[] querySplit = stringQuery.split(delimiter); 141 | for (String s : querySplit) { 142 | queryList.add(Double.valueOf(s.trim())); 143 | } 144 | listQuery = ExplainerUtils.getSubLists(queryList, 1); 145 | dataFrameQuery = ExplainerUtils.dataframeFromList(listQuery, this.expParams.getDelimiter()); 146 | 147 | // Continuous and categorical split 148 | DataFrame continuousFeatures = featuresDataFrame; 149 | DataFrame categoricalFeatures; 150 | String[] categoricalNames, continuousNames; 151 | int n = 0; 152 | categoricalNames = new String[categoricalList.size()]; 153 | for (Integer col : categoricalList) { 154 | categoricalNames[n] = inputData.columns()[col]; 155 | continuousFeatures = continuousFeatures.drop(categoricalNames[n++]); 156 | } 157 | continuousNames = continuousFeatures.columns(); 158 | categoricalFeatures = featuresDataFrame.selectExpr(categoricalNames); 159 | 160 | // Discretization 161 | List> discretizedContinuous = new ArrayList<>(); 162 | List> listDiscretized, listContinuous, queryDiscretized; 163 | listContinuous = ExplainerUtils.dataframeToList(continuousFeatures); 164 | if (this.expParams.isDiscretized() && (listContinuous.size() != 0)) { 165 | List> listPercentiles = 166 | ExplainerUtils.calculatePercentiles(listContinuous, percentileValues); 167 | discretizedContinuous = ExplainerUtils.discretize(listContinuous, listPercentiles); 168 | listDiscretized = 169 | ExplainerUtils.replaceContinuousSamples(featuresList, discretizedContinuous, 170 | categoricalList); 171 | List> queryContinuous = 172 | ExplainerUtils.constructListWithColumnNames(dataFrameQuery, continuousNames); 173 | 
queryDiscretized = 174 | ExplainerUtils.replaceContinuousSamples(listQuery, 175 | ExplainerUtils.discretize(queryContinuous, listPercentiles), categoricalList); 176 | } else { 177 | listDiscretized = ExplainerUtils.dataframeToList(categoricalFeatures); // listCategorical 178 | queryDiscretized = 179 | ExplainerUtils.constructListWithColumnNames(dataFrameQuery, categoricalNames); // queryCategorical 180 | } 181 | 182 | // Values and Probabilities 183 | List> listProbabilities = new ArrayList>(); 184 | List> listValues = new ArrayList>(); 185 | long numRows = inputData.count(); 186 | for (List list : listDiscretized) { 187 | List distinctValues = new ArrayList(); 188 | List probability = new ArrayList(); 189 | for (Double d : list) { 190 | if (!distinctValues.contains(d)) { 191 | distinctValues.add(d); 192 | probability.add(Double.valueOf(Collections.frequency(list, d)) / numRows); 193 | } 194 | } 195 | listValues.add(distinctValues); 196 | listProbabilities.add(probability); 197 | } 198 | 199 | // Sampling Using Probabilities for Categorical Columns 200 | List> listSamples = new ArrayList<>(); 201 | for (int i = 0; i < listProbabilities.size(); i++) { 202 | listSamples.add(ExplainerUtils.weightedSamplingWithReplacement(listValues.get(i), 203 | listProbabilities.get(i), numSamples)); 204 | } 205 | 206 | // Binary column used in calculations 207 | List> listBinary; 208 | List binary = new ArrayList<>(); 209 | for (int i = 0; i < listSamples.size(); i++) { 210 | for (int j = 0; j < numSamples; j++) { 211 | if ((j == 0) || (listSamples.get(i).get(j).equals(queryDiscretized.get(i).get(0)))) { 212 | binary.add(1.0); 213 | } else { 214 | binary.add(0.0); 215 | } 216 | } 217 | } 218 | listBinary = ExplainerUtils.getSubLists(binary, numSamples); 219 | 220 | // Finding mean and standard deviation 221 | List listMean = ExplainerUtils.findMean(featuresList); 222 | List listStdDev = ExplainerUtils.findStdDev(featuresList); 223 | 224 | // Random Sampling for Continuous Columns 225 | if (!(this.expParams.isDiscretized())) { 226 | List> continuousSamples = new ArrayList<>(); 227 | for (int i = 0; i < featuresList.size(); i++) { 228 | continuousSamples.add(ExplainerUtils.randomSamplingFromNormal(listMean.get(i), 229 | listStdDev.get(i), numSamples)); 230 | } 231 | listSamples = 232 | ExplainerUtils.replaceCategoricalSamples(continuousSamples, listSamples, categoricalList); 233 | listBinary = 234 | ExplainerUtils.replaceCategoricalSamples(continuousSamples, listBinary, categoricalList); 235 | } 236 | 237 | // Undiscretize if discretized 238 | List> undiscretizedSamples; 239 | if ((this.expParams.isDiscretized()) && (listContinuous.size() != 0)) { 240 | List> sampleLists = 241 | ExplainerUtils.constructListWithColumnNames( 242 | ExplainerUtils.dataframeFromList(listSamples, this.expParams.getDelimiter()), 243 | continuousNames); 244 | undiscretizedSamples = 245 | ExplainerUtils.replaceContinuousSamples(listSamples, ExplainerUtils.undiscretize( 246 | discretizedContinuous, listContinuous, sampleLists, percentileValues), 247 | categoricalList); 248 | } else { 249 | undiscretizedSamples = listSamples; 250 | } 251 | 252 | // Replacing the first row of the undiscretized samples with the query 253 | for (int i = 0; i < undiscretizedSamples.size(); i++) { 254 | undiscretizedSamples.get(i).set(0, listQuery.get(i).get(0)); 255 | } 256 | 257 | // Altering means and standard deviations for categorical columns 258 | if ((this.expParams.isDiscretized()) && (listContinuous.size() != 0)) { 259 | for (int i = 0; i < 
listMean.size(); i++) { 260 | listMean.set(i, 0.0); 261 | listStdDev.set(i, 1.0); 262 | } 263 | } else { 264 | int l = 0; 265 | for (int i = 0; i < listMean.size(); i++) { 266 | if (categoricalList.size() != 0) { 267 | if (i == (categoricalList.get(l) - 1)) { 268 | listMean.set(i, 0.0); 269 | listStdDev.set(i, 1.0); 270 | if (l < (categoricalList.size() - 1)) { 271 | l++; 272 | } 273 | } 274 | } 275 | } 276 | } 277 | 278 | // Scaling 279 | List> listScaled = new ArrayList<>(); 280 | List scaled; 281 | Double mean; 282 | Double std; 283 | int k = 0; 284 | for (List list : listBinary) { 285 | scaled = new ArrayList<>(); 286 | mean = listMean.get(k); 287 | std = listStdDev.get(k); 288 | for (int i = 0; i < list.size(); i++) { 289 | scaled.add((list.get(i) - mean) / std); 290 | } 291 | listScaled.add(scaled); 292 | k++; 293 | } 294 | 295 | // Euclidean distance calculation 296 | List euclideanDistance = new ArrayList<>(); 297 | DataFrame scaledDataFrame = 298 | ExplainerUtils.dataframeFromList(listScaled, this.expParams.getDelimiter()); 299 | Row[] binaryRows = scaledDataFrame.collect(); 300 | double sum; 301 | for (int i = 0; i < binaryRows.length; i++) { 302 | sum = 0; 303 | for (int j = 0; j < numColumns; j++) { 304 | sum += 305 | Math.pow((Double.valueOf(binaryRows[0].get(j).toString()) - Double 306 | .valueOf(binaryRows[i].get(j).toString())), 2); 307 | } 308 | euclideanDistance.add(Math.sqrt(sum)); 309 | } 310 | 311 | // Calculating Weights 312 | List listWeights = new ArrayList<>(); 313 | double kernelWidth = Math.sqrt(numColumns) * 0.75; 314 | for (int i = 0; i < euclideanDistance.size(); i++) { 315 | listWeights.add(Math.sqrt(Math.exp(-(Math.pow(euclideanDistance.get(i), 2)) 316 | / Math.pow((kernelWidth), 2)))); 317 | } 318 | 319 | // Class Names 320 | List classNames = new ArrayList<>(); 321 | Row[] distinct = inputData.select(label).distinct().collect(); 322 | for (Row name : distinct) { 323 | classNames.add(name.get(0).toString()); 324 | } 325 | 326 | // Feature Names 327 | List featureNames = new ArrayList<>(); 328 | if (this.expParams.isColumnNameSpecified()) { 329 | featureNames.addAll(Arrays.asList(actualColNames)); 330 | featureNames.remove(0); 331 | } else { 332 | for (int i = 0; i < numColumns; i++) { 333 | builder = new StringBuilder("F").append(i + 1); 334 | featureNames.add(builder.toString()); 335 | } 336 | } 337 | 338 | List names = new ArrayList<>(); 339 | if ((this.expParams.isDiscretized()) && (listContinuous.size() != 0)) { 340 | List> listPercentiles = 341 | ExplainerUtils.calculatePercentiles(listContinuous, percentileValues); 342 | String name; 343 | for (int i = 0; i < continuousNames.length; i++) { 344 | name = 345 | featureNames 346 | .get(Integer.valueOf(continuousNames[i].replaceAll("[^0-9]", "").trim()) - 1); 347 | builder = new StringBuilder(name).append("<=").append(listPercentiles.get(i).get(0)); 348 | names.add(builder.toString()); 349 | for (int j = 0; j < (listPercentiles.get(i).size() - 1); j++) { 350 | builder = 351 | new StringBuilder().append(listPercentiles.get(i).get(j)).append("<").append(name) 352 | .append("<=").append(listPercentiles.get(i).get(j + 1)); 353 | names.add(builder.toString()); 354 | } 355 | builder = 356 | new StringBuilder(name).append(">").append( 357 | listPercentiles.get(i).get((listPercentiles.get(i).size()) - 1)); 358 | names.add(builder.toString()); 359 | } 360 | } else { 361 | for (int i = 0; i < continuousNames.length; i++) { 362 | for (int j = 0; j < (percentileValues.length + 1); j++) { 363 | 
names.add(featureNames.get(Integer.valueOf(continuousNames[i].replaceAll("[^0-9]", "") 364 | .trim()) - 1)); 365 | } 366 | } 367 | } 368 | 369 | List> listNames = new ArrayList>(); 370 | for (int i = 0; i < names.size(); i += (percentileValues.length + 1)) { 371 | listNames.add(new ArrayList(names.subList(i, 372 | Math.min(i + (percentileValues.length + 1), names.size())))); 373 | } 374 | 375 | // Feature Names Categorical 376 | List features = new ArrayList<>(featureNames); 377 | int y = 0; 378 | for (int i = 0; i < numColumns; i++) { 379 | if (categoricalList.size() != 0) { 380 | if (i == (categoricalList.get(y) - 1)) { 381 | builder = 382 | new StringBuilder(features.get(i)).append("=").append( 383 | listQuery.get(i).get(0).intValue()); 384 | features.set(i, builder.toString()); 385 | if (y < (categoricalList.size() - 1)) { 386 | y++; 387 | } 388 | } 389 | } 390 | } 391 | 392 | // Feature Names discretized 393 | List transformedFeatureNames = new ArrayList<>(); 394 | List featureValues = new ArrayList<>(); 395 | int u = 0; 396 | int v = 0; 397 | for (int i = 0; i < numColumns; i++) { 398 | if (categoricalList.size() != 0) { 399 | if (i == (categoricalList.get(v) - 1)) { 400 | transformedFeatureNames.add(features.get(i)); 401 | featureValues.add("True"); 402 | if (v < ((categoricalList.size()) - 1)) { 403 | v++; 404 | } 405 | } else { 406 | if ((this.expParams.isDiscretized()) && (listContinuous.size() != 0)) { 407 | transformedFeatureNames.add(listNames.get(u).get( 408 | queryDiscretized.get(i).get(0).intValue())); 409 | } else { 410 | transformedFeatureNames.add(listNames.get(u).get(0)); 411 | } 412 | featureValues.add(listQuery.get(i).get(0).toString()); 413 | if (u < (listNames.size() - 1)) { 414 | u++; 415 | } 416 | } 417 | } else { 418 | if (this.expParams.isDiscretized() && (listContinuous.size() != 0)) { 419 | transformedFeatureNames.add(listNames.get(i).get( 420 | queryDiscretized.get(i).get(0).intValue())); 421 | } else { 422 | transformedFeatureNames.add(listNames.get(i).get(0)); 423 | } 424 | featureValues.add(listQuery.get(i).get(0).toString()); 425 | } 426 | } 427 | 428 | // Class Probabilities 429 | List> predictProbability = new ArrayList<>(); 430 | List inverselist = 431 | ExplainerUtils.getAppendedList(undiscretizedSamples, this.expParams.getDelimiter()); 432 | List queryProbability = new ArrayList<>(); 433 | List probabilityValues; 434 | 435 | Map> probabilityMap = 436 | new LinkedHashMap>(); 437 | Map probMap; 438 | DecimalFormat twoDForm = new DecimalFormat("#.######"); 439 | org.apache.spark.mllib.linalg.Vector featureVector; 440 | DecisionTreeModel[] trees; 441 | double[] featuresArr; 442 | String[] splitter; 443 | String prediction; 444 | for (int i = 0; i < inverselist.size(); i++) { 445 | probMap = new LinkedHashMap<>(); 446 | splitter = inverselist.get(i).split(delimiter); 447 | featuresArr = new double[splitter.length]; 448 | for (int j = 0; j < splitter.length; j++) { 449 | featuresArr[j] = Double.parseDouble(splitter[j].trim()); 450 | } 451 | featureVector = Vectors.dense(featuresArr); 452 | trees = model.trees(); 453 | for (DecisionTreeModel tree : trees) { 454 | prediction = String.valueOf(tree.predict(featureVector)); 455 | if (probMap.containsKey(prediction)) { 456 | String value = 457 | twoDForm.format(Double.parseDouble(probMap.get(prediction)) + (1.00f / trees.length)); 458 | probMap.put(String.valueOf(prediction), value); 459 | } else { 460 | probMap.put(String.valueOf(prediction), 461 | String.valueOf(Double.valueOf(twoDForm.format(1.00f / 
trees.length)))); 462 | } 463 | for (int t = 0; t < classNames.size(); t++) { 464 | if (!probMap.keySet().contains(String.valueOf(Double.valueOf(classNames.get(t))))) { 465 | probMap.put(String.valueOf(Double.valueOf(classNames.get(t))), "0"); 466 | } 467 | } 468 | } 469 | probabilityMap.put(String.valueOf(i), probMap); 470 | } 471 | 472 | for (int t = 0; t < classNames.size(); t++) { 473 | probabilityValues = new ArrayList<>(); 474 | for (String s : probabilityMap.keySet()) { 475 | probabilityValues.add(Double.valueOf(probabilityMap.get(s).get( 476 | String.valueOf(Double.valueOf(classNames.get(t)))))); 477 | } 478 | queryProbability.add(probabilityValues.get(0)); 479 | predictProbability.add(probabilityValues); 480 | } 481 | 482 | // Weighted data and label 483 | List> wtdLabels = 484 | ExplainerUtils.dataWithSampleWeights(listWeights, predictProbability, 485 | this.expParams.getDelimiter()); 486 | List> wtdData = 487 | ExplainerUtils 488 | .dataWithSampleWeights(listWeights, listBinary, this.expParams.getDelimiter()); 489 | List appendedData = new ArrayList<>(); 490 | List labelNeeded = new ArrayList<>(); 491 | List appendedDataPoints; 492 | String str1; 493 | for (int i = 0; i < (wtdData.size()); i++) { 494 | appendedDataPoints = new ArrayList<>(); 495 | for (int j = 0; j < (wtdData.get(0).size() + 1); j++) { 496 | if (j == 0) { 497 | appendedDataPoints.add(wtdLabels.get(i).get((int) (labelToBeExplained))); 498 | labelNeeded.add(wtdLabels.get(i).get((int) (labelToBeExplained))); 499 | } else { 500 | appendedDataPoints.add(wtdData.get(i).get(j - 1)); 501 | } 502 | } 503 | str1 = appendedDataPoints.toString(); 504 | appendedData.add(str1.substring(str1.indexOf("[") + 1, str1.lastIndexOf("]"))); 505 | } 506 | 507 | List> appendedColumnlists = new ArrayList<>(); 508 | List appendedColumnDataPoints; 509 | String str2 = appendedData.toString(); 510 | String[] strArray; 511 | strArray = str2.substring(str2.indexOf("[") + 1, str2.lastIndexOf("]")).split(","); 512 | for (int i = 0; i < (numColumns + 1); i++) { 513 | appendedColumnDataPoints = new ArrayList<>(); 514 | for (int j = i; j < strArray.length; j = (j + numColumns + 1)) { 515 | appendedColumnDataPoints.add(Double.valueOf(strArray[j])); 516 | } 517 | appendedColumnlists.add(appendedColumnDataPoints); 518 | } 519 | 520 | List> appendedDataWithoutlabel = new ArrayList<>(appendedColumnlists); 521 | appendedDataWithoutlabel.remove(0); 522 | 523 | // Weights of each feature using ridge 524 | RidgeRegressionWithSGD obj; 525 | final RidgeRegressionModel ridgemodel; 526 | final Double[] weights; 527 | double[] featureWeights; 528 | Integer[] featureNos; 529 | if (this.expParams.isDiscretized()) { 530 | obj = new RidgeRegressionWithSGD(); 531 | } else { 532 | obj = new RidgeRegressionWithSGD(stepSize, numIterations, 0.1, 1.0); 533 | } 534 | obj.setIntercept(true); 535 | JavaRDD data = SparkUtils.getInstance().getJavaSparkContext().parallelize(appendedData); 536 | ridgemodel = obj.run(ExplainerUtils.convertRDDStringToLabeledPoint(data, ",").rdd()); 537 | featureWeights = ridgemodel.weights().toArray(); 538 | weights = new Double[featureWeights.length]; 539 | for (int i = 0; i < featureWeights.length; i++) { 540 | weights[i] = featureWeights[i]; 541 | } 542 | featureNos = new Integer[featureWeights.length]; 543 | for (int i = 0; i < featureNos.length; i++) { 544 | featureNos[i] = i; 545 | } 546 | Arrays.sort(featureNos, (wts1, wts2) -> weights[wts2].compareTo(weights[wts1])); 547 | 548 | // Prediction probabilities 549 | Map classMap = new 
LinkedHashMap(); 550 | for (int i = 0; i < classNames.size(); i++) { 551 | builder = new StringBuilder("'").append(classNames.get(i)).append("'"); 552 | classMap.put(builder.toString(), String.valueOf(queryProbability.get(i))); 553 | } 554 | 555 | // Feature probabilities 556 | Map featureMap = new LinkedHashMap(); 557 | for (int i = 0; i < numFeatures; i++) { 558 | builder = 559 | new StringBuilder("'").append(transformedFeatureNames.get(featureNos[i])).append("'"); 560 | featureMap.put(builder.toString(), String.valueOf(featureWeights[featureNos[i]])); 561 | } 562 | 563 | // Feature values 564 | Map featureQuery = new LinkedHashMap(); 565 | for (int i = 0; i < numFeatures; i++) { 566 | builder = new StringBuilder("'").append(features.get(featureNos[i])).append("'"); 567 | featureQuery.put(builder.toString(), featureValues.get(featureNos[i])); 568 | } 569 | 570 | ExplainerResults expResult = new ExplainerResults(); 571 | expResult.setPredictionProbabilities(classMap); 572 | expResult.setFeatureWeights(featureMap); 573 | expResult.setFeatureValues(featureQuery); 574 | return expResult; 575 | } 576 | 577 | } 578 | -------------------------------------------------------------------------------- /src/main/java/com/zoho/ml/explainer/ExplainerParameters.java: -------------------------------------------------------------------------------- 1 | package com.zoho.ml.explainer; 2 | 3 | import java.io.Serializable; 4 | import java.util.Arrays; 5 | import java.util.List; 6 | 7 | public class ExplainerParameters implements Serializable { 8 | 9 | private static final long serialVersionUID = 1L; 10 | private int numSamples = 5000; 11 | private int numFeatures = 0; 12 | private int numIterations = 10; 13 | private double stepSize = 0.001; 14 | private String delimiter = ","; 15 | private String dataPath = null; 16 | private boolean discretize = true; 17 | private boolean columnNameSpecified = false; 18 | private int[] percentileValues = {25, 50, 75}; 19 | private List categoricalColumns = null; 20 | 21 | // Classifier Parameters 22 | private String impurity = "gini"; 23 | private int maxDepth = 5; 24 | private int maxBins = 32; 25 | private int numClasses = 2; 26 | private int numTrees = 10; 27 | private int seed = 12345; 28 | private int minPartitions = 4; 29 | private String featureSubsetStrategy = "auto"; 30 | 31 | public boolean isDiscretized() { 32 | return discretize; 33 | } 34 | 35 | public ExplainerParameters setDiscretize(boolean discretize) { 36 | this.discretize = discretize; 37 | return this; 38 | } 39 | 40 | public int getNumSamples() { 41 | return this.numSamples; 42 | } 43 | 44 | public ExplainerParameters setNumSamples(int numSamples) { 45 | this.numSamples = numSamples; 46 | return this; 47 | } 48 | 49 | public int[] getPercentileValues() { 50 | return this.percentileValues; 51 | } 52 | 53 | public ExplainerParameters setPercentileValues(int[] percentileValues) { 54 | this.percentileValues = percentileValues; 55 | return this; 56 | } 57 | 58 | public ExplainerParameters setNumberOfFeatures(int numFeatures) throws Exception { 59 | if (numFeatures < 0) { 60 | throw new Exception(this.getClass() + " : Number of Features cannot be Negative"); 61 | } 62 | this.numFeatures = numFeatures; 63 | return this; 64 | } 65 | 66 | public int getNumberOfFeatures() { 67 | return this.numFeatures; 68 | } 69 | 70 | public ExplainerParameters setNumberOfIterations(int numIterations) throws Exception { 71 | if (numIterations < 0) { 72 | throw new Exception(this.getClass() + " : Number of Iterations cannot be 
Negative"); 73 | } 74 | this.numIterations = numIterations; 75 | return this; 76 | } 77 | 78 | public int getNumberOfIterations() { 79 | return this.numIterations; 80 | } 81 | 82 | public ExplainerParameters setStepSize(double stepSize) throws Exception { 83 | if (stepSize < 0) { 84 | throw new Exception(this.getClass() + " : Step Size cannot be Negative"); 85 | } 86 | this.stepSize = stepSize; 87 | return this; 88 | } 89 | 90 | public double getStepSize() { 91 | return this.stepSize; 92 | } 93 | 94 | public ExplainerParameters setDelimiter(String delimiter) { 95 | this.delimiter = delimiter; 96 | return this; 97 | } 98 | 99 | public String getDelimiter() { 100 | return this.delimiter; 101 | } 102 | 103 | public ExplainerParameters setCategoricalColumns(List columns) { 104 | this.categoricalColumns = columns; 105 | return this; 106 | } 107 | 108 | public List getCategoricalColumns() { 109 | return this.categoricalColumns; 110 | } 111 | 112 | public ExplainerParameters setColumnNameSpecified(boolean columnNameSpecified) { 113 | this.columnNameSpecified = columnNameSpecified; 114 | return this; 115 | } 116 | 117 | public boolean isColumnNameSpecified() { 118 | return this.columnNameSpecified; 119 | } 120 | 121 | public String getDataPath() { 122 | return this.dataPath; 123 | } 124 | 125 | public ExplainerParameters setDataPath(String dataPath) throws Exception { 126 | dataPath = dataPath != null ? dataPath.trim() : ""; 127 | if (dataPath.equals("")) { 128 | throw new Exception(this.getClass() + " : DataPath cannot be null."); 129 | } 130 | this.dataPath = dataPath; 131 | return this; 132 | } 133 | 134 | public int getNumTrees() { 135 | return this.numTrees; 136 | } 137 | 138 | public ExplainerParameters setNumTrees(int numTrees) throws Exception { 139 | if (numTrees < 0) { 140 | throw new Exception(this.getClass() 141 | + " : Trees count is less than 0. Count should be greater than 0."); 142 | } else { 143 | this.numTrees = numTrees; 144 | } 145 | return this; 146 | } 147 | 148 | public int getSeed() { 149 | return this.seed; 150 | } 151 | 152 | public ExplainerParameters setSeed(int seed) throws Exception { 153 | this.seed = seed; 154 | return this; 155 | } 156 | 157 | public String getFeatureSubsetStrategy() { 158 | return this.featureSubsetStrategy; 159 | } 160 | 161 | public ExplainerParameters setFeatureSubsetStrategy(String featureSubsetStrategy) 162 | throws Exception { 163 | List supportedStrategies = Arrays.asList("auto", "all", "sqrt", "log2", "onethird"); 164 | if ((featureSubsetStrategy == null) 165 | || !supportedStrategies.contains(featureSubsetStrategy.toLowerCase())) { 166 | throw new Exception( 167 | this.getClass() 168 | + " : Feature Subset Strategy is either null or not supported. Supported strategies are all/sqrt/log2/onethird/auto(recommended)."); 169 | } else { 170 | this.featureSubsetStrategy = featureSubsetStrategy.toLowerCase(); 171 | } 172 | return this; 173 | } 174 | 175 | public String getImpurity() { 176 | return this.impurity; 177 | } 178 | 179 | public ExplainerParameters setImpurity(String impurity) throws Exception { 180 | List supportedImpurities = Arrays.asList("entropy", "gini"); 181 | if (impurity == null || !supportedImpurities.contains(impurity.toLowerCase())) { 182 | throw new Exception( 183 | this.getClass() 184 | + " : Impurity is either null or not supported. 
Supported Impurities gini(Recommended) or entropy."); 185 | } else { 186 | this.impurity = impurity.toLowerCase(); 187 | } 188 | return this; 189 | } 190 | 191 | public int getMaxDepth() { 192 | return this.maxDepth; 193 | } 194 | 195 | public ExplainerParameters setMaxDepth(int maxDepth) throws Exception { 196 | if (maxDepth < 0 || maxDepth > 30) { 197 | throw new Exception(this.getClass() + " : Depth is not within the range. Depth range : 0-30."); 198 | } else { 199 | this.maxDepth = maxDepth; 200 | } 201 | return this; 202 | } 203 | 204 | public int getMaxBins() { 205 | return this.maxBins; 206 | } 207 | 208 | public ExplainerParameters setMaxBins(int maxBins) throws Exception { 209 | if (maxBins < 2) { 210 | throw new Exception(this.getClass() 211 | + " : Bins value is less than 2. The value should be >= 2."); 212 | } else { 213 | this.maxBins = maxBins; 214 | } 215 | return this; 216 | } 217 | 218 | public int getNumClasses() { 219 | return this.numClasses; 220 | } 221 | 222 | public ExplainerParameters setNumClasses(int numClasses) throws Exception { 223 | if (numClasses < 2) { 224 | throw new Exception(this.getClass() 225 | + " : Number of classes is less than 2. Value should be >= 2."); 226 | } else { 227 | this.numClasses = numClasses; 228 | } 229 | return this; 230 | } 231 | 232 | public int getMinPartitions() { 233 | 234 | return this.minPartitions; 235 | } 236 | 237 | public ExplainerParameters setMinPartitions(int minPartitions) throws Exception { 238 | if (minPartitions < 4) { 239 | throw new Exception(this.getClass() 240 | + " : Minimum number of Partitions is less than 4. Value should be >= 4"); 241 | } else { 242 | this.minPartitions = minPartitions; 243 | } 244 | return this; 245 | } 246 | 247 | } 248 | -------------------------------------------------------------------------------- /src/main/java/com/zoho/ml/explainer/ExplainerResults.java: -------------------------------------------------------------------------------- 1 | package com.zoho.ml.explainer; 2 | 3 | import java.util.Map; 4 | 5 | public class ExplainerResults { 6 | 7 | private Map predictionProbabilities; 8 | private Map featureWeights; 9 | private Map featureValues; 10 | 11 | public Map getPredictionProbabilities() { 12 | return predictionProbabilities; 13 | } 14 | 15 | public void setPredictionProbabilities(Map predictionProbabilities) { 16 | this.predictionProbabilities = predictionProbabilities; 17 | } 18 | 19 | public Map getFeatureWeights() { 20 | return featureWeights; 21 | } 22 | 23 | public void setFeatureWeights(Map featureWeights) { 24 | this.featureWeights = featureWeights; 25 | } 26 | 27 | public Map getFeatureValues() { 28 | return featureValues; 29 | } 30 | 31 | public void setFeatureValues(Map featureValues) { 32 | this.featureValues = featureValues; 33 | } 34 | 35 | @Override 36 | public String toString() { 37 | StringBuilder builder = 38 | new StringBuilder("PREDICTION PROBABILITIES" + " : " + predictionProbabilities + "\n"); 39 | builder.append("FEATURE WEIGHTS" + " : " + featureWeights + "\n"); 40 | builder.append("FEATURE VALUES" + " : " + featureValues); 41 | return builder.toString(); 42 | } 43 | } 44 | -------------------------------------------------------------------------------- /src/main/java/com/zoho/ml/explainer/ExplainerUtils.java: -------------------------------------------------------------------------------- 1 | package com.zoho.ml.explainer; 2 | 3 | import java.security.SecureRandom; 4 | import java.util.ArrayList; 5 | import java.util.Arrays; 6 | import 
java.util.Collections; 7 | import java.util.LinkedList; 8 | import java.util.List; 9 | import java.util.regex.Pattern; 10 | 11 | import org.apache.commons.math3.stat.StatUtils; 12 | import org.apache.commons.math3.stat.descriptive.rank.Percentile; 13 | import org.apache.commons.math3.util.FastMath; 14 | import org.apache.spark.api.java.JavaRDD; 15 | import org.apache.spark.api.java.function.Function; 16 | import org.apache.spark.mllib.linalg.Vectors; 17 | import org.apache.spark.mllib.regression.LabeledPoint; 18 | import org.apache.spark.sql.DataFrame; 19 | import org.apache.spark.sql.Row; 20 | import org.apache.spark.sql.RowFactory; 21 | import org.apache.spark.sql.types.DataTypes; 22 | import org.apache.spark.sql.types.Metadata; 23 | import org.apache.spark.sql.types.StructField; 24 | import org.apache.spark.sql.types.StructType; 25 | 26 | public class ExplainerUtils { 27 | 28 | public static List> discretize(List> list, 29 | List> percentileList) { 30 | 31 | List discrete = new ArrayList<>(); 32 | int numRows = list.get(0).size(); 33 | int in = 0; 34 | double[] a; 35 | for (int j = 0; j < list.size(); j++) { 36 | a = ExplainerUtils.listToDoubleArray(percentileList.get(j)); 37 | for (int i = 0; i < numRows; i++) { 38 | in = Arrays.binarySearch(a, list.get(j).get(i)); 39 | if (in < 0) { 40 | in = (-in) - 1; 41 | } 42 | discrete.add(Double.valueOf(in)); 43 | } 44 | } 45 | return ExplainerUtils.getSubLists(discrete, numRows); 46 | 47 | } 48 | 49 | public static List> undiscretize(List> discretizedContinuous, 50 | List> continuousList, List> sampleList, int[] percentileValues) { 51 | 52 | List selection, min, max, val; 53 | double[] array; 54 | SecureRandom random; 55 | int s; 56 | 57 | List mean = new ArrayList<>(); 58 | List std = new ArrayList<>(); 59 | for (int q = 0; q < discretizedContinuous.size(); q++) { 60 | for (int i = 0; i < (percentileValues.length + 1); i++) { 61 | selection = new ArrayList<>(); 62 | for (int j = 0; j < discretizedContinuous.get(0).size(); j++) { 63 | if (i == discretizedContinuous.get(q).get(j)) { 64 | selection.add(continuousList.get(q).get(j)); 65 | } 66 | } 67 | if (selection.size() != 0) { 68 | array = listToDoubleArray(selection); 69 | mean.add(StatUtils.mean(array)); 70 | std.add(Math.sqrt(StatUtils.variance(array))); 71 | } else { 72 | mean.add(0.0); 73 | std.add(0.0); 74 | } 75 | } 76 | } 77 | 78 | List> listPercentiles = calculatePercentiles(continuousList, percentileValues); 79 | List> listBoundaries = calculateBoundaries(continuousList); 80 | List> minLists = new ArrayList<>(); 81 | List> maxLists = new ArrayList<>(); 82 | for (int i = 0; i < continuousList.size(); i++) { 83 | min = new ArrayList<>(); 84 | min.add(listBoundaries.get(i).get(0)); 85 | for (int j = 0; j < percentileValues.length; j++) { 86 | min.add(listPercentiles.get(i).get(j)); 87 | } 88 | minLists.add(min); 89 | } 90 | for (int i = 0; i < continuousList.size(); i++) { 91 | max = new ArrayList<>(); 92 | for (int j = 0; j < percentileValues.length; j++) { 93 | max.add(listPercentiles.get(i).get(j)); 94 | } 95 | max.add(listBoundaries.get(i).get(1)); 96 | maxLists.add(max); 97 | } 98 | 99 | List> meanLists = getSubLists(mean, (percentileValues.length + 1)); 100 | List> stdLists = getSubLists(std, (percentileValues.length + 1)); 101 | List undiscretized = new ArrayList<>(); 102 | for (int i = 0; i < sampleList.size(); i++) { 103 | for (int j = 0; j < sampleList.get(0).size(); j++) { 104 | s = sampleList.get(i).get(j).intValue(); 105 | random = new SecureRandom(); 106 | val = new 
ArrayList<>(); 107 | val.add(minLists.get(i).get(s)); 108 | val.add(Math.round((random.nextGaussian() * stdLists.get(i).get(s)) 109 | + meanLists.get(i).get(s) * 100.0) / 100.0); 110 | val.add(maxLists.get(i).get(s)); 111 | undiscretized.add(Collections.max(val)); 112 | } 113 | } 114 | return getSubLists(undiscretized, sampleList.get(0).size()); 115 | 116 | } 117 | 118 | public static List> dataWithSampleWeights(List weights, 119 | List> weightLists, String delimiter) { 120 | 121 | List weightedRows, wtdDatapoints; 122 | double weightedDatapoints; 123 | int i; 124 | Row[] rows = dataframeFromList(weightLists, delimiter).collect(); 125 | 126 | List avgList = new ArrayList<>(); 127 | for (i = 0; i < weightLists.size(); i++) { 128 | avgList.add(0.0); 129 | } 130 | 131 | List> weightedData = new ArrayList<>(); 132 | for (i = 0; i < weights.size(); i++) { 133 | weightedRows = new ArrayList<>(); 134 | for (int j = 0; j < weightLists.size(); j++) { 135 | weightedDatapoints = Double.valueOf(rows[i].get(j).toString()) * (weights.get(i)); 136 | weightedRows.add(weightedDatapoints); 137 | avgList.set(j, (avgList.get(j) + weightedDatapoints)); 138 | } 139 | weightedData.add(weightedRows); 140 | } 141 | 142 | double sumOfWeights = 0.0; 143 | for (i = 0; i < weights.size(); i++) { 144 | sumOfWeights = sumOfWeights + weights.get(i); 145 | } 146 | for (i = 0; i < avgList.size(); i++) { 147 | avgList.set(i, avgList.get(i) / sumOfWeights); 148 | } 149 | 150 | List sqrtWts = new ArrayList<>(); 151 | for (i = 0; i < weights.size(); i++) { 152 | sqrtWts.add(Math.sqrt(weights.get(i))); 153 | } 154 | 155 | List> wtdData = new ArrayList<>(); 156 | for (i = 0; i < weightLists.get(0).size(); i++) { 157 | wtdDatapoints = new ArrayList<>(); 158 | for (int j = 0; j < weightLists.size(); j++) { 159 | wtdDatapoints.add((weightLists.get(j).get(i) - avgList.get(j)) * sqrtWts.get(i)); 160 | } 161 | wtdData.add(wtdDatapoints); 162 | } 163 | return wtdData; 164 | 165 | } 166 | 167 | public static List> replaceContinuousSamples(List> list, 168 | List> newList, List categoricalFeatures) { 169 | 170 | int l = 0; 171 | int k = 0; 172 | 173 | List> replacedList = new ArrayList<>(); 174 | for (int i = 0; i < list.size(); i++) { 175 | if (categoricalFeatures.size() == 0) { 176 | replacedList.add(newList.get(i)); 177 | } else { 178 | if (i == (categoricalFeatures.get(l) - 1)) { 179 | replacedList.add(list.get(i)); 180 | if (l < (categoricalFeatures.size() - 1)) { 181 | l++; 182 | } 183 | } else { 184 | replacedList.add(newList.get(k)); 185 | k++; 186 | } 187 | } 188 | } 189 | return replacedList; 190 | 191 | } 192 | 193 | public static List> replaceCategoricalSamples(List> list, 194 | List> newList, List categoricalFeatures) { 195 | 196 | int l = 0; 197 | 198 | List> replacedList = new ArrayList<>(); 199 | for (int i = 0; i < list.size(); i++) { 200 | if (categoricalFeatures.size() == 0) { 201 | replacedList.add(list.get(i)); 202 | } else { 203 | if (i == (categoricalFeatures.get(l) - 1)) { 204 | replacedList.add(newList.get(l)); 205 | if (l < (categoricalFeatures.size() - 1)) { 206 | l++; 207 | } 208 | } else { 209 | replacedList.add(list.get(i)); 210 | } 211 | } 212 | } 213 | return replacedList; 214 | 215 | } 216 | 217 | public static List findMean(List> list) { 218 | 219 | List mean = new ArrayList<>(); 220 | for (List l : list) { 221 | mean.add(StatUtils.mean(listToDoubleArray(l))); 222 | } 223 | return mean; 224 | 225 | } 226 | 227 | public static List findStdDev(List> list) { 228 | 229 | List stdDev = new ArrayList<>(); 230 | 
230 |     for (List<Double> l : list) {
231 |       stdDev.add(FastMath.sqrt(StatUtils.variance(listToDoubleArray(l))));
232 |     }
233 |     return stdDev;
234 | 
235 |   }
236 | 
237 |   public static List<List<Double>> calculatePercentiles(List<List<Double>> list, int[] percentiles) {
238 | 
239 |     double value = 0.0;
240 | 
241 |     List<Double> percentile = new ArrayList<>();
242 |     for (List<Double> l : list) {
243 |       for (int p : percentiles) {
244 |         value = new Percentile().evaluate(listToDoubleArray(l), p);
245 |         value = Math.round(value * 100.0) / 100.0;
246 |         percentile.add(value);
247 |       }
248 |     }
249 |     return getSubLists(percentile, percentiles.length);
250 | 
251 |   }
252 | 
253 |   public static List<List<Double>> calculateBoundaries(List<List<Double>> list) {
254 | 
255 |     List<Double> boundaries = new ArrayList<>();
256 |     for (List<Double> l : list) {
257 |       boundaries.add(Math.round(Collections.min(l) * 100.0) / 100.0);
258 |       boundaries.add(Math.round(Collections.max(l) * 100.0) / 100.0);
259 |     }
260 |     return getSubLists(boundaries, 2);
261 | 
262 |   }
263 | 
264 |   public static List<Double> randomSamplingFromNormal(double meanValue, double stdValue,
265 |       int numberOfSamples) {
266 | 
267 |     SecureRandom random = new SecureRandom();
268 |     List<Double> randomSamples = new ArrayList<>();
269 |     for (int j = 0; j < numberOfSamples; j++) {
270 |       randomSamples.add((random.nextGaussian() * stdValue) + meanValue);
271 |     }
272 |     return randomSamples;
273 | 
274 |   }
275 | 
276 |   public static List<Double> weightedSamplingWithReplacement(List<Double> values,
277 |       List<Double> weights, int numberOfSamples) {
278 | 
279 |     int minIndex, maxIndex, sampleIndex;
280 |     int size = values.size();
281 |     // Calculating the next power of two
282 |     int power = (int) Math.ceil(Math.log((double) size) / Math.log(2));
283 |     int numPartitions = (int) Math.pow(2, power);
284 |     double minValue, maxValue, remainingCapacity, sample, w, elementWeight;
285 |     double capacity = 1.00 / numPartitions;
286 |     List<Double> partition, samplePartition;
287 |     SecureRandom random = new SecureRandom();
288 | 
289 |     List<List<Double>> listPartitions = new ArrayList<>(numPartitions); // [primary idx, alternate idx (-1 if none), P(primary)]
290 |     for (int i = 0; i < numPartitions; i++) {
291 |       partition = new ArrayList<>();
292 |       minValue = Collections.min(weights);
293 |       minIndex = weights.indexOf(minValue);
294 |       partition.add((double) minIndex);
295 |       if (minValue >= capacity) {
296 |         weights.set(minIndex, (minValue - capacity));
297 |         partition.add(-1.00);
298 |         partition.add(1.00);
299 |       } else {
300 |         remainingCapacity = capacity - minValue;
301 |         maxValue = Collections.max(weights);
302 |         maxIndex = weights.indexOf(maxValue);
303 |         partition.add((double) maxIndex);
304 |         partition.add(minValue / capacity);
305 |         weights.set(minIndex, 0.0);
306 |         weights.set(maxIndex, (maxValue - remainingCapacity));
307 |       }
308 |       listPartitions.add(partition);
309 |     }
310 | 
311 |     List<Double> samples = new ArrayList<>();
312 |     for (int i = 0; i < numberOfSamples; i++) {
313 |       sample = random.nextDouble() * numPartitions;
314 |       sampleIndex = (int) sample;
315 |       w = sample - sampleIndex;
316 |       samplePartition = listPartitions.get(sampleIndex);
317 |       elementWeight = samplePartition.get(2);
318 |       if (w <= elementWeight) {
319 |         samples.add(values.get(samplePartition.get(0).intValue()));
320 |       } else {
321 |         samples.add(values.get(samplePartition.get(1).intValue()));
322 |       }
323 |     }
324 |     return samples;
325 | 
326 |   }
327 | 
328 |   public static JavaRDD<LabeledPoint> convertRDDStringToLabeledPoint(JavaRDD<String> data,
329 |       final String delimiter) {
330 |     JavaRDD<LabeledPoint> labeledPointData = data.map(new Function<String, LabeledPoint>() {
331 |       private static final long serialVersionUID = 1L;
332 | 
333 |       public LabeledPoint call(String data) throws Exception {
334 |         String splitter[] = data.split(delimiter);
335 |         double[] array = new double[splitter.length - 1];
336 |         for (int i = 0; i < array.length; i++) {
337 |           try {
338 |             array[i] = Double.parseDouble(splitter[i + 1]);
339 |           } catch (Exception e) {
340 |             throw new Exception(this.getClass() + " Cannot convert \"" + splitter[i + 1]
341 |                 + "\" to double");
342 |           }
343 |         }
344 |         return new LabeledPoint(Double.parseDouble(splitter[0]), Vectors.dense(array));
345 |       }
346 |     });
347 |     return labeledPointData;
348 |   }
349 | 
350 |   public static List<List<Double>> constructListWithColumnNames(DataFrame dataframe,
351 |       String[] columnNames) {
352 | 
353 |     List<Double> l;
354 |     Row[] rows;
355 | 
356 |     List<List<Double>> list = new ArrayList<>();
357 |     for (String name : columnNames) {
358 |       l = new ArrayList<>();
359 |       rows = dataframe.select(name).collect();
360 |       for (Row r : rows) {
361 |         l.add(Double.valueOf(r.get(0).toString()));
362 |       }
363 |       list.add(l);
364 |     }
365 |     return list;
366 | 
367 |   }
368 | 
369 |   public static List<String> getAppendedList(List<List<Double>> list, String delimiter) {
370 | 
371 |     StringBuilder builder;
372 |     String str;
373 | 
374 |     List<String> appendedList = new ArrayList<>();
375 |     for (int i = 0; i < list.get(0).size(); i++) {
376 |       builder = new StringBuilder();
377 |       str = "";
378 |       for (int j = 0; j < list.size(); j++) {
379 |         builder.append(list.get(j).get(i)).append(delimiter);
380 |       }
381 |       str = builder.toString();
382 |       appendedList.add(str.substring(0, str.lastIndexOf(delimiter)));
383 |     }
384 |     return appendedList;
385 | 
386 |   }
387 | 
388 |   public static DataFrame dataframeFromList(List<List<Double>> list, final String delimiter) {
389 | 
390 |     JavaRDD<String> data =
391 |         SparkUtils.getInstance().getJavaSparkContext()
392 |             .parallelize(ExplainerUtils.getAppendedList(list, delimiter));
393 |     JavaRDD<Row> rawData = data.map(new Function<String, Row>() {
394 |       private static final long serialVersionUID = 1L;
395 | 
396 |       public Row call(String data) {
397 |         Row newRow = RowFactory.create(data);
398 |         Object[] colArray = newRow.getString(0).split(Pattern.quote(delimiter));
399 |         return RowFactory.create(colArray);
400 |       }
401 |     });
402 |     StructField[] structField = new StructField[rawData.first().size()];
403 |     StringBuilder builder;
404 |     for (int i = 0; i < structField.length; i++) {
405 |       builder = new StringBuilder("C").append(i + 1); // default column names C1, C2, ...
406 |       structField[i] =
407 |           new StructField(builder.toString(), DataTypes.StringType, false, Metadata.empty());
408 |     }
409 |     StructType schema = new StructType(structField);
410 |     return (SparkUtils.getInstance().getSQLContext().createDataFrame(rawData, schema));
411 | 
412 |   }
413 | 
414 |   public static List<List<Double>> getSubLists(List<Double> list, int size) {
415 | 
416 |     List<List<Double>> listOfLists = new LinkedList<List<Double>>();
417 |     for (int i = 0; i < list.size(); i += size) {
418 |       listOfLists.add(new ArrayList<Double>(list.subList(i, Math.min(i + size, list.size()))));
419 |     }
420 |     return listOfLists;
421 | 
422 |   }
423 | 
424 |   public static double[] listToDoubleArray(List<Double> list) {
425 | 
426 |     double[] array = new double[list.size()];
427 |     for (int i = 0; i < array.length; i++) {
428 |       array[i] = list.get(i).doubleValue();
429 |     }
430 |     return array;
431 | 
432 |   }
433 | 
434 |   public static List<List<Double>> dataframeToList(DataFrame dataframe) {
435 | 
436 |     List<Double> column;
437 |     Row[] rows;
438 | 
439 |     List<List<Double>> listOfColumns = new ArrayList<>();
440 |     for (String s : dataframe.columns()) {
441 |       column = new ArrayList<>();
442 |       rows = dataframe.select(s).collect();
443 |       for (Row r : rows) {
444 |         column.add(Double.valueOf(r.get(0).toString()));
445 |       }
446 |       listOfColumns.add(column);
447 |     }
448 |     return listOfColumns;
449 | 
450 |   }
451 | }
452 | 
--------------------------------------------------------------------------------
/src/main/java/com/zoho/ml/explainer/SparkUtils.java:
--------------------------------------------------------------------------------
1 | package com.zoho.ml.explainer;
2 | 
3 | import org.apache.spark.SparkConf;
4 | import org.apache.spark.api.java.JavaSparkContext;
5 | import org.apache.spark.sql.SQLContext;
6 | 
7 | public final class SparkUtils {
8 | 
9 |   private static volatile SparkUtils instance;
10 |   private static JavaSparkContext jsc;
11 |   private static SQLContext sqlContext;
12 | 
13 |   private SparkUtils() {
14 |     if (instance != null) {
15 |       throw new IllegalStateException("Already initialized.");
16 |     }
17 |   }
18 | 
19 |   public static SparkUtils getInstance() {
20 |     SparkUtils result = instance;
21 |     if (result == null) {
22 |       synchronized (SparkUtils.class) {
23 |         result = instance;
24 |         if (result == null) {
25 |           instance = result = new SparkUtils();
26 |           SparkConf sparkConf = new SparkConf().setMaster("local").setAppName("Explainer");
27 |           jsc = new JavaSparkContext(sparkConf);
28 |           sqlContext = new SQLContext(jsc);
29 |         }
30 |       }
31 |     }
32 |     return result;
33 |   }
34 | 
35 |   public SQLContext getSQLContext() {
36 |     return sqlContext;
37 |   }
38 | 
39 |   public JavaSparkContext getJavaSparkContext() {
40 |     return jsc;
41 |   }
42 | }
43 | 
--------------------------------------------------------------------------------
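
Usage sketch (not a file in the repository): the snippet below illustrates how the utilities above might be exercised from a small driver. The class name ExplainerUtilsDemo and the sample values are illustrative assumptions; only the ExplainerUtils and SparkUtils calls are taken from the sources above, and they assume Spark 1.x (DataFrame/SQLContext) as used in this project.

    package com.zoho.ml.explainer.client;

    import java.util.ArrayList;
    import java.util.Arrays;
    import java.util.List;

    import org.apache.spark.sql.DataFrame;

    import com.zoho.ml.explainer.ExplainerUtils;
    import com.zoho.ml.explainer.SparkUtils;

    // Hypothetical driver class, shown only to illustrate the utility APIs.
    public class ExplainerUtilsDemo {
      public static void main(String[] args) {
        // Two feature columns, three rows each (column-wise layout, as ExplainerUtils expects).
        List<List<Double>> columns = new ArrayList<>();
        columns.add(Arrays.asList(5.1, 4.9, 4.7));
        columns.add(Arrays.asList(3.5, 3.0, 3.2));

        // Per-column means and standard deviations.
        List<Double> means = ExplainerUtils.findMean(columns);
        List<Double> stdDevs = ExplainerUtils.findStdDev(columns);
        System.out.println("means = " + means + ", stdDevs = " + stdDevs);

        // Quartiles for each column, rounded to two decimals by calculatePercentiles.
        List<List<Double>> quartiles =
            ExplainerUtils.calculatePercentiles(columns, new int[] {25, 50, 75});
        System.out.println("quartiles = " + quartiles);

        // Round-trip through a Spark DataFrame using the shared local contexts.
        DataFrame df = ExplainerUtils.dataframeFromList(columns, ",");
        df.show();
        System.out.println(ExplainerUtils.dataframeToList(df));

        SparkUtils.getInstance().getJavaSparkContext().stop();
      }
    }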