├── .gitignore
├── CONTRIBUTING
├── CONTRIBUTORS
├── LICENSE
├── README.md
├── pom.xml
└── src
    ├── main
    │   └── java
    │       └── io
    │           └── druid
    │               └── embedded
    │                   ├── IndexHelper.java
    │                   ├── QueryHelper.java
    │                   ├── Utils.java
    │                   └── load
    │                       ├── Loader.java
    │                       └── impl
    │                           └── CSVLoader.java
    └── test
        ├── java
        │   └── io
        │       └── druid
        │           └── embedded
        │               └── EmbeddedDruidTest.java
        └── resources
            ├── report.csv
            └── report1.csv

/.gitignore:
--------------------------------------------------------------------------------
.DS_Store
target/

--------------------------------------------------------------------------------
/CONTRIBUTORS:
--------------------------------------------------------------------------------
Ruchir Shah
Mahesh Somani
Venkatraman Sankar
Ramachandran Ramesh

--------------------------------------------------------------------------------
/src/test/resources/report1.csv:
--------------------------------------------------------------------------------
1234,JB,Abc,Male,SF,CharsAdded,1290
1235,JB,Xyz,Female,SJ,CharsAdded,3421
2356,AB,owe,Male,LA,CharsAdded,123
2345,AB,sfr,Female,LV,CharsAdded,123

--------------------------------------------------------------------------------
/src/test/resources/report.csv:
--------------------------------------------------------------------------------
slc,r1cart,URLTransaction,abc,1234,Duration,123,5,0,123,123
phx,r1cart,URLTransaction,abc,1235,Duration,124,7,0,124,124
slc,r1cart,URLTransaction,def,1234,Duration,123,3,0,123,123

--------------------------------------------------------------------------------
/CONTRIBUTING:
--------------------------------------------------------------------------------
Contributing to embedded-druid

Contributions via GitHub pull requests are gladly accepted from their original author.
Along with any pull requests, please state that the contribution is your original work and that you license the work to the project under the project's open source license. Whether or not you state this explicitly, by submitting any copyrighted material via pull request, email, or other means you agree to license the material under the project's open source license and warrant that you have the legal authority to do so.

--------------------------------------------------------------------------------
/src/main/java/io/druid/embedded/load/Loader.java:
--------------------------------------------------------------------------------
/*
 * Copyright 2015 eBay Software Foundation
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package io.druid.embedded.load;

import java.util.List;

import io.druid.data.input.InputRow;

/**
 * Abstract base class defining the interface for loading data of various formats;
 * possible implementations include CSV, XML, JSON, etc. An implementation class
 * needs to provide an iterator over the parsed input rows.
 */
public abstract class Loader implements Iterable<InputRow> {
  protected List<String> columns;
  protected List<String> dimensions;
  protected String timestampDimension;

  public Loader(List<String> cols, List<String> dims, String ts) {
    this.columns = cols;
    this.dimensions = dims;
    this.timestampDimension = ts;
  }
}
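Note (editorial, not part of the repository): since the only contract here is `Iterable<InputRow>`, a custom loader over any in-memory or streaming source stays small. A minimal sketch, assuming pre-parsed rows held in a list (the class name and data shape are hypothetical):

    import java.util.Iterator;
    import java.util.List;
    import java.util.Map;

    import io.druid.data.input.InputRow;
    import io.druid.data.input.MapBasedInputRow;
    import io.druid.embedded.load.Loader;

    /** Hypothetical loader over pre-parsed rows, for illustration only. */
    public class InMemoryLoader extends Loader {
      private final List<Map<String, Object>> rows;

      public InMemoryLoader(List<Map<String, Object>> rows, List<String> columns,
                            List<String> dims, String ts) {
        super(columns, dims, ts);
        this.rows = rows;
      }

      public Iterator<InputRow> iterator() {
        final Iterator<Map<String, Object>> it = rows.iterator();
        return new Iterator<InputRow>() {
          public boolean hasNext() { return it.hasNext(); }
          public InputRow next() {
            Map<String, Object> event = it.next();
            // Resolve the timestamp from the configured timestamp dimension.
            long ts = Long.parseLong((String) event.get(timestampDimension));
            return new MapBasedInputRow(ts, dimensions, event);
          }
          public void remove() { throw new UnsupportedOperationException(); }
        };
      }
    }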
--------------------------------------------------------------------------------
/pom.xml:
--------------------------------------------------------------------------------
<project xmlns="http://maven.apache.org/POM/4.0.0"
         xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
  <modelVersion>4.0.0</modelVersion>
  <groupId>io.druid</groupId>
  <artifactId>embedded-druid</artifactId>
  <version>1.0</version>
  <name>Embedded Druid</name>

  <properties>
    <druid.version>0.9.1</druid.version>
    <log4j.version>1.2.16</log4j.version>
  </properties>

  <dependencies>
    <dependency>
      <groupId>io.druid</groupId>
      <artifactId>druid-processing</artifactId>
      <version>${druid.version}</version>
    </dependency>
    <dependency>
      <groupId>io.druid.extensions</groupId>
      <artifactId>druid-histogram</artifactId>
      <version>${druid.version}</version>
    </dependency>
    <dependency>
      <groupId>log4j</groupId>
      <artifactId>log4j</artifactId>
      <version>${log4j.version}</version>
    </dependency>
    <dependency>
      <groupId>org.testng</groupId>
      <artifactId>testng</artifactId>
      <version>6.8.8</version>
      <scope>test</scope>
    </dependency>
  </dependencies>

  <build>
    <plugins>
      <plugin>
        <groupId>org.apache.maven.plugins</groupId>
        <artifactId>maven-compiler-plugin</artifactId>
        <version>3.1</version>
        <configuration>
          <source>1.7</source>
          <target>1.7</target>
        </configuration>
      </plugin>
    </plugins>
  </build>

  <repositories>
    <repository>
      <id>pub-libs</id>
      <name>pub-libs-local</name>
      <url>https://metamx.artifactoryonline.com/metamx/pub-libs-releases-local</url>
    </repository>
    <repository>
      <id>thirdparty-uploads</id>
      <name>JBoss Thirdparty Uploads</name>
      <url>https://repository.jboss.org/nexus/content/repositories/thirdparty-uploads</url>
    </repository>
  </repositories>
</project>

--------------------------------------------------------------------------------
/src/main/java/io/druid/embedded/Utils.java:
--------------------------------------------------------------------------------
/*
 * Copyright 2015 eBay Software Foundation
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package io.druid.embedded;

import io.druid.collections.StupidPool;
import io.druid.query.IntervalChunkingQueryRunnerDecorator;
import io.druid.query.Query;
import io.druid.query.QueryRunner;
import io.druid.query.QueryToolChest;
import io.druid.query.QueryWatcher;

import java.nio.ByteBuffer;
import java.util.Map;

import com.google.common.base.Supplier;
import com.google.common.util.concurrent.ListenableFuture;
import com.metamx.common.guava.Sequence;
import com.metamx.emitter.core.Event;
import com.metamx.emitter.service.ServiceEmitter;

public class Utils {
  public static final int MAX_TOTAL_BUFFER_SIZE = 1024 * 1024 * 1024;

  private static class ByteBufferSupplier implements Supplier<ByteBuffer> {
    int capacity;

    public ByteBufferSupplier(int capacity) {
      this.capacity = capacity;
    }

    public ByteBuffer get() {
      return ByteBuffer.allocate(capacity);
    }
  }

  public static ServiceEmitter NOOP_SERVICE_EMITTER = new ServiceEmitter(null, null, null) {
    @Override
    public void emit(Event event) {}
  };

  public static final QueryWatcher NOOP_QUERYWATCHER = new QueryWatcher() {
    @SuppressWarnings("rawtypes")
    public void registerQuery(Query query, ListenableFuture future) {}
  };

  public static StupidPool<ByteBuffer> getBufferPool() {
    return new StupidPool<ByteBuffer>(new ByteBufferSupplier(MAX_TOTAL_BUFFER_SIZE / 2));
  }

  public static IntervalChunkingQueryRunnerDecorator NoopIntervalChunkingQueryRunnerDecorator() {
    return new IntervalChunkingQueryRunnerDecorator(null, null, null) {
      @Override
      public <T> QueryRunner<T> decorate(final QueryRunner<T> delegate,
          QueryToolChest<T, ? extends Query<T>> toolChest) {
        return new QueryRunner<T>() {
          public Sequence<T> run(Query<T> query, Map<String, Object> responseContext) {
            // Pass the query straight through; no interval chunking is needed in embedded mode.
            return delegate.run(query, responseContext);
          }
        };
      }
    };
  }
}

--------------------------------------------------------------------------------
/src/main/java/io/druid/embedded/IndexHelper.java:
--------------------------------------------------------------------------------
/*
 * Copyright 2015 eBay Software Foundation
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package io.druid.embedded;

import java.io.File;
import java.io.IOException;

import com.fasterxml.jackson.databind.ObjectMapper;

import io.druid.data.input.InputRow;
import io.druid.query.aggregation.histogram.ApproximateHistogramFoldingSerde;
import io.druid.segment.IndexIO;
import io.druid.segment.IndexMerger;
import io.druid.segment.IndexSpec;
import io.druid.segment.QueryableIndex;
import io.druid.segment.column.ColumnConfig;
import io.druid.segment.incremental.IncrementalIndex;
import io.druid.segment.incremental.IncrementalIndexSchema;
import io.druid.segment.incremental.OnheapIncrementalIndex;
import io.druid.segment.serde.ComplexMetrics;
import io.druid.embedded.load.Loader;
import io.druid.jackson.DefaultObjectMapper;

/**
 * Helper class which reads the content of a file, generates the required index/segment
 * files, and persists them. It also provides a queryable index object after loading
 * the file into memory.
 */
public class IndexHelper {

  /**
   * Initialization (handled by Guice in the Druid system).
   */
  static {
    ApproximateHistogramFoldingSerde serde = new ApproximateHistogramFoldingSerde();
    ComplexMetrics.registerSerde(serde.getTypeName(), serde);
  }

  static ObjectMapper objectMapper = new DefaultObjectMapper();
  static ColumnConfig columnConfig = new ColumnConfig() {
    @Override
    public int columnCacheSizeBytes() {
      // No column cache is needed for embedded usage.
      return 0;
    }
  };
  static IndexIO indexIO = new IndexIO(objectMapper, columnConfig);
  static IndexMerger merger = new IndexMerger(objectMapper, indexIO);

  /**
   * The only way to get a QueryableIndex from an IncrementalIndex is to persist the
   * IncrementalIndex and reload it. This method does that.
   *
   * @param loader source of input rows
   * @param indexSchema schema (dimensions, granularity, aggregators) of the index
   * @return a queryable index backed by the persisted segment files
   * @throws IOException
   */
  public static QueryableIndex getQueryableIndex(Loader loader, IncrementalIndexSchema indexSchema)
      throws IOException {
    // IncrementalIndex incIndex =
    //     new OffheapIncrementalIndex(indexSchema, Utils.getBufferPool(), true, maxTotalBufferSize);
    IncrementalIndex incIndex = new OnheapIncrementalIndex(indexSchema, true, Integer.MAX_VALUE);

    for (InputRow row : loader) {
      incIndex.add(row);
    }
    String tmpDir = System.getProperty("druid.segment.dir");
    if (tmpDir == null) {
      tmpDir = System.getProperty("java.io.tmpdir") + File.separator + "druid-tmp-index-";
    }
    File tmpIndexDir = new File(tmpDir + loader.hashCode());

    merger.persist(incIndex, tmpIndexDir, new IndexSpec());
    return indexIO.loadIndex(tmpIndexDir);
  }

  /**
   * Get a QueryableIndex from an existing index directory.
   *
   * @param indexDir directory containing previously persisted segment files
   * @return the loaded queryable index
   * @throws IOException
   */
  public static QueryableIndex getQueryableIndex(File indexDir) throws IOException {
    return indexIO.loadIndex(indexDir);
  }
}
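Note (editorial, not part of the repository): because the segment directory defaults to a path derived from the loader's hash code, a caller who wants reproducible segment locations can pin `druid.segment.dir` and reload the persisted index later. A minimal sketch, assuming `loader` and `indexSchema` were built as in the test; the paths are made up for illustration:

    // Persist segments under a known base path instead of java.io.tmpdir.
    System.setProperty("druid.segment.dir", "/data/druid-segments/");
    QueryableIndex index = IndexHelper.getQueryableIndex(loader, indexSchema);

    // Later (e.g., in another run), reload the persisted segment directory directly;
    // the directory suffix is the loader's hash code.
    QueryableIndex reloaded =
        IndexHelper.getQueryableIndex(new File("/data/druid-segments/" + loader.hashCode()));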
--------------------------------------------------------------------------------
/src/main/java/io/druid/embedded/load/impl/CSVLoader.java:
--------------------------------------------------------------------------------
/*
 * Copyright 2015 eBay Software Foundation
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package io.druid.embedded.load.impl;

import java.util.Iterator;
import java.io.BufferedReader;
import java.io.IOException;
import java.io.Reader;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import java.util.HashMap;
import java.util.StringTokenizer;

import io.druid.data.input.InputRow;
import io.druid.data.input.MapBasedInputRow;
import io.druid.embedded.load.Loader;

/**
 * CSV Loader implementation where fields are comma separated.
 * If a column is named "value", it is assumed to contain the metric value and is
 * parsed as a float. The metric-name column must be present in each record.
 * For example, for a Wikipedia-like schema with dimensions
 * "Timestamp, Page, Username, Gender, City"
 * and a metric "CharsAdded" with its value, the CSV file can look like
 *   1234,JB,Abc,Male,SF,CharsAdded,1290
 *   1235,JB,Xyz,Female,SJ,CharsAdded,3421
 * where each record is in the format "Timestamp, Page, Username, Gender, City, metric, value".
 */
public class CSVLoader extends Loader {

  protected Reader reader;

  /**
   * @param reader reader pointing to the CSV file
   * @param columns list of all columns in the CSV file (including the metric and "value" columns)
   * @param dims list of dimensions (excluding the metric and "value" columns)
   * @param timestampDimension dimension which holds the timestamp field in the CSV file
   */
  public CSVLoader(Reader reader, List<String> columns, List<String> dims, String timestampDimension) {
    super(columns, dims, timestampDimension);
    this.reader = reader;
  }

  @Override
  public Iterator<InputRow> iterator() {
    return new CSVReaderIterator();
  }

  protected Map<String, Object> parse(String row) {
    List<String> data = new ArrayList<String>();
    StringTokenizer stk = new StringTokenizer(row, ",");
    while (stk.hasMoreTokens()) {
      data.add(stk.nextToken());
    }
    if (data.size() != columns.size()) {
      return null;
    }
    Map<String, Object> map = new HashMap<String, Object>();
    for (int i = 0; i < columns.size(); i++) {
      if (data.get(i).equals("null") || data.get(i).isEmpty()) {
        continue;
      }
      // The column named "value" is treated as a special column containing the metric value.
      if (columns.get(i).equals("value")) {
        map.put(columns.get(i), Float.parseFloat(data.get(i)));
      } else {
        map.put(columns.get(i), data.get(i));
      }
    }
    return map;
  }

  private class CSVReaderIterator implements Iterator<InputRow> {
    String nextLine;
    protected BufferedReader breader;

    public CSVReaderIterator() {
      this.breader = new BufferedReader(reader);
    }

    protected Long getTimestamp(Map<String, Object> map) {
      if (timestampDimension == null) {
        return 1L;
      }
      return Long.valueOf((String) map.get(timestampDimension));
    }

    public boolean hasNext() {
      try {
        if (nextLine == null && (nextLine = breader.readLine()) == null) {
          close();
          return false;
        }
        return true;
      } catch (IOException e) {
        e.printStackTrace();
        close();
        return false;
      }
    }

    public InputRow next() {
      if (nextLine == null) {
        try {
          nextLine = breader.readLine();
        } catch (IOException e) {
          e.printStackTrace();
          close();
          return null;
        }
      }
      Map<String, Object> map = parse(nextLine);
      if (map == null) {
        // Skip malformed rows and return the row parsed from the next line instead.
        nextLine = null;
        return next();
      }
      InputRow row = new MapBasedInputRow(getTimestamp(map), dimensions, map);
      nextLine = null;
      return row;
    }

    public void remove() {
      throw new UnsupportedOperationException();
    }

    public void close() {
      try {
        breader.close();
      } catch (Exception e) {
      }
    }
  }
}
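Note (editorial, not part of the repository): to make the class comment concrete, this is roughly how the Wikipedia-style file above would be consumed; the file path is made up, and the row-printing is purely illustrative:

    Reader reader = new BufferedReader(new FileReader(new File("/tmp/wikipedia.csv")));
    List<String> columns = Arrays.asList("Timestamp", "Page", "Username", "Gender", "City", "metric", "value");
    List<String> dimensions = Arrays.asList("Page", "Username", "Gender", "City");
    Loader loader = new CSVLoader(reader, columns, dimensions, "Timestamp");
    for (InputRow row : loader) {
      // Each row exposes the parsed dimensions plus the float "value" metric.
      System.out.println(row.getDimension("Page") + " -> " + row.getFloatMetric("value"));
    }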
--------------------------------------------------------------------------------
/src/main/java/io/druid/embedded/QueryHelper.java:
--------------------------------------------------------------------------------
/*
 * Copyright 2015 eBay Software Foundation
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package io.druid.embedded;

import io.druid.jackson.DefaultObjectMapper;
import io.druid.query.DefaultQueryRunnerFactoryConglomerate;
import io.druid.query.Query;
import io.druid.query.QueryRunnerFactory;
import io.druid.query.QueryRunnerFactoryConglomerate;
import io.druid.query.groupby.GroupByQuery;
import io.druid.query.groupby.GroupByQueryConfig;
import io.druid.query.groupby.GroupByQueryEngine;
import io.druid.query.groupby.GroupByQueryQueryToolChest;
import io.druid.query.groupby.GroupByQueryRunnerFactory;
import io.druid.query.metadata.SegmentMetadataQueryConfig;
import io.druid.query.metadata.SegmentMetadataQueryQueryToolChest;
import io.druid.query.metadata.SegmentMetadataQueryRunnerFactory;
import io.druid.query.metadata.metadata.SegmentMetadataQuery;
import io.druid.query.search.SearchQueryQueryToolChest;
import io.druid.query.search.SearchQueryRunnerFactory;
import io.druid.query.search.search.SearchQuery;
import io.druid.query.search.search.SearchQueryConfig;
import io.druid.query.select.SelectQuery;
import io.druid.query.select.SelectQueryEngine;
import io.druid.query.select.SelectQueryQueryToolChest;
import io.druid.query.select.SelectQueryRunnerFactory;
import io.druid.query.timeboundary.TimeBoundaryQuery;
import io.druid.query.timeboundary.TimeBoundaryQueryRunnerFactory;
import io.druid.query.timeseries.TimeseriesQuery;
import io.druid.query.timeseries.TimeseriesQueryEngine;
import io.druid.query.timeseries.TimeseriesQueryQueryToolChest;
import io.druid.query.timeseries.TimeseriesQueryRunnerFactory;
import io.druid.query.topn.TopNQuery;
import io.druid.query.topn.TopNQueryConfig;
import io.druid.query.topn.TopNQueryQueryToolChest;
import io.druid.query.topn.TopNQueryRunnerFactory;
import io.druid.segment.QueryableIndex;
import io.druid.segment.QueryableIndexSegment;

import java.io.IOException;
import java.io.InputStream;
import java.util.HashMap;

import com.fasterxml.jackson.core.JsonParseException;
import com.fasterxml.jackson.databind.JsonMappingException;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.google.common.base.Supplier;
import com.google.common.base.Suppliers;
import com.google.common.collect.Maps;
import com.metamx.common.guava.Sequence;

/**
 * Helper class that registers the query runner factories for all supported query
 * types and runs a given query against a queryable index.
 */
public class QueryHelper {
  private static final QueryRunnerFactoryConglomerate conglomerate;

  /*
   * Initialize the QueryRunnerFactoryConglomerate.
   */
  static {
    HashMap<Class<? extends Query>, QueryRunnerFactory> map =
        Maps.<Class<? extends Query>, QueryRunnerFactory>newHashMap();

    // Register all query runner factories.
    map.put(GroupByQuery.class, getGroupByQueryRunnerFactory());
    map.put(TopNQuery.class, getTopNQueryRunnerFactory());
    map.put(SearchQuery.class, getSearchQueryRunnerFactory());
    map.put(SelectQuery.class, getSelectQueryRunnerFactory());
    map.put(SegmentMetadataQuery.class, getSegmentMetadataQueryRunnerFactory());
    map.put(TimeseriesQuery.class, getTimeseriesQueryRunnerFactory());
    map.put(TimeBoundaryQuery.class, getTimeBoundaryQueryRunnerFactory());

    conglomerate = new DefaultQueryRunnerFactoryConglomerate(map);
  }

  @SuppressWarnings("unchecked")
  public static Sequence run(Query query, QueryableIndex index) {
    return findFactory(query).createRunner(new QueryableIndexSegment("", index)).run(query, null);
  }

  @SuppressWarnings("unchecked")
  public static QueryRunnerFactory findFactory(Query query) {
    return conglomerate.findFactory(query);
  }

  /*
   * Factories for all supported Query subclasses, with default configuration.
   */

  private static TimeseriesQueryRunnerFactory getTimeseriesQueryRunnerFactory() {
    TimeseriesQueryQueryToolChest toolChest =
        new TimeseriesQueryQueryToolChest(Utils.NoopIntervalChunkingQueryRunnerDecorator());
    TimeseriesQueryEngine engine = new TimeseriesQueryEngine();
    return new TimeseriesQueryRunnerFactory(toolChest, engine, Utils.NOOP_QUERYWATCHER);
  }

  private static TimeBoundaryQueryRunnerFactory getTimeBoundaryQueryRunnerFactory() {
    return new TimeBoundaryQueryRunnerFactory(Utils.NOOP_QUERYWATCHER);
  }

  private static SegmentMetadataQueryRunnerFactory getSegmentMetadataQueryRunnerFactory() {
    SegmentMetadataQueryConfig smqc = new SegmentMetadataQueryConfig();
    SegmentMetadataQueryQueryToolChest toolChest = new SegmentMetadataQueryQueryToolChest(smqc);
    return new SegmentMetadataQueryRunnerFactory(toolChest, Utils.NOOP_QUERYWATCHER);
  }

  private static SelectQueryRunnerFactory getSelectQueryRunnerFactory() {
    SelectQueryQueryToolChest toolChest =
        new SelectQueryQueryToolChest(new ObjectMapper(),
            Utils.NoopIntervalChunkingQueryRunnerDecorator());
    SelectQueryEngine engine = new SelectQueryEngine();
    return new SelectQueryRunnerFactory(toolChest, engine, Utils.NOOP_QUERYWATCHER);
  }

  private static SearchQueryRunnerFactory getSearchQueryRunnerFactory() {
    SearchQueryQueryToolChest toolChest =
        new SearchQueryQueryToolChest(new SearchQueryConfig(),
            Utils.NoopIntervalChunkingQueryRunnerDecorator());
    return new SearchQueryRunnerFactory(toolChest, Utils.NOOP_QUERYWATCHER);
  }

  private static TopNQueryRunnerFactory getTopNQueryRunnerFactory() {
    TopNQueryQueryToolChest toolchest =
        new TopNQueryQueryToolChest(new TopNQueryConfig(),
            Utils.NoopIntervalChunkingQueryRunnerDecorator());
    return new TopNQueryRunnerFactory(Utils.getBufferPool(), toolchest, Utils.NOOP_QUERYWATCHER);
  }

  private static
      GroupByQueryRunnerFactory getGroupByQueryRunnerFactory() {
    ObjectMapper mapper = new DefaultObjectMapper();
    GroupByQueryConfig config = new GroupByQueryConfig();
    config.setMaxIntermediateRows(10000);

    Supplier<GroupByQueryConfig> configSupplier = Suppliers.ofInstance(config);
    GroupByQueryEngine engine = new GroupByQueryEngine(configSupplier, Utils.getBufferPool());

    return new GroupByQueryRunnerFactory(engine, Utils.NOOP_QUERYWATCHER, configSupplier,
        new GroupByQueryQueryToolChest(configSupplier, mapper, engine, Utils.getBufferPool(),
            Utils.NoopIntervalChunkingQueryRunnerDecorator()), Utils.getBufferPool());
  }

  public static Query getQuery(InputStream queryInputStream) throws JsonParseException,
      JsonMappingException, IOException {
    ObjectMapper jsonMapper = new DefaultObjectMapper();
    return jsonMapper.readValue(queryInputStream, Query.class);
  }
}
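Note (editorial, not part of the repository): because `getQuery` deserializes any Druid query JSON, the two public helpers compose into a small "submit a JSON query" path. A sketch, where the query file name is made up and `index` is a previously built QueryableIndex:

    InputStream in = new FileInputStream("/tmp/group-by-query.json");
    Query query = QueryHelper.getQuery(in);              // parse the JSON query definition
    Sequence sequence = QueryHelper.run(query, index);   // dispatch to the matching factory
    List results = Sequences.toList(sequence, Lists.newArrayList());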
--------------------------------------------------------------------------------
/src/test/java/io/druid/embedded/EmbeddedDruidTest.java:
--------------------------------------------------------------------------------
/*
 * Copyright 2015 eBay Software Foundation
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package io.druid.embedded;

import io.druid.data.input.Row;
import io.druid.data.input.impl.DimensionSchema;
import io.druid.data.input.impl.DimensionsSpec;
import io.druid.data.input.impl.StringDimensionSchema;
import io.druid.embedded.load.Loader;
import io.druid.embedded.load.impl.CSVLoader;
import io.druid.granularity.QueryGranularity;
import io.druid.query.Result;
import io.druid.query.aggregation.AggregatorFactory;
import io.druid.query.aggregation.DoubleSumAggregatorFactory;
import io.druid.query.aggregation.LongMaxAggregatorFactory;
import io.druid.query.aggregation.LongMinAggregatorFactory;
import io.druid.query.aggregation.LongSumAggregatorFactory;
import io.druid.query.filter.DimFilter;
import io.druid.query.filter.DimFilters;
import io.druid.query.groupby.GroupByQuery;
import io.druid.query.spec.QuerySegmentSpecs;
import io.druid.query.topn.TopNQuery;
import io.druid.query.topn.TopNQueryBuilder;
import io.druid.segment.QueryableIndex;
import io.druid.segment.incremental.IncrementalIndexSchema;

import java.io.BufferedReader;
import java.io.File;
import java.io.FileReader;
import java.io.IOException;
import java.io.Reader;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;

import org.joda.time.DateTime;
import org.joda.time.Interval;

import com.google.common.collect.Lists;
import com.metamx.common.guava.Sequence;
import com.metamx.common.guava.Sequences;

import org.testng.Assert;
import org.testng.annotations.Test;

public class EmbeddedDruidTest {

  public static QueryableIndex createDruidSegments() throws IOException {
    // Create druid segments from raw data.
    Reader reader = new BufferedReader(new FileReader(new File("./src/test/resources/report.csv")));

    List<String> columns = Arrays.asList("colo", "pool", "report", "URL", "TS", "metric", "value", "count", "min", "max", "sum");
    List<String> exclusions = Arrays.asList("_Timestamp", "_Machine", "_ThreadId", "_Query");
    List<String> metrics = Arrays.asList("value", "count", "min", "max", "sum");
    List<DimensionSchema> dimensions = new ArrayList<DimensionSchema>();
    for (String dim : columns) {
      // Skip excluded columns and metric columns; only real dimensions get a schema.
      if (!exclusions.contains(dim) && !metrics.contains(dim)) {
        dimensions.add(new StringDimensionSchema(dim));
      }
    }
    Loader loader = new CSVLoader(reader, columns, columns, "TS");

    DimensionsSpec dimensionsSpec = new DimensionsSpec(dimensions, null, null);
    AggregatorFactory[] metricsAgg = new AggregatorFactory[] {
        new LongSumAggregatorFactory("agg_count", "count"),
        new LongMaxAggregatorFactory("agg_max", "max"),
        new LongMinAggregatorFactory("agg_min", "min"),
        new DoubleSumAggregatorFactory("agg_sum", "sum"),
    };
    IncrementalIndexSchema indexSchema =
        new IncrementalIndexSchema(0, QueryGranularity.fromString("ALL"), dimensionsSpec, metricsAgg);
    QueryableIndex index = IndexHelper.getQueryableIndex(loader, indexSchema);
    return index;
  }

  @Test
  public void groupByQuery() throws IOException {
    QueryableIndex index = createDruidSegments();
    List<DimFilter> filters = new ArrayList<DimFilter>();
    filters.add(DimFilters.dimEquals("report", "URLTransaction"));
    filters.add(DimFilters.dimEquals("pool", "r1cart"));
    filters.add(DimFilters.dimEquals("metric", "Duration"));
    GroupByQuery query = GroupByQuery.builder()
        .setDataSource("test")
        .setQuerySegmentSpec(QuerySegmentSpecs.create(new Interval(0, new DateTime().getMillis())))
        .setGranularity(QueryGranularity.fromString("NONE"))
        .addDimension("URL")
        .addAggregator(new LongSumAggregatorFactory("agg_count", "agg_count"))
        .addAggregator(new LongMaxAggregatorFactory("agg_max", "agg_max"))
        .addAggregator(new LongMinAggregatorFactory("agg_min", "agg_min"))
        .addAggregator(new DoubleSumAggregatorFactory("agg_sum", "agg_sum"))
        .setDimFilter(DimFilters.and(filters))
        .build();

    @SuppressWarnings("unchecked")
    Sequence<Row> sequence = QueryHelper.run(query, index);
    ArrayList<Row> results = Sequences.toList(sequence, Lists.<Row>newArrayList());
    Assert.assertEquals(results.size(), 2);

    if (results.get(0).getDimension("URL").get(0).equals("abc")) {
      Assert.assertEquals(results.get(0).getLongMetric("agg_sum"), 247);
      Assert.assertEquals(results.get(0).getLongMetric("agg_min"), 0);
      Assert.assertEquals(results.get(0).getLongMetric("agg_max"), 124);
      Assert.assertEquals(results.get(0).getLongMetric("agg_count"), 12);
      Assert.assertEquals(results.get(1).getLongMetric("agg_sum"), 123);
      Assert.assertEquals(results.get(1).getLongMetric("agg_min"), 0);
      Assert.assertEquals(results.get(1).getLongMetric("agg_max"), 123);
      Assert.assertEquals(results.get(1).getLongMetric("agg_count"), 3);
    } else {
      Assert.assertEquals(results.get(0).getLongMetric("agg_sum"), 123);
      Assert.assertEquals(results.get(0).getLongMetric("agg_min"), 0);
      Assert.assertEquals(results.get(0).getLongMetric("agg_max"), 123);
      Assert.assertEquals(results.get(0).getLongMetric("agg_count"), 3);
      Assert.assertEquals(results.get(1).getLongMetric("agg_sum"), 247);
      Assert.assertEquals(results.get(1).getLongMetric("agg_min"), 0);
      Assert.assertEquals(results.get(1).getLongMetric("agg_max"), 124);
      Assert.assertEquals(results.get(1).getLongMetric("agg_count"), 12);
    }
  }

  @Test
  public void topNQuery() throws IOException {
    QueryableIndex index = createDruidSegments();
    List<DimFilter> filters = new ArrayList<DimFilter>();
    filters.add(DimFilters.dimEquals("report", "URLTransaction"));
    filters.add(DimFilters.dimEquals("pool", "r1cart"));
    filters.add(DimFilters.dimEquals("metric", "Duration"));
    TopNQuery query =
        new TopNQueryBuilder()
            .threshold(5)
            .metric("agg_count")
            .dataSource("test")
            .intervals(QuerySegmentSpecs.create(new Interval(0, new DateTime().getMillis())))
            .granularity(QueryGranularity.fromString("NONE"))
            .dimension("colo")
            .aggregators(Arrays.<AggregatorFactory>asList(
                new LongSumAggregatorFactory("agg_count", "agg_count"),
                new LongMaxAggregatorFactory("agg_max", "agg_max"),
                new LongMinAggregatorFactory("agg_min", "agg_min"),
DoubleSumAggregatorFactory("agg_sum", "agg_sum")) 165 | 166 | ) 167 | .filters(DimFilters.and(filters)).build(); 168 | @SuppressWarnings("unchecked") 169 | Sequence sequence = QueryHelper.run(query, index); 170 | ArrayList results = Sequences.toList(sequence, Lists.newArrayList()); 171 | Assert.assertEquals(results.size(), 1); 172 | } 173 | 174 | } 175 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | #embedded-druid - Leveraging Druid capabilities in stand alone application 2 | 3 | Druid is an open source data store designed for real-time exploratory analytics on large data sets. The system combines a column-oriented storage layout, a distributed, shared-nothing architecture, and an advanced indexing structure to allow for the arbitrary exploration of billion-row tables with sub-second latencies. Druid supports fast aggregations and sub-second OLAP queries. This project aims to 4 | offer similar capability (with single JVM process) for reasonably small amount of data without involving complexity of multi nodes setup. 5 | 6 | ## Motivation 7 | Druid is proven technology for executing OLAP kind of queries involving billion-row data with sub-second response time. Given it is distributed, shared-nothing architecture involving large amount data, Druid has multiple components like real-time node, historical node, broker node, co-ordinator node, Deep storage, MySql, ZooKeeper etc. If input data size is small (say upto tens to hundreds of millions of rows), then amount of work involve to deploy Druid can be bigger overhead and one can prefer to use in-memory database systems like `derby` or `postgresql` if report requirement is very simple (like group by some dimension or retrieving topN values etc). But there are lots of use cases where input is not `Big Data` but medium or small data, but requires OLAP like capability (like group by multiple dimensions, different aggregation functions like percentile etc). For example, in eBay, we generate report for application operational metrics (which runs on multiple machines across data centers). This report contains information about various metrics like total request count, avg request duration etc across different dimensions like type of request, data center, request status, dependency etc. Each application owner would like to view different kind of information from this report like Top hosts with errors, Top slowest requests by request type / data center or requests by different error codes etc. Given dynamic nature of query, if Druid capability can be leveraged without deployment complexity, then it can make developer/debugger/analyzer life easy. 
## Usage

### Prerequisites
* Java, Maven

### Build
Follow these steps to build the jar file:
* Clone the project from GitHub
* Run a Maven build at the top level of the project using `mvn clean install`
* The jar file will be available under `embedded-druid/target/embedded-druid*.jar`

### Running test cases
* To run the test cases, use `mvn clean test`

### Maven dependency
The following Maven dependency needs to be added to the project's pom.xml file (once it is available in a Maven repository):

    <dependency>
      <groupId>io.druid</groupId>
      <artifactId>embedded-druid</artifactId>
      <version>1.0.0</version>
      <scope>compile</scope>
    </dependency>

## embedded-druid in action

### Create Loader
Currently, embedded-druid supports loading CSV files (via the implementation class `CSVLoader`). One first provides the list of all columns available in the CSV file (including metrics), the list of dimensions, and the column specifying the timestamp (if available). For example, for a Wikipedia schema that captures the number of characters added to a particular page by a user, the CSV file can have data in the format shown below:

    Timestamp, Page, Username, Gender, City, metric, value

To load this data in memory, the following code creates the required Loader object:

    List<String> columns = Arrays.asList("Timestamp", "Page", "Username", "Gender", "City", "metric", "value");
    List<String> metrics = Arrays.asList("value");
    List<String> dimensionNames = new ArrayList<String>(columns);
    dimensionNames.removeAll(metrics);
    Loader loader = new CSVLoader(reader, columns, dimensionNames, "Timestamp");

### Create Druid segment/index files
Once the Loader object is created, one needs to create the Druid-specific segment/index files that are used at query time. To create a segment file, specify the available dimensions and the kinds of aggregator functions required for querying. For example, if one is interested in querying values like total count, max, min, total sum, and percentiles, the following AggregatorFactory objects need to be created:

    List<DimensionSchema> dimensions = new ArrayList<DimensionSchema>();
    for (String dim : dimensionNames) {
      dimensions.add(new StringDimensionSchema(dim));
    }
    DimensionsSpec dimensionsSpec = new DimensionsSpec(dimensions, null, null);
    AggregatorFactory[] metricsAgg = new AggregatorFactory[] {
        new LongSumAggregatorFactory("agg_count", "count"),
        new LongMaxAggregatorFactory("agg_max", "max"),
        new LongMinAggregatorFactory("agg_min", "min"),
        new DoubleSumAggregatorFactory("agg_sum", "sum"),
        new ApproximateHistogramAggregatorFactory("agg_histogram", "value", null, null, null, null)
    };

To create segment files locally, create a QueryableIndex object as follows:

    IncrementalIndexSchema indexSchema = new IncrementalIndexSchema(0, QueryGranularity.fromString("ALL"), dimensionsSpec, metricsAgg);
    QueryableIndex index = IndexHelper.getQueryableIndex(loader, indexSchema);

By default, segment files are created at the location `System.getProperty("druid.segment.dir")`. If this property is not set, the temporary location `System.getProperty("java.io.tmpdir") + File.separator + "druid-tmp-index-"` is used instead. So if one wants segment files created at a specific location, set the property `druid.segment.dir` first, as in the snippet below.
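For instance (the path is illustrative):

    System.setProperty("druid.segment.dir", "/var/data/druid-segments");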
### Querying data
Once segment files are created, one can execute different kinds of queries using the index object. For example, to execute a GroupByQuery for the above-mentioned schema, the code looks like:

    List<DimFilter> filters = new ArrayList<DimFilter>();
    filters.add(DimFilters.dimEquals("Page", "JB"));
    filters.add(DimFilters.dimEquals("Gender", "Male"));
    filters.add(DimFilters.dimEquals("metric", "CharsAdded"));
    GroupByQuery query = GroupByQuery.builder()
        .setDataSource("test")
        .setQuerySegmentSpec(QuerySegmentSpecs.create(new Interval(0, new DateTime().getMillis())))
        .setGranularity(QueryGranularity.fromString("NONE"))
        .addDimension("City")
        .addAggregator(new LongSumAggregatorFactory("agg_count", "agg_count"))
        .addAggregator(new LongMaxAggregatorFactory("agg_max", "agg_max"))
        .addAggregator(new LongMinAggregatorFactory("agg_min", "agg_min"))
        .addAggregator(new DoubleSumAggregatorFactory("agg_sum", "agg_sum"))
        .addAggregator(new ApproximateHistogramFoldingAggregatorFactory("agg_histogram", "agg_histogram", 20, 5, null, null))
        .addPostAggregator(new QuantilesPostAggregator("agg_quantiles", "agg_histogram", new float[] {0.25f, 0.5f, 0.75f, 0.95f, 0.99f}))
        .setDimFilter(DimFilters.and(filters))
        .build();
    Sequence<Row> sequence = QueryHelper.run(query, index);
    ArrayList<Row> results = Sequences.toList(sequence, Lists.<Row>newArrayList());

Similarly, to execute a TopNQuery:

    List<DimFilter> filters = new ArrayList<DimFilter>();
    filters.add(DimFilters.dimEquals("Page", "JB"));
    filters.add(DimFilters.dimEquals("Gender", "Male"));
    filters.add(DimFilters.dimEquals("metric", "CharsAdded"));
    TopNQuery query =
        new TopNQueryBuilder()
            .threshold(5)
            .metric("agg_count")
            .dataSource("test")
            .intervals(QuerySegmentSpecs.create(new Interval(0, new DateTime().getMillis())))
            .granularity(QueryGranularity.fromString("NONE"))
            .dimension("City")
            .aggregators(Arrays.<AggregatorFactory>asList(
                new LongSumAggregatorFactory("agg_count", "agg_count"),
                new LongMaxAggregatorFactory("agg_max", "agg_max"),
                new LongMinAggregatorFactory("agg_min", "agg_min"),
                new DoubleSumAggregatorFactory("agg_sum", "agg_sum")))
            .filters(DimFilters.and(filters))
            .build();
    Sequence sequence = QueryHelper.run(query, index);
    ArrayList results = Sequences.toList(sequence, Lists.newArrayList());

## Future Work
We plan to extend this work by providing (and/or integrating) REST APIs for ingesting and querying Druid data, and by integrating with an easy-to-use UI such as Grafana for visualization. This will help users analyze data quickly and surface meaningful information promptly.

--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
                                 Apache License
                           Version 2.0, January 2004
                        http://www.apache.org/licenses/

   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION

   1. Definitions.

      "License" shall mean the terms and conditions for use, reproduction,
      and distribution as defined by Sections 1 through 9 of this document.

      "Licensor" shall mean the copyright owner or entity authorized by
      the copyright owner that is granting the License.

      "Legal Entity" shall mean the union of the acting entity and all
      other entities that control, are controlled by, or are under common
      control with that entity.
      For the purposes of this definition,
      "control" means (i) the power, direct or indirect, to cause the
      direction or management of such entity, whether by contract or
      otherwise, or (ii) ownership of fifty percent (50%) or more of the
      outstanding shares, or (iii) beneficial ownership of such entity.

      "You" (or "Your") shall mean an individual or Legal Entity
      exercising permissions granted by this License.

      "Source" form shall mean the preferred form for making modifications,
      including but not limited to software source code, documentation
      source, and configuration files.

      "Object" form shall mean any form resulting from mechanical
      transformation or translation of a Source form, including but
      not limited to compiled object code, generated documentation,
      and conversions to other media types.

      "Work" shall mean the work of authorship, whether in Source or
      Object form, made available under the License, as indicated by a
      copyright notice that is included in or attached to the work
      (an example is provided in the Appendix below).

      "Derivative Works" shall mean any work, whether in Source or Object
      form, that is based on (or derived from) the Work and for which the
      editorial revisions, annotations, elaborations, or other modifications
      represent, as a whole, an original work of authorship. For the purposes
      of this License, Derivative Works shall not include works that remain
      separable from, or merely link (or bind by name) to the interfaces of,
      the Work and Derivative Works thereof.

      "Contribution" shall mean any work of authorship, including
      the original version of the Work and any modifications or additions
      to that Work or Derivative Works thereof, that is intentionally
      submitted to Licensor for inclusion in the Work by the copyright owner
      or by an individual or Legal Entity authorized to submit on behalf of
      the copyright owner. For the purposes of this definition, "submitted"
      means any form of electronic, verbal, or written communication sent
      to the Licensor or its representatives, including but not limited to
      communication on electronic mailing lists, source code control systems,
      and issue tracking systems that are managed by, or on behalf of, the
      Licensor for the purpose of discussing and improving the Work, but
      excluding communication that is conspicuously marked or otherwise
      designated in writing by the copyright owner as "Not a Contribution."

      "Contributor" shall mean Licensor and any individual or Legal Entity
      on behalf of whom a Contribution has been received by Licensor and
      subsequently incorporated within the Work.

   2. Grant of Copyright License. Subject to the terms and conditions of
      this License, each Contributor hereby grants to You a perpetual,
      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
      copyright license to reproduce, prepare Derivative Works of,
      publicly display, publicly perform, sublicense, and distribute the
      Work and such Derivative Works in Source or Object form.

   3. Grant of Patent License.
      Subject to the terms and conditions of
      this License, each Contributor hereby grants to You a perpetual,
      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
      (except as stated in this section) patent license to make, have made,
      use, offer to sell, sell, import, and otherwise transfer the Work,
      where such license applies only to those patent claims licensable
      by such Contributor that are necessarily infringed by their
      Contribution(s) alone or by combination of their Contribution(s)
      with the Work to which such Contribution(s) was submitted. If You
      institute patent litigation against any entity (including a
      cross-claim or counterclaim in a lawsuit) alleging that the Work
      or a Contribution incorporated within the Work constitutes direct
      or contributory patent infringement, then any patent licenses
      granted to You under this License for that Work shall terminate
      as of the date such litigation is filed.

   4. Redistribution. You may reproduce and distribute copies of the
      Work or Derivative Works thereof in any medium, with or without
      modifications, and in Source or Object form, provided that You
      meet the following conditions:

      (a) You must give any other recipients of the Work or
          Derivative Works a copy of this License; and

      (b) You must cause any modified files to carry prominent notices
          stating that You changed the files; and

      (c) You must retain, in the Source form of any Derivative Works
          that You distribute, all copyright, patent, trademark, and
          attribution notices from the Source form of the Work,
          excluding those notices that do not pertain to any part of
          the Derivative Works; and

      (d) If the Work includes a "NOTICE" text file as part of its
          distribution, then any Derivative Works that You distribute must
          include a readable copy of the attribution notices contained
          within such NOTICE file, excluding those notices that do not
          pertain to any part of the Derivative Works, in at least one
          of the following places: within a NOTICE text file distributed
          as part of the Derivative Works; within the Source form or
          documentation, if provided along with the Derivative Works; or,
          within a display generated by the Derivative Works, if and
          wherever such third-party notices normally appear. The contents
          of the NOTICE file are for informational purposes only and
          do not modify the License. You may add Your own attribution
          notices within Derivative Works that You distribute, alongside
          or as an addendum to the NOTICE text from the Work, provided
          that such additional attribution notices cannot be construed
          as modifying the License.

      You may add Your own copyright statement to Your modifications and
      may provide additional or different license terms and conditions
      for use, reproduction, or distribution of Your modifications, or
      for any such Derivative Works as a whole, provided Your use,
      reproduction, and distribution of the Work otherwise complies with
      the conditions stated in this License.

   5. Submission of Contributions. Unless You explicitly state otherwise,
      any Contribution intentionally submitted for inclusion in the Work
      by You to the Licensor shall be under the terms and conditions of
      this License, without any additional terms or conditions.
      Notwithstanding the above, nothing herein shall supersede or modify
      the terms of any separate license agreement you may have executed
      with Licensor regarding such Contributions.

   6. Trademarks. This License does not grant permission to use the trade
      names, trademarks, service marks, or product names of the Licensor,
      except as required for reasonable and customary use in describing the
      origin of the Work and reproducing the content of the NOTICE file.

   7. Disclaimer of Warranty. Unless required by applicable law or
      agreed to in writing, Licensor provides the Work (and each
      Contributor provides its Contributions) on an "AS IS" BASIS,
      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
      implied, including, without limitation, any warranties or conditions
      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
      PARTICULAR PURPOSE. You are solely responsible for determining the
      appropriateness of using or redistributing the Work and assume any
      risks associated with Your exercise of permissions under this License.

   8. Limitation of Liability. In no event and under no legal theory,
      whether in tort (including negligence), contract, or otherwise,
      unless required by applicable law (such as deliberate and grossly
      negligent acts) or agreed to in writing, shall any Contributor be
      liable to You for damages, including any direct, indirect, special,
      incidental, or consequential damages of any character arising as a
      result of this License or out of the use or inability to use the
      Work (including but not limited to damages for loss of goodwill,
      work stoppage, computer failure or malfunction, or any and all
      other commercial damages or losses), even if such Contributor
      has been advised of the possibility of such damages.

   9. Accepting Warranty or Additional Liability. While redistributing
      the Work or Derivative Works thereof, You may choose to offer,
      and charge a fee for, acceptance of support, warranty, indemnity,
      or other liability obligations and/or rights consistent with this
      License. However, in accepting such obligations, You may act only
      on Your own behalf and on Your sole responsibility, not on behalf
      of any other Contributor, and only if You agree to indemnify,
      defend, and hold each Contributor harmless for any liability
      incurred by, or claims asserted against, such Contributor by reason
      of your accepting any such warranty or additional liability.

   END OF TERMS AND CONDITIONS

   APPENDIX: How to apply the Apache License to your work.

      To apply the Apache License to your work, attach the following
      boilerplate notice, with the fields enclosed by brackets "{}"
      replaced with your own identifying information. (Don't include
      the brackets!) The text should be enclosed in the appropriate
      comment syntax for the file format. We also recommend that a
      file or class name and description of purpose be included on the
      same "printed page" as the copyright notice for easier
      identification within third-party archives.

   Copyright {yyyy} {name of copyright owner}

   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
   You may obtain a copy of the License at

       http://www.apache.org/licenses/LICENSE-2.0

   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
   limitations under the License.

--------------------------------------------------------------------------------