├── README.md
├── TSPark
│   ├── .classpath
│   ├── .gitignore
│   ├── .project
│   ├── .settings
│   │   ├── org.eclipse.jdt.core.prefs
│   │   └── org.eclipse.m2e.core.prefs
│   ├── pom.xml
│   └── src
│       └── main
│           ├── java
│           │   └── org
│           │       └── wowcoders
│           │           └── tspark
│           │               ├── TS.java
│           │               ├── configurations
│           │               │   └── Configuration.java
│           │               ├── models
│           │               │   ├── Aggregators.java
│           │               │   ├── TSDBQueries.java
│           │               │   ├── TSDBReq.java
│           │               │   ├── TSKey.java
│           │               │   └── Topo.java
│           │               ├── qs
│           │               │   ├── GrafanaTSDB2RequestHandler.java
│           │               │   ├── HttpIngesterHandler.java
│           │               │   ├── Server.java
│           │               │   ├── TSParkQSResponse.java
│           │               │   ├── TSParkQueryInput.java
│           │               │   └── TaggedTS.java
│           │               ├── tags
│           │               │   ├── AtomixDistributedStore.java
│           │               │   ├── AtomixReader.java
│           │               │   ├── AtomixWriter.java
│           │               │   ├── MetaDataCollector.java
│           │               │   ├── ReaderInterface.java
│           │               │   └── WriterInterface.java
│           │               └── utils
│           │                   ├── Combinations.java
│           │                   ├── Hash.java
│           │                   └── Pair.java
│           └── resources
│               ├── berigeiclient-demo.properties
│               ├── grafana.login.rollup.sample.json
│               ├── log4j.properties
│               └── tspark-demo.properties
├── beringeiclient
│   ├── .classpath
│   ├── .gitignore
│   ├── .project
│   ├── .settings
│   │   ├── org.eclipse.jdt.core.prefs
│   │   └── org.eclipse.m2e.core.prefs
│   ├── config
│   │   └── beringeiclient-demo.properties
│   ├── pom.xml
│   ├── scripts
│   │   └── run-client-test.sh
│   └── src
│       ├── main
│       │   ├── java
│       │   │   ├── com
│       │   │   │   └── facebook
│       │   │   │       └── beringei
│       │   │   │           └── thriftclient
│       │   │   │               ├── BeringeiService.java
│       │   │   │               ├── Compression.java
│       │   │   │               ├── ConfigurationInfo.java
│       │   │   │               ├── DataPoint.java
│       │   │   │               ├── GetDataRequest.java
│       │   │   │               ├── GetDataResult.java
│       │   │   │               ├── GetLastUpdateTimesRequest.java
│       │   │   │               ├── GetLastUpdateTimesResult.java
│       │   │   │               ├── GetShardDataBucketResult.java
│       │   │   │               ├── Key.java
│       │   │   │               ├── KeyUpdateTime.java
│       │   │   │               ├── PutDataRequest.java
│       │   │   │               ├── PutDataResult.java
│       │   │   │               ├── ServiceMap.java
│       │   │   │               ├── ShardInfo.java
│       │   │   │               ├── StatusCode.java
│       │   │   │               ├── TimeSeriesBlock.java
│       │   │   │               ├── TimeSeriesData.java
│       │   │   │               └── TimeValuePair.java
│       │   │   └── org
│       │   │       └── wowcoders
│       │   │           ├── beringei
│       │   │           │   └── compression
│       │   │           │       ├── BitStream.java
│       │   │           │       ├── BlockDecoder.java
│       │   │           │       ├── ByteBufferBitReader.java
│       │   │           │       └── LongBufferBitReader.java
│       │   │           └── beringeiclient
│       │   │               ├── BeringeiClient.java
│       │   │               ├── BeringeiTestClient.java
│       │   │               ├── BeringeiTestRawClient.java
│       │   │               ├── configurations
│       │   │               │   └── Configuration.java
│       │   │               └── utils
│       │   │                   └── Pair.java
│       │   └── resources
│       │       ├── beringeiclient-demo.properties
│       │       └── log4j.properties
│       └── test
│           └── java
│               └── org
│                   └── wowcoders
│                       └── beringeiclient
│                           └── compression
│                               └── BlockDecoderTest.java
├── config
│   └── tspark-demo.properties
├── pom.xml
├── quick-start.md
└── scripts
    └── startqs.sh

--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
# tspark
TimeSeries Java client for Facebook Beringei. It also includes a query service with tag support for metrics.
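A minimal push example, distilled from the `TSPark/src/main/java/org/wowcoders/tspark/TS.java` sample shipped in this repo (the class and method names come from that file; treat this as a sketch rather than a stable API):

```java
import org.wowcoders.beringeiclient.BeringeiClient;
import org.wowcoders.tspark.TS;
import org.wowcoders.tspark.models.Topo;
import org.wowcoders.tspark.tags.MetaDataCollector;

public class QuickPush {
    public static void main(String[] args) throws Exception {
        MetaDataCollector.init();               // bootstraps the Atomix-backed tag index
        TS ts = new TS(new BeringeiClient());   // connection details come from the demo properties

        Topo topo = new Topo();                 // a Topo is an ordered list of tag key/value pairs
        topo.add("pool", "login");
        topo.add("colo", "slc");
        topo.add("host", "loginhost1");

        long now = System.currentTimeMillis() / 1000;    // second-resolution timestamps
        ts.addTSSum("logincount", topo, now, 42, 420.0); // writes both a cnt and a sum series
        ts.flush();                             // drains the queued datapoints to Beringei
    }
}
```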
## Prerequisites
Download and deploy the Beringei server from Facebook:
https://github.com/facebookincubator/beringei

## Start the Beringei Server
./beringei_main -beringei_configuration_path /beringeidata/beringei.json -data_directory /beringeidata/data -port 9999 -create_directories -sleep_between_bucket_finalization_secs 60 -allowed_timestamp_behind 900 -bucket_size 1200 -buckets 144 -mintimestampdelta 0 -logtostderr -v=1

## Building the Beringei Java Client
mvn clean package
### This creates the following jar files
beringeiclient/target/beringeiclient-0.0.1-SNAPSHOT.jar
TSPark/target/TSPark-0.0.1-SNAPSHOT.jar
dist/target/TSPark-jar-with-dependencies.jar (fat jar)

## Samples
Send and retrieve metrics data using the Java client (see the usage sketch above and `TSPark/src/main/java/org/wowcoders/tspark/TS.java`).

## Grafana dashboards
### Setting up Grafana
Add a data source - sample: http://127.0.0.1:58080/tsdb
### Installing the dashboards
### Accessing the data from Grafana
### Importing the dashboard from the sample json file

## Features
### Store and Retrieve Metrics to/from Beringei
### Aggregations

## QueryService APIs
http://<host>:<port>/tsdb/api/query
http://<host>:<port>/tsdb/api/aggregators
http://<host>:<port>/tsdb/api/suggest

(Default sample)
http://127.0.0.1:58080/tsdb/api/query
http://127.0.0.1:58080/tsdb/api/aggregators
http://127.0.0.1:58080/tsdb/api/suggest
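The query endpoint accepts an OpenTSDB-style JSON body. The field names below are inferred from the `TSDBReq`/`TSDBQueries` models later in this dump (`start`/`end` in milliseconds; `queries` entries carry `metric`, `aggregator`, and a `tags` map, where `*` appears to act as a group-by wildcard), so treat this as a sketch:

    curl -s --compressed -X POST http://127.0.0.1:58080/tsdb/api/query \
      -H 'Content-Type: application/json' \
      -d '{
            "start": 1500000000000,
            "end": 1500000600000,
            "queries": [
              {"metric": "logincount", "aggregator": "cnt",
               "tags": {"pool": "login", "colo": "slc", "host": "*"}}
            ]
          }'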
--------------------------------------------------------------------------------
/TSPark/.classpath:
--------------------------------------------------------------------------------

--------------------------------------------------------------------------------
/TSPark/.gitignore:
--------------------------------------------------------------------------------
/target/

--------------------------------------------------------------------------------
/TSPark/.project:
--------------------------------------------------------------------------------
<?xml version="1.0" encoding="UTF-8"?>
<projectDescription>
    <name>TSPark</name>
    <comment></comment>
    <projects>
    </projects>
    <buildSpec>
        <buildCommand>
            <name>org.eclipse.jdt.core.javabuilder</name>
            <arguments>
            </arguments>
        </buildCommand>
        <buildCommand>
            <name>org.eclipse.m2e.core.maven2Builder</name>
            <arguments>
            </arguments>
        </buildCommand>
    </buildSpec>
    <natures>
        <nature>org.eclipse.jdt.core.javanature</nature>
        <nature>org.eclipse.m2e.core.maven2Nature</nature>
    </natures>
</projectDescription>

--------------------------------------------------------------------------------
/TSPark/.settings/org.eclipse.jdt.core.prefs:
--------------------------------------------------------------------------------
eclipse.preferences.version=1
org.eclipse.jdt.core.compiler.codegen.targetPlatform=1.8
org.eclipse.jdt.core.compiler.compliance=1.8
org.eclipse.jdt.core.compiler.problem.forbiddenReference=warning
org.eclipse.jdt.core.compiler.source=1.8

--------------------------------------------------------------------------------
/TSPark/.settings/org.eclipse.m2e.core.prefs:
--------------------------------------------------------------------------------
activeProfiles=
eclipse.preferences.version=1
resolveWorkspaceProjects=true
version=1

--------------------------------------------------------------------------------
/TSPark/pom.xml:
--------------------------------------------------------------------------------
<project xmlns="http://maven.apache.org/POM/4.0.0"
         xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
    <modelVersion>4.0.0</modelVersion>

    <parent>
        <groupId>com.wowcoders.tspark</groupId>
        <artifactId>TSPark-ALL</artifactId>
        <version>0.0.1-SNAPSHOT</version>
    </parent>

    <artifactId>TSPark</artifactId>

    <build>
        <plugins>
            <plugin>
                <groupId>org.apache.maven.plugins</groupId>
                <artifactId>maven-compiler-plugin</artifactId>
                <version>3.6.1</version>
                <configuration>
                    <source>1.8</source>
                    <target>1.8</target>
                </configuration>
            </plugin>
        </plugins>
    </build>

    <dependencies>
        <dependency>
            <groupId>com.wowcoders.tspark</groupId>
            <artifactId>beringeiclient</artifactId>
            <version>0.0.1-SNAPSHOT</version>
        </dependency>

        <dependency>
            <groupId>com.facebook.jcommon</groupId>
            <artifactId>util</artifactId>
            <version>0.1.29</version>
        </dependency>

        <dependency>
            <groupId>com.google.code.gson</groupId>
            <artifactId>gson</artifactId>
            <version>2.8.0</version>
        </dependency>

        <dependency>
            <groupId>io.atomix</groupId>
            <artifactId>atomix-all</artifactId>
            <version>1.0.0-rc9</version>
        </dependency>

        <dependency>
            <groupId>org.eclipse.jetty.aggregate</groupId>
            <artifactId>jetty-all-server</artifactId>
            <version>8.2.0.v20160908</version>
        </dependency>
    </dependencies>
</project>

--------------------------------------------------------------------------------
/TSPark/src/main/java/org/wowcoders/tspark/TS.java:
--------------------------------------------------------------------------------
package org.wowcoders.tspark;

import java.math.BigInteger;
import java.util.ArrayList;
import java.util.Collection;
import java.util.List;

import org.wowcoders.beringeiclient.BeringeiClient;
import org.wowcoders.tspark.models.Aggregators;
import org.wowcoders.tspark.models.TSKey;
import org.wowcoders.tspark.models.Topo;
import org.wowcoders.tspark.tags.AtomixDistributedStore;
import org.wowcoders.tspark.tags.MetaDataCollector;

import com.facebook.beringei.thriftclient.DataPoint;

public class TS {
    List<DataPoint> dps = new ArrayList<DataPoint>();
    BeringeiClient client = null;

    /* aggregated to seconds */
    public TS(BeringeiClient client) {
        this.client = client;
    }

    public void pushTS(TSKey ts, long unixTime, double value) {
        long hashCode = ts.topoHashCode();

        long shardId = BigInteger.valueOf(hashCode).mod(BigInteger.valueOf(client.getShardCount())).intValue();

        this.client.pushTS(ts.getNamespace() + "_" + ts.hash(),
                shardId,
                unixTime, value);

        /* TODO ES, Upload, Topo, metricToTopo, TopoToShardId (update when changing) */

        /*if (ts.getTags().get(1).second.equals("loginhost1000")) {
            System.out.println(ts.toString());
            System.out.println(ts.hashCode() + ":" + ts.topoHashCode());
        }*/
        MetaDataCollector.buildIndex(shardId, (Topo) ts, ts.getMetric(), ts.metricHash());
    }

    public void flush() {
        this.client.flush();
    }

    /* could be second-level raw data (number of occurrences and sum of values) if cnt and sum are different */
    public void addTSSum(String metric, Topo topo, long unixTime, int count, double sum) {
        TSKey tsCount = new TSKey(Aggregators.CNT, metric, topo);
        TSKey tsSum = new TSKey(Aggregators.SUM, metric, topo);

        pushTS(tsCount, unixTime, count);
        pushTS(tsSum, unixTime, sum);
    }

    /* could be second-level raw data (number of occurrences) if cnt and sum are the same */
    public void addTSCnt(String metric, Topo topo, long unixTime, int count) {
        TSKey tsCount = new TSKey(Aggregators.CNT, metric, topo);

        pushTS(tsCount, unixTime, count);
    }

    void addTSAvg(String metric, Topo topo, long unixTime, double avg) {
        TSKey tsAvg = new TSKey(Aggregators.AVG, metric, topo);
        pushTS(tsAvg, unixTime, avg);
    }

    void addTSSumAvg(String metric, Topo topo, long unixTime, int count, double sum, double avg) {
        addTSSum(metric, topo, unixTime, count, sum);
        addTSAvg(metric, topo, unixTime, avg);
    }

    public static void main(String args[]) throws Exception {
        MetaDataCollector.init();
        BeringeiClient client = new BeringeiClient();

        TS ts = new TS(client);

        int hosts = 10;

        long ms = System.currentTimeMillis() / 1000;

        for (int host = 0; host < hosts; host++) {
            Topo topo = new Topo();
            topo.add("pool", "login");
            topo.add("colo", "slc");
            topo.add("host", "loginhost" + host);
            int count = (int) (Math.random() * 100);
            int sum = (int) (Math.random() * 1000);
            ts.addTSSum("logincount", topo, ms, count, sum);
        }

        ts.flush();
        System.out.println("Data pushed into Queue.");

        Topo topo = new Topo();
        topo.add("pool", "login");
        topo.add("colo", "lvs");
        //topo.add("host", "loginhost1");

        String key = topo._hash();
        //String dimkey = topo._hashDims();
        TSKey tsSum = new TSKey(Aggregators.AVG, "cpu", topo);
        String mHash = tsSum.metricHash();

        Thread.sleep(8000);

        Collection dimKeys = AtomixDistributedStore.map2.get(mHash).join();
        System.out.println(dimKeys.size());
        String dimsHash = (String) dimKeys.iterator().next();

        StringBuilder sb = new StringBuilder();
        sb.append(dimsHash);
        sb.append("_");
        sb.append(key);
        sb.append("_");
        sb.append(mHash);
        String lkey = sb.toString();
        //AtomixDistributedStore.map2.remove(mHash).join();
        //AtomixDistributedStore.map3.remove(lkey).join();

        System.out.println("dimkeys->" + mHash + ":" + AtomixDistributedStore.map2.get(mHash).join());
        Collection rowkeys = AtomixDistributedStore.map3.get(lkey).join();
        rowkeys.iterator().forEachRemaining(o -> {
            System.out.println(o);
        });
        System.out.println("-----");
        //AtomixDistributedStore.map3.put("2E2FFC3249D3CC5D1116C4392F4BC584_3CB29AC06397941BDFA5CCCDB54D4F97_12F0E3BB2EB5F19C2B38602A160156AB", "B4CA2DE6AB1570CD780D5AFBF5042CDB").join();
        //System.out.println("key->"+lkey+ ":" + AtomixDistributedStore.map3.get(lkey).join());
        /*System.out.println(topo._hash());
        System.out.println(topo.getIndexes());*/
    }
}
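`pushTS` above routes each series to a shard by hashing its topology. A self-contained sketch of that computation (the shard count is illustrative; the real value comes from `BeringeiClient.getShardCount()`):

```java
import java.math.BigInteger;

public class ShardRouting {
    // Mirrors TS.pushTS: BigInteger.mod always yields a non-negative result,
    // unlike Java's % operator, so negative topology hash codes still map
    // to a valid shard id.
    static long shardFor(long topoHashCode, long shardCount) {
        return BigInteger.valueOf(topoHashCode)
                .mod(BigInteger.valueOf(shardCount))
                .intValue();
    }

    public static void main(String[] args) {
        long shardCount = 100; // illustrative only
        System.out.println(shardFor(-1234567L, shardCount)); // prints 33, never a negative index
    }
}
```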
--------------------------------------------------------------------------------
/TSPark/src/main/java/org/wowcoders/tspark/configurations/Configuration.java:
--------------------------------------------------------------------------------
package org.wowcoders.tspark.configurations;

import java.io.FileInputStream;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.io.InputStream;
import java.util.ArrayList;
import java.util.List;
import java.util.Properties;

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.wowcoders.tspark.utils.Pair;

//TODO - Use the Standard Configuration Builder
public class Configuration {
    final static Logger logger = LoggerFactory.getLogger(Configuration.class);

    private static Properties prop = null;

    public static void init(String configFile) {
        InputStream is = null;
        try {
            prop = new Properties();
            if (configFile != null) {
                is = new FileInputStream(configFile);
            } else {
                is = Configuration.class.getResourceAsStream("/tspark-demo.properties");
            }
            prop.load(is);
        } catch (FileNotFoundException e) {
            e.printStackTrace();
        } catch (IOException e) {
            e.printStackTrace();
        }
    }

    private Configuration() {
    }

    public static class APIConfig {
        boolean enabled = false;

        boolean enabledTLS = false;
        Pair listenAddress = null;

        int threadsCnt = 2;

        public
APIConfig(Properties prop) { 51 | enabled = Boolean.parseBoolean(prop.getProperty("api.enabled")); 52 | 53 | if (enabled) { 54 | enabledTLS = Boolean.parseBoolean(prop.getProperty("api.enable-tls")); 55 | String listen = prop.getProperty("api.listen"); 56 | String []addrport = listen.split(":"); 57 | listenAddress = new Pair(addrport[0], Integer.parseInt(addrport[1])); 58 | 59 | threadsCnt = Integer.parseInt(prop.getProperty("api.threads-cnt")); 60 | } 61 | } 62 | 63 | public String toString() { 64 | StringBuilder sb = new StringBuilder(); 65 | sb.append("enabled:" + enabled).append("; "); 66 | if (enabled) { 67 | sb.append("tls:"+enabledTLS) 68 | .append("; "); 69 | sb.append("(Address:"+listenAddress.first) 70 | .append("; ").append("port:"+listenAddress.second).append("); ");; 71 | } 72 | return sb.toString(); 73 | } 74 | 75 | public boolean isEnabled() { 76 | return enabled; 77 | } 78 | 79 | public void setEnabled(boolean enabled) { 80 | this.enabled = enabled; 81 | } 82 | 83 | public boolean isEnabledTLS() { 84 | return enabledTLS; 85 | } 86 | 87 | public void setEnabledTLS(boolean enabledTLS) { 88 | this.enabledTLS = enabledTLS; 89 | } 90 | 91 | public Pair getListenAddress() { 92 | return listenAddress; 93 | } 94 | 95 | public void setListenAddress(Pair listenAddress) { 96 | this.listenAddress = listenAddress; 97 | } 98 | 99 | public int getThreadsCnt() { 100 | return threadsCnt; 101 | } 102 | 103 | public void setThreadsCnt(int threadsCnt) { 104 | this.threadsCnt = threadsCnt; 105 | } 106 | } 107 | 108 | public static class TagsCacheConfig { 109 | long ttl = 604800000; 110 | Pair listenAddress = null; 111 | 112 | List> cluster = new ArrayList>(); 113 | 114 | public TagsCacheConfig(Properties prop) { 115 | ttl = Long.parseLong(prop.getProperty("atomix.cache-tags.ttl-ms")); 116 | 117 | String listen = prop.getProperty("atomix.cache-tags.listen"); 118 | if (listen != null) { 119 | String []addrport = listen.split(":"); 120 | listenAddress = new Pair(addrport[0], Integer.parseInt(addrport[1])); 121 | } 122 | 123 | String clusterStr = prop.getProperty("atomix.cache-tags.cluster"); 124 | if (clusterStr != null) { 125 | String []clusterArr = clusterStr.split(","); 126 | for(String _cluster: clusterArr) { 127 | String []ip_port = _cluster.split(":"); 128 | Pair caddress = new Pair(ip_port[0], Integer.parseInt(ip_port[1])); 129 | cluster.add(caddress); 130 | } 131 | } 132 | } 133 | 134 | public String toString() { 135 | StringBuilder sb = new StringBuilder(); 136 | sb.append("ttl:" + ttl).append("; "); 137 | if (listenAddress != null) { 138 | sb.append("(Address:"+listenAddress.first) 139 | .append("; ").append("port:"+listenAddress.second).append("); "); 140 | } 141 | 142 | if (cluster != null) { 143 | cluster.stream().forEach(addr-> { 144 | sb.append("(Address:"+addr.first) 145 | .append("; ").append("port:"+addr.second).append("); "); 146 | }); 147 | } 148 | return sb.toString(); 149 | } 150 | 151 | public long getTtl() { 152 | return ttl; 153 | } 154 | 155 | public void setTtl(long ttl) { 156 | this.ttl = ttl; 157 | } 158 | 159 | public Pair getListenAddress() { 160 | return listenAddress; 161 | } 162 | 163 | public void setListenAddress(Pair listenAddress) { 164 | this.listenAddress = listenAddress; 165 | } 166 | 167 | public List> getCluster() { 168 | return cluster; 169 | } 170 | 171 | public void setCluster(List> cluster) { 172 | this.cluster = cluster; 173 | } 174 | } 175 | 176 | 177 | /*******************************/ 178 | 179 | APIConfig apiConfig = null; 180 | TagsCacheConfig 
tagsCacheConfig = null; 181 | 182 | public void load() { 183 | apiConfig = new APIConfig(prop); 184 | tagsCacheConfig = new TagsCacheConfig(prop); 185 | } 186 | 187 | /*******************************/ 188 | 189 | public String toString() { 190 | StringBuilder sb = new StringBuilder(); 191 | 192 | sb.append("api config: " + apiConfig).append("\n"); 193 | sb.append("tag cache config: " + tagsCacheConfig).append("\n"); 194 | 195 | return sb.toString(); 196 | } 197 | 198 | private static Configuration instance = new Configuration(); 199 | 200 | public static void loadConfig() { 201 | instance.load(); 202 | logger.info(instance.toString()); 203 | } 204 | 205 | public APIConfig getApiConfig() { 206 | return apiConfig; 207 | } 208 | 209 | public TagsCacheConfig getTagsCacheConfig() { 210 | return tagsCacheConfig; 211 | } 212 | 213 | public static Configuration getInstnace() { 214 | if (instance.apiConfig == null) { 215 | synchronized(instance) { 216 | loadConfig(); 217 | } 218 | } 219 | return instance; 220 | } 221 | /*******************************/ 222 | 223 | public static void main(String a[]) { 224 | loadConfig(); 225 | } 226 | } -------------------------------------------------------------------------------- /TSPark/src/main/java/org/wowcoders/tspark/models/Aggregators.java: -------------------------------------------------------------------------------- 1 | package org.wowcoders.tspark.models; 2 | 3 | import java.util.Arrays; 4 | import java.util.Map; 5 | import java.util.stream.Collectors; 6 | 7 | public enum Aggregators { 8 | CNT("cnt"), 9 | SUM("sum"), 10 | AVG("avg"), 11 | MIN("min"), 12 | MAX("max"); 13 | 14 | private String value; 15 | 16 | Aggregators(String value) { 17 | this.value = value; 18 | } 19 | 20 | private static final Map map; 21 | static { 22 | map = Arrays.stream(values()) 23 | .collect(Collectors.toMap(e -> e.value, e -> e)); 24 | } 25 | 26 | public String getAggregator() { 27 | return value; 28 | } 29 | 30 | public static String[] getAggregators() { 31 | return Arrays.stream(values()).map(s->s.value).toArray(String[]::new); 32 | } 33 | 34 | public static Aggregators fromString(String str) { 35 | return map.get(str); 36 | } 37 | } -------------------------------------------------------------------------------- /TSPark/src/main/java/org/wowcoders/tspark/models/TSDBQueries.java: -------------------------------------------------------------------------------- 1 | package org.wowcoders.tspark.models; 2 | 3 | import java.io.Serializable; 4 | import java.util.HashMap; 5 | 6 | public class TSDBQueries implements Serializable { 7 | /** 8 | * 9 | */ 10 | private static final long serialVersionUID = -5927404387764376086L; 11 | String metric; 12 | String aggregator; 13 | 14 | HashMap tags; 15 | public String getMetric() { 16 | return metric; 17 | } 18 | public void setMetric(String metric) { 19 | this.metric = metric; 20 | } 21 | public String getAggregator() { 22 | return aggregator; 23 | } 24 | public void setAggregator(String aggregator) { 25 | this.aggregator = aggregator; 26 | } 27 | public HashMap getTags() { 28 | return tags; 29 | } 30 | public void setTags(HashMap tags) { 31 | this.tags = tags; 32 | } 33 | } -------------------------------------------------------------------------------- /TSPark/src/main/java/org/wowcoders/tspark/models/TSDBReq.java: -------------------------------------------------------------------------------- 1 | package org.wowcoders.tspark.models; 2 | 3 | import java.io.Serializable; 4 | import java.util.List; 5 | 6 | public class TSDBReq implements 
Serializable { 7 | /** 8 | * 9 | */ 10 | private static final long serialVersionUID = -6383936801886925610L; 11 | long start = 0; 12 | long end = 0; 13 | List queries; 14 | 15 | public long getStart() { 16 | return start; 17 | } 18 | public void setStart(long start) { 19 | this.start = start; 20 | } 21 | public long getEnd() { 22 | return end; 23 | } 24 | public void setEnd(long end) { 25 | this.end = end; 26 | } 27 | public List getQuery() { 28 | return queries; 29 | } 30 | public void setQuery(List queries) { 31 | this.queries = queries; 32 | } 33 | public static long getSerialversionuid() { 34 | return serialVersionUID; 35 | } 36 | } -------------------------------------------------------------------------------- /TSPark/src/main/java/org/wowcoders/tspark/models/TSKey.java: -------------------------------------------------------------------------------- 1 | package org.wowcoders.tspark.models; 2 | 3 | import java.io.Serializable; 4 | 5 | import org.wowcoders.tspark.utils.Hash; 6 | 7 | public class TSKey extends Topo implements Serializable { 8 | private static final long serialVersionUID = 1977450313720506886L; 9 | 10 | String namespace = "g"; 11 | String metric; 12 | String aggregator; 13 | 14 | String calculatedHash = null; 15 | String metricAggHash = null; 16 | String metricHash = null; 17 | 18 | public TSKey(Topo t) { 19 | super(t); 20 | calculatedHash = null; 21 | } 22 | 23 | public TSKey(Aggregators aggregator, String metric, Topo t) { 24 | super(t); 25 | 26 | calculatedHash = null; 27 | 28 | this.metric = metric; 29 | this.aggregator = aggregator.getAggregator(); 30 | 31 | hash(); 32 | } 33 | 34 | public String getNamespace() { 35 | return namespace; 36 | } 37 | 38 | public void setNamespace(String namespace) { 39 | this.namespace = namespace; 40 | } 41 | 42 | public Aggregators getAggregator() { 43 | return Aggregators.fromString(aggregator); 44 | } 45 | 46 | public String getMetric() { 47 | return metric; 48 | } 49 | 50 | public void setAggregator(Aggregators aggregator) { 51 | this.aggregator = aggregator.getAggregator(); 52 | calculatedHash = null; 53 | } 54 | 55 | public void setMetric(String metric) { 56 | this.metric = metric; 57 | calculatedHash = null; 58 | } 59 | 60 | public String toString() { 61 | StringBuilder sb = new StringBuilder(); 62 | sb.append(aggregator); 63 | sb.append(":"); 64 | sb.append(metric); 65 | sb.append(":"); 66 | sb.append(super.toString()); 67 | return sb.toString(); 68 | } 69 | 70 | public String hash() { 71 | if (calculatedHash == null) { 72 | metricAggHash = calcAggMetricHash(aggregator, metric); 73 | 74 | metricHash = Hash.hash(metric); 75 | 76 | StringBuilder sb = new StringBuilder(); 77 | sb.append(metricAggHash); 78 | sb.append("_"); 79 | sb.append(super.hash()); 80 | 81 | calculatedHash = sb.toString(); 82 | } 83 | 84 | return calculatedHash; 85 | } 86 | 87 | public static String calcAggMetricHash(String aggregator, String metric) { 88 | StringBuilder metricInfo = new StringBuilder(); 89 | metricInfo.append(aggregator); 90 | metricInfo.append(":"); 91 | metricInfo.append(metric); 92 | 93 | String metricAggHash = Hash.hash(metricInfo.toString()); 94 | return metricAggHash; 95 | } 96 | 97 | public int hashCode() { 98 | String hash = hash(); 99 | //System.out.println("*"+hash); 100 | return hash.hashCode(); 101 | } 102 | 103 | public int topoHashCode() { 104 | return super.hashCode(); 105 | } 106 | 107 | public String metricAggHash() { 108 | if (calculatedHash == null) hash(); 109 | return metricAggHash; 110 | } 111 | 112 | public String 
metricHash() { 113 | if (calculatedHash == null) hash(); 114 | return metricHash; 115 | } 116 | } -------------------------------------------------------------------------------- /TSPark/src/main/java/org/wowcoders/tspark/models/Topo.java: -------------------------------------------------------------------------------- 1 | package org.wowcoders.tspark.models; 2 | 3 | import java.io.Serializable; 4 | import java.util.ArrayList; 5 | import java.util.Collections; 6 | import java.util.List; 7 | 8 | import org.wowcoders.tspark.utils.Combinations; 9 | import org.wowcoders.tspark.utils.Hash; 10 | import org.wowcoders.tspark.utils.Pair; 11 | 12 | public class Topo implements Serializable, Cloneable { 13 | private static final long serialVersionUID = -4963561786934489036L; 14 | List[] indexes = null; 15 | 16 | List> tags = null; 17 | String calculatedHash = null; 18 | String calculatedHashDims = null; 19 | long cacheTime = -1; 20 | 21 | public Topo() { 22 | tags = new ArrayList>(); 23 | clearFlags(); 24 | } 25 | 26 | public Topo(Topo t) { 27 | tags = t.tags; 28 | calculatedHash = t.calculatedHash; 29 | hashAndIndex(); 30 | } 31 | 32 | public void clear() { 33 | tags.clear(); 34 | clearFlags(); 35 | } 36 | 37 | public void clearFlags() { 38 | calculatedHash = null; 39 | calculatedHashDims = null; 40 | indexes = null; 41 | cacheTime = -1; 42 | } 43 | 44 | public Topo(List> tags) { 45 | setTags(tags); 46 | } 47 | 48 | public void add(String key, String value) { 49 | tags.add(new Pair(key, value)); 50 | clearFlags(); 51 | } 52 | 53 | public void add(Pair pair) { 54 | tags.add(pair); 55 | clearFlags(); 56 | } 57 | 58 | public void sort() { 59 | Collections.sort(tags, (o1, o2) -> o1.first.compareTo(o2.first)); 60 | } 61 | 62 | public String toString() { 63 | return topoString(); 64 | } 65 | 66 | public String topoString() { 67 | StringBuilder sb = new StringBuilder(); 68 | for(Pair pair: tags) { 69 | sb.append("("); 70 | sb.append(pair.first); 71 | sb.append("="); 72 | sb.append(pair.second); 73 | sb.append(")"); 74 | } 75 | return sb.toString(); 76 | } 77 | 78 | private String _toStringDims() { 79 | StringBuilder sb = new StringBuilder(); 80 | for(Pair pair: tags) { 81 | sb.append("("); 82 | sb.append(pair.first); 83 | sb.append(")"); 84 | } 85 | return sb.toString(); 86 | } 87 | 88 | public String hash() { 89 | return _hash(); 90 | } 91 | 92 | public String forceHash() { 93 | clearFlags(); 94 | return _hash(); 95 | } 96 | 97 | public void hashAndIndex() { 98 | if (calculatedHash == null) { 99 | _hash(); 100 | } 101 | 102 | if (indexes == null) { 103 | indexes = getIndexes(); 104 | } 105 | } 106 | 107 | public String _hash() { 108 | if (calculatedHash == null) { 109 | sort(); 110 | String asString = topoString(); 111 | calculatedHash = Hash.hash(asString); 112 | 113 | asString = _toStringDims(); 114 | calculatedHashDims = Hash.hash(asString); 115 | } 116 | 117 | return calculatedHash; 118 | } 119 | 120 | public String hashTopoWithNoStar() { 121 | StringBuilder sb = new StringBuilder(); 122 | for(Pair pair: tags) { 123 | if (!pair.second.equals("*")) { 124 | sb.append("("); 125 | sb.append(pair.first); 126 | sb.append("="); 127 | sb.append(pair.second); 128 | sb.append(")"); 129 | } 130 | } 131 | return Hash.hash(sb.toString()); 132 | } 133 | 134 | public String _hashDims() { 135 | return calculatedHashDims; 136 | } 137 | 138 | public int hashCode() { 139 | String hash = _hash(); 140 | //System.out.println("**"+hash); 141 | return hash.hashCode(); 142 | } 143 | 144 | public List> getTags() { 145 | return 
tags;
    }

    public void setTags(List<Pair<String, String>> tags) {
        this.tags = tags;
        clearFlags();
        hashAndIndex();
    }

    public List<String>[] getIndexes() {
        List<String> indexes1 = new ArrayList<String>();
        List<String> indexes2 = new ArrayList<String>();
        StringBuilder sb = new StringBuilder();
        StringBuilder sbDims = new StringBuilder();
        for (int i = 1; i <= tags.size(); i++) {
            List<List<Pair<String, String>>> pairsList = Combinations.combination(tags, i);

            for (List<Pair<String, String>> pairs : pairsList) {
                for (Pair<String, String> pair : pairs) {
                    sb.append("(");
                    sb.append(pair.first);
                    sb.append("=");
                    sb.append(pair.second);
                    sb.append(")");

                    sbDims.append("(");
                    sbDims.append(pair.first);
                    sbDims.append(")");
                }

                String asString = sb.toString();
                String calculatedHash = Hash.hash(asString);
                indexes1.add(calculatedHash);

                asString = sbDims.toString();
                calculatedHash = Hash.hash(asString);
                indexes2.add(calculatedHash);

                sb.setLength(0);
                sbDims.setLength(0);
            }
        }
        @SuppressWarnings("unchecked")
        List<String>[] indexSet = (ArrayList<String>[]) new ArrayList[2];
        indexSet[0] = indexes1;
        indexSet[1] = indexes2;
        return indexSet;
    }

    public void setCacheTime() {
        cacheTime = System.currentTimeMillis() / 1000;
    }

    public boolean updateRequired() {
        return System.currentTimeMillis() / 1000 - cacheTime >= 604800;
    }

    public Topo clone() throws CloneNotSupportedException {
        List<Pair<String, String>> clone = new ArrayList<Pair<String, String>>(this.tags.size());
        for (Pair<String, String> item : this.tags) clone.add(item.clone());
        return new Topo(clone);
    }
}
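`getIndexes` is what makes tag filters queryable: every combination of the topology's tags is hashed twice, once with values and once with keys only. A hypothetical walk-through for a two-tag topology (assuming `Combinations.combination` enumerates subsets in insertion order; the actual digests are whatever `Hash.hash` produces):

```java
Topo topo = new Topo();
topo.add("colo", "slc");
topo.add("pool", "login");

List<String>[] idx = topo.getIndexes();
// idx[0] (value index):     hash("(colo=slc)"), hash("(pool=login)"), hash("(colo=slc)(pool=login)")
// idx[1] (dimension index): hash("(colo)"),     hash("(pool)"),       hash("(colo)(pool)")
// The dimension-only hashes let a query such as {colo=*, pool=login} locate
// candidate row keys before the value filter is applied.
```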
--------------------------------------------------------------------------------
/TSPark/src/main/java/org/wowcoders/tspark/qs/GrafanaTSDB2RequestHandler.java:
--------------------------------------------------------------------------------
package org.wowcoders.tspark.qs;

import java.io.IOException;
import java.io.OutputStream;
import java.io.OutputStreamWriter;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.Iterator;
import java.util.List;
import java.util.Set;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.TimeUnit;
import java.util.stream.Collectors;
import java.util.zip.GZIPOutputStream;

import javax.servlet.ServletException;
import javax.servlet.http.HttpServlet;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;

import org.eclipse.jetty.servlet.ServletContextHandler;
import org.eclipse.jetty.servlet.ServletHolder;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.wowcoders.tspark.models.Aggregators;
import org.wowcoders.tspark.models.TSDBQueries;
import org.wowcoders.tspark.models.TSDBReq;
import org.wowcoders.tspark.models.TSKey;
import org.wowcoders.tspark.models.Topo;
import org.wowcoders.tspark.tags.AtomixDistributedStore;
import org.wowcoders.tspark.utils.Pair;

import com.google.gson.Gson;
import com.google.gson.stream.JsonWriter;

import io.atomix.collections.DistributedMap;

public class GrafanaTSDB2RequestHandler {
    final static Logger logger = LoggerFactory.getLogger(GrafanaTSDB2RequestHandler.class);
    private static TaggedTS taggedTS = null;

    @SuppressWarnings("serial")
    public static class Tsdb2QueryAggServlet extends HttpServlet {
        static final String[] agg = Aggregators.getAggregators();

        protected void doGet(HttpServletRequest request, HttpServletResponse response) throws ServletException, IOException {
            Gson gson = new Gson();
            response.setStatus(HttpServletResponse.SC_OK);
            response.setContentType("application/json; charset=utf-8");
            OutputStream os = response.getOutputStream();
            JsonWriter jw = new JsonWriter(new OutputStreamWriter(os, "UTF-8"));
            gson.toJson(agg, String[].class, jw);
            jw.flush();
            jw.close();
            os.close();
        }
    }

    @SuppressWarnings("serial")
    public static class Tsdb2QuerySuggestServlet extends HttpServlet {
        static final String[] tagv = {"slc", "login", "loginhost1000"};
        static final String[] tagk = {"pool", "colo", "host"};
        static final String[] metrics = {"logincount"};

        protected void doGet(HttpServletRequest request, HttpServletResponse response) throws ServletException, IOException {
            String limitStr = request.getParameter("max");

            int _limit = 100;
            if (limitStr != null) {
                _limit = Integer.parseInt(limitStr);
            }

            String query = request.getParameter("q");
            String type = request.getParameter("type");

            DistributedMap map = null;
            if (type.equals("tagv")) {
                map = AtomixDistributedStore.tagv;
            } else if (type.equals("tagk")) {
                map = AtomixDistributedStore.tagk;
            } else if (type.equals("metrics")) {
                map = AtomixDistributedStore.metrics;
            }

            Set _keys = map.keySet().join();
            List<String> list = _keys.stream().parallel().map(strObj -> (String) strObj)
                    .filter(str -> str.contains(query))
                    .sorted().limit(_limit).collect(Collectors.toList());
            try {
                sendResponse(response, list);
            } catch (IOException e) {
                e.printStackTrace();
            }
        }

        void sendResponse(HttpServletResponse response, List<String> list) throws IOException {
            response.setStatus(HttpServletResponse.SC_OK);
            response.setContentType("application/json; charset=utf-8");

            Gson gson = new Gson();

            OutputStream os = response.getOutputStream();
            JsonWriter jw = new JsonWriter(new OutputStreamWriter(os, "UTF-8"));

            gson.toJson(list.toArray(), String[].class, jw);

            jw.flush();
            jw.close();
            os.close();
        }
    }

    @SuppressWarnings("serial")
    public static class Tsdb2QueryServlet extends HttpServlet {
        //Gson gson = new Gson();
        //TSDB tsdb = Client.getTsdb(environment);
        //ExtensionRegistry protobufExtensionRegistry = tsdb.getStringOrdinalConverter().getRegistry();

        protected void doGet(HttpServletRequest request, HttpServletResponse response) throws ServletException, IOException {
            response.setContentType("text/html");
            response.setStatus(HttpServletResponse.SC_OK);
            response.getWriter().println("Use Post");
        }

        protected void doPost(HttpServletRequest request, HttpServletResponse response) throws ServletException, IOException {
            Gson gson = new Gson();
            response.setStatus(HttpServletResponse.SC_OK);
            response.setContentType("application/json");
            response.setHeader("Content-Encoding", "gzip");
            TSDBReq q = gson.fromJson(request.getReader(), TSDBReq.class);

            try {
                OutputStream os = response.getOutputStream();
| GZIPOutputStream gzip = new GZIPOutputStream(os); 143 | JsonWriter jw = new JsonWriter(new OutputStreamWriter(gzip, "UTF-8")); 144 | jw.beginArray(); 145 | long _start = TimeUnit.NANOSECONDS.toMicros(System.nanoTime()); 146 | List tsArr = new ArrayList(); 147 | long end = q.getEnd(); 148 | if (end == 0) { 149 | end = System.currentTimeMillis(); 150 | } 151 | final long endms = end; 152 | long start = q.getStart(); 153 | 154 | List tsdbQueries = q.getQuery(); 155 | for(TSDBQueries tsdbQuery: tsdbQueries) 156 | { 157 | TSDBQueries tq = tsdbQuery; 158 | 159 | Topo topo = new Topo(); 160 | HashMap tagsIn = tq.getTags(); 161 | tagsIn.entrySet().stream().forEach(entry-> { 162 | topo.add(entry.getKey(), entry.getValue()); 163 | }); 164 | 165 | Aggregators.fromString(tq.getAggregator()); 166 | 167 | TSKey tsKey = new TSKey(Aggregators.fromString(tq.getAggregator()), tq.getMetric(), topo); 168 | // tsAvg._hash(); 169 | tsArr.add(tsKey); 170 | 171 | logger.info("====ts access input begin===="); 172 | logger.info("start:" + q.getStart() + ", end:" + end); 173 | logger.info("agg:metric -> " + tq.getAggregator() + ":" + tq.getMetric()); 174 | logger.info("====ts access input end===="); 175 | } 176 | 177 | List>> cfs = new ArrayList>>(); 178 | 179 | for(TSKey _tsKey: tsArr) { 180 | CompletableFuture> cf = taggedTS.getData(q.getStart()/1000, end/1000, _tsKey); 181 | cfs.add(cf); 182 | } 183 | 184 | logger.info("Waiting multiple metrics results.."); 185 | 186 | //List dps = cf1.join(); 187 | //System.out.println(dps); 188 | 189 | CompletableFuture.allOf(cfs.toArray(new CompletableFuture[cfs.size()])).thenAccept(x -> { 190 | cfs.stream().forEach(metric -> { 191 | List series = metric.join(); 192 | series.stream().forEach(dps-> { 193 | TSKey tsKey = dps.getMeta(); 194 | StringBuilder sb = new StringBuilder(); 195 | try { 196 | jw.beginObject(); 197 | 198 | Iterator> idps = dps.getDps().iterator(); 199 | 200 | jw.name("metric"); 201 | jw.value(tsKey.getMetric()); 202 | 203 | jw.name("aggregator"); 204 | jw.value(tsKey.getAggregator().getAggregator()); 205 | 206 | jw.name("tags"); 207 | jw.beginObject(); 208 | List> tags = tsKey.getTags(); 209 | for(Pair tag : tags) { 210 | jw.name(tag.first); 211 | jw.value(tag.second); 212 | } 213 | jw.endObject(); 214 | 215 | jw.name("dps"); 216 | jw.beginObject(); 217 | while(idps.hasNext()) { 218 | Pair dp = idps.next(); 219 | long ms = dp.first * 1000; 220 | // System.out.println(ms+" start:"+ start + ", end:" + endms); 221 | if (start <= ms && ms <= endms) { 222 | sb.setLength(0); 223 | sb.append(dp.first); 224 | jw.name(sb.toString()); 225 | jw.value(dp.second); 226 | 227 | } 228 | } 229 | jw.endObject(); 230 | jw.endObject(); 231 | } catch (IOException e) { 232 | // TODO Auto-generated catch block 233 | e.printStackTrace(); 234 | } 235 | }); 236 | }); 237 | long _end = TimeUnit.NANOSECONDS.toMicros(System.nanoTime()); 238 | logger.info("Overall TimeTaken:" + (_end - _start) + "(micros)"); 239 | try { 240 | jw.endArray(); 241 | jw.flush(); 242 | gzip.finish(); 243 | jw.close(); 244 | } catch (IOException e) { 245 | // TODO Auto-generated catch block 246 | e.printStackTrace(); 247 | } 248 | //gson.toJson(q, TSDBReq.class, jw); 249 | 250 | }).exceptionally(tw->{ 251 | return null; 252 | }).join(); 253 | 254 | } catch(Exception e) { 255 | // slf4jLogger.info(StringUtils.traceToString(e)); 256 | } 257 | } 258 | } 259 | 260 | public static void addServeletHander(ServletContextHandler context, TaggedTS taggedTS) { 261 | GrafanaTSDB2RequestHandler.taggedTS = taggedTS; 262 
| context.addServlet(new ServletHolder(new Tsdb2QueryServlet()), "/tsdb/api/query"); 263 | context.addServlet(new ServletHolder(new Tsdb2QueryAggServlet()), "/tsdb/api/aggregators"); 264 | context.addServlet(new ServletHolder(new Tsdb2QuerySuggestServlet()), "/tsdb/api/suggest"); 265 | } 266 | } -------------------------------------------------------------------------------- /TSPark/src/main/java/org/wowcoders/tspark/qs/HttpIngesterHandler.java: -------------------------------------------------------------------------------- 1 | package org.wowcoders.tspark.qs; 2 | 3 | import java.io.IOException; 4 | import java.io.InputStreamReader; 5 | 6 | import javax.servlet.ServletException; 7 | import javax.servlet.http.HttpServlet; 8 | import javax.servlet.http.HttpServletRequest; 9 | import javax.servlet.http.HttpServletResponse; 10 | 11 | import org.eclipse.jetty.servlet.ServletContextHandler; 12 | import org.eclipse.jetty.servlet.ServletHolder; 13 | import org.wowcoders.tspark.TS; 14 | import org.wowcoders.tspark.models.Aggregators; 15 | import org.wowcoders.tspark.models.TSKey; 16 | import org.wowcoders.tspark.models.Topo; 17 | 18 | import com.google.gson.stream.JsonReader; 19 | 20 | public class HttpIngesterHandler { 21 | @SuppressWarnings("unused") 22 | private static TaggedTS taggedTS = null; 23 | 24 | private static TS tsCli = null; 25 | 26 | public static void addServeletHander(ServletContextHandler context, TaggedTS taggedTS, TS tsCli) { 27 | HttpIngesterHandler.taggedTS = taggedTS; 28 | HttpIngesterHandler.tsCli = tsCli; 29 | 30 | context.addServlet(new ServletHolder(new InternalsUpload()), "/internals/api/datapointsupload"); 31 | } 32 | 33 | @SuppressWarnings("serial") 34 | public static class InternalsUpload extends HttpServlet 35 | { 36 | protected void doPost(HttpServletRequest request, HttpServletResponse response) throws ServletException, IOException 37 | { 38 | response.setStatus(HttpServletResponse.SC_OK); 39 | 40 | Topo topo = new Topo(); 41 | TSKey tsKey = new TSKey(topo); 42 | 43 | JsonReader jr = new JsonReader(new InputStreamReader(request.getInputStream(), "UTF-8")); 44 | jr.beginArray(); 45 | while (jr.hasNext()) { 46 | jr.beginObject(); 47 | String metric = null; 48 | String aggregator = null; 49 | while (jr.hasNext()) { 50 | String name = jr.nextName(); 51 | if (name.equals("aggregator")) { 52 | aggregator = jr.nextString(); 53 | System.out.println(aggregator); 54 | } else if (name.equals("metric")) { 55 | metric = jr.nextString(); 56 | System.out.println(metric); 57 | } else if (name.equals("tags")) { 58 | jr.beginObject(); 59 | tsKey.setMetric(metric); 60 | tsKey.setAggregator(Aggregators.fromString(aggregator)); 61 | tsKey.clear(); 62 | while (jr.hasNext()) { 63 | String tagk = jr.nextName(); 64 | String tagv = jr.nextString(); 65 | System.out.println(tagk+" "+tagv); 66 | topo.add(tagk, tagv); 67 | } 68 | jr.endObject(); 69 | } else if (name.equals("dps")) { 70 | jr.beginObject(); 71 | while (jr.hasNext()) { 72 | String ts = jr.nextName(); 73 | long ut = Long.parseLong(ts); 74 | double value = jr.nextDouble(); 75 | System.out.println(ut+" "+value); 76 | tsCli.pushTS(tsKey, ut, value); 77 | } 78 | jr.endObject(); 79 | } else { 80 | jr.skipValue(); 81 | } 82 | } 83 | jr.endObject(); 84 | } 85 | jr.endArray(); 86 | jr.close(); 87 | } 88 | } 89 | } -------------------------------------------------------------------------------- /TSPark/src/main/java/org/wowcoders/tspark/qs/Server.java: -------------------------------------------------------------------------------- 1 | 
package org.wowcoders.tspark.qs; 2 | 3 | 4 | import org.apache.commons.cli.CommandLine; 5 | import org.apache.commons.cli.CommandLineParser; 6 | import org.apache.commons.cli.HelpFormatter; 7 | import org.apache.commons.cli.Option; 8 | import org.apache.commons.cli.Options; 9 | import org.apache.commons.cli.PosixParser; 10 | import org.eclipse.jetty.server.Connector; 11 | import org.eclipse.jetty.server.nio.*; 12 | import org.eclipse.jetty.servlet.FilterHolder; 13 | import org.eclipse.jetty.servlet.FilterMapping; 14 | import org.eclipse.jetty.servlet.ServletContextHandler; 15 | import org.eclipse.jetty.servlets.CrossOriginFilter; 16 | import org.wowcoders.beringeiclient.BeringeiClient; 17 | import org.wowcoders.tspark.TS; 18 | import org.wowcoders.tspark.configurations.Configuration; 19 | import org.wowcoders.tspark.models.Topo; 20 | import org.wowcoders.tspark.tags.MetaDataCollector; 21 | 22 | public class Server { 23 | static org.eclipse.jetty.server.Server server = new org.eclipse.jetty.server.Server(); 24 | static BeringeiClient client = null; 25 | static TaggedTS taggedTS = null; 26 | 27 | static TS tsCli = null; 28 | 29 | private static void setAccessControlAllowOrigin(ServletContextHandler context) { 30 | FilterHolder holder = new FilterHolder(CrossOriginFilter.class); 31 | holder.setInitParameter(CrossOriginFilter.ALLOWED_ORIGINS_PARAM, "*"); 32 | //holder.setInitParameter(CrossOriginFilter.ACCESS_CONTROL_ALLOW_ORIGIN_HEADER, "http://127.0.0.1:4200"); 33 | holder.setInitParameter(CrossOriginFilter.ALLOWED_METHODS_PARAM, "GET,POST,PUT,HEAD"); 34 | holder.setInitParameter(CrossOriginFilter.ALLOWED_HEADERS_PARAM, "X-Requested-With,Content-Type,Accept,Origin"); 35 | holder.setName("cross-origin"); 36 | FilterMapping fm = new FilterMapping(); 37 | fm.setFilterName("cross-origin"); 38 | fm.setPathSpec("*"); 39 | context.getServletHandler().addFilter(holder, fm); 40 | } 41 | 42 | private static void listenQS() throws Exception { 43 | Configuration cfg = Configuration.getInstnace(); 44 | 45 | if (cfg.getApiConfig().isEnabled()) { 46 | SelectChannelConnector connector1 = new SelectChannelConnector(); 47 | connector1.setHost(cfg.getApiConfig().getListenAddress().first); 48 | connector1.setPort(cfg.getApiConfig().getListenAddress().second); 49 | connector1.setThreadPool(new org.eclipse.jetty.util.thread.QueuedThreadPool(cfg.getApiConfig().getThreadsCnt())); 50 | connector1.setName("topo-query"); 51 | 52 | server.setConnectors(new Connector[]{ connector1 }); 53 | 54 | 55 | ServletContextHandler context=new ServletContextHandler(ServletContextHandler.NO_SESSIONS); 56 | server.setHandler(context); 57 | 58 | setAccessControlAllowOrigin(context); 59 | 60 | GrafanaTSDB2RequestHandler.addServeletHander(context, taggedTS); 61 | 62 | HttpIngesterHandler.addServeletHander(context, taggedTS, tsCli); 63 | 64 | server.start(); 65 | } 66 | } 67 | 68 | public static void main(String []args) throws Exception { 69 | 70 | String configFileName = null; 71 | Options options = new Options(); 72 | Option optThreads = new Option("c", "config-file", true, "a config file with config value."); 73 | options.addOption(optThreads); 74 | 75 | CommandLineParser parser = new PosixParser(); 76 | HelpFormatter formatter = new HelpFormatter(); 77 | CommandLine cmd = null; 78 | try { 79 | cmd = parser.parse(options, args); 80 | } catch (org.apache.commons.cli.ParseException e1) { 81 | // TODO Auto-generated catch block 82 | e1.printStackTrace(); 83 | formatter.printHelp("Server", options); 84 | } 85 | 86 | String _propertyFile 
= cmd.getOptionValue("c"); 87 | 88 | if (_propertyFile != null) { 89 | configFileName = _propertyFile; 90 | } 91 | 92 | org.wowcoders.beringeiclient.configurations.Configuration.init(configFileName); 93 | Configuration.init(configFileName); 94 | 95 | @SuppressWarnings("unused") 96 | Configuration cfg = Configuration.getInstnace(); 97 | 98 | MetaDataCollector.init(); 99 | 100 | client = new BeringeiClient(); 101 | taggedTS = new TaggedTS(client); 102 | tsCli = new TS(client); 103 | 104 | new Thread() {// second metric emitter 105 | public void run() { 106 | while(true) { 107 | int hosts = 10; 108 | 109 | long ms = System.currentTimeMillis() / 1000; 110 | 111 | for(int host = 1; host <= hosts; host++) { 112 | Topo topo = new Topo(); 113 | topo.add("pool", "login"); 114 | topo.add("datacenter", "aws-location1"); 115 | topo.add("host", "loginhost" + host); 116 | int count = (int) (Math.random() *100); 117 | tsCli.addTSCnt("logincount", topo, ms, count); 118 | } 119 | 120 | tsCli.flush(); 121 | try { 122 | Thread.sleep(1000); 123 | } catch (InterruptedException e) { 124 | break; 125 | } 126 | } 127 | } 128 | }.start(); 129 | 130 | listenQS(); 131 | server.join(); 132 | } 133 | } -------------------------------------------------------------------------------- /TSPark/src/main/java/org/wowcoders/tspark/qs/TSParkQSResponse.java: -------------------------------------------------------------------------------- 1 | package org.wowcoders.tspark.qs; 2 | 3 | import java.util.List; 4 | 5 | import org.wowcoders.tspark.models.TSKey; 6 | import org.wowcoders.tspark.utils.Pair; 7 | 8 | public class TSParkQSResponse { 9 | String key; 10 | TSKey meta; 11 | List> dps; 12 | 13 | public TSParkQSResponse(String key, TSKey meta, List> dps) { 14 | this.key = key; 15 | this.meta = meta; 16 | this.dps = dps; 17 | } 18 | 19 | public String getKey() { 20 | return key; 21 | } 22 | 23 | public void setKey(String key) { 24 | this.key = key; 25 | } 26 | 27 | public TSKey getMeta() { 28 | return meta; 29 | } 30 | 31 | public void setMeta(TSKey meta) { 32 | this.meta = meta; 33 | } 34 | 35 | public List> getDps() { 36 | return dps; 37 | } 38 | 39 | public void setDps(List> dps) { 40 | this.dps = dps; 41 | } 42 | } -------------------------------------------------------------------------------- /TSPark/src/main/java/org/wowcoders/tspark/qs/TSParkQueryInput.java: -------------------------------------------------------------------------------- 1 | package org.wowcoders.tspark.qs; 2 | 3 | import java.util.HashMap; 4 | 5 | import org.wowcoders.tspark.models.Aggregators; 6 | import org.wowcoders.tspark.models.TSKey; 7 | import org.wowcoders.tspark.models.Topo; 8 | import org.wowcoders.tspark.utils.Pair; 9 | 10 | import com.facebook.beringei.thriftclient.Key; 11 | 12 | public class TSParkQueryInput { 13 | Key key; 14 | Topo topoActual; 15 | Aggregators agg; 16 | TSKey topoQuery; 17 | Topo groupBy = null; 18 | 19 | TSParkQueryInput(Key key, Topo topoActual, Aggregators agg, TSKey topoQuery) { 20 | this.key = key; 21 | this.topoActual = topoActual; 22 | this.agg = agg; 23 | this.topoQuery = topoQuery; 24 | try { 25 | this.groupBy = ((Topo)topoQuery).clone(); 26 | HashMap > map = new HashMap >(); 27 | this.groupBy.getTags().stream().filter(p->p.second.equals("*")).forEach(p->{ 28 | map.put(p.first, p); 29 | }); 30 | if (map.size() > 0) { 31 | topoActual.getTags().stream().forEach(p->{ 32 | if (map.containsKey(p.first)) { 33 | Pair pair = map.get(p.first); 34 | pair.second = p.second; 35 | } 36 | }); 37 | this.groupBy.forceHash(); 38 | 
39 | this.topoQuery = new TSKey(topoQuery.getAggregator(), 40 | topoQuery.getMetric(), this.groupBy); 41 | this.topoQuery.setNamespace(topoQuery.getNamespace()); 42 | } 43 | } catch (CloneNotSupportedException e) { 44 | e.printStackTrace(); 45 | } 46 | } 47 | 48 | public Key getKey() { 49 | return key; 50 | } 51 | public void setKey(Key key) { 52 | this.key = key; 53 | } 54 | public Topo getTopoActual() { 55 | return topoActual; 56 | } 57 | public void setTopoActual(Topo topoActual) { 58 | this.topoActual = topoActual; 59 | } 60 | public Aggregators getAgg() { 61 | return agg; 62 | } 63 | public void setAgg(Aggregators agg) { 64 | this.agg = agg; 65 | } 66 | 67 | public TSKey getTopoQuery() { 68 | return topoQuery; 69 | } 70 | public Topo getGroupBy() { 71 | return groupBy; 72 | } 73 | } -------------------------------------------------------------------------------- /TSPark/src/main/java/org/wowcoders/tspark/qs/TaggedTS.java: -------------------------------------------------------------------------------- 1 | package org.wowcoders.tspark.qs; 2 | 3 | import java.math.BigInteger; 4 | import java.util.ArrayList; 5 | import java.util.Collection; 6 | import java.util.Comparator; 7 | import java.util.DoubleSummaryStatistics; 8 | import java.util.List; 9 | import java.util.Map; 10 | import java.util.Map.Entry; 11 | import java.util.concurrent.CompletableFuture; 12 | import java.util.concurrent.ConcurrentHashMap; 13 | import java.util.concurrent.TimeUnit; 14 | import java.util.function.Function; 15 | import java.util.stream.Collectors; 16 | 17 | import org.slf4j.Logger; 18 | import org.slf4j.LoggerFactory; 19 | import org.wowcoders.beringeiclient.BeringeiClient; 20 | import org.wowcoders.tspark.models.Aggregators; 21 | import org.wowcoders.tspark.models.TSKey; 22 | import org.wowcoders.tspark.models.Topo; 23 | import org.wowcoders.tspark.tags.AtomixDistributedStore; 24 | import org.wowcoders.tspark.utils.Pair; 25 | 26 | import com.facebook.beringei.thriftclient.DataPoint; 27 | import com.facebook.beringei.thriftclient.Key; 28 | 29 | public class TaggedTS { 30 | final static Logger logger = LoggerFactory.getLogger(TaggedTS.class); 31 | BeringeiClient client = null; 32 | 33 | TaggedTS(BeringeiClient client) { 34 | this.client = client; 35 | } 36 | 37 | Collection getKeys(TSKey ts) { 38 | //form the return object with required keys for aggregation 39 | Topo topo = (Topo)ts; 40 | String key = topo.hashTopoWithNoStar(); 41 | String mHash = ts.metricHash(); 42 | logger.info("dimkeys for "+mHash+ ":"); 43 | logger.debug("finding dimkeys for metric hash: " + mHash + ":"); 44 | 45 | Collection dimKeys = AtomixDistributedStore.map2.get(mHash).join();/*TODO: hangs if not available*/ 46 | String dimsHash = (String)dimKeys.iterator().next();/*TODO: multiple topo for a metric*/ 47 | 48 | logger.debug("metric hash: "+mHash+ ", dimension hash: "+ dimsHash); 49 | 50 | StringBuilder sb = new StringBuilder(); 51 | sb.append(dimsHash); 52 | sb.append("_"); 53 | sb.append(key); 54 | sb.append("_"); 55 | sb.append(mHash); 56 | String lkey = sb.toString(); 57 | 58 | { 59 | long _start = TimeUnit.NANOSECONDS.toMicros(System.nanoTime()); 60 | logger.info("key to find the row keys based on filters/tags given ->"+lkey); 61 | Collection rowkeys = AtomixDistributedStore.map3.get(lkey).join(); 62 | rowkeys.iterator().forEachRemaining(o -> { 63 | logger.debug("found row key: " + o); 64 | }); 65 | logger.info("number of row keys found: "+rowkeys.size()); 66 | 67 | long _end = TimeUnit.NANOSECONDS.toMicros(System.nanoTime()); 68 
| logger.info("time taken to fetch row keys:" + (_end - _start) + "micros"); 69 | 70 | return rowkeys; 71 | } 72 | } 73 | 74 | public CompletableFuture > berigeiQueryWrapper(long start, long end, List keys) { 75 | CompletableFuture> cfs = new CompletableFuture>(); 76 | List dps = new ArrayList(); 77 | long _start = TimeUnit.NANOSECONDS.toMicros(System.nanoTime()); 78 | client.getDataPointsByKey(start, end, keys).thenAccept(result -> { 79 | if (result != null) { 80 | result.entrySet().stream().forEach(_pair -> { 81 | Entry> pair = _pair; 82 | List _dps = pair.getValue(); 83 | 84 | dps.addAll(_dps); 85 | }); 86 | logger.info("query complete"); 87 | } else { 88 | logger.error("query failed"); 89 | } 90 | long _end = TimeUnit.NANOSECONDS.toMicros(System.nanoTime()); 91 | logger.info("Query Overall TimeTaken:" + (_end - _start) + "(micros)"); 92 | cfs.complete(dps); 93 | }).exceptionally((tw)-> { 94 | return null; 95 | }); 96 | 97 | return cfs; 98 | } 99 | 100 | Comparator> dpsCompartor = (o1, o2) -> { 101 | if (o1.second > o2.second) { 102 | return 1; 103 | } else if (o1.second < o2.second) { 104 | return -1; 105 | } else { 106 | return 0; 107 | } 108 | }; 109 | 110 | public static class AggregationMapper { 111 | private static Function, Pair> CNT = (e) -> { 112 | return new Pair((Long)e.getKey(), (double)e.getValue().getCount()); 113 | }; 114 | 115 | private static Function, Pair> SUM = (e) -> { 116 | return new Pair((Long)e.getKey(), e.getValue().getSum()); 117 | }; 118 | 119 | private static Function, Pair> AVG = (e) -> { 120 | return new Pair((Long)e.getKey(), e.getValue().getAverage()); 121 | }; 122 | 123 | private static Function, Pair> MAX = (e) -> { 124 | return new Pair((Long)e.getKey(), e.getValue().getMax()); 125 | }; 126 | 127 | private static Function, Pair> MIN = (e) -> { 128 | return new Pair((Long)e.getKey(), e.getValue().getMin()); 129 | }; 130 | 131 | public static Function, Pair> getMapper(Aggregators a) { 132 | switch(a) { 133 | case CNT: 134 | return AggregationMapper.CNT; 135 | case SUM: 136 | return AggregationMapper.SUM; 137 | case AVG: 138 | return AggregationMapper.AVG; 139 | case MAX: 140 | return AggregationMapper.MAX; 141 | case MIN: 142 | return AggregationMapper.MIN; 143 | } 144 | 145 | return AggregationMapper.CNT; 146 | } 147 | } 148 | 149 | public CompletableFuture > rollUpAggregation(List result, Map inputMap) { 150 | // ExecutorService pool = Executors.newFixedThreadPool(2); 151 | CompletableFuture> cfs = new CompletableFuture>(); 152 | 153 | long _start = TimeUnit.NANOSECONDS.toMicros(System.nanoTime()); 154 | 155 | if (result != null) { 156 | List resp = new ArrayList(); 157 | Map> grpByTopo = result.stream().collect( 158 | Collectors.groupingBy(dp-> { 159 | String _key = dp.key.key; 160 | TSParkQueryInput q = inputMap.get(_key); 161 | 162 | String filter = q.getGroupBy()._hash(); 163 | return filter; 164 | }) 165 | ); 166 | grpByTopo.entrySet().stream().parallel().forEach(e ->{ 167 | List _dps = e.getValue(); 168 | if (_dps.size() > 0) { 169 | String _key = _dps.get(0).getKey().getKey(); 170 | TSParkQueryInput q = inputMap.get(_key); 171 | Map utToVal = null; 172 | 173 | utToVal = _dps.stream().parallel().collect(Collectors.groupingBy(dp-> { 174 | return dp.value.getUnixTime(); 175 | }, Collectors.summarizingDouble(dp->dp.value.getValue()))); 176 | 177 | List> dpsByKey = null; 178 | 179 | dpsByKey = utToVal.entrySet().stream() 180 | .parallel() 181 | .map(AggregationMapper.getMapper(q.getAgg())) 182 | .sorted(dpsCompartor) 183 | 
.collect(Collectors.toList()); 184 | 185 | TSParkQSResponse _resp = new TSParkQSResponse(_key, q.getTopoQuery(), dpsByKey); 186 | resp.add(_resp); 187 | } 188 | }); 189 | logger.info("rollup complete"); 190 | cfs.complete(resp); 191 | } else { 192 | logger.error("rollup failed"); 193 | cfs.complete(null); 194 | } 195 | long _end = TimeUnit.NANOSECONDS.toMicros(System.nanoTime()); 196 | logger.info("rollup Overall TimeTaken:" + (_end - _start) + "(micros)"); 197 | 198 | return cfs; 199 | } 200 | 201 | //executes one m parameter 202 | CompletableFuture> getData(long start, 203 | long end, 204 | TSKey ts) { 205 | logger.info("getData working on query " + ts.topoString()); 206 | CompletableFuture> retDPSCF = new CompletableFuture>(); 207 | Map beringeiQIMap = new ConcurrentHashMap(); 208 | 209 | // ExecutorService pool = Executors.newFixedThreadPool(2); 210 | // TODO need further optimization 211 | 212 | List newList = new ArrayList(); 213 | 214 | //get the row keys 215 | Collection o = getKeys(ts); 216 | 217 | if (o.size() > 0) { 218 | o.stream().forEach(key -> { 219 | Object [] obj = new Object[] {key, ts}; 220 | newList.add(obj); 221 | }); 222 | } 223 | 224 | Map> groupTSKey = new ConcurrentHashMap>(); 225 | newList.stream().forEach(obj-> { 226 | String fullTopoKey = (String)obj[0]; 227 | 228 | long hashCode = fullTopoKey.hashCode(); 229 | long shardId = BigInteger.valueOf(hashCode).mod(BigInteger.valueOf(client.getShardCount())).intValue(); 230 | List byShardId = groupTSKey.get(shardId); 231 | if (byShardId == null) { 232 | byShardId = new ArrayList(); 233 | groupTSKey.put(shardId, byShardId); 234 | } 235 | byShardId.add(obj); 236 | 237 | /*a TOPO will get multiple shardIds if we add more hosts/shards into cache cluster */ 238 | Collection shardIds = AtomixDistributedStore.topoKeyToShardIds.get(fullTopoKey).join(); 239 | shardIds.stream().forEach(_shardId->{ 240 | if (!_shardId.equals(shardId)) { 241 | List _byShardId = groupTSKey.get(_shardId); 242 | if (_byShardId == null) { 243 | _byShardId = new ArrayList(); 244 | groupTSKey.put(shardId, _byShardId); 245 | } 246 | _byShardId.add(obj); 247 | } 248 | }); 249 | }); 250 | 251 | List dps = new ArrayList(); 252 | List>> cfs = new ArrayList>>(); 253 | for(Entry> tsArrGrouped: groupTSKey.entrySet()) { 254 | List keys = new ArrayList(); 255 | long shardId = tsArrGrouped.getKey(); 256 | for(Object[] obj: tsArrGrouped.getValue()) { 257 | Object tsKeyObj = obj[1]; 258 | Key key = new Key(); 259 | 260 | TSKey tsKey = (TSKey)tsKeyObj; 261 | 262 | String keyByTopo = (String) obj[0]; 263 | Topo tActual = (Topo) AtomixDistributedStore.topoMap.get(keyByTopo).join(); 264 | 265 | //TODO try to use if preaggregation avg,min,max,sum available tsKey.metricAggHash() or find out the standard way of doing this, right now we use cnt to calculate sum,avg,min,max 266 | String aggAvailableInDB = "cnt"; 267 | String calcAggMetricHash = TSKey.calcAggMetricHash(aggAvailableInDB, tsKey.getMetric()); 268 | 269 | key.key = tsKey.getNamespace() + "_" + calcAggMetricHash + "_" + keyByTopo; 270 | key.shardId = shardId; 271 | keys.add(key); 272 | 273 | Aggregators agg = tsKey.getAggregator(); 274 | TSParkQueryInput bwq = new TSParkQueryInput(key, tActual, agg, tsKey); 275 | beringeiQIMap.put(key.key, bwq); 276 | logger.debug("inquery:"+((TSKey)tsKeyObj).hash()+", key:"+ key); 277 | } 278 | CompletableFuture> cf = berigeiQueryWrapper(start, end, keys); 279 | cfs.add(cf); 280 | } 281 | logger.info("Total batches, runs by shards: " + cfs.size()); 282 | long _start = 
TimeUnit.NANOSECONDS.toMicros(System.nanoTime()); 283 | CompletableFuture.allOf(cfs.toArray(new CompletableFuture[cfs.size()])).thenAccept((result) ->{ 284 | long _end = TimeUnit.NANOSECONDS.toMicros(System.nanoTime()); 285 | logger.info("Overall TimeTaken:" + (_end - _start) + "(micros)"); 286 | 287 | cfs.stream() 288 | .map(m->m.join()) 289 | .filter(_dps->_dps != null && _dps.size() > 0) 290 | .forEach(_dps->dps.addAll(_dps)); 291 | 292 | CompletableFuture > responsesCF = rollUpAggregation(dps, beringeiQIMap); 293 | responsesCF.thenAccept(responses->retDPSCF.complete(responses)); 294 | }); 295 | 296 | return retDPSCF; 297 | } 298 | } -------------------------------------------------------------------------------- /TSPark/src/main/java/org/wowcoders/tspark/tags/AtomixDistributedStore.java: -------------------------------------------------------------------------------- 1 | package org.wowcoders.tspark.tags; 2 | 3 | import java.io.File; 4 | import java.util.ArrayList; 5 | import java.util.List; 6 | import java.util.concurrent.CompletableFuture; 7 | 8 | import org.slf4j.Logger; 9 | import org.slf4j.LoggerFactory; 10 | import org.wowcoders.tspark.utils.Pair; 11 | 12 | import io.atomix.Atomix; 13 | import io.atomix.AtomixReplica; 14 | import io.atomix.catalyst.serializer.Serializer; 15 | import io.atomix.catalyst.transport.Address; 16 | import io.atomix.catalyst.transport.netty.NettyTransport; 17 | import io.atomix.collections.DistributedMap; 18 | import io.atomix.collections.DistributedMultiMap; 19 | import io.atomix.copycat.server.storage.Storage; 20 | import io.atomix.copycat.server.storage.StorageLevel; 21 | 22 | public class AtomixDistributedStore { 23 | final static Logger logger = LoggerFactory.getLogger(AtomixDistributedStore.class); 24 | 25 | static AtomixReplica atomix = null; 26 | 27 | public static DistributedMultiMap map2 = null; 28 | public static DistributedMultiMap map3 = null; 29 | 30 | public static DistributedMultiMap topoKeyToShardIds = null; 31 | 32 | public static DistributedMap topoMap = null; 33 | public static DistributedMap metrics = null; 34 | public static DistributedMap tagv = null; 35 | public static DistributedMap tagk = null; 36 | 37 | 38 | private static void addShutdownHooks() { 39 | Runtime.getRuntime().addShutdownHook(new Thread() { 40 | public void run() { 41 | try { 42 | if (atomix != null) { 43 | System.out.println("Shutting down"); 44 | } 45 | } catch (Exception e1) { 46 | e1.printStackTrace(); 47 | } 48 | } 49 | }); 50 | } 51 | 52 | public static CompletableFuture start( 53 | Pair listen, List> clusterInp) { 54 | addShutdownHooks(); 55 | 56 | List
<Address> cluster = new ArrayList<Address>
(); 57 | 58 | if (clusterInp != null) { 59 | clusterInp.stream().forEach(addr-> { 60 | if (!(listen.first.equals(addr.first) && listen.second.equals(addr.second))) { 61 | cluster.add(new Address(addr.first, addr.second)); 62 | } 63 | }); 64 | } 65 | 66 | Address address = null; 67 | address = new Address(listen.first, listen.second); 68 | 69 | String dirName = "./atomix-data/"; 70 | 71 | atomix = AtomixReplica.builder(address) 72 | .withType(AtomixReplica.Type.ACTIVE) 73 | .withTransport(NettyTransport.builder().withThreads(4).build()) 74 | .withSerializer(new Serializer().disableWhitelist()) 75 | .withStorage(Storage.builder() 76 | .withDirectory(new File(dirName)) 77 | .withStorageLevel(StorageLevel.MAPPED) 78 | .build()).build(); 79 | 80 | CompletableFuture cf = atomix.bootstrap(cluster); 81 | Thread t = new Thread() { 82 | @Override 83 | public void run() { 84 | while (!cf.isDone()) { 85 | try { 86 | Thread.sleep(1000); 87 | } catch (InterruptedException e) { 88 | e.printStackTrace(); 89 | break; 90 | } 91 | } 92 | if (cf.isDone()) { 93 | metrics = atomix.getMap("beringei-metrics").join(); 94 | tagk = atomix.getMap("beringei-tagk").join(); 95 | tagv = atomix.getMap("beringei-tagv").join(); 96 | 97 | topoMap = atomix.getMap("beringei-topo-key-to-topo-map").join(); 98 | topoKeyToShardIds = atomix.getMultiMap("beringei-topo-key-to-shard-ids").join(); 99 | map2 = atomix.getMultiMap("beringei-metrickey-to-dimskey-map").join(); 100 | map3 = atomix.getMultiMap("beringei-topokey-dimskey-metrickey-to-keys-map").join(); 101 | 102 | logger.info("Got the atomix map references."); 103 | } 104 | } 105 | }; 106 | t.start(); 107 | return cf; 108 | } 109 | 110 | public static Atomix getInstance() { 111 | return atomix; 112 | } 113 | 114 | public static void main(String []args) throws Throwable { 115 | start(new Pair("0.0.0.0", 41292), null).join(); 116 | } 117 | } 118 | -------------------------------------------------------------------------------- /TSPark/src/main/java/org/wowcoders/tspark/tags/AtomixReader.java: -------------------------------------------------------------------------------- 1 | package org.wowcoders.tspark.tags; 2 | 3 | import java.util.List; 4 | import java.util.concurrent.CompletableFuture; 5 | import java.util.stream.Collectors; 6 | 7 | import org.wowcoders.tspark.models.Topo; 8 | 9 | import io.atomix.Atomix; 10 | import io.atomix.collections.DistributedMultiMap; 11 | 12 | public class AtomixReader { 13 | public CompletableFuture> findKeys(Topo t) { 14 | CompletableFuture> cf = new CompletableFuture>(); 15 | 16 | Atomix atomix = AtomixDistributedStore.getInstance(); 17 | 18 | DistributedMultiMap map1 = atomix.getMultiMap("beringei-topo-key-to-keys-map").join(); 19 | DistributedMultiMap map2 = atomix.getMultiMap("beringei-topo-dimskey-to-keys-map").join(); 20 | 21 | map1.get(t.hash()).thenAcceptAsync(_list1-> { 22 | if (_list1 != null) { 23 | map2.get(t.hash()).thenAcceptAsync(_list2-> { 24 | if (_list2 != null) { 25 | List intersect = _list1.stream() 26 | .filter(_list2::contains) 27 | .map(s -> (String)s) 28 | .collect(Collectors.toList()); 29 | cf.complete(intersect); 30 | } 31 | }).exceptionally(tw -> { 32 | cf.completeExceptionally(tw); 33 | return null; 34 | }); 35 | } 36 | }).exceptionally(tw -> { 37 | cf.completeExceptionally(tw); 38 | return null; 39 | }); 40 | 41 | return cf; 42 | } 43 | } -------------------------------------------------------------------------------- /TSPark/src/main/java/org/wowcoders/tspark/tags/AtomixWriter.java: 
-------------------------------------------------------------------------------- 1 | package org.wowcoders.tspark.tags; 2 | 3 | import java.time.Duration; 4 | import java.util.concurrent.CompletableFuture; 5 | 6 | import io.atomix.Atomix; 7 | import io.atomix.collections.DistributedMap; 8 | 9 | public class AtomixWriter implements WriterInterface { 10 | @Override 11 | public CompletableFuture bulkUpsert() { 12 | CompletableFuture cf = new CompletableFuture(); 13 | Atomix atomix = AtomixDistributedStore.getInstance(); 14 | DistributedMap map = atomix.getMap("beringei-topo").join(); 15 | map.put("", 16 | "", 17 | Duration.ofDays(8)); 18 | return cf; 19 | } 20 | } -------------------------------------------------------------------------------- /TSPark/src/main/java/org/wowcoders/tspark/tags/MetaDataCollector.java: -------------------------------------------------------------------------------- 1 | package org.wowcoders.tspark.tags; 2 | 3 | import java.time.Duration; 4 | import java.util.List; 5 | import java.util.Map; 6 | import java.util.Map.Entry; 7 | import java.util.Timer; 8 | import java.util.TimerTask; 9 | import java.util.concurrent.ConcurrentHashMap; 10 | 11 | import org.slf4j.Logger; 12 | import org.slf4j.LoggerFactory; 13 | import org.wowcoders.tspark.configurations.Configuration; 14 | import org.wowcoders.tspark.models.Topo; 15 | 16 | public class MetaDataCollector { 17 | final static Logger logger = LoggerFactory.getLogger(MetaDataCollector.class); 18 | 19 | static Map all = new ConcurrentHashMap(); 20 | static Map mapKeysToTopo = new ConcurrentHashMap(); 21 | static Map> topoKeysToMetricKeys = new ConcurrentHashMap>(); 22 | static Map topoKeysToShardIds = new ConcurrentHashMap(); 23 | 24 | public static void buildIndex(long shardId, 25 | Topo t, 26 | String metric, 27 | String mHash) { 28 | String thash = t._hash(); 29 | 30 | all.put(thash, t); 31 | 32 | if (t.updateRequired()) { 33 | if (!mapKeysToTopo.containsKey(thash)) { 34 | mapKeysToTopo.put(thash, t); 35 | } 36 | } 37 | 38 | if (!topoKeysToMetricKeys.containsKey(thash)) { 39 | topoKeysToMetricKeys.put(thash, new ConcurrentHashMap()); 40 | } 41 | 42 | ConcurrentHashMap mmap = topoKeysToMetricKeys.get(thash); 43 | if (mmap != null) { 44 | mmap.put(mHash, metric); 45 | } 46 | 47 | topoKeysToShardIds.put(thash, shardId); 48 | } 49 | 50 | public static void init() { 51 | Configuration cfg = Configuration.getInstnace(); 52 | AtomixDistributedStore.start(cfg.getTagsCacheConfig().getListenAddress(), cfg.getTagsCacheConfig().getCluster()).join(); 53 | Timer timer = new Timer(); 54 | timer.scheduleAtFixedRate(new TimerTask() { 55 | @Override 56 | public void run() { 57 | flush(); 58 | } 59 | }, 5000, 60000); 60 | } 61 | 62 | public static Thread flush() { 63 | Thread t = new Thread() { 64 | @Override 65 | public void run() { 66 | Configuration cfg = Configuration.getInstnace(); 67 | long cacheTTL = cfg.getTagsCacheConfig().getTtl(); 68 | final Map mapPrevKeysToTopo = mapKeysToTopo; 69 | mapKeysToTopo = new ConcurrentHashMap(); 70 | final Map> prevTopoKeysToMetricKeys = topoKeysToMetricKeys; 71 | final Map prevTopoKeysToShardIds = topoKeysToShardIds; 72 | topoKeysToMetricKeys = new ConcurrentHashMap>(); 73 | topoKeysToShardIds = new ConcurrentHashMap(); 74 | 75 | /* write tagk, tagv for suggest */ 76 | for(Entry entry : mapPrevKeysToTopo.entrySet()) { 77 | Topo t = entry.getValue(); 78 | String key = t._hash(); 79 | AtomixDistributedStore.topoMap.put(key, t, Duration.ofDays(8)); 80 | t.getTags().stream().forEach(pair -> { 81 | 
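// record every tag key (pair.first) and tag value (pair.second) with an
// 8-day TTL; these distributed maps appear to back the /tsdb/api/suggest lookups.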
AtomixDistributedStore.tagk.put(pair.first, true, Duration.ofDays(8)); 82 | AtomixDistributedStore.tagv.put(pair.second, true, Duration.ofDays(8)); 83 | }); 84 | 85 | /*List []indexes = t.getIndexes(); 86 | for(String index : indexes[0]) { 87 | //System.out.println("MapsToInsert:"+index+":"+key); 88 | AtomixDistributedStore.map1.put(index, key, Duration.ofDays(8)); 89 | } 90 | for(String index : indexes[1]) { 91 | // System.out.println("MapsToInsert:"+index+":"+key); 92 | AtomixDistributedStore.map2.put(index, key, Duration.ofDays(8)); 93 | }*/ 94 | } 95 | 96 | /* TODO trigram for regex on values */ 97 | 98 | StringBuilder sb = new StringBuilder(); 99 | for(Entry> entry :prevTopoKeysToMetricKeys.entrySet()) { 100 | String tkey = entry.getKey(); 101 | 102 | Topo t = all.get(tkey); 103 | String hashDims = t._hashDims(); 104 | sb.append(hashDims); 105 | sb.append("_"); 106 | int length1 = sb.length(); 107 | List []indexes = t.getIndexes(); 108 | for(String index : indexes[0]) { 109 | sb.append(index); 110 | sb.append("_"); 111 | 112 | int length2 = sb.length(); 113 | entry.getValue().entrySet().stream().forEach(info -> { 114 | String mkey = info.getKey(); 115 | AtomixDistributedStore.map2.put(mkey, hashDims, Duration.ofMillis(cacheTTL)); 116 | sb.append(mkey); 117 | AtomixDistributedStore.map3.put(sb.toString(), tkey, Duration.ofMillis(cacheTTL)); 118 | logger.debug("inserted topo key ->"+ tkey+", filter key: "+ sb); 119 | sb.setLength(length2); 120 | 121 | AtomixDistributedStore.metrics.put(info.getValue(), true, Duration.ofMillis(cacheTTL)); 122 | }); 123 | sb.setLength(length1); 124 | } 125 | 126 | sb.setLength(0); 127 | } 128 | 129 | for(Entry entry :prevTopoKeysToShardIds.entrySet()) { 130 | logger.debug("inserted topo key to shardid ->"+ entry.getKey() +", shard keyid: "+ entry.getValue()); 131 | //TODO store hosts served the shardid ? 132 | //need to decide how we are going linearly scale. 133 | //we need to store host only if we are adding new boxes and move the shardid to new boxes. 
134 | //while adding new boxes, if we have the hosts fixed for shardsid and we just assign the new range 135 | AtomixDistributedStore.topoKeyToShardIds.put(entry.getKey(), entry.getValue(), Duration.ofMillis(cacheTTL)); 136 | } 137 | } 138 | }; 139 | t.start(); 140 | return t; 141 | } 142 | } -------------------------------------------------------------------------------- /TSPark/src/main/java/org/wowcoders/tspark/tags/ReaderInterface.java: -------------------------------------------------------------------------------- 1 | package org.wowcoders.tspark.tags; 2 | 3 | import java.util.List; 4 | import java.util.concurrent.CompletableFuture; 5 | 6 | import org.wowcoders.tspark.models.Topo; 7 | 8 | public interface ReaderInterface { 9 | CompletableFuture bulkRead(); 10 | 11 | //find topo matching key 12 | //short topokey with dims key available 13 | //find the topokeys tied to the metric 14 | 15 | public CompletableFuture> findKeys(Topo t); 16 | } -------------------------------------------------------------------------------- /TSPark/src/main/java/org/wowcoders/tspark/tags/WriterInterface.java: -------------------------------------------------------------------------------- 1 | package org.wowcoders.tspark.tags; 2 | 3 | import java.util.concurrent.CompletableFuture; 4 | 5 | public interface WriterInterface { 6 | CompletableFuture bulkUpsert(); 7 | 8 | //write short topokey with dims key available to rowkey 9 | //find the topokeys tied to the metric 10 | //store values 11 | //store keys 12 | //api_aggregators 13 | //api_config_filters 14 | //api_query 15 | //rowkey to -> topo 16 | //rowkey to -> topo multimap 17 | } -------------------------------------------------------------------------------- /TSPark/src/main/java/org/wowcoders/tspark/utils/Combinations.java: -------------------------------------------------------------------------------- 1 | package org.wowcoders.tspark.utils; 2 | 3 | import java.util.ArrayList; 4 | import java.util.Collections; 5 | import java.util.LinkedList; 6 | import java.util.List; 7 | 8 | public class Combinations { 9 | public static List> combination(List values, int size) { 10 | if (0 == size) { 11 | return Collections.singletonList(Collections. 
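// base case of the recursion: there is exactly one combination of size 0,
// the empty list; the recursive step below either prepends the head element
// to each (size - 1)-combination of the tail or skips the head entirely.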
emptyList()); 12 | } 13 | 14 | if (values.isEmpty()) { 15 | return Collections.emptyList(); 16 | } 17 | 18 | List> combination = new LinkedList>(); 19 | 20 | T actual = values.iterator().next(); 21 | 22 | List subSet = new LinkedList(values); 23 | subSet.remove(actual); 24 | 25 | List> subSetCombination = combination(subSet, size - 1); 26 | 27 | for (List set : subSetCombination) { 28 | List newSet = new LinkedList(set); 29 | newSet.add(0, actual); 30 | combination.add(newSet); 31 | } 32 | 33 | combination.addAll(combination(subSet, size)); 34 | 35 | return combination; 36 | } 37 | 38 | public static void main(String []args) { 39 | List > tags = new ArrayList>(); 40 | tags.add(new Pair("pool", "login")); 41 | tags.add(new Pair("colo", "lvs")); 42 | tags.add(new Pair("host", "login00001")); 43 | 44 | Collections.sort(tags, (o1, o2) -> o1.first.compareTo(o2.first)); 45 | 46 | for(int i = 1; i<= tags.size(); i++) { 47 | List>> s = combination(tags, i); 48 | System.out.println(s); 49 | } 50 | } 51 | } -------------------------------------------------------------------------------- /TSPark/src/main/java/org/wowcoders/tspark/utils/Hash.java: -------------------------------------------------------------------------------- 1 | package org.wowcoders.tspark.utils; 2 | 3 | import com.facebook.util.digest.MurmurHash; 4 | 5 | public class Hash { 6 | static MurmurHash mh = MurmurHash.createRepeatableHasher(); 7 | public static String bytesToHex(byte[] bytes) { 8 | final char[] hexArray = {'0','1','2','3','4','5','6','7','8','9','A','B','C','D','E','F'}; 9 | char[] hexChars = new char[bytes.length * 2]; 10 | int v; 11 | for ( int j = 0; j < bytes.length; j++ ) { 12 | v = bytes[j] & 0xFF; 13 | hexChars[j * 2] = hexArray[v >>> 4]; 14 | hexChars[j * 2 + 1] = hexArray[v & 0x0F]; 15 | } 16 | return new String(hexChars); 17 | } 18 | 19 | public static String hash(String str) { 20 | byte [] hashByets = mh.hash(str.getBytes()); 21 | return bytesToHex(hashByets); 22 | } 23 | 24 | public static long hashCode(String str) { 25 | return mh.hashToLong(str.getBytes()); 26 | } 27 | } -------------------------------------------------------------------------------- /TSPark/src/main/java/org/wowcoders/tspark/utils/Pair.java: -------------------------------------------------------------------------------- 1 | package org.wowcoders.tspark.utils; 2 | 3 | import java.io.Serializable; 4 | import java.util.Objects; 5 | 6 | 7 | /** 8 | * Container to ease passing around a tuple of two objects. This object provides a sensible 9 | * implementation of equals(), returning true if equals() is true on each of the contained 10 | * objects. 11 | */ 12 | public class Pair implements Serializable, Cloneable { 13 | /** 14 | * 15 | */ 16 | private static final long serialVersionUID = 3976309530595355401L; 17 | 18 | public F first; 19 | public S second; 20 | 21 | /** 22 | * Constructor for a Pair. 23 | * 24 | * @param first the first object in the Pair 25 | * @param second the second object in the pair 26 | */ 27 | public Pair(F first, S second) { 28 | this.first = first; 29 | this.second = second; 30 | } 31 | 32 | /** 33 | * Checks the two objects for equality by delegating to their respective 34 | * {@link Object#equals(Object)} methods. 
35 | * 36 | * @param o the {@link Pair} to which this one is to be checked for equality 37 | * @return true if the underlying objects of the Pair are both considered 38 | * equal 39 | */ 40 | @Override 41 | public boolean equals(Object o) { 42 | if (!(o instanceof Pair)) { 43 | return false; 44 | } 45 | Pair p = (Pair) o; 46 | return Objects.equals(p.first, first) && Objects.equals(p.second, second); 47 | } 48 | 49 | /** 50 | * Compute a hash code using the hash codes of the underlying objects 51 | * 52 | * @return a hashcode of the Pair 53 | */ 54 | @Override 55 | public int hashCode() { 56 | return (first == null ? 0 : first.hashCode()) ^ (second == null ? 0 : second.hashCode()); 57 | } 58 | 59 | /** 60 | * Convenience method for creating an appropriately typed pair. 61 | * @param a the first object in the Pair 62 | * @param b the second object in the pair 63 | * @return a Pair that is templatized with the types of a and b 64 | */ 65 | public static Pair create(A a, B b) { 66 | return new Pair(a, b); 67 | } 68 | 69 | public Pair clone() throws CloneNotSupportedException { 70 | return Pair.create(this.first, this.second); 71 | } 72 | } -------------------------------------------------------------------------------- /TSPark/src/main/resources/berigeiclient-demo.properties: -------------------------------------------------------------------------------- 1 | #TODO define better names for properties 2 | 3 | #clients-by-shards(currently supported async count is 1) 4 | #TODO 5 | # 1. add support for tls 6 | # 2. make atomix as ts store 7 | clients.connect-timeout-ms=100 8 | clients.read-timeout-ms=40000 9 | clients.write-timeout-ms=40000 10 | clients.active-idle-connections-per-shards=1 11 | clients.timeout-to-close-idle-connections-in-seconds=40 12 | clients.connection-count-per-shards=10 13 | clients.count-of-async-operations-allowed-per-connection=1 14 | #supported shards total endpoints/shards 15 | # TODO. config update using admin ui 16 | clients.cluster-shards-count=100 17 | # TODO. 
add support for following 18 | clients.cluster-even-endpoint-as-backup-store=false 19 | clients.datacenters=dcntr1 20 | clients.dcntr1.cluster-endpoints=10.180.17.77:9999,10.180.16.207:9999 21 | #metrics flush to servers(we flush metrics to server if one of the following becomes true) 22 | clients.flush.when-metric-count-in-queue=1000 23 | clients.flush.interval=10000 24 | #threads 25 | clients.read-threads=20 26 | clients.write-threads=10 27 | #TODO retry/retry queue 28 | 29 | #shards 30 | #TODO add support for the following properties 31 | shards.multi-datacenter-write=false 32 | shards.write-on-all-hosts-in-same-datacenter=false 33 | -------------------------------------------------------------------------------- /TSPark/src/main/resources/grafana.login.rollup.sample.json: -------------------------------------------------------------------------------- 1 | { 2 | "id": 8, 3 | "title": "rollup agg test", 4 | "originalTitle": "rollup agg test", 5 | "tags": [], 6 | "style": "dark", 7 | "timezone": "browser", 8 | "editable": true, 9 | "hideControls": false, 10 | "sharedCrosshair": false, 11 | "rows": [ 12 | { 13 | "collapse": false, 14 | "editable": true, 15 | "height": "250px", 16 | "panels": [ 17 | { 18 | "aliasdatacenterrs": {}, 19 | "bars": false, 20 | "datasource": "beringeil_local", 21 | "editable": true, 22 | "error": false, 23 | "fill": 1, 24 | "grid": { 25 | "leftLogBase": 1, 26 | "leftMax": null, 27 | "leftMin": null, 28 | "rightLogBase": 1, 29 | "rightMax": null, 30 | "rightMin": null, 31 | "threshold1": null, 32 | "threshold1datacenterr": "rgba(216, 200, 27, 0.27)", 33 | "threshold2": null, 34 | "threshold2datacenterr": "rgba(234, 112, 112, 0.22)" 35 | }, 36 | "id": 4, 37 | "isNew": true, 38 | "legend": { 39 | "avg": false, 40 | "current": false, 41 | "max": false, 42 | "min": false, 43 | "show": true, 44 | "total": false, 45 | "values": false 46 | }, 47 | "lines": true, 48 | "linewidth": 2, 49 | "links": [], 50 | "nullPointMode": "connected", 51 | "percentage": false, 52 | "pointradius": 5, 53 | "points": false, 54 | "renderer": "flot", 55 | "seriesOverrides": [], 56 | "span": 12, 57 | "stack": false, 58 | "steppedLine": false, 59 | "targets": [ 60 | { 61 | "aggregator": "sum", 62 | "currentTagKey": "", 63 | "currentTagValue": "", 64 | "disableDownsampling": true, 65 | "downsampleAggregator": "avg", 66 | "errors": {}, 67 | "metric": "logincount", 68 | "refId": "B", 69 | "tags": { 70 | "datacenter": "aws-location1", 71 | "host": "*", 72 | "pool": "login" 73 | } 74 | } 75 | ], 76 | "timeFrom": null, 77 | "timeShift": null, 78 | "title": "HOST=*", 79 | "tooltip": { 80 | "shared": true, 81 | "value_type": "cumulative" 82 | }, 83 | "type": "graph", 84 | "x-axis": true, 85 | "y-axis": true, 86 | "y_formats": [ 87 | "short", 88 | "short" 89 | ] 90 | } 91 | ], 92 | "title": "New row" 93 | }, 94 | { 95 | "collapse": false, 96 | "editable": true, 97 | "height": "250px", 98 | "panels": [ 99 | { 100 | "aliasdatacenterrs": {}, 101 | "bars": false, 102 | "datasource": "beringeil_local", 103 | "editable": true, 104 | "error": false, 105 | "fill": 1, 106 | "grid": { 107 | "leftLogBase": 1, 108 | "leftMax": null, 109 | "leftMin": null, 110 | "rightLogBase": 1, 111 | "rightMax": null, 112 | "rightMin": null, 113 | "threshold1": null, 114 | "threshold1datacenterr": "rgba(216, 200, 27, 0.27)", 115 | "threshold2": null, 116 | "threshold2datacenterr": "rgba(234, 112, 112, 0.22)" 117 | }, 118 | "id": 5, 119 | "isNew": true, 120 | "legend": { 121 | "avg": false, 122 | "current": false, 123 | "max": 
false, 124 | "min": false, 125 | "show": true, 126 | "total": false, 127 | "values": false 128 | }, 129 | "lines": true, 130 | "linewidth": 2, 131 | "links": [], 132 | "nullPointMode": "connected", 133 | "percentage": false, 134 | "pointradius": 5, 135 | "points": false, 136 | "renderer": "flot", 137 | "seriesOverrides": [], 138 | "span": 12, 139 | "stack": false, 140 | "steppedLine": false, 141 | "targets": [ 142 | { 143 | "aggregator": "avg", 144 | "currentTagKey": "", 145 | "currentTagValue": "", 146 | "disableDownsampling": true, 147 | "downsampleAggregator": "avg", 148 | "errors": {}, 149 | "metric": "logincount", 150 | "refId": "B", 151 | "tags": { 152 | "datacenter": "aws-location1", 153 | "pool": "login" 154 | } 155 | } 156 | ], 157 | "timeFrom": null, 158 | "timeShift": null, 159 | "title": "AVG", 160 | "tooltip": { 161 | "shared": true, 162 | "value_type": "cumulative" 163 | }, 164 | "type": "graph", 165 | "x-axis": true, 166 | "y-axis": true, 167 | "y_formats": [ 168 | "short", 169 | "short" 170 | ] 171 | } 172 | ], 173 | "title": "New row" 174 | }, 175 | { 176 | "collapse": false, 177 | "editable": true, 178 | "height": "250px", 179 | "panels": [ 180 | { 181 | "aliasdatacenterrs": {}, 182 | "bars": false, 183 | "datasource": "beringeil_local", 184 | "editable": true, 185 | "error": false, 186 | "fill": 1, 187 | "grid": { 188 | "leftLogBase": 1, 189 | "leftMax": null, 190 | "leftMin": null, 191 | "rightLogBase": 1, 192 | "rightMax": null, 193 | "rightMin": null, 194 | "threshold1": null, 195 | "threshold1datacenterr": "rgba(216, 200, 27, 0.27)", 196 | "threshold2": null, 197 | "threshold2datacenterr": "rgba(234, 112, 112, 0.22)" 198 | }, 199 | "id": 6, 200 | "isNew": true, 201 | "legend": { 202 | "avg": false, 203 | "current": false, 204 | "max": false, 205 | "min": false, 206 | "show": true, 207 | "total": false, 208 | "values": false 209 | }, 210 | "lines": true, 211 | "linewidth": 2, 212 | "links": [], 213 | "nullPointMode": "connected", 214 | "percentage": false, 215 | "pointradius": 5, 216 | "points": false, 217 | "renderer": "flot", 218 | "seriesOverrides": [], 219 | "span": 12, 220 | "stack": false, 221 | "steppedLine": false, 222 | "targets": [ 223 | { 224 | "aggregator": "sum", 225 | "currentTagKey": "", 226 | "currentTagValue": "", 227 | "disableDownsampling": true, 228 | "downsampleAggregator": "avg", 229 | "errors": {}, 230 | "metric": "logincount", 231 | "refId": "B", 232 | "tags": { 233 | "datacenter": "aws-location1", 234 | "pool": "login" 235 | } 236 | } 237 | ], 238 | "timeFrom": null, 239 | "timeShift": null, 240 | "title": "SUM", 241 | "tooltip": { 242 | "shared": true, 243 | "value_type": "cumulative" 244 | }, 245 | "type": "graph", 246 | "x-axis": true, 247 | "y-axis": true, 248 | "y_formats": [ 249 | "short", 250 | "short" 251 | ] 252 | } 253 | ], 254 | "title": "New row" 255 | } 256 | ], 257 | "time": { 258 | "from": "now-5m", 259 | "to": "now" 260 | }, 261 | "timepicker": { 262 | "now": true, 263 | "refresh_intervals": [ 264 | "5s", 265 | "10s", 266 | "30s", 267 | "1m", 268 | "5m", 269 | "15m", 270 | "30m", 271 | "1h", 272 | "2h", 273 | "1d" 274 | ], 275 | "time_options": [ 276 | "5m", 277 | "15m", 278 | "1h", 279 | "6h", 280 | "12h", 281 | "24h", 282 | "2d", 283 | "7d", 284 | "30d" 285 | ] 286 | }, 287 | "templating": { 288 | "list": [] 289 | }, 290 | "annotations": { 291 | "list": [] 292 | }, 293 | "refresh": "5s", 294 | "schemaVersion": 8, 295 | "version": 6, 296 | "links": [] 297 | } 
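The three panels in this sample dashboard differ only in their `targets` entry: `sum` over all hosts (`host: *`), `avg` per pool, and `sum` per pool, all against the `logincount` metric. For reference, below is a minimal sketch of issuing the same OpenTSDB-style query directly to the query service from Java. The endpoint is the default from the README; the exact request-body fields accepted by `GrafanaTSDB2RequestHandler` are an assumption here, modeled on the OpenTSDB `/api/query` convention that Grafana's OpenTSDB datasource emits.

```java
import java.io.OutputStream;
import java.net.HttpURLConnection;
import java.net.URL;
import java.nio.charset.StandardCharsets;

// Posts one OpenTSDB-style query mirroring the "SUM" panel's target above.
// Assumption: TSPark accepts the standard OpenTSDB /api/query body layout
// (start/queries/aggregator/metric/tags); this is not verified against the
// handler's parser.
public class QueryServiceSmokeTest {
    public static void main(String[] args) throws Exception {
        String body = "{"
            + "\"start\":\"5m-ago\","
            + "\"queries\":[{"
            + "\"aggregator\":\"sum\","
            + "\"metric\":\"logincount\","
            + "\"tags\":{\"datacenter\":\"aws-location1\",\"pool\":\"login\"}"
            + "}]}";
        URL url = new URL("http://127.0.0.1:58080/tsdb/api/query"); // default from the README
        HttpURLConnection conn = (HttpURLConnection) url.openConnection();
        conn.setRequestMethod("POST");
        conn.setRequestProperty("Content-Type", "application/json");
        conn.setDoOutput(true);
        try (OutputStream os = conn.getOutputStream()) {
            os.write(body.getBytes(StandardCharsets.UTF_8));
        }
        System.out.println("HTTP " + conn.getResponseCode());
    }
}
```

With the query service running on the demo properties, this prints the HTTP status of the query response.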
-------------------------------------------------------------------------------- /TSPark/src/main/resources/log4j.properties: -------------------------------------------------------------------------------- 1 | log4j.rootLogger=INFO,CONSOLE,R 2 | log4j.appender.R=org.apache.log4j.RollingFileAppender 3 | log4j.appender.R.File=app.log 4 | log4j.appender.R.MaxFileSize=50MB 5 | log4j.appender.R.MaxBackupIndex=7 6 | log4j.appender.R.layout=org.apache.log4j.PatternLayout 7 | log4j.appender.R.layout.ConversionPattern=%d{yyyy-MM-dd HH:mm:ss,SSS} %5p %t %c - %m%n 8 | 9 | log4j.appender.CONSOLE=org.apache.log4j.ConsoleAppender 10 | log4j.appender.CONSOLE.Target=System.out 11 | log4j.appender.CONSOLE.layout=org.apache.log4j.PatternLayout 12 | log4j.appender.CONSOLE.layout.conversionPattern=%d{yyyy-MM-dd HH:mm:ss,SSS} %5p %t %c - %m%n 13 | -------------------------------------------------------------------------------- /TSPark/src/main/resources/tspark-demo.properties: -------------------------------------------------------------------------------- 1 | #if property "listen" is missing, then instance will act as client. 2 | #This helps to have seperate atomix cluster for tags store. 3 | #TODO add support for tls 4 | #cache - ttl for metric and meta(tags, metricshash, tagshashkey, shardid, usersessions) 5 | atomix.cache-tags.ttl-ms=604800000 6 | atomix.cache-tags.enable-tls=false 7 | atomix.cache-tags.listen=0.0.0.0:55000 8 | atomix.cache-tags.cluster=0.0.0.0:55000 9 | 10 | #listen port for apis(grafana/bulkupload/ws-metric-sender) 11 | #TODO add support for tls 12 | api.enabled=true 13 | api.enable-tls=false 14 | api.listen=0.0.0.0:58080 15 | api.threads-cnt=20 16 | 17 | ################################## 18 | ## Beringei client properties 19 | ################################## 20 | #clients-by-shards(currently supported async count is 1) 21 | #TODO 22 | # 1. add support for tls 23 | # 2. make atomix as ts store 24 | clients.connect-timeout-ms=100 25 | clients.read-timeout-ms=40000 26 | clients.write-timeout-ms=40000 27 | clients.active-idle-connections-per-shards=1 28 | clients.timeout-to-close-idle-connections-in-seconds=40 29 | clients.connection-count-per-shards=10 30 | clients.count-of-async-operations-allowed-per-connection=1 31 | #supported shards total endpoints/shards 32 | # TODO. config update using admin ui 33 | clients.cluster-shards-count=100 34 | # TODO. 
add support for following 35 | clients.cluster-even-endpoint-as-backup-store=false 36 | clients.datacenters=dcntr1 37 | clients.dcntr1.cluster-endpoints=10.180.17.77:9999,10.180.16.207:9999 38 | #metrics flush to servers(we flush metrics to server if one of the following becomes true) 39 | clients.flush.when-metric-count-in-queue=1000 40 | clients.flush.interval=10000 41 | #threads 42 | clients.read-threads=20 43 | clients.write-threads=10 44 | #TODO retry/retry queue 45 | 46 | #shards 47 | #TODO add support for the following properties 48 | shards.multi-datacenter-write=false 49 | shards.write-on-all-hosts-in-same-datacenter=false -------------------------------------------------------------------------------- /beringeiclient/.classpath: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 10 | 11 | 12 | 13 | 14 | 15 | 16 | 17 | 18 | 19 | 20 | 21 | 22 | 23 | 24 | 25 | 26 | 27 | 28 | 29 | 30 | 31 | 32 | -------------------------------------------------------------------------------- /beringeiclient/.gitignore: -------------------------------------------------------------------------------- 1 | /target/ 2 | -------------------------------------------------------------------------------- /beringeiclient/.project: -------------------------------------------------------------------------------- 1 | 2 | 3 | beringeiclient 4 | 5 | 6 | 7 | 8 | 9 | org.eclipse.jdt.core.javabuilder 10 | 11 | 12 | 13 | 14 | org.eclipse.m2e.core.maven2Builder 15 | 16 | 17 | 18 | 19 | 20 | org.eclipse.jdt.core.javanature 21 | org.eclipse.m2e.core.maven2Nature 22 | 23 | 24 | -------------------------------------------------------------------------------- /beringeiclient/.settings/org.eclipse.jdt.core.prefs: -------------------------------------------------------------------------------- 1 | eclipse.preferences.version=1 2 | org.eclipse.jdt.core.compiler.codegen.targetPlatform=1.8 3 | org.eclipse.jdt.core.compiler.compliance=1.8 4 | org.eclipse.jdt.core.compiler.problem.forbiddenReference=warning 5 | org.eclipse.jdt.core.compiler.source=1.8 6 | -------------------------------------------------------------------------------- /beringeiclient/.settings/org.eclipse.m2e.core.prefs: -------------------------------------------------------------------------------- 1 | activeProfiles= 2 | eclipse.preferences.version=1 3 | resolveWorkspaceProjects=true 4 | version=1 5 | -------------------------------------------------------------------------------- /beringeiclient/config/beringeiclient-demo.properties: -------------------------------------------------------------------------------- 1 | #TODO define better names for properties 2 | 3 | #clients-by-shards(currently supported async count is 1) 4 | #TODO 5 | # 1. add support for tls 6 | # 2. make atomix as ts store 7 | clients.connect-timeout-ms=100 8 | clients.read-timeout-ms=40000 9 | clients.write-timeout-ms=40000 10 | clients.active-idle-connections-per-shards=1 11 | clients.timeout-to-close-idle-connections-in-seconds=40 12 | clients.connection-count-per-shards=10 13 | clients.count-of-async-operations-allowed-per-connection=1 14 | #supported shards total endpoints/shards 15 | # TODO. config update using admin ui 16 | clients.cluster-shards-count=100 17 | # TODO. 
add support for following 18 | clients.cluster-even-endpoint-as-backup-store=false 19 | clients.datacenters=dcntr1 20 | clients.dcntr1.cluster-endpoints=10.180.17.77:9999,10.180.16.207:9999 21 | #metrics flush to servers(we flush metrics to server if one of the following becomes true) 22 | clients.flush.when-metric-count-in-queue=1000 23 | clients.flush.interval=10000 24 | #threads 25 | clients.read-threads=20 26 | clients.write-threads=10 27 | #TODO retry/retry queue 28 | 29 | #shards 30 | #TODO add support for the following properties 31 | shards.multi-datacenter-write=false 32 | shards.write-on-all-hosts-in-same-datacenter=false 33 | -------------------------------------------------------------------------------- /beringeiclient/pom.xml: -------------------------------------------------------------------------------- 1 | 3 | 4.0.0 4 | 5 | 6 | com.wowcoders.tspark 7 | TSPark-ALL 8 | 0.0.1-SNAPSHOT 9 | 10 | 11 | beringeiclient 12 | 13 | 14 | 15 | 16 | org.apache.maven.plugins 17 | maven-compiler-plugin 18 | 3.6.1 19 | 20 | 1.8 21 | 1.8 22 | 23 | 24 | 25 | 26 | 27 | 28 | 29 | org.apache.thrift 30 | libthrift 31 | 0.10.0 32 | 33 | 34 | 35 | 36 | commons-cli 37 | commons-cli 38 | 1.2 39 | 40 | 41 | 42 | junit 43 | junit 44 | 4.12 45 | 46 | 47 | 48 | -------------------------------------------------------------------------------- /beringeiclient/scripts/run-client-test.sh: -------------------------------------------------------------------------------- 1 | java -cp ../target/beringeiclient-0.0.1-SNAPSHOT-jar-with-dependencies.jar org.wowcoders.beringeiclient.BeringeiTestClient -c ../config/beringeiclient-demo.properties 2 | -------------------------------------------------------------------------------- /beringeiclient/src/main/java/com/facebook/beringei/thriftclient/Compression.java: -------------------------------------------------------------------------------- 1 | package com.facebook.beringei.thriftclient; 2 | 3 | /** 4 | * Autogenerated by Thrift Compiler (0.9.3) 5 | * 6 | * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING 7 | * @generated 8 | */ 9 | 10 | public enum Compression implements org.apache.thrift.TEnum { 11 | NONE(0), 12 | ZLIB(1); 13 | 14 | private final int value; 15 | 16 | private Compression(int value) { 17 | this.value = value; 18 | } 19 | 20 | /** 21 | * Get the integer value of this enum value, as defined in the Thrift IDL. 22 | */ 23 | public int getValue() { 24 | return value; 25 | } 26 | 27 | /** 28 | * Find a the enum type by its integer value, as defined in the Thrift IDL. 29 | * @return null if the value is not found. 
30 | */ 31 | public static Compression findByValue(int value) { 32 | switch (value) { 33 | case 0: 34 | return NONE; 35 | case 1: 36 | return ZLIB; 37 | default: 38 | return null; 39 | } 40 | } 41 | } 42 | -------------------------------------------------------------------------------- /beringeiclient/src/main/java/com/facebook/beringei/thriftclient/GetDataResult.java: -------------------------------------------------------------------------------- 1 | package com.facebook.beringei.thriftclient; 2 | 3 | /** 4 | * Autogenerated by Thrift Compiler (0.9.3) 5 | * 6 | * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING 7 | * @generated 8 | */ 9 | import org.apache.thrift.scheme.IScheme; 10 | import org.apache.thrift.scheme.SchemeFactory; 11 | import org.apache.thrift.scheme.StandardScheme; 12 | 13 | import org.apache.thrift.scheme.TupleScheme; 14 | import org.apache.thrift.protocol.TTupleProtocol; 15 | 16 | import java.util.List; 17 | import java.util.ArrayList; 18 | import java.util.Map; 19 | import java.util.HashMap; 20 | import java.util.EnumMap; 21 | import java.util.EnumSet; 22 | import java.util.Collections; 23 | import java.util.BitSet; 24 | import javax.annotation.Generated; 25 | 26 | @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"}) 27 | @Generated(value = "Autogenerated by Thrift Compiler (0.9.3)", date = "2017-04-22") 28 | public class GetDataResult implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { 29 | private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("GetDataResult"); 30 | 31 | private static final org.apache.thrift.protocol.TField RESULTS_FIELD_DESC = new org.apache.thrift.protocol.TField("results", org.apache.thrift.protocol.TType.LIST, (short)1); 32 | 33 | private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); 34 | static { 35 | schemes.put(StandardScheme.class, new GetDataResultStandardSchemeFactory()); 36 | schemes.put(TupleScheme.class, new GetDataResultTupleSchemeFactory()); 37 | } 38 | 39 | public List results; // required 40 | 41 | /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ 42 | public enum _Fields implements org.apache.thrift.TFieldIdEnum { 43 | RESULTS((short)1, "results"); 44 | 45 | private static final Map byName = new HashMap(); 46 | 47 | static { 48 | for (_Fields field : EnumSet.allOf(_Fields.class)) { 49 | byName.put(field.getFieldName(), field); 50 | } 51 | } 52 | 53 | /** 54 | * Find the _Fields constant that matches fieldId, or null if its not found. 55 | */ 56 | public static _Fields findByThriftId(int fieldId) { 57 | switch(fieldId) { 58 | case 1: // RESULTS 59 | return RESULTS; 60 | default: 61 | return null; 62 | } 63 | } 64 | 65 | /** 66 | * Find the _Fields constant that matches fieldId, throwing an exception 67 | * if it is not found. 68 | */ 69 | public static _Fields findByThriftIdOrThrow(int fieldId) { 70 | _Fields fields = findByThriftId(fieldId); 71 | if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); 72 | return fields; 73 | } 74 | 75 | /** 76 | * Find the _Fields constant that matches name, or null if its not found. 
77 | */ 78 | public static _Fields findByName(String name) { 79 | return byName.get(name); 80 | } 81 | 82 | private final short _thriftId; 83 | private final String _fieldName; 84 | 85 | _Fields(short thriftId, String fieldName) { 86 | _thriftId = thriftId; 87 | _fieldName = fieldName; 88 | } 89 | 90 | public short getThriftFieldId() { 91 | return _thriftId; 92 | } 93 | 94 | public String getFieldName() { 95 | return _fieldName; 96 | } 97 | } 98 | 99 | // isset id assignments 100 | public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; 101 | static { 102 | Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); 103 | tmpMap.put(_Fields.RESULTS, new org.apache.thrift.meta_data.FieldMetaData("results", org.apache.thrift.TFieldRequirementType.DEFAULT, 104 | new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST, 105 | new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TimeSeriesData.class)))); 106 | metaDataMap = Collections.unmodifiableMap(tmpMap); 107 | org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(GetDataResult.class, metaDataMap); 108 | } 109 | 110 | public GetDataResult() { 111 | } 112 | 113 | public GetDataResult( 114 | List results) 115 | { 116 | this(); 117 | this.results = results; 118 | } 119 | 120 | /** 121 | * Performs a deep copy on other. 122 | */ 123 | public GetDataResult(GetDataResult other) { 124 | if (other.isSetResults()) { 125 | List __this__results = new ArrayList(other.results.size()); 126 | for (TimeSeriesData other_element : other.results) { 127 | __this__results.add(new TimeSeriesData(other_element)); 128 | } 129 | this.results = __this__results; 130 | } 131 | } 132 | 133 | public GetDataResult deepCopy() { 134 | return new GetDataResult(this); 135 | } 136 | 137 | @Override 138 | public void clear() { 139 | this.results = null; 140 | } 141 | 142 | public int getResultsSize() { 143 | return (this.results == null) ? 0 : this.results.size(); 144 | } 145 | 146 | public java.util.Iterator getResultsIterator() { 147 | return (this.results == null) ? 
null : this.results.iterator(); 148 | } 149 | 150 | public void addToResults(TimeSeriesData elem) { 151 | if (this.results == null) { 152 | this.results = new ArrayList(); 153 | } 154 | this.results.add(elem); 155 | } 156 | 157 | public List getResults() { 158 | return this.results; 159 | } 160 | 161 | public GetDataResult setResults(List results) { 162 | this.results = results; 163 | return this; 164 | } 165 | 166 | public void unsetResults() { 167 | this.results = null; 168 | } 169 | 170 | /** Returns true if field results is set (has been assigned a value) and false otherwise */ 171 | public boolean isSetResults() { 172 | return this.results != null; 173 | } 174 | 175 | public void setResultsIsSet(boolean value) { 176 | if (!value) { 177 | this.results = null; 178 | } 179 | } 180 | 181 | public void setFieldValue(_Fields field, Object value) { 182 | switch (field) { 183 | case RESULTS: 184 | if (value == null) { 185 | unsetResults(); 186 | } else { 187 | setResults((List)value); 188 | } 189 | break; 190 | 191 | } 192 | } 193 | 194 | public Object getFieldValue(_Fields field) { 195 | switch (field) { 196 | case RESULTS: 197 | return getResults(); 198 | 199 | } 200 | throw new IllegalStateException(); 201 | } 202 | 203 | /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ 204 | public boolean isSet(_Fields field) { 205 | if (field == null) { 206 | throw new IllegalArgumentException(); 207 | } 208 | 209 | switch (field) { 210 | case RESULTS: 211 | return isSetResults(); 212 | } 213 | throw new IllegalStateException(); 214 | } 215 | 216 | @Override 217 | public boolean equals(Object that) { 218 | if (that == null) 219 | return false; 220 | if (that instanceof GetDataResult) 221 | return this.equals((GetDataResult)that); 222 | return false; 223 | } 224 | 225 | public boolean equals(GetDataResult that) { 226 | if (that == null) 227 | return false; 228 | 229 | boolean this_present_results = true && this.isSetResults(); 230 | boolean that_present_results = true && that.isSetResults(); 231 | if (this_present_results || that_present_results) { 232 | if (!(this_present_results && that_present_results)) 233 | return false; 234 | if (!this.results.equals(that.results)) 235 | return false; 236 | } 237 | 238 | return true; 239 | } 240 | 241 | @Override 242 | public int hashCode() { 243 | List list = new ArrayList(); 244 | 245 | boolean present_results = true && (isSetResults()); 246 | list.add(present_results); 247 | if (present_results) 248 | list.add(results); 249 | 250 | return list.hashCode(); 251 | } 252 | 253 | @Override 254 | public int compareTo(GetDataResult other) { 255 | if (!getClass().equals(other.getClass())) { 256 | return getClass().getName().compareTo(other.getClass().getName()); 257 | } 258 | 259 | int lastComparison = 0; 260 | 261 | lastComparison = Boolean.valueOf(isSetResults()).compareTo(other.isSetResults()); 262 | if (lastComparison != 0) { 263 | return lastComparison; 264 | } 265 | if (isSetResults()) { 266 | lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.results, other.results); 267 | if (lastComparison != 0) { 268 | return lastComparison; 269 | } 270 | } 271 | return 0; 272 | } 273 | 274 | public _Fields fieldForId(int fieldId) { 275 | return _Fields.findByThriftId(fieldId); 276 | } 277 | 278 | public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { 279 | schemes.get(iprot.getScheme()).getScheme().read(iprot, this); 280 | } 281 | 282 | public void 
write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { 283 | schemes.get(oprot.getScheme()).getScheme().write(oprot, this); 284 | } 285 | 286 | @Override 287 | public String toString() { 288 | StringBuilder sb = new StringBuilder("GetDataResult("); 289 | 290 | sb.append("results:"); 291 | if (this.results == null) { 292 | sb.append("null"); 293 | } else { 294 | sb.append(this.results); 295 | } 296 | 297 | sb.append(")"); 298 | return sb.toString(); 299 | } 300 | 301 | public void validate() throws org.apache.thrift.TException { 302 | // check for required fields 303 | // check for sub-struct validity 304 | } 305 | 306 | private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { 307 | try { 308 | write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); 309 | } catch (org.apache.thrift.TException te) { 310 | throw new java.io.IOException(te); 311 | } 312 | } 313 | 314 | private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { 315 | try { 316 | read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); 317 | } catch (org.apache.thrift.TException te) { 318 | throw new java.io.IOException(te); 319 | } 320 | } 321 | 322 | private static class GetDataResultStandardSchemeFactory implements SchemeFactory { 323 | public GetDataResultStandardScheme getScheme() { 324 | return new GetDataResultStandardScheme(); 325 | } 326 | } 327 | 328 | private static class GetDataResultStandardScheme extends StandardScheme { 329 | 330 | public void read(org.apache.thrift.protocol.TProtocol iprot, GetDataResult struct) throws org.apache.thrift.TException { 331 | org.apache.thrift.protocol.TField schemeField; 332 | iprot.readStructBegin(); 333 | while (true) 334 | { 335 | schemeField = iprot.readFieldBegin(); 336 | if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 337 | break; 338 | } 339 | switch (schemeField.id) { 340 | case 1: // RESULTS 341 | if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { 342 | { 343 | org.apache.thrift.protocol.TList _list16 = iprot.readListBegin(); 344 | struct.results = new ArrayList(_list16.size); 345 | TimeSeriesData _elem17; 346 | for (int _i18 = 0; _i18 < _list16.size; ++_i18) 347 | { 348 | _elem17 = new TimeSeriesData(); 349 | _elem17.read(iprot); 350 | struct.results.add(_elem17); 351 | } 352 | iprot.readListEnd(); 353 | } 354 | struct.setResultsIsSet(true); 355 | } else { 356 | org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); 357 | } 358 | break; 359 | default: 360 | org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); 361 | } 362 | iprot.readFieldEnd(); 363 | } 364 | iprot.readStructEnd(); 365 | 366 | // check for required fields of primitive type, which can't be checked in the validate method 367 | struct.validate(); 368 | } 369 | 370 | public void write(org.apache.thrift.protocol.TProtocol oprot, GetDataResult struct) throws org.apache.thrift.TException { 371 | struct.validate(); 372 | 373 | oprot.writeStructBegin(STRUCT_DESC); 374 | if (struct.results != null) { 375 | oprot.writeFieldBegin(RESULTS_FIELD_DESC); 376 | { 377 | oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.results.size())); 378 | for (TimeSeriesData _iter19 : struct.results) 379 | { 380 | _iter19.write(oprot); 381 | } 382 | oprot.writeListEnd(); 383 | } 384 | 
oprot.writeFieldEnd(); 385 | } 386 | oprot.writeFieldStop(); 387 | oprot.writeStructEnd(); 388 | } 389 | 390 | } 391 | 392 | private static class GetDataResultTupleSchemeFactory implements SchemeFactory { 393 | public GetDataResultTupleScheme getScheme() { 394 | return new GetDataResultTupleScheme(); 395 | } 396 | } 397 | 398 | private static class GetDataResultTupleScheme extends TupleScheme { 399 | 400 | @Override 401 | public void write(org.apache.thrift.protocol.TProtocol prot, GetDataResult struct) throws org.apache.thrift.TException { 402 | TTupleProtocol oprot = (TTupleProtocol) prot; 403 | BitSet optionals = new BitSet(); 404 | if (struct.isSetResults()) { 405 | optionals.set(0); 406 | } 407 | oprot.writeBitSet(optionals, 1); 408 | if (struct.isSetResults()) { 409 | { 410 | oprot.writeI32(struct.results.size()); 411 | for (TimeSeriesData _iter20 : struct.results) 412 | { 413 | _iter20.write(oprot); 414 | } 415 | } 416 | } 417 | } 418 | 419 | @Override 420 | public void read(org.apache.thrift.protocol.TProtocol prot, GetDataResult struct) throws org.apache.thrift.TException { 421 | TTupleProtocol iprot = (TTupleProtocol) prot; 422 | BitSet incoming = iprot.readBitSet(1); 423 | if (incoming.get(0)) { 424 | { 425 | org.apache.thrift.protocol.TList _list21 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); 426 | struct.results = new ArrayList(_list21.size); 427 | TimeSeriesData _elem22; 428 | for (int _i23 = 0; _i23 < _list21.size; ++_i23) 429 | { 430 | _elem22 = new TimeSeriesData(); 431 | _elem22.read(iprot); 432 | struct.results.add(_elem22); 433 | } 434 | } 435 | struct.setResultsIsSet(true); 436 | } 437 | } 438 | } 439 | 440 | } 441 | 442 | -------------------------------------------------------------------------------- /beringeiclient/src/main/java/com/facebook/beringei/thriftclient/Key.java: -------------------------------------------------------------------------------- 1 | package com.facebook.beringei.thriftclient; 2 | 3 | /** 4 | 5 | * Autogenerated by Thrift Compiler (0.9.3) 6 | * 7 | * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING 8 | * @generated 9 | */ 10 | import org.apache.thrift.scheme.IScheme; 11 | import org.apache.thrift.scheme.SchemeFactory; 12 | import org.apache.thrift.scheme.StandardScheme; 13 | 14 | import org.apache.thrift.scheme.TupleScheme; 15 | import org.apache.thrift.protocol.TTupleProtocol; 16 | 17 | import org.apache.thrift.EncodingUtils; 18 | 19 | import java.util.List; 20 | import java.util.ArrayList; 21 | import java.util.Map; 22 | import java.util.HashMap; 23 | import java.util.EnumMap; 24 | 25 | import java.util.EnumSet; 26 | import java.util.Collections; 27 | import java.util.BitSet; 28 | 29 | import javax.annotation.Generated; 30 | 31 | 32 | @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"}) 33 | /** 34 | * Copyright (c) 2016-present, Facebook, Inc. 35 | * All rights reserved. 36 | * 37 | * This source code is licensed under the BSD-style license found in the 38 | * LICENSE file in the root directory of this source tree. An additional grant 39 | * of patent rights can be found in the PATENTS file in the same directory. 
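*
* A Key addresses one Beringei time series: the row-key string plus the
* shardId of the shard that owns it.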
40 | */ 41 | @Generated(value = "Autogenerated by Thrift Compiler (0.9.3)", date = "2017-04-22") 42 | public class Key implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { 43 | private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("Key"); 44 | 45 | private static final org.apache.thrift.protocol.TField KEY_FIELD_DESC = new org.apache.thrift.protocol.TField("key", org.apache.thrift.protocol.TType.STRING, (short)1); 46 | private static final org.apache.thrift.protocol.TField SHARD_ID_FIELD_DESC = new org.apache.thrift.protocol.TField("shardId", org.apache.thrift.protocol.TType.I64, (short)2); 47 | 48 | private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); 49 | static { 50 | schemes.put(StandardScheme.class, new KeyStandardSchemeFactory()); 51 | schemes.put(TupleScheme.class, new KeyTupleSchemeFactory()); 52 | } 53 | 54 | public String key; // required 55 | public long shardId; // required 56 | 57 | /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ 58 | public enum _Fields implements org.apache.thrift.TFieldIdEnum { 59 | KEY((short)1, "key"), 60 | SHARD_ID((short)2, "shardId"); 61 | 62 | private static final Map byName = new HashMap(); 63 | 64 | static { 65 | for (_Fields field : EnumSet.allOf(_Fields.class)) { 66 | byName.put(field.getFieldName(), field); 67 | } 68 | } 69 | 70 | /** 71 | * Find the _Fields constant that matches fieldId, or null if its not found. 72 | */ 73 | public static _Fields findByThriftId(int fieldId) { 74 | switch(fieldId) { 75 | case 1: // KEY 76 | return KEY; 77 | case 2: // SHARD_ID 78 | return SHARD_ID; 79 | default: 80 | return null; 81 | } 82 | } 83 | 84 | /** 85 | * Find the _Fields constant that matches fieldId, throwing an exception 86 | * if it is not found. 87 | */ 88 | public static _Fields findByThriftIdOrThrow(int fieldId) { 89 | _Fields fields = findByThriftId(fieldId); 90 | if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); 91 | return fields; 92 | } 93 | 94 | /** 95 | * Find the _Fields constant that matches name, or null if its not found. 
96 | */ 97 | public static _Fields findByName(String name) { 98 | return byName.get(name); 99 | } 100 | 101 | private final short _thriftId; 102 | private final String _fieldName; 103 | 104 | _Fields(short thriftId, String fieldName) { 105 | _thriftId = thriftId; 106 | _fieldName = fieldName; 107 | } 108 | 109 | public short getThriftFieldId() { 110 | return _thriftId; 111 | } 112 | 113 | public String getFieldName() { 114 | return _fieldName; 115 | } 116 | } 117 | 118 | // isset id assignments 119 | private static final int __SHARDID_ISSET_ID = 0; 120 | private byte __isset_bitfield = 0; 121 | public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; 122 | static { 123 | Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); 124 | tmpMap.put(_Fields.KEY, new org.apache.thrift.meta_data.FieldMetaData("key", org.apache.thrift.TFieldRequirementType.DEFAULT, 125 | new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); 126 | tmpMap.put(_Fields.SHARD_ID, new org.apache.thrift.meta_data.FieldMetaData("shardId", org.apache.thrift.TFieldRequirementType.DEFAULT, 127 | new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I64))); 128 | metaDataMap = Collections.unmodifiableMap(tmpMap); 129 | org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(Key.class, metaDataMap); 130 | } 131 | 132 | public Key() { 133 | } 134 | 135 | public Key( 136 | String key, 137 | long shardId) 138 | { 139 | this(); 140 | this.key = key; 141 | this.shardId = shardId; 142 | setShardIdIsSet(true); 143 | } 144 | 145 | /** 146 | * Performs a deep copy on other. 147 | */ 148 | public Key(Key other) { 149 | __isset_bitfield = other.__isset_bitfield; 150 | if (other.isSetKey()) { 151 | this.key = other.key; 152 | } 153 | this.shardId = other.shardId; 154 | } 155 | 156 | public Key deepCopy() { 157 | return new Key(this); 158 | } 159 | 160 | @Override 161 | public void clear() { 162 | this.key = null; 163 | setShardIdIsSet(false); 164 | this.shardId = 0; 165 | } 166 | 167 | public String getKey() { 168 | return this.key; 169 | } 170 | 171 | public Key setKey(String key) { 172 | this.key = key; 173 | return this; 174 | } 175 | 176 | public void unsetKey() { 177 | this.key = null; 178 | } 179 | 180 | /** Returns true if field key is set (has been assigned a value) and false otherwise */ 181 | public boolean isSetKey() { 182 | return this.key != null; 183 | } 184 | 185 | public void setKeyIsSet(boolean value) { 186 | if (!value) { 187 | this.key = null; 188 | } 189 | } 190 | 191 | public long getShardId() { 192 | return this.shardId; 193 | } 194 | 195 | public Key setShardId(long shardId) { 196 | this.shardId = shardId; 197 | setShardIdIsSet(true); 198 | return this; 199 | } 200 | 201 | public void unsetShardId() { 202 | __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __SHARDID_ISSET_ID); 203 | } 204 | 205 | /** Returns true if field shardId is set (has been assigned a value) and false otherwise */ 206 | public boolean isSetShardId() { 207 | return EncodingUtils.testBit(__isset_bitfield, __SHARDID_ISSET_ID); 208 | } 209 | 210 | public void setShardIdIsSet(boolean value) { 211 | __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __SHARDID_ISSET_ID, value); 212 | } 213 | 214 | public void setFieldValue(_Fields field, Object value) { 215 | switch (field) { 216 | case KEY: 217 | if (value == null) { 218 | unsetKey(); 219 
| } else { 220 | setKey((String)value); 221 | } 222 | break; 223 | 224 | case SHARD_ID: 225 | if (value == null) { 226 | unsetShardId(); 227 | } else { 228 | setShardId((Long)value); 229 | } 230 | break; 231 | 232 | } 233 | } 234 | 235 | public Object getFieldValue(_Fields field) { 236 | switch (field) { 237 | case KEY: 238 | return getKey(); 239 | 240 | case SHARD_ID: 241 | return getShardId(); 242 | 243 | } 244 | throw new IllegalStateException(); 245 | } 246 | 247 | /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ 248 | public boolean isSet(_Fields field) { 249 | if (field == null) { 250 | throw new IllegalArgumentException(); 251 | } 252 | 253 | switch (field) { 254 | case KEY: 255 | return isSetKey(); 256 | case SHARD_ID: 257 | return isSetShardId(); 258 | } 259 | throw new IllegalStateException(); 260 | } 261 | 262 | @Override 263 | public boolean equals(Object that) { 264 | if (that == null) 265 | return false; 266 | if (that instanceof Key) 267 | return this.equals((Key)that); 268 | return false; 269 | } 270 | 271 | public boolean equals(Key that) { 272 | if (that == null) 273 | return false; 274 | 275 | boolean this_present_key = true && this.isSetKey(); 276 | boolean that_present_key = true && that.isSetKey(); 277 | if (this_present_key || that_present_key) { 278 | if (!(this_present_key && that_present_key)) 279 | return false; 280 | if (!this.key.equals(that.key)) 281 | return false; 282 | } 283 | 284 | boolean this_present_shardId = true; 285 | boolean that_present_shardId = true; 286 | if (this_present_shardId || that_present_shardId) { 287 | if (!(this_present_shardId && that_present_shardId)) 288 | return false; 289 | if (this.shardId != that.shardId) 290 | return false; 291 | } 292 | 293 | return true; 294 | } 295 | 296 | @Override 297 | public int hashCode() { 298 | List list = new ArrayList(); 299 | 300 | boolean present_key = true && (isSetKey()); 301 | list.add(present_key); 302 | if (present_key) 303 | list.add(key); 304 | 305 | boolean present_shardId = true; 306 | list.add(present_shardId); 307 | if (present_shardId) 308 | list.add(shardId); 309 | 310 | return list.hashCode(); 311 | } 312 | 313 | @Override 314 | public int compareTo(Key other) { 315 | if (!getClass().equals(other.getClass())) { 316 | return getClass().getName().compareTo(other.getClass().getName()); 317 | } 318 | 319 | int lastComparison = 0; 320 | 321 | lastComparison = Boolean.valueOf(isSetKey()).compareTo(other.isSetKey()); 322 | if (lastComparison != 0) { 323 | return lastComparison; 324 | } 325 | if (isSetKey()) { 326 | lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.key, other.key); 327 | if (lastComparison != 0) { 328 | return lastComparison; 329 | } 330 | } 331 | lastComparison = Boolean.valueOf(isSetShardId()).compareTo(other.isSetShardId()); 332 | if (lastComparison != 0) { 333 | return lastComparison; 334 | } 335 | if (isSetShardId()) { 336 | lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.shardId, other.shardId); 337 | if (lastComparison != 0) { 338 | return lastComparison; 339 | } 340 | } 341 | return 0; 342 | } 343 | 344 | public _Fields fieldForId(int fieldId) { 345 | return _Fields.findByThriftId(fieldId); 346 | } 347 | 348 | public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { 349 | schemes.get(iprot.getScheme()).getScheme().read(iprot, this); 350 | } 351 | 352 | public void write(org.apache.thrift.protocol.TProtocol oprot) throws 
org.apache.thrift.TException { 353 | schemes.get(oprot.getScheme()).getScheme().write(oprot, this); 354 | } 355 | 356 | @Override 357 | public String toString() { 358 | StringBuilder sb = new StringBuilder("Key("); 359 | boolean first = true; 360 | 361 | sb.append("key:"); 362 | if (this.key == null) { 363 | sb.append("null"); 364 | } else { 365 | sb.append(this.key); 366 | } 367 | first = false; 368 | if (!first) sb.append(", "); 369 | sb.append("shardId:"); 370 | sb.append(this.shardId); 371 | first = false; 372 | sb.append(")"); 373 | return sb.toString(); 374 | } 375 | 376 | public void validate() throws org.apache.thrift.TException { 377 | // check for required fields 378 | // check for sub-struct validity 379 | } 380 | 381 | private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { 382 | try { 383 | write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); 384 | } catch (org.apache.thrift.TException te) { 385 | throw new java.io.IOException(te); 386 | } 387 | } 388 | 389 | private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { 390 | try { 391 | // it doesn't seem like you should have to do this, but java serialization is wacky, and doesn't call the default constructor. 392 | __isset_bitfield = 0; 393 | read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); 394 | } catch (org.apache.thrift.TException te) { 395 | throw new java.io.IOException(te); 396 | } 397 | } 398 | 399 | private static class KeyStandardSchemeFactory implements SchemeFactory { 400 | public KeyStandardScheme getScheme() { 401 | return new KeyStandardScheme(); 402 | } 403 | } 404 | 405 | private static class KeyStandardScheme extends StandardScheme { 406 | 407 | public void read(org.apache.thrift.protocol.TProtocol iprot, Key struct) throws org.apache.thrift.TException { 408 | org.apache.thrift.protocol.TField schemeField; 409 | iprot.readStructBegin(); 410 | while (true) 411 | { 412 | schemeField = iprot.readFieldBegin(); 413 | if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 414 | break; 415 | } 416 | switch (schemeField.id) { 417 | case 1: // KEY 418 | if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { 419 | struct.key = iprot.readString(); 420 | struct.setKeyIsSet(true); 421 | } else { 422 | org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); 423 | } 424 | break; 425 | case 2: // SHARD_ID 426 | if (schemeField.type == org.apache.thrift.protocol.TType.I64) { 427 | struct.shardId = iprot.readI64(); 428 | struct.setShardIdIsSet(true); 429 | } else { 430 | org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); 431 | } 432 | break; 433 | default: 434 | org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); 435 | } 436 | iprot.readFieldEnd(); 437 | } 438 | iprot.readStructEnd(); 439 | 440 | // check for required fields of primitive type, which can't be checked in the validate method 441 | struct.validate(); 442 | } 443 | 444 | public void write(org.apache.thrift.protocol.TProtocol oprot, Key struct) throws org.apache.thrift.TException { 445 | struct.validate(); 446 | 447 | oprot.writeStructBegin(STRUCT_DESC); 448 | if (struct.key != null) { 449 | oprot.writeFieldBegin(KEY_FIELD_DESC); 450 | oprot.writeString(struct.key); 451 | oprot.writeFieldEnd(); 452 | } 453 | oprot.writeFieldBegin(SHARD_ID_FIELD_DESC); 454 | oprot.writeI64(struct.shardId); 455 
| oprot.writeFieldEnd(); 456 | oprot.writeFieldStop(); 457 | oprot.writeStructEnd(); 458 | } 459 | 460 | } 461 | 462 | private static class KeyTupleSchemeFactory implements SchemeFactory { 463 | public KeyTupleScheme getScheme() { 464 | return new KeyTupleScheme(); 465 | } 466 | } 467 | 468 | private static class KeyTupleScheme extends TupleScheme { 469 | 470 | @Override 471 | public void write(org.apache.thrift.protocol.TProtocol prot, Key struct) throws org.apache.thrift.TException { 472 | TTupleProtocol oprot = (TTupleProtocol) prot; 473 | BitSet optionals = new BitSet(); 474 | if (struct.isSetKey()) { 475 | optionals.set(0); 476 | } 477 | if (struct.isSetShardId()) { 478 | optionals.set(1); 479 | } 480 | oprot.writeBitSet(optionals, 2); 481 | if (struct.isSetKey()) { 482 | oprot.writeString(struct.key); 483 | } 484 | if (struct.isSetShardId()) { 485 | oprot.writeI64(struct.shardId); 486 | } 487 | } 488 | 489 | @Override 490 | public void read(org.apache.thrift.protocol.TProtocol prot, Key struct) throws org.apache.thrift.TException { 491 | TTupleProtocol iprot = (TTupleProtocol) prot; 492 | BitSet incoming = iprot.readBitSet(2); 493 | if (incoming.get(0)) { 494 | struct.key = iprot.readString(); 495 | struct.setKeyIsSet(true); 496 | } 497 | if (incoming.get(1)) { 498 | struct.shardId = iprot.readI64(); 499 | struct.setShardIdIsSet(true); 500 | } 501 | } 502 | } 503 | 504 | } 505 | 506 | -------------------------------------------------------------------------------- /beringeiclient/src/main/java/com/facebook/beringei/thriftclient/PutDataRequest.java: -------------------------------------------------------------------------------- 1 | package com.facebook.beringei.thriftclient; 2 | 3 | /** 4 | 5 | * Autogenerated by Thrift Compiler (0.9.3) 6 | * 7 | * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING 8 | * @generated 9 | */ 10 | import org.apache.thrift.scheme.IScheme; 11 | import org.apache.thrift.scheme.SchemeFactory; 12 | import org.apache.thrift.scheme.StandardScheme; 13 | 14 | import org.apache.thrift.scheme.TupleScheme; 15 | import org.apache.thrift.protocol.TTupleProtocol; 16 | 17 | import java.util.List; 18 | import java.util.ArrayList; 19 | import java.util.Map; 20 | import java.util.HashMap; 21 | import java.util.EnumMap; 22 | import java.util.EnumSet; 23 | import java.util.Collections; 24 | import java.util.BitSet; 25 | 26 | import javax.annotation.Generated; 27 | 28 | 29 | @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"}) 30 | @Generated(value = "Autogenerated by Thrift Compiler (0.9.3)", date = "2017-04-22") 31 | public class PutDataRequest implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { 32 | private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("PutDataRequest"); 33 | 34 | private static final org.apache.thrift.protocol.TField DATA_FIELD_DESC = new org.apache.thrift.protocol.TField("data", org.apache.thrift.protocol.TType.LIST, (short)1); 35 | 36 | private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); 37 | static { 38 | schemes.put(StandardScheme.class, new PutDataRequestStandardSchemeFactory()); 39 | schemes.put(TupleScheme.class, new PutDataRequestTupleSchemeFactory()); 40 | } 41 | 42 | public List data; // required 43 | 44 | /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. 
*/ 45 | public enum _Fields implements org.apache.thrift.TFieldIdEnum { 46 | DATA((short)1, "data"); 47 | 48 | private static final Map byName = new HashMap(); 49 | 50 | static { 51 | for (_Fields field : EnumSet.allOf(_Fields.class)) { 52 | byName.put(field.getFieldName(), field); 53 | } 54 | } 55 | 56 | /** 57 | * Find the _Fields constant that matches fieldId, or null if its not found. 58 | */ 59 | public static _Fields findByThriftId(int fieldId) { 60 | switch(fieldId) { 61 | case 1: // DATA 62 | return DATA; 63 | default: 64 | return null; 65 | } 66 | } 67 | 68 | /** 69 | * Find the _Fields constant that matches fieldId, throwing an exception 70 | * if it is not found. 71 | */ 72 | public static _Fields findByThriftIdOrThrow(int fieldId) { 73 | _Fields fields = findByThriftId(fieldId); 74 | if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); 75 | return fields; 76 | } 77 | 78 | /** 79 | * Find the _Fields constant that matches name, or null if its not found. 80 | */ 81 | public static _Fields findByName(String name) { 82 | return byName.get(name); 83 | } 84 | 85 | private final short _thriftId; 86 | private final String _fieldName; 87 | 88 | _Fields(short thriftId, String fieldName) { 89 | _thriftId = thriftId; 90 | _fieldName = fieldName; 91 | } 92 | 93 | public short getThriftFieldId() { 94 | return _thriftId; 95 | } 96 | 97 | public String getFieldName() { 98 | return _fieldName; 99 | } 100 | } 101 | 102 | // isset id assignments 103 | public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; 104 | static { 105 | Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); 106 | tmpMap.put(_Fields.DATA, new org.apache.thrift.meta_data.FieldMetaData("data", org.apache.thrift.TFieldRequirementType.DEFAULT, 107 | new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST, 108 | new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, DataPoint.class)))); 109 | metaDataMap = Collections.unmodifiableMap(tmpMap); 110 | org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(PutDataRequest.class, metaDataMap); 111 | } 112 | 113 | public PutDataRequest() { 114 | } 115 | 116 | public PutDataRequest( 117 | List data) 118 | { 119 | this(); 120 | this.data = data; 121 | } 122 | 123 | /** 124 | * Performs a deep copy on other. 125 | */ 126 | public PutDataRequest(PutDataRequest other) { 127 | if (other.isSetData()) { 128 | List __this__data = new ArrayList(other.data.size()); 129 | for (DataPoint other_element : other.data) { 130 | __this__data.add(new DataPoint(other_element)); 131 | } 132 | this.data = __this__data; 133 | } 134 | } 135 | 136 | public PutDataRequest deepCopy() { 137 | return new PutDataRequest(this); 138 | } 139 | 140 | @Override 141 | public void clear() { 142 | this.data = null; 143 | } 144 | 145 | public int getDataSize() { 146 | return (this.data == null) ? 0 : this.data.size(); 147 | } 148 | 149 | public java.util.Iterator getDataIterator() { 150 | return (this.data == null) ? 
null : this.data.iterator(); 151 | } 152 | 153 | public void addToData(DataPoint elem) { 154 | if (this.data == null) { 155 | this.data = new ArrayList(); 156 | } 157 | this.data.add(elem); 158 | } 159 | 160 | public List getData() { 161 | return this.data; 162 | } 163 | 164 | public PutDataRequest setData(List data) { 165 | this.data = data; 166 | return this; 167 | } 168 | 169 | public void unsetData() { 170 | this.data = null; 171 | } 172 | 173 | /** Returns true if field data is set (has been assigned a value) and false otherwise */ 174 | public boolean isSetData() { 175 | return this.data != null; 176 | } 177 | 178 | public void setDataIsSet(boolean value) { 179 | if (!value) { 180 | this.data = null; 181 | } 182 | } 183 | 184 | public void setFieldValue(_Fields field, Object value) { 185 | switch (field) { 186 | case DATA: 187 | if (value == null) { 188 | unsetData(); 189 | } else { 190 | setData((List)value); 191 | } 192 | break; 193 | 194 | } 195 | } 196 | 197 | public Object getFieldValue(_Fields field) { 198 | switch (field) { 199 | case DATA: 200 | return getData(); 201 | 202 | } 203 | throw new IllegalStateException(); 204 | } 205 | 206 | /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ 207 | public boolean isSet(_Fields field) { 208 | if (field == null) { 209 | throw new IllegalArgumentException(); 210 | } 211 | 212 | switch (field) { 213 | case DATA: 214 | return isSetData(); 215 | } 216 | throw new IllegalStateException(); 217 | } 218 | 219 | @Override 220 | public boolean equals(Object that) { 221 | if (that == null) 222 | return false; 223 | if (that instanceof PutDataRequest) 224 | return this.equals((PutDataRequest)that); 225 | return false; 226 | } 227 | 228 | public boolean equals(PutDataRequest that) { 229 | if (that == null) 230 | return false; 231 | 232 | boolean this_present_data = true && this.isSetData(); 233 | boolean that_present_data = true && that.isSetData(); 234 | if (this_present_data || that_present_data) { 235 | if (!(this_present_data && that_present_data)) 236 | return false; 237 | if (!this.data.equals(that.data)) 238 | return false; 239 | } 240 | 241 | return true; 242 | } 243 | 244 | @Override 245 | public int hashCode() { 246 | List list = new ArrayList(); 247 | 248 | boolean present_data = true && (isSetData()); 249 | list.add(present_data); 250 | if (present_data) 251 | list.add(data); 252 | 253 | return list.hashCode(); 254 | } 255 | 256 | @Override 257 | public int compareTo(PutDataRequest other) { 258 | if (!getClass().equals(other.getClass())) { 259 | return getClass().getName().compareTo(other.getClass().getName()); 260 | } 261 | 262 | int lastComparison = 0; 263 | 264 | lastComparison = Boolean.valueOf(isSetData()).compareTo(other.isSetData()); 265 | if (lastComparison != 0) { 266 | return lastComparison; 267 | } 268 | if (isSetData()) { 269 | lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.data, other.data); 270 | if (lastComparison != 0) { 271 | return lastComparison; 272 | } 273 | } 274 | return 0; 275 | } 276 | 277 | public _Fields fieldForId(int fieldId) { 278 | return _Fields.findByThriftId(fieldId); 279 | } 280 | 281 | public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { 282 | schemes.get(iprot.getScheme()).getScheme().read(iprot, this); 283 | } 284 | 285 | public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { 286 | schemes.get(oprot.getScheme()).getScheme().write(oprot, 
this); 287 | } 288 | 289 | @Override 290 | public String toString() { 291 | StringBuilder sb = new StringBuilder("PutDataRequest("); 292 | 293 | sb.append("data:"); 294 | if (this.data == null) { 295 | sb.append("null"); 296 | } else { 297 | sb.append(this.data); 298 | } 299 | 300 | sb.append(")"); 301 | return sb.toString(); 302 | } 303 | 304 | public void validate() throws org.apache.thrift.TException { 305 | // check for required fields 306 | // check for sub-struct validity 307 | } 308 | 309 | private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { 310 | try { 311 | write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); 312 | } catch (org.apache.thrift.TException te) { 313 | throw new java.io.IOException(te); 314 | } 315 | } 316 | 317 | private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { 318 | try { 319 | read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); 320 | } catch (org.apache.thrift.TException te) { 321 | throw new java.io.IOException(te); 322 | } 323 | } 324 | 325 | private static class PutDataRequestStandardSchemeFactory implements SchemeFactory { 326 | public PutDataRequestStandardScheme getScheme() { 327 | return new PutDataRequestStandardScheme(); 328 | } 329 | } 330 | 331 | private static class PutDataRequestStandardScheme extends StandardScheme { 332 | 333 | public void read(org.apache.thrift.protocol.TProtocol iprot, PutDataRequest struct) throws org.apache.thrift.TException { 334 | org.apache.thrift.protocol.TField schemeField; 335 | iprot.readStructBegin(); 336 | while (true) 337 | { 338 | schemeField = iprot.readFieldBegin(); 339 | if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 340 | break; 341 | } 342 | switch (schemeField.id) { 343 | case 1: // DATA 344 | if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { 345 | { 346 | org.apache.thrift.protocol.TList _list24 = iprot.readListBegin(); 347 | struct.data = new ArrayList(_list24.size); 348 | DataPoint _elem25; 349 | for (int _i26 = 0; _i26 < _list24.size; ++_i26) 350 | { 351 | _elem25 = new DataPoint(); 352 | _elem25.read(iprot); 353 | struct.data.add(_elem25); 354 | } 355 | iprot.readListEnd(); 356 | } 357 | struct.setDataIsSet(true); 358 | } else { 359 | org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); 360 | } 361 | break; 362 | default: 363 | org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); 364 | } 365 | iprot.readFieldEnd(); 366 | } 367 | iprot.readStructEnd(); 368 | 369 | // check for required fields of primitive type, which can't be checked in the validate method 370 | struct.validate(); 371 | } 372 | 373 | public void write(org.apache.thrift.protocol.TProtocol oprot, PutDataRequest struct) throws org.apache.thrift.TException { 374 | struct.validate(); 375 | 376 | oprot.writeStructBegin(STRUCT_DESC); 377 | if (struct.data != null) { 378 | oprot.writeFieldBegin(DATA_FIELD_DESC); 379 | { 380 | oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.data.size())); 381 | for (DataPoint _iter27 : struct.data) 382 | { 383 | _iter27.write(oprot); 384 | } 385 | oprot.writeListEnd(); 386 | } 387 | oprot.writeFieldEnd(); 388 | } 389 | oprot.writeFieldStop(); 390 | oprot.writeStructEnd(); 391 | } 392 | 393 | } 394 | 395 | private static class PutDataRequestTupleSchemeFactory implements SchemeFactory { 
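// Note on the two serializers (this applies to every generated struct in this
// package): the StandardScheme above writes self-describing field headers
// (id + type) so readers can skip unknown fields, while the TupleScheme below
// is a denser encoding for TTupleProtocol that writes a BitSet of the fields
// that are set, followed by just their values, as can be seen in its write()
// method.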
396 | public PutDataRequestTupleScheme getScheme() { 397 | return new PutDataRequestTupleScheme(); 398 | } 399 | } 400 | 401 | private static class PutDataRequestTupleScheme extends TupleScheme { 402 | 403 | @Override 404 | public void write(org.apache.thrift.protocol.TProtocol prot, PutDataRequest struct) throws org.apache.thrift.TException { 405 | TTupleProtocol oprot = (TTupleProtocol) prot; 406 | BitSet optionals = new BitSet(); 407 | if (struct.isSetData()) { 408 | optionals.set(0); 409 | } 410 | oprot.writeBitSet(optionals, 1); 411 | if (struct.isSetData()) { 412 | { 413 | oprot.writeI32(struct.data.size()); 414 | for (DataPoint _iter28 : struct.data) 415 | { 416 | _iter28.write(oprot); 417 | } 418 | } 419 | } 420 | } 421 | 422 | @Override 423 | public void read(org.apache.thrift.protocol.TProtocol prot, PutDataRequest struct) throws org.apache.thrift.TException { 424 | TTupleProtocol iprot = (TTupleProtocol) prot; 425 | BitSet incoming = iprot.readBitSet(1); 426 | if (incoming.get(0)) { 427 | { 428 | org.apache.thrift.protocol.TList _list29 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); 429 | struct.data = new ArrayList(_list29.size); 430 | DataPoint _elem30; 431 | for (int _i31 = 0; _i31 < _list29.size; ++_i31) 432 | { 433 | _elem30 = new DataPoint(); 434 | _elem30.read(iprot); 435 | struct.data.add(_elem30); 436 | } 437 | } 438 | struct.setDataIsSet(true); 439 | } 440 | } 441 | } 442 | 443 | } 444 | 445 | -------------------------------------------------------------------------------- /beringeiclient/src/main/java/com/facebook/beringei/thriftclient/PutDataResult.java: -------------------------------------------------------------------------------- 1 | package com.facebook.beringei.thriftclient; 2 | 3 | /** 4 | 5 | * Autogenerated by Thrift Compiler (0.9.3) 6 | * 7 | * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING 8 | * @generated 9 | */ 10 | import org.apache.thrift.scheme.IScheme; 11 | import org.apache.thrift.scheme.SchemeFactory; 12 | import org.apache.thrift.scheme.StandardScheme; 13 | 14 | import org.apache.thrift.scheme.TupleScheme; 15 | import org.apache.thrift.protocol.TTupleProtocol; 16 | 17 | import java.util.List; 18 | import java.util.ArrayList; 19 | import java.util.Map; 20 | import java.util.HashMap; 21 | import java.util.EnumMap; 22 | 23 | import java.util.EnumSet; 24 | import java.util.Collections; 25 | import java.util.BitSet; 26 | 27 | import javax.annotation.Generated; 28 | 29 | @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"}) 30 | @Generated(value = "Autogenerated by Thrift Compiler (0.9.3)", date = "2017-04-22") 31 | public class PutDataResult implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { 32 | private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("PutDataResult"); 33 | 34 | private static final org.apache.thrift.protocol.TField DATA_FIELD_DESC = new org.apache.thrift.protocol.TField("data", org.apache.thrift.protocol.TType.LIST, (short)1); 35 | 36 | private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); 37 | static { 38 | schemes.put(StandardScheme.class, new PutDataResultStandardSchemeFactory()); 39 | schemes.put(TupleScheme.class, new PutDataResultTupleSchemeFactory()); 40 | } 41 | 42 | public List data; // required 43 | 44 | /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. 
*/ 45 | public enum _Fields implements org.apache.thrift.TFieldIdEnum { 46 | DATA((short)1, "data"); 47 | 48 | private static final Map byName = new HashMap(); 49 | 50 | static { 51 | for (_Fields field : EnumSet.allOf(_Fields.class)) { 52 | byName.put(field.getFieldName(), field); 53 | } 54 | } 55 | 56 | /** 57 | * Find the _Fields constant that matches fieldId, or null if its not found. 58 | */ 59 | public static _Fields findByThriftId(int fieldId) { 60 | switch(fieldId) { 61 | case 1: // DATA 62 | return DATA; 63 | default: 64 | return null; 65 | } 66 | } 67 | 68 | /** 69 | * Find the _Fields constant that matches fieldId, throwing an exception 70 | * if it is not found. 71 | */ 72 | public static _Fields findByThriftIdOrThrow(int fieldId) { 73 | _Fields fields = findByThriftId(fieldId); 74 | if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); 75 | return fields; 76 | } 77 | 78 | /** 79 | * Find the _Fields constant that matches name, or null if its not found. 80 | */ 81 | public static _Fields findByName(String name) { 82 | return byName.get(name); 83 | } 84 | 85 | private final short _thriftId; 86 | private final String _fieldName; 87 | 88 | _Fields(short thriftId, String fieldName) { 89 | _thriftId = thriftId; 90 | _fieldName = fieldName; 91 | } 92 | 93 | public short getThriftFieldId() { 94 | return _thriftId; 95 | } 96 | 97 | public String getFieldName() { 98 | return _fieldName; 99 | } 100 | } 101 | 102 | // isset id assignments 103 | public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; 104 | static { 105 | Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); 106 | tmpMap.put(_Fields.DATA, new org.apache.thrift.meta_data.FieldMetaData("data", org.apache.thrift.TFieldRequirementType.DEFAULT, 107 | new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST, 108 | new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, DataPoint.class)))); 109 | metaDataMap = Collections.unmodifiableMap(tmpMap); 110 | org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(PutDataResult.class, metaDataMap); 111 | } 112 | 113 | public PutDataResult() { 114 | } 115 | 116 | public PutDataResult( 117 | List data) 118 | { 119 | this(); 120 | this.data = data; 121 | } 122 | 123 | /** 124 | * Performs a deep copy on other. 125 | */ 126 | public PutDataResult(PutDataResult other) { 127 | if (other.isSetData()) { 128 | List __this__data = new ArrayList(other.data.size()); 129 | for (DataPoint other_element : other.data) { 130 | __this__data.add(new DataPoint(other_element)); 131 | } 132 | this.data = __this__data; 133 | } 134 | } 135 | 136 | public PutDataResult deepCopy() { 137 | return new PutDataResult(this); 138 | } 139 | 140 | @Override 141 | public void clear() { 142 | this.data = null; 143 | } 144 | 145 | public int getDataSize() { 146 | return (this.data == null) ? 0 : this.data.size(); 147 | } 148 | 149 | public java.util.Iterator getDataIterator() { 150 | return (this.data == null) ? 
null : this.data.iterator(); 151 | } 152 | 153 | public void addToData(DataPoint elem) { 154 | if (this.data == null) { 155 | this.data = new ArrayList(); 156 | } 157 | this.data.add(elem); 158 | } 159 | 160 | public List getData() { 161 | return this.data; 162 | } 163 | 164 | public PutDataResult setData(List data) { 165 | this.data = data; 166 | return this; 167 | } 168 | 169 | public void unsetData() { 170 | this.data = null; 171 | } 172 | 173 | /** Returns true if field data is set (has been assigned a value) and false otherwise */ 174 | public boolean isSetData() { 175 | return this.data != null; 176 | } 177 | 178 | public void setDataIsSet(boolean value) { 179 | if (!value) { 180 | this.data = null; 181 | } 182 | } 183 | 184 | public void setFieldValue(_Fields field, Object value) { 185 | switch (field) { 186 | case DATA: 187 | if (value == null) { 188 | unsetData(); 189 | } else { 190 | setData((List)value); 191 | } 192 | break; 193 | 194 | } 195 | } 196 | 197 | public Object getFieldValue(_Fields field) { 198 | switch (field) { 199 | case DATA: 200 | return getData(); 201 | 202 | } 203 | throw new IllegalStateException(); 204 | } 205 | 206 | /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ 207 | public boolean isSet(_Fields field) { 208 | if (field == null) { 209 | throw new IllegalArgumentException(); 210 | } 211 | 212 | switch (field) { 213 | case DATA: 214 | return isSetData(); 215 | } 216 | throw new IllegalStateException(); 217 | } 218 | 219 | @Override 220 | public boolean equals(Object that) { 221 | if (that == null) 222 | return false; 223 | if (that instanceof PutDataResult) 224 | return this.equals((PutDataResult)that); 225 | return false; 226 | } 227 | 228 | public boolean equals(PutDataResult that) { 229 | if (that == null) 230 | return false; 231 | 232 | boolean this_present_data = true && this.isSetData(); 233 | boolean that_present_data = true && that.isSetData(); 234 | if (this_present_data || that_present_data) { 235 | if (!(this_present_data && that_present_data)) 236 | return false; 237 | if (!this.data.equals(that.data)) 238 | return false; 239 | } 240 | 241 | return true; 242 | } 243 | 244 | @Override 245 | public int hashCode() { 246 | List list = new ArrayList(); 247 | 248 | boolean present_data = true && (isSetData()); 249 | list.add(present_data); 250 | if (present_data) 251 | list.add(data); 252 | 253 | return list.hashCode(); 254 | } 255 | 256 | @Override 257 | public int compareTo(PutDataResult other) { 258 | if (!getClass().equals(other.getClass())) { 259 | return getClass().getName().compareTo(other.getClass().getName()); 260 | } 261 | 262 | int lastComparison = 0; 263 | 264 | lastComparison = Boolean.valueOf(isSetData()).compareTo(other.isSetData()); 265 | if (lastComparison != 0) { 266 | return lastComparison; 267 | } 268 | if (isSetData()) { 269 | lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.data, other.data); 270 | if (lastComparison != 0) { 271 | return lastComparison; 272 | } 273 | } 274 | return 0; 275 | } 276 | 277 | public _Fields fieldForId(int fieldId) { 278 | return _Fields.findByThriftId(fieldId); 279 | } 280 | 281 | public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { 282 | schemes.get(iprot.getScheme()).getScheme().read(iprot, this); 283 | } 284 | 285 | public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { 286 | schemes.get(oprot.getScheme()).getScheme().write(oprot, 
this); 287 | } 288 | 289 | @Override 290 | public String toString() { 291 | StringBuilder sb = new StringBuilder("PutDataResult("); 292 | sb.append("data:"); 293 | if (this.data == null) { 294 | sb.append("null"); 295 | } else { 296 | sb.append(this.data); 297 | } 298 | 299 | sb.append(")"); 300 | return sb.toString(); 301 | } 302 | 303 | public void validate() throws org.apache.thrift.TException { 304 | // check for required fields 305 | // check for sub-struct validity 306 | } 307 | 308 | private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { 309 | try { 310 | write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); 311 | } catch (org.apache.thrift.TException te) { 312 | throw new java.io.IOException(te); 313 | } 314 | } 315 | 316 | private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { 317 | try { 318 | read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); 319 | } catch (org.apache.thrift.TException te) { 320 | throw new java.io.IOException(te); 321 | } 322 | } 323 | 324 | private static class PutDataResultStandardSchemeFactory implements SchemeFactory { 325 | public PutDataResultStandardScheme getScheme() { 326 | return new PutDataResultStandardScheme(); 327 | } 328 | } 329 | 330 | private static class PutDataResultStandardScheme extends StandardScheme { 331 | 332 | public void read(org.apache.thrift.protocol.TProtocol iprot, PutDataResult struct) throws org.apache.thrift.TException { 333 | org.apache.thrift.protocol.TField schemeField; 334 | iprot.readStructBegin(); 335 | while (true) 336 | { 337 | schemeField = iprot.readFieldBegin(); 338 | if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 339 | break; 340 | } 341 | switch (schemeField.id) { 342 | case 1: // DATA 343 | if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { 344 | { 345 | org.apache.thrift.protocol.TList _list32 = iprot.readListBegin(); 346 | struct.data = new ArrayList(_list32.size); 347 | DataPoint _elem33; 348 | for (int _i34 = 0; _i34 < _list32.size; ++_i34) 349 | { 350 | _elem33 = new DataPoint(); 351 | _elem33.read(iprot); 352 | struct.data.add(_elem33); 353 | } 354 | iprot.readListEnd(); 355 | } 356 | struct.setDataIsSet(true); 357 | } else { 358 | org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); 359 | } 360 | break; 361 | default: 362 | org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); 363 | } 364 | iprot.readFieldEnd(); 365 | } 366 | iprot.readStructEnd(); 367 | 368 | // check for required fields of primitive type, which can't be checked in the validate method 369 | struct.validate(); 370 | } 371 | 372 | public void write(org.apache.thrift.protocol.TProtocol oprot, PutDataResult struct) throws org.apache.thrift.TException { 373 | struct.validate(); 374 | 375 | oprot.writeStructBegin(STRUCT_DESC); 376 | if (struct.data != null) { 377 | oprot.writeFieldBegin(DATA_FIELD_DESC); 378 | { 379 | oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.data.size())); 380 | for (DataPoint _iter35 : struct.data) 381 | { 382 | _iter35.write(oprot); 383 | } 384 | oprot.writeListEnd(); 385 | } 386 | oprot.writeFieldEnd(); 387 | } 388 | oprot.writeFieldStop(); 389 | oprot.writeStructEnd(); 390 | } 391 | 392 | } 393 | 394 | private static class PutDataResultTupleSchemeFactory implements SchemeFactory { 395 | public 
PutDataResultTupleScheme getScheme() {
      return new PutDataResultTupleScheme();
    }
  }

  private static class PutDataResultTupleScheme extends TupleScheme<PutDataResult> {

    @Override
    public void write(org.apache.thrift.protocol.TProtocol prot, PutDataResult struct) throws org.apache.thrift.TException {
      TTupleProtocol oprot = (TTupleProtocol) prot;
      BitSet optionals = new BitSet();
      if (struct.isSetData()) {
        optionals.set(0);
      }
      oprot.writeBitSet(optionals, 1);
      if (struct.isSetData()) {
        {
          oprot.writeI32(struct.data.size());
          for (DataPoint _iter36 : struct.data)
          {
            _iter36.write(oprot);
          }
        }
      }
    }

    @Override
    public void read(org.apache.thrift.protocol.TProtocol prot, PutDataResult struct) throws org.apache.thrift.TException {
      TTupleProtocol iprot = (TTupleProtocol) prot;
      BitSet incoming = iprot.readBitSet(1);
      if (incoming.get(0)) {
        {
          org.apache.thrift.protocol.TList _list37 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
          struct.data = new ArrayList<DataPoint>(_list37.size);
          DataPoint _elem38;
          for (int _i39 = 0; _i39 < _list37.size; ++_i39)
          {
            _elem38 = new DataPoint();
            _elem38.read(iprot);
            struct.data.add(_elem38);
          }
        }
        struct.setDataIsSet(true);
      }
    }
  }

}

-------------------------------------------------------------------------------- /beringeiclient/src/main/java/com/facebook/beringei/thriftclient/StatusCode.java: --------------------------------------------------------------------------------
package com.facebook.beringei.thriftclient;

/**
 * Autogenerated by Thrift Compiler (0.9.3)
 *
 * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
 * @generated
 */

public enum StatusCode implements org.apache.thrift.TEnum {
  OK(0),
  DONT_OWN_SHARD(1),
  KEY_MISSING(2),
  RPC_FAIL(3),
  SHARD_IN_PROGRESS(4),
  BUCKET_NOT_FINALIZED(5),
  ZIPPY_STORAGE_FAIL(6),
  MISSING_TOO_MUCH_DATA(7);

  private final int value;

  private StatusCode(int value) {
    this.value = value;
  }

  /**
   * Get the integer value of this enum value, as defined in the Thrift IDL.
   */
  public int getValue() {
    return value;
  }

  /**
   * Find the enum type by its integer value, as defined in the Thrift IDL.
   * @return null if the value is not found.
   */
  public static StatusCode findByValue(int value) {
    switch (value) {
      case 0:
        return OK;
      case 1:
        return DONT_OWN_SHARD;
      case 2:
        return KEY_MISSING;
      case 3:
        return RPC_FAIL;
      case 4:
        return SHARD_IN_PROGRESS;
      case 5:
        return BUCKET_NOT_FINALIZED;
      case 6:
        return ZIPPY_STORAGE_FAIL;
      case 7:
        return MISSING_TOO_MUCH_DATA;
      default:
        return null;
    }
  }
}
-------------------------------------------------------------------------------- /beringeiclient/src/main/java/org/wowcoders/beringei/compression/BitStream.java: --------------------------------------------------------------------------------
package org.wowcoders.beringei.compression;

/**
 *
 * @author vmukumar
 *
 */
public interface BitStream {
  /**
   * Reads a single bit from the stream.
   * @return the next bit
   */
  public boolean getBit();

  /**
   * Reads the given number of bits and returns them as a long.
   * @param bits number of bits to read
   * @return the bits read, right-aligned in a long
   */
  public long getBits(int bits);
}
-------------------------------------------------------------------------------- /beringeiclient/src/main/java/org/wowcoders/beringei/compression/BlockDecoder.java: --------------------------------------------------------------------------------
package org.wowcoders.beringei.compression;

import java.nio.ByteBuffer;
import java.util.ArrayList;
import java.util.List;

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import com.facebook.beringei.thriftclient.DataPoint;
import com.facebook.beringei.thriftclient.Key;
import com.facebook.beringei.thriftclient.TimeSeriesBlock;
import com.facebook.beringei.thriftclient.TimeValuePair;

/**
 *
 * @author vmukumar
 *
 */

/*Reference: https://raw.githubusercontent.com/facebookincubator/beringei/f1023333b6189e766b4ff7dbfa7d684176fa7083/beringei/lib/TimeSeriesStream.cpp*/

public class BlockDecoder {
  final static Logger logger = LoggerFactory.getLogger(BlockDecoder.class);


  /*TODO 1. evaluate performance between ByteBuffer and LongBuffer
   * 2. optimize the bit reading.
   */
  private static boolean useLongBuffer = false;

  private static final byte kBitsForFirstTimestamp = 31;
  private static final byte kLeadingZerosLengthBits = 5;
  private static final byte kBlockSizeLengthBits = 6;

  private static class TimestampEncodings {
    int bitsForValue;
    @SuppressWarnings("unused")
    int controlValue;
    @SuppressWarnings("unused")
    int controlValueBitLength;

    public TimestampEncodings(int bitsForValue, int controlValue, int controlValueBitLength) {
      this.bitsForValue = bitsForValue;
      this.controlValue = controlValue;
      this.controlValueBitLength = controlValueBitLength;
    }
  }

  @SuppressWarnings("serial")
  private static List<TimestampEncodings> timestampEncodings = new ArrayList<TimestampEncodings>() {{
    add(new TimestampEncodings(7, 2, 2));
    add(new TimestampEncodings(9, 6, 3));
    add(new TimestampEncodings(12, 14, 4));
    add(new TimestampEncodings(32, 15, 4));
  }};

  private final Key key;
  private final BitStream bs;

  private int dataPoints = 0;

  private long prevTimestamp = 0;
  private long prevValue = 0;

  private long defaultDelta = 60;
  private long prevDelta = defaultDelta;

  private long previousTrailingZeros = 0;
  private long previousLeadingZeros = 0;

  /**
   *
   * @param key Beringei key object (contains the key name and shard id)
   * @param tsb time series block for the given key
   */
  public BlockDecoder(Key key, TimeSeriesBlock tsb) {
    dataPoints = tsb.getCount();

    ByteBuffer buffer = tsb.bufferForData();

    if (useLongBuffer == false) {
      this.bs = new ByteBufferBitReader(buffer);
    } else {
      int total = buffer.remaining();
      int remaining = total % 8;

      if (remaining != 0) {
        // The block length is not a multiple of 8 bytes: copy the trailing
        // partial bytes into a zero-padded long so the LongBuffer reader can
        // consume them as one final word.
        byte []remainingBytes = new byte[8];
        int addBytes = remaining;
        buffer.position(total - remaining);
        for (int i = 0; i < addBytes; i++) {
          remainingBytes[i] = buffer.get();
        }
        buffer.position(0);
        ByteBuffer bb = ByteBuffer.wrap(remainingBytes);
        this.bs = new LongBufferBitReader(buffer.asLongBuffer(), bb.getLong());
      } else {
        this.bs = new LongBufferBitReader(buffer.asLongBuffer());
      }
    }
    this.key = key;
  }

  /**
   *
   * @return the decoded time series (one DataPoint per stored sample) from the block
   */
  public List<DataPoint> readDps() {
    List<DataPoint> dps = new ArrayList<DataPoint>();
    for (int i = 0; i < dataPoints; i++) {
      long ts = readTimeStamp();
      double val = readValue();
      TimeValuePair tvp = new TimeValuePair();
      tvp.setUnixTime(ts);
      tvp.setValue(val);

      DataPoint dp = new DataPoint();
      dp.setKey(key);
      dp.setValue(tvp);

      // System.out.println("**"+key.getKey()+" "+ ts+" "+val);

      dps.add(dp);
    }
    logger.info("number of points: " + dps.size());
    logger.debug("points: " + dps);
    return dps;
  }
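  /*
   * How the two readers below consume the stream (inferred from this class and
   * the TimeSeriesStream.cpp reference linked above; the concrete bit widths
   * come from the timestampEncodings table):
   *
   *   - The first timestamp is stored as a 31-bit absolute value.
   *   - Every following timestamp stores a delta-of-delta, selected by a unary
   *     control code: "0" -> reuse prevDelta unchanged, "10" -> 7-bit value,
   *     "110" -> 9-bit value, "1110" -> 12-bit value, "1111" -> 32-bit value.
   *
   * Worked example, assuming prevDelta = 60 (the default) and prevTimestamp =
   * 1000: the control bits "10" select the 7-bit encoding; a raw 7-bit value
   * of 65 becomes 65 - (1 << 6) = 1, and since 1 >= 0 it is incremented to 2.
   * So prevDelta becomes 62 and the decoded timestamp is 1000 + 62 = 1062.
   */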
  /**
   *
   * @return timestamp in seconds
   */
  private long readTimeStamp() {
    if (prevTimestamp == 0) {
      // First point in the block: an absolute 31-bit timestamp.
      prevTimestamp = bs.getBits(kBitsForFirstTimestamp);
    } else {
      // Count leading one-bits (at most 4) to pick the delta-of-delta encoding.
      int bits = 0;
      int limit = 4;

      while (bits < limit) {
        int bit = (int)bs.getBits(1);
        if (bit == 0) {
          break;
        }

        bits++;
      }

      int type = bits;
      if (type > 0) {
        int index = type - 1;
        TimestampEncodings te = timestampEncodings.get(index);
        long decodedValue = bs.getBits(te.bitsForValue);
        decodedValue -= (1l << (te.bitsForValue - 1));
        if (decodedValue >= 0) {
          // Values >= 0 are shifted up by one; a zero delta-of-delta is
          // encoded with the single control bit instead.
          decodedValue++;
        }
        prevDelta += decodedValue;
      }
      prevTimestamp += prevDelta;
    }

    return prevTimestamp;
  }

  /**
   *
   * @return the decoded value as a double
   */
  private double readValue() {
    // One control bit: 0 means the value repeats the previous one (XOR of 0).
    long nonzero = bs.getBits(1);
    if (nonzero != 0) {
      // Second control bit: 1 reuses the previous leading/trailing-zero
      // window for the XORed value, 0 reads a new window description.
      long usePreviousBlockInformation = bs.getBits(1);
      long xorValue = -1;

      if (usePreviousBlockInformation != 0) {
        xorValue = bs.getBits((int)(64 - previousLeadingZeros - previousTrailingZeros));
        xorValue <<= previousTrailingZeros;
      } else {
        long leadingZeros = bs.getBits(kLeadingZerosLengthBits);
        // System.out.println("leadingZeros" + leadingZeros);
        int blockSize = (int) bs.getBits(kBlockSizeLengthBits) + 1;
        // System.out.println("blockSize" + blockSize);

        previousTrailingZeros = 64 - blockSize - leadingZeros;
        // System.out.println("previousTrailingZeros" + previousTrailingZeros);
        xorValue = bs.getBits(blockSize);
        xorValue <<= previousTrailingZeros;

        previousLeadingZeros = leadingZeros;
      }
      prevValue = xorValue ^ prevValue;
    }
    return Double.longBitsToDouble(prevValue);
  }
}
-------------------------------------------------------------------------------- /beringeiclient/src/main/java/org/wowcoders/beringei/compression/ByteBufferBitReader.java: --------------------------------------------------------------------------------
package org.wowcoders.beringei.compression;

import java.nio.ByteBuffer;

/**
 *
 * @author vmukumar
 *
 */
public class ByteBufferBitReader implements BitStream {
  public static int []masksRead = {
      128, 64, 32, 16, 8, 4, 2, 1
  };

  public static long []masksSet = new long[64];

  static {
    for (int i = 0; i < 64; i++) {
      long mask = 1L << i;
      masksSet[i] = mask;
    }
  }


  private final ByteBuffer buf;

  private final long numOfBits;
  private int pos;

  private int lastReadIdx = -1;
  private byte readByte = 0;

  /**
   *
   * @param buf byte buffer holding the compressed block
   */
  public ByteBufferBitReader(ByteBuffer buf) {
    this.buf = buf;

    int totalBytes = buf.remaining();
    this.numOfBits = totalBytes * 8;

    this.pos = 0;
  }

  /**
   *
   * @return the next bit in the stream
   */
  public boolean getBit() {
    if (pos >= numOfBits)
      throw new IllegalStateException();

    int i = pos / 8;
    int a = pos % 8;
    ++pos;

    if (lastReadIdx != i) {
      readByte = buf.get(i);
      lastReadIdx = i;
    }

    return ((readByte & masksRead[a]) != 0);
  }

  /**
   *
   * @param bits number of bits to read (at most 64)
   * @return the bits read, right-aligned in a long
   */
  public long getBits(int bits) {
    if (bits > 64)
      throw new IllegalArgumentException();
    if (bits + pos > numOfBits)
      throw new IllegalArgumentException();
    if (bits == 0)
      return 0;

    long r = 0;
    for (int i = 0; i < bits; ++i) {
      if (getBit()) {
        //r |= 1L << (bits - i - 1);
        r |= masksSet[bits - i - 1];
      }
    }
    return r;
  }
}
-------------------------------------------------------------------------------- /beringeiclient/src/main/java/org/wowcoders/beringei/compression/LongBufferBitReader.java: --------------------------------------------------------------------------------
package org.wowcoders.beringei.compression;

import java.nio.LongBuffer;

/**
 *
 * @author vmukumar
 *
 */
public class LongBufferBitReader implements BitStream {
  public static long []masksRead = new long[64];
  public static long []masksSet = new long[64];

  static {
    for (int i = 0; i < 64; i++) {
      long mask = 1L << i;
      masksRead[63-i] = mask;
      masksSet[i] = mask;
    }
  }

  private final LongBuffer buf;

  private final long numOfBits;
  private int pos;

  private int totalInBuf = -1;
  private int lastReadIdx = -1;
  private long readLong = 0;
  private long readLast = 0;

  /**
   *
   * @param buf long buffer holding the compressed block
   */
  public LongBufferBitReader(LongBuffer buf) {
    this.buf = buf;
    totalInBuf = buf.remaining();
    this.numOfBits = (totalInBuf * 64);

    this.pos = 0;
  }

  /**
   *
   * @param buf long buffer holding the compressed block
   * @param remaining the trailing partial bytes, zero-padded into one final long
   */

  public LongBufferBitReader(LongBuffer buf, long remaining) {
    this.buf = buf;
    totalInBuf = buf.remaining();
    this.numOfBits = ((totalInBuf + 1) * 64);
    this.pos = 0;

    readLast = remaining;
  }

  /**
   *
   * @return the next bit in the stream
   */
  public boolean getBit() {
    if (pos >= numOfBits)
      throw new IllegalStateException();

    int i = pos / 64;
    int a = pos % 64;
    ++pos;

    if (lastReadIdx != i) {
      if (i >= totalInBuf) {
        readLong = readLast;
      } else {
        readLong = buf.get(i);
      }
      lastReadIdx = i;
    }

    //System.out.println(readLong);

    return ((readLong & masksRead[a]) != 0);
  }

  /**
   *
   * @param bits number of bits to read (at most 64)
   * @return the bits read, right-aligned in a long
   */
  public long getBits(int bits) {

    if (bits > 64)
      throw new IllegalArgumentException();

    if ((bits + pos) > numOfBits)
      throw new IllegalArgumentException();
    if (bits == 0)
      return 0;

    long r = 0;
    for (int i = 0; i < bits; ++i) {
      if (getBit()) {
        //r |= 1L << (bits - i - 1);
        r |= masksSet[bits - i - 1];
      }
    }
    return r;
  }
}
-------------------------------------------------------------------------------- /beringeiclient/src/main/java/org/wowcoders/beringeiclient/BeringeiClient.java: --------------------------------------------------------------------------------
package org.wowcoders.beringeiclient;

import java.io.IOException;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.Timer;
import java.util.TimerTask;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.wowcoders.beringei.compression.BlockDecoder;
import org.wowcoders.beringeiclient.configurations.Configuration;
import org.wowcoders.beringeiclient.utils.Pair;

import com.facebook.beringei.thriftclient.DataPoint;
import com.facebook.beringei.thriftclient.GetDataRequest;
import com.facebook.beringei.thriftclient.GetDataResult;
import com.facebook.beringei.thriftclient.Key;
import com.facebook.beringei.thriftclient.PutDataRequest;
import com.facebook.beringei.thriftclient.PutDataResult;
import com.facebook.beringei.thriftclient.StatusCode;
import com.facebook.beringei.thriftclient.TimeSeriesBlock;
import com.facebook.beringei.thriftclient.TimeSeriesData;
import com.facebook.beringei.thriftclient.TimeValuePair;
import com.facebook.beringei.thriftclient.BeringeiService;
import com.facebook.beringei.thriftclient.BeringeiService.AsyncClient;
import com.facebook.beringei.thriftclient.BeringeiService.AsyncClient.Factory;

import org.apache.thrift.TException;
import org.apache.thrift.async.AsyncMethodCallback;
import org.apache.thrift.async.TAsyncClientManager;
import org.apache.thrift.protocol.TBinaryProtocol;
import org.apache.thrift.protocol.TProtocol;
import org.apache.thrift.protocol.TProtocolFactory;
import org.apache.thrift.transport.TNonblockingSocket;
import org.apache.thrift.transport.TTransport;

import cn.danielw.fop.ObjectFactory;
import cn.danielw.fop.ObjectPool;
import cn.danielw.fop.PoolConfig;
import cn.danielw.fop.Poolable;


public class BeringeiClient {
  final static Logger logger = LoggerFactory.getLogger(BeringeiClient.class);

  static int shardCount = 100;

  static int flushAfterCount = 1000;
  static int flushInterval = 10000;

  static int executerReaderThreads = 20;
  static int executerWriterThreads = 10;

  static ExecutorService executorReader = Executors.newFixedThreadPool(executerReaderThreads);
  static ExecutorService executorWriter = Executors.newFixedThreadPool(executerWriterThreads);

  Map<Long, List<ObjectPool<AsyncClient>>> clusterByShards = new ConcurrentHashMap<Long, List<ObjectPool<AsyncClient>>>();
  Map<Long, List<DataPoint>> batchByShards = new ConcurrentHashMap<Long, List<DataPoint>>();

  static TProtocolFactory proto_fac = new TProtocolFactory() {
    private static final long serialVersionUID = 1L;

    @Override
    public TProtocol getProtocol(TTransport trans) {
      return new TBinaryProtocol(trans);
    }
  };

  private ObjectFactory<AsyncClient> createObjectFactory(Pair<String, Integer> addr) {
    ObjectFactory<AsyncClient> factory = new ObjectFactory<AsyncClient>() {
      TAsyncClientManager asm = null;
      TNonblockingSocket tnbs = null;
      @Override public AsyncClient create() {
        Configuration cfg = Configuration.getInstnace();
        try {
          asm = new TAsyncClientManager();
        } catch (IOException e) {
          e.printStackTrace();
        }

        Factory fac = new AsyncClient.Factory(asm, proto_fac);
        try {
          tnbs = new TNonblockingSocket(addr.first, addr.second);
        } catch (IOException e) {
          e.printStackTrace();
        }
        tnbs.setTimeout((int)(cfg.getClientConfig().getConnectTimeout()
            + cfg.getClientConfig().getWriteTimeout()
            + cfg.getClientConfig().getReadTimeout()));

        return fac.getAsyncClient(tnbs);
      }
      @Override public void destroy(AsyncClient o) {
        try {
          asm.stop();
          tnbs.close();
        } catch(Exception e) {

        } finally {
          asm = null;
          tnbs = null;
        }
      }
      @Override public boolean validate(AsyncClient o) {
        return asm != null && tnbs != null;
      }
    };

    return factory;
  }
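  /*
   * Pooling note: each Beringei host gets its own fixed-size pool of async
   * Thrift clients (cn.danielw.fop). Every caller below follows the same
   * lifecycle -- borrow a client, issue the async call, and return the client
   * once the response callback completes the future (sketch only):
   *
   *   Poolable<AsyncClient> obj = pool.borrowObject(true);
   *   obj.getObject().getData(req, callback);
   *   // later, from the callback: pool.returnObject(obj);
   *
   * Because the transport is created lazily in create(), a borrow can still
   * yield a broken client; validate() above only checks the manager and
   * socket handles for null.
   */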
  @SuppressWarnings("serial")
  public BeringeiClient() throws IOException {
    Configuration cfg = Configuration.getInstnace();

    shardCount = cfg.getClientConfig().getShardCounts();

    flushAfterCount = cfg.getClientConfig().getFlushAfterCount();
    flushInterval = cfg.getClientConfig().getFlushInterval();

    //TODO need review around how we create pools for async vs threads
    executerReaderThreads = cfg.getClientConfig().getReadThreads();
    executerWriterThreads = cfg.getClientConfig().getWriteThreads();

    PoolConfig config = new PoolConfig();
    config.setPartitionSize(5);
    config.setMaxSize(executerReaderThreads + executerWriterThreads);
    config.setMinSize(cfg.getClientConfig().getIdleConnectionsPerShards());
    config.setMaxIdleMilliseconds((int)cfg.getClientConfig().getTimeoutToCloseIdleConnection());

    //TODO 1. Multidatacenter support,
    //2. refresh the map if it is configured using admin ui.
    //3. event endpoints as backup/can all beringei nodes support read of a key with any shardid?

    String [] datacenters = cfg.getClientConfig().getDatacenters();
    String datacenter = datacenters[0];
    HashSet<Pair<String, Integer>> hostSets = cfg.getClientConfig().getDatacenterHostListMap().get(datacenter);

    int hostCount = hostSets.size();
    int shardsPerHost = shardCount / hostCount;

    Iterator<Pair<String, Integer>> it = hostSets.iterator();
    int startShard = 0;
    // System.out.println(hostCount + ":" + shardCount + ":" + shardsPerHost);
    while (it.hasNext()) {
      Pair<String, Integer> hostAddr = it.next();
      // System.out.println("hostaddr"+hostAddr.first);
      ObjectPool<AsyncClient> cliPool = new ObjectPool<AsyncClient>(config, createObjectFactory(hostAddr));
      // Each host (re)writes every shard from its start offset onward, so a
      // later host overwrites the earlier entries; the final owner of a shard
      // is the host whose range covers it, and the last host also picks up
      // any remainder shards.
      for (long i = startShard; i < shardCount; i++) {
        clusterByShards.put(i, new ArrayList<ObjectPool<AsyncClient>>() {{
          add(cliPool);
        }});
        batchByShards.put(i, new ArrayList<DataPoint>());
        // System.out.println("added shard" + i);
      }
      startShard += shardsPerHost;
    }

    Timer timer = new Timer();
    timer.scheduleAtFixedRate(new TimerTask() {
      @Override
      public void run() {
        flush();
      }
    }, flushInterval, flushInterval);
  }

  public void flush() {
    for (long shardId = 0; shardId < shardCount; shardId++) {
      List<DataPoint> dpsPrev = batchByShards.get(shardId);
      batchByShards.put(shardId, new ArrayList<DataPoint>());

      if (dpsPrev.size() != 0) {
        CompletableFuture<List<DataPoint>> droppedCF = putDataPoints(dpsPrev);
        droppedCF.thenAccept(dropped -> {
          if (dropped != null) {
            if (dropped.size() != 0) {
              logger.error("Dropped points: " + dropped);
            } else {
              logger.debug("successfully sent data points to beringei server.");
            }
          } else {
            logger.error("failed to send data points to beringei server.");
          }
        }).exceptionally(tw -> {
          return null;
        });
      }
    }
  }

  public int getShardCount() {
    return shardCount;
  }

  public void pushTS(String hash, long shardId, long unixTime, double value) {
    Key key = new Key();
    key.key = hash;
    key.shardId = shardId;

    TimeValuePair tvp = new TimeValuePair();
    tvp.setUnixTime(unixTime);
    tvp.setValue(value);

    DataPoint dp = new DataPoint();
    dp.setKey(key);
    dp.setValue(tvp);
    dp.setCategoryId(0); // TODO how are we going to categorize; can it serve the purpose of CNT, SUM, ...

    List<DataPoint> dps = batchByShards.get(shardId);

    dps.add(dp);

    if (dps.size() == flushAfterCount) {
      flush();
    }
  }
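  /*
   * Typical write path (sketch, using the methods above; the key hash and
   * shard id are the sample values used by BeringeiTestClient further below):
   * points accumulate per shard and are flushed either when a shard batch
   * reaches flushAfterCount or by the periodic timer.
   *
   *   BeringeiClient client = new BeringeiClient();
   *   long now = System.currentTimeMillis() / 1000;
   *   client.pushTS("af2e37e6a2792452dea2b69024296faa0", 2, now, 10.0);
   *   client.flush(); // optional: force the batch out immediately
   */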
  public CompletableFuture<List<DataPoint>> putDataPoints(List<DataPoint> dps) {
    CompletableFuture<List<DataPoint>> completableFuture = new CompletableFuture<List<DataPoint>>();
    executorWriter.submit(() -> {
      PutDataRequest req = new PutDataRequest();
      req.setData(dps);

      long shardId = dps.get(0).getKey().getShardId();

      List<ObjectPool<AsyncClient>> cliList = clusterByShards.get(shardId);

      ObjectPool<AsyncClient> pool = cliList.get(0);
      Poolable<AsyncClient> obj = pool.borrowObject(true);
      AsyncClient client = obj.getObject();

      CompletableFuture<List<DataPoint>> _completableFuture = new CompletableFuture<List<DataPoint>>();
      PutDataPointsRequestResponseHandler pdprrh = new PutDataPointsRequestResponseHandler(req, _completableFuture);
      _completableFuture.thenAccept(rdps -> {
        completableFuture.complete(rdps);
        pool.returnObject(obj);
      });
      pdprrh.on(client);
    });
    return completableFuture;
  }

  class PutDataPointsRequestResponseHandler {
    PutDataRequest req;
    CompletableFuture<List<DataPoint>> cf;
    PutDataPointsRequestResponseHandler(PutDataRequest req, CompletableFuture<List<DataPoint>> cf) {
      this.req = req;
      this.cf = cf;
    }

    public void on(AsyncClient cli) {
      try {
        ((BeringeiService.AsyncClient)cli).putDataPoints(req, new AsyncMethodCallback<PutDataResult>() {
          @Override
          public void onError(Exception e) {
            e.printStackTrace();
            //taskDone();
          }

          @Override
          public void onComplete(PutDataResult result) {
            cf.complete(result.getData());
            //taskDone();
          }
        });
      } catch (Exception e) {
        e.printStackTrace();
      }
    }
  }

  private CompletableFuture<List<TimeSeriesData>> getData(AsyncClient client, long start, long end, List<Key> keys) {
    CompletableFuture<List<TimeSeriesData>> completableFuture = new CompletableFuture<List<TimeSeriesData>>();

    GetDataRequest req = new GetDataRequest();

    req.setBegin(start);
    req.setEnd(end);
    req.setKeys(keys);

    GetDataPointsRequestResponseHandler gdprrh = new GetDataPointsRequestResponseHandler(req, completableFuture);
    gdprrh.on(client);
    // client.req(new GetDataPointsRequestResponseHandler(req, completableFuture));

    return completableFuture;
  }

  class GetDataPointsRequestResponseHandler {
    GetDataRequest req;
    CompletableFuture<List<TimeSeriesData>> cf;
    GetDataPointsRequestResponseHandler(GetDataRequest req, CompletableFuture<List<TimeSeriesData>> cf) {
      this.req = req;
      this.cf = cf;
    }

    public void on(AsyncClient cli) {
      try {
        ((BeringeiService.AsyncClient)cli).getData(req, new AsyncMethodCallback<GetDataResult>() {
          @Override
          public void onError(Exception e) {
            e.printStackTrace();
            //taskDone();
            cf.completeExceptionally(e);
          }

          @Override
          public void onComplete(GetDataResult result) {
            cf.complete(result.getResults());
          }
        });
      } catch (TException e) {
        e.printStackTrace();
      }
    }
  }
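  /*
   * Both handler classes above adapt Thrift's callback-style
   * AsyncMethodCallback to CompletableFuture, so callers can compose results
   * instead of nesting callbacks. For example (sketch; mirrors how flush()
   * and BeringeiTestClient consume these futures):
   *
   *   client.putDataPoints(batch)
   *         .thenAccept(dropped -> logger.info("dropped: " + dropped.size()));
   *
   * Note the asymmetry: the get handler completes its future exceptionally in
   * onError, while the put handler currently only logs the error, leaving its
   * future incomplete.
   */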
  public CompletableFuture<Map<Key, List<DataPoint>>> getDataPointsByKey(long start, long end, List<Key> keys) {
    CompletableFuture<Map<Key, List<DataPoint>>> completableFuture = new CompletableFuture<Map<Key, List<DataPoint>>>();
    long shardId = keys.get(0).getShardId();

    List<ObjectPool<AsyncClient>> cliList = clusterByShards.get(shardId);
    ObjectPool<AsyncClient> pool = cliList.get(0);

    Poolable<AsyncClient> obj = pool.borrowObject(true);
    AsyncClient client = obj.getObject();

    CompletableFuture<List<TimeSeriesData>> cf = getData(client, start, end, keys);
    cf.thenAccept(lts -> {
      pool.returnObject(obj);
      executorReader.submit(() -> {
        if (lts == null) {
          // TODO the future is never completed on a null result.
        } else {

          Map<Key, List<DataPoint>> map = new HashMap<Key, List<DataPoint>>();

          int idx = 0;
          for (TimeSeriesData ts: lts) {
            Key key = keys.get(idx);
            if (ts.status == StatusCode.OK) {
              List<TimeSeriesBlock> ltsb = ts.getData();
              List<DataPoint> dps = new ArrayList<DataPoint>();
              for (TimeSeriesBlock tsb: ltsb) {
                BlockDecoder bd = new BlockDecoder(key, tsb);
                List<DataPoint> _dps = bd.readDps();
                dps.addAll(_dps);
              }
              map.put(key, dps);
            }
            idx++;
          }
          completableFuture.complete(map);
        }

      });
    }).exceptionally(tw -> {
      tw.printStackTrace();
      completableFuture.completeExceptionally(tw);
      return null;
    });

    return completableFuture;
  }

  public CompletableFuture<List<DataPoint>> getDataPoints(long start, long end, List<Key> keys) {
    CompletableFuture<List<DataPoint>> completableFuture = new CompletableFuture<List<DataPoint>>();

    long shardId = keys.get(0).getShardId();

    List<ObjectPool<AsyncClient>> cliList = clusterByShards.get(shardId);
    ObjectPool<AsyncClient> pool = cliList.get(0);

    Poolable<AsyncClient> obj = cliList.get(0).borrowObject(true);
    AsyncClient client = obj.getObject();
    CompletableFuture<List<TimeSeriesData>> cf = getData(client, start, end, keys);
    cf.thenAccept(lts -> {
      pool.returnObject(obj);
      if (lts == null) {
        // TODO the future is never completed on a null result.
      } else {
        executorReader.submit(() -> {
          List<DataPoint> dpsall = new ArrayList<DataPoint>();

          int idx = 0;
          for (TimeSeriesData ts: lts) {
            Key key = keys.get(idx);
            if (ts.status == StatusCode.OK) {
              List<TimeSeriesBlock> ltsb = ts.getData();
              List<DataPoint> dps = new ArrayList<DataPoint>();
              for (TimeSeriesBlock tsb: ltsb) {
                BlockDecoder bd = new BlockDecoder(key, tsb);
                List<DataPoint> _dps = bd.readDps();
                dps.addAll(_dps);
              }
              dpsall.addAll(dps);
            }
            idx++;
          }
          completableFuture.complete(dpsall);
        });
      }
    }).exceptionally(tw -> {
      completableFuture.completeExceptionally(tw);
      return null;
    });
    return completableFuture;
  }
}
-------------------------------------------------------------------------------- /beringeiclient/src/main/java/org/wowcoders/beringeiclient/BeringeiTestClient.java: --------------------------------------------------------------------------------
package org.wowcoders.beringeiclient;

import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.CountDownLatch;

import org.apache.commons.cli.CommandLine;
import org.apache.commons.cli.CommandLineParser;
import org.apache.commons.cli.HelpFormatter;
import org.apache.commons.cli.Option;
import org.apache.commons.cli.Options;
import org.apache.commons.cli.PosixParser;
import org.wowcoders.beringeiclient.configurations.Configuration;

import com.facebook.beringei.thriftclient.DataPoint;
import com.facebook.beringei.thriftclient.Key;
import com.facebook.beringei.thriftclient.TimeValuePair;

public class BeringeiTestClient {
  static String keyT = "af2e37e6a2792452dea2b69024296faa0";
  static long sec = System.currentTimeMillis() / 1000;

  static final int points = 1;

  private static final CountDownLatch latch = new CountDownLatch(points);

  static BeringeiClient client = null;

--------------------------------------------------------------------------------
/beringeiclient/src/main/java/org/wowcoders/beringeiclient/BeringeiTestClient.java:
--------------------------------------------------------------------------------
package org.wowcoders.beringeiclient;

import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.CountDownLatch;

import org.apache.commons.cli.CommandLine;
import org.apache.commons.cli.CommandLineParser;
import org.apache.commons.cli.HelpFormatter;
import org.apache.commons.cli.Option;
import org.apache.commons.cli.Options;
import org.apache.commons.cli.PosixParser;
import org.wowcoders.beringeiclient.configurations.Configuration;

import com.facebook.beringei.thriftclient.DataPoint;
import com.facebook.beringei.thriftclient.Key;
import com.facebook.beringei.thriftclient.TimeValuePair;

public class BeringeiTestClient {
    static String keyT = "af2e37e6a2792452dea2b69024296faa0";
    static long sec = System.currentTimeMillis() / 1000;

    static final int points = 1;

    private static final CountDownLatch latch = new CountDownLatch(points);

    static BeringeiClient client = null;

    public static void main(String[] args) throws InterruptedException {
        // Command line options to set the config file.
        String configFileName = "/beringeiclient-demo.properties";
        Options options = new Options();
        Option optThreads = new Option("c", "config-file", true, "a config file with config values.");
        options.addOption(optThreads);

        CommandLineParser parser = new PosixParser();
        HelpFormatter formatter = new HelpFormatter();
        CommandLine cmd = null;
        try {
            cmd = parser.parse(options, args);
        } catch (org.apache.commons.cli.ParseException e1) {
            e1.printStackTrace();
            formatter.printHelp("Server", options);
            System.exit(1); // cmd is null here; exit rather than dereference it below
        }

        String _propertyFile = cmd.getOptionValue("c");

        if (_propertyFile != null) {
            configFileName = _propertyFile;
        }

        // Initialize the client configuration.
        Configuration.init(configFileName);
        Configuration.loadConfig();

        try {
            client = new BeringeiClient();
            performPut();
            Thread.sleep(5000);
            performGet();
        } catch (Exception x) {
            x.printStackTrace();
        }
        while (true) {
            Thread.sleep(250);

            if (latch.getCount() == 0) {
                System.out.println("done");
                System.exit(0);
            }
        }
    }

    private static void performPut() throws Exception {
        List<DataPoint> dps = new ArrayList<DataPoint>();
        Key key = new Key();
        key.key = keyT;
        key.shardId = 2;

        DataPoint dp = new DataPoint();
        dp.setKey(key);
        TimeValuePair tvp = new TimeValuePair();

        tvp.setUnixTime(sec);
        tvp.setValue(10);
        dp.setValue(tvp);

        dps.add(dp);

        for (int i = 0; i < points; i++) {
            CompletableFuture<List<DataPoint>> droppedCF = client.putDataPoints(dps);
            droppedCF.thenAccept(dropped -> {
                if (dropped.size() != 0) {
                    System.out.println(dropped);
                }
                latch.countDown();
            });
        }
    }

    private static void performGet() throws Exception {
        List<Key> keys = new ArrayList<Key>();
        Key key = new Key();
        key.key = keyT;
        key.shardId = 2;
        keys.add(key);
        CompletableFuture<Map<Key, List<DataPoint>>> dpsCF = client.getDataPointsByKey(sec - 60, sec, keys);
        dpsCF.thenAccept(dps -> {
            System.out.println(dps);
        });
    }
}
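The polling loop in main works, but the CountDownLatch can block directly; an equivalent ending for main, assuming the same latch field:

    // Block until every put callback has counted down, then exit.
    latch.await();
    System.out.println("done");
    System.exit(0);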
--------------------------------------------------------------------------------
/beringeiclient/src/main/java/org/wowcoders/beringeiclient/BeringeiTestRawClient.java:
--------------------------------------------------------------------------------
package org.wowcoders.beringeiclient;

import java.util.ArrayList;
import java.util.List;

import org.apache.thrift.protocol.TBinaryProtocol;
import org.apache.thrift.protocol.TProtocol;
import org.apache.thrift.transport.TSocket;

import com.facebook.beringei.thriftclient.BeringeiService;
import com.facebook.beringei.thriftclient.DataPoint;
import com.facebook.beringei.thriftclient.GetDataRequest;
import com.facebook.beringei.thriftclient.GetDataResult;
import com.facebook.beringei.thriftclient.Key;
import com.facebook.beringei.thriftclient.PutDataRequest;
import com.facebook.beringei.thriftclient.PutDataResult;
import com.facebook.beringei.thriftclient.TimeValuePair;

public class BeringeiTestRawClient {
    static String keyT = "af2e37e6a2792452dea2b69024296faa0";
    static long sec = System.currentTimeMillis() / 1000;

    static final int points = 1;

    static BeringeiService.Client client = null;

    public static void main(String[] args) {
        try {
            TSocket transport = new TSocket("10.180.17.77", 9999);
            transport.open();
            TProtocol protocol = new TBinaryProtocol(transport);

            client = new BeringeiService.Client(protocol);

            performPut();
            performGet();
        } catch (Exception e) {
            e.printStackTrace();
        }
    }

    private static void performPut() throws Exception {
        List<DataPoint> dps = new ArrayList<DataPoint>();
        Key key = new Key();
        key.key = keyT;
        key.shardId = 2;

        DataPoint dp = new DataPoint();
        dp.setKey(key);
        TimeValuePair tvp = new TimeValuePair();

        tvp.setUnixTime(sec);
        tvp.setValue(10);
        dp.setValue(tvp);

        dps.add(dp);

        for (int i = 0; i < points; i++) {
            PutDataRequest req = new PutDataRequest();
            req.setData(dps);
            PutDataResult res = client.putDataPoints(req);
            System.out.println(res);
            /*CompletableFuture<List<DataPoint>> droppedCF = client.putDataPoints(req);
            droppedCF.thenAccept(dropped -> {
                if (dropped.size() != 0) {
                    System.out.println(dropped);
                }
                latch.countDown();
            });*/
        }
    }

    private static void performGet() throws Exception {
        List<Key> keys = new ArrayList<Key>();
        Key key = new Key();
        key.key = keyT;
        key.shardId = 2;
        keys.add(key);
        GetDataRequest req = new GetDataRequest();
        req.setKeys(keys);
        req.setBegin(sec - 60);
        req.setEnd(sec);
        GetDataResult res = client.getData(req);
        System.out.println(res);
    }
}
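The raw client keeps the Thrift plumbing visible: one synchronous putDataPoints/getData round trip over an unframed binary protocol. The sample never closes the socket; a sketch of the same setup with an explicit close, assuming the same host and port:

    TSocket transport = new TSocket("10.180.17.77", 9999);
    try {
        transport.open();
        client = new BeringeiService.Client(new TBinaryProtocol(transport));
        performPut();
        performGet();
    } finally {
        transport.close(); // release the socket even if a call throws
    }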
--------------------------------------------------------------------------------
/beringeiclient/src/main/java/org/wowcoders/beringeiclient/configurations/Configuration.java:
--------------------------------------------------------------------------------
package org.wowcoders.beringeiclient.configurations;

import java.io.FileInputStream;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.io.InputStream;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Properties;

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.wowcoders.beringeiclient.utils.Pair;

//TODO - Use the standard Configuration Builder
public class Configuration {
    final static Logger logger = LoggerFactory.getLogger(Configuration.class);

    private static Properties prop = null;

    public static void init(String configFile) {
        InputStream is = null;
        try {
            prop = new Properties();
            if (configFile != null) {
                is = new FileInputStream(configFile);
            } else {
                is = Configuration.class.getResourceAsStream("/beringeiclient-demo.properties");
            }
            prop.load(is);
        } catch (FileNotFoundException e) {
            e.printStackTrace();
        } catch (IOException e) {
            e.printStackTrace();
        } finally {
            if (is != null) {
                try {
                    is.close(); // do not leak the stream once the properties are loaded
                } catch (IOException ignored) {
                }
            }
        }
    }

    private Configuration() {
    }

    static class ShardConfig {
        boolean writeOnMultiDataCenter = false;
        boolean writeOnAllHosts = false;

        public ShardConfig(Properties prop) {
            writeOnMultiDataCenter = Boolean.parseBoolean(prop.getProperty("shards.multi-datacenter-write", "false"));
            writeOnAllHosts = Boolean.parseBoolean(prop.getProperty("shards.write-on-all-hosts-in-same-datacenter", "false"));
        }

        public String toString() {
            StringBuilder sb = new StringBuilder();
            sb.append("write on multidatacenter enabled:" + writeOnMultiDataCenter).append("; ");
            sb.append("write on all hosts enabled:" + writeOnAllHosts).append("; ");
            return sb.toString();
        }

        public boolean isWriteOnMultiDataCenter() {
            return writeOnMultiDataCenter;
        }

        public void setWriteOnMultiDataCenter(boolean writeOnMultiDataCenter) {
            this.writeOnMultiDataCenter = writeOnMultiDataCenter;
        }

        public boolean isWriteOnAllHosts() {
            return writeOnAllHosts;
        }

        public void setWriteOnAllHosts(boolean writeOnAllHosts) {
            this.writeOnAllHosts = writeOnAllHosts;
        }
    }

    public static class TagsCacheConfig {
        long ttl = 604800000;
        Pair<String, Integer> listenAddress = null;

        List<Pair<String, Integer>> cluster = new ArrayList<Pair<String, Integer>>();

        public TagsCacheConfig(Properties prop) {
            ttl = Long.parseLong(prop.getProperty("atomix.cache-tags.ttl-ms"));

            String listen = prop.getProperty("atomix.cache-tags.listen");
            if (listen != null) {
                String[] addrport = listen.split(":");
                listenAddress = new Pair<String, Integer>(addrport[0], Integer.parseInt(addrport[1]));
            }

            String clusterStr = prop.getProperty("atomix.cache-tags.cluster");
            if (clusterStr != null) {
                String[] clusterArr = clusterStr.split(",");
                for (String _cluster : clusterArr) {
                    String[] ip_port = _cluster.split(":");
                    Pair<String, Integer> caddress = new Pair<String, Integer>(ip_port[0], Integer.parseInt(ip_port[1]));
                    cluster.add(caddress);
                }
            }
        }

        public String toString() {
            StringBuilder sb = new StringBuilder();
            sb.append("ttl:" + ttl).append("; ");
            if (listenAddress != null) {
                sb.append("(Address:" + listenAddress.first)
                        .append("; ").append("port:" + listenAddress.second).append("); ");
            }

            if (cluster != null) {
                cluster.stream().forEach(addr -> {
                    sb.append("(Address:" + addr.first)
                            .append("; ").append("port:" + addr.second).append("); ");
                });
            }
            return sb.toString();
        }

        public long getTtl() {
            return ttl;
        }

        public void setTtl(long ttl) {
            this.ttl = ttl;
        }

        public Pair<String, Integer> getListenAddress() {
            return listenAddress;
        }

        public void setListenAddress(Pair<String, Integer> listenAddress) {
            this.listenAddress = listenAddress;
        }

        public List<Pair<String, Integer>> getCluster() {
            return cluster;
        }

        public void setCluster(List<Pair<String, Integer>> cluster) {
            this.cluster = cluster;
        }
    }
    public static class ClientsConfig {
        long connectTimeout = 600;
        long readTimeout = 60000;
        long writeTimeout = 60000;

        int idleConnectionsPerShards = 1;

        long timeoutToCloseIdleConnection = 40000;

        int connectionCountPerShards = 2;

        int shardCounts = 100;

        String[] datacenters = null;

        HashMap<String, HashSet<Pair<String, Integer>>> datacenterHostListMap = new HashMap<String, HashSet<Pair<String, Integer>>>();

        int flushInterval = 10000;
        int flushAfterCount = 1000;
        int readThreads = 10;
        int writeThreads = 10;

        public ClientsConfig(Properties prop) {
            connectTimeout = Long.parseLong(prop.getProperty("clients.connect-timeout-ms"));
            readTimeout = Long.parseLong(prop.getProperty("clients.read-timeout-ms"));
            writeTimeout = Long.parseLong(prop.getProperty("clients.write-timeout-ms"));

            idleConnectionsPerShards = Integer.parseInt(prop.getProperty("clients.active-idle-connections-per-shards"));
            timeoutToCloseIdleConnection = Long.parseLong(prop.getProperty("clients.timeout-to-close-idle-connections-in-seconds"));
            connectionCountPerShards = Integer.parseInt(prop.getProperty("clients.connection-count-per-shards"));

            shardCounts = Integer.parseInt(prop.getProperty("clients.cluster-shards-count", "100"));
            datacenters = prop.getProperty("clients.datacenters", "").split(",");

            for (String datacenter : datacenters) {
                String endpointsStr = prop.getProperty("clients." + datacenter + ".cluster-endpoints", "");
                String[] endpoints = endpointsStr.split(",");
                HashSet<Pair<String, Integer>> endpointSet = datacenterHostListMap.get(datacenter);
                if (endpointSet == null) {
                    endpointSet = new HashSet<Pair<String, Integer>>();
                    datacenterHostListMap.put(datacenter, endpointSet);
                }
                for (String endpoint : endpoints) {
                    String[] ip_port = endpoint.split(":");
                    Pair<String, Integer> caddress = new Pair<String, Integer>(ip_port[0], Integer.parseInt(ip_port[1]));
                    endpointSet.add(caddress);
                }
            }

            // Flush triggers: elapsed interval (ms) or queued metric count, whichever is hit first.
            flushInterval = Integer.parseInt(prop.getProperty("clients.flush.interval"));
            flushAfterCount = Integer.parseInt(prop.getProperty("clients.flush.when-metric-count-in-queue"));
            readThreads = Integer.parseInt(prop.getProperty("clients.read-threads"));
            writeThreads = Integer.parseInt(prop.getProperty("clients.write-threads"));
        }

        public String toString() {
            StringBuilder sb = new StringBuilder();

            sb.append("connectTimeout:" + connectTimeout).append("; ")
                    .append("readTimeout:" + readTimeout).append("; ")
                    .append("writeTimeout:" + writeTimeout).append("; ");

            sb.append("idleConnectionsPerShards:" + idleConnectionsPerShards).append("; ")
                    .append("timeoutToCloseIdleConnection:" + timeoutToCloseIdleConnection).append("; ")
                    .append("connectionCountPerShards:" + connectionCountPerShards).append("; ");

            sb.append("shardCnt:" + shardCounts).append("; ")
                    .append("datacenters:" + Arrays.toString(datacenters)).append("; ");

            return sb.toString();
        }

        public long getConnectTimeout() {
            return connectTimeout;
        }

        public void setConnectTimeout(long connectTimeout) {
            this.connectTimeout = connectTimeout;
        }

        public long getReadTimeout() {
            return readTimeout;
        }

        public void setReadTimeout(long readTimeout) {
            this.readTimeout = readTimeout;
        }

        public long getWriteTimeout() {
            return writeTimeout;
        }

        public void setWriteTimeout(long writeTimeout) {
            this.writeTimeout = writeTimeout;
        }

        public int getIdleConnectionsPerShards() {
            return idleConnectionsPerShards;
        }

        public void setIdleConnectionsPerShards(int idleConnectionsPerShards) {
            this.idleConnectionsPerShards = idleConnectionsPerShards;
        }

        public long getTimeoutToCloseIdleConnection() {
            return timeoutToCloseIdleConnection;
        }

        public void setTimeoutToCloseIdleConnection(long timeoutToCloseIdleConnection) {
            this.timeoutToCloseIdleConnection = timeoutToCloseIdleConnection;
        }

        public int getConnectionCountPerShards() {
            return connectionCountPerShards;
        }

        public void setConnectionCountPerShards(int connectionCountPerShards) {
            this.connectionCountPerShards = connectionCountPerShards;
        }
        public int getShardCounts() {
            return shardCounts;
        }

        public void setShardCounts(int shardCounts) {
            this.shardCounts = shardCounts;
        }

        public String[] getDatacenters() {
            return datacenters;
        }

        public void setDatacenters(String[] datacenters) {
            this.datacenters = datacenters;
        }

        public HashMap<String, HashSet<Pair<String, Integer>>> getDatacenterHostListMap() {
            return datacenterHostListMap;
        }

        public void setDatacenterHostListMap(HashMap<String, HashSet<Pair<String, Integer>>> datacenterHostListMap) {
            this.datacenterHostListMap = datacenterHostListMap;
        }

        public int getFlushInterval() {
            return flushInterval;
        }

        public void setFlushInterval(int flushInterval) {
            this.flushInterval = flushInterval;
        }

        public int getFlushAfterCount() {
            return flushAfterCount;
        }

        public void setFlushAfterCount(int flushAfterCount) {
            this.flushAfterCount = flushAfterCount;
        }

        public int getReadThreads() {
            return readThreads;
        }

        public void setReadThreads(int readThreads) {
            this.readThreads = readThreads;
        }

        public int getWriteThreads() {
            return writeThreads;
        }

        public void setWriteThreads(int writeThreads) {
            this.writeThreads = writeThreads;
        }
    }

    /*******************************/
    ShardConfig shardConfig = null;
    ClientsConfig clientConfig = null;

    public void load() {
        shardConfig = new ShardConfig(prop);
        clientConfig = new ClientsConfig(prop);
    }

    /*******************************/

    public String toString() {
        StringBuilder sb = new StringBuilder();
        sb.append("shard config: " + shardConfig).append("\n");
        sb.append("clients config: " + clientConfig).append("\n");

        return sb.toString();
    }

    private static Configuration instance = new Configuration();

    public static void loadConfig() {
        instance.load();
        logger.info(instance.toString());
    }

    public ShardConfig getShardConfig() {
        return shardConfig;
    }

    public ClientsConfig getClientConfig() {
        return clientConfig;
    }

    public static Configuration getInstnace() {
        if (instance.shardConfig == null) {
            synchronized (instance) {
                if (instance.shardConfig == null) { // re-check under the lock to avoid loading twice
                    loadConfig();
                }
            }
        }
        return instance;
    }
    /*******************************/

    public static void main(String[] a) {
        loadConfig();
    }
}
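A minimal sketch of bootstrapping this configuration, mirroring what BeringeiTestClient does (passing null falls back to the bundled /beringeiclient-demo.properties resource):

    Configuration.init(null);
    Configuration.loadConfig();
    Configuration cfg = Configuration.getInstnace(); // method name as defined above
    int shards = cfg.getClientConfig().getShardCounts();
    boolean multiDc = cfg.getShardConfig().isWriteOnMultiDataCenter();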
--------------------------------------------------------------------------------
/beringeiclient/src/main/java/org/wowcoders/beringeiclient/utils/Pair.java:
--------------------------------------------------------------------------------
package org.wowcoders.beringeiclient.utils;

import java.io.Serializable;
import java.util.Objects;

/**
 * Container to ease passing around a tuple of two objects. This object provides a sensible
 * implementation of equals(), returning true if equals() is true on each of the contained
 * objects.
 */
public class Pair<F, S> implements Serializable, Cloneable {
    private static final long serialVersionUID = 3976309530595355401L;

    public F first;
    public S second;

    /**
     * Constructor for a Pair.
     *
     * @param first the first object in the Pair
     * @param second the second object in the pair
     */
    public Pair(F first, S second) {
        this.first = first;
        this.second = second;
    }

    /**
     * Checks the two objects for equality by delegating to their respective
     * {@link Object#equals(Object)} methods.
     *
     * @param o the {@link Pair} to which this one is to be checked for equality
     * @return true if the underlying objects of the Pair are both considered equal
     */
    @Override
    public boolean equals(Object o) {
        if (!(o instanceof Pair)) {
            return false;
        }
        Pair<?, ?> p = (Pair<?, ?>) o;
        return Objects.equals(p.first, first) && Objects.equals(p.second, second);
    }

    /**
     * Compute a hash code using the hash codes of the underlying objects.
     *
     * @return a hashcode of the Pair
     */
    @Override
    public int hashCode() {
        return (first == null ? 0 : first.hashCode()) ^ (second == null ? 0 : second.hashCode());
    }

    /**
     * Convenience method for creating an appropriately typed pair.
     *
     * @param a the first object in the Pair
     * @param b the second object in the pair
     * @return a Pair that is templatized with the types of a and b
     */
    public static <A, B> Pair<A, B> create(A a, B b) {
        return new Pair<A, B>(a, b);
    }

    @Override
    public Pair<F, S> clone() throws CloneNotSupportedException {
        return Pair.create(this.first, this.second);
    }
}
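Pair is how the configuration passes host:port endpoints around; a quick usage sketch:

    Pair<String, Integer> endpoint = Pair.create("10.180.17.77", 9999);
    System.out.println(endpoint.first + ":" + endpoint.second); // 10.180.17.77:9999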
--------------------------------------------------------------------------------
/beringeiclient/src/main/resources/beringeiclient-demo.properties:
--------------------------------------------------------------------------------
#TODO define better names for properties

#clients-by-shards (currently supported async count is 1)
#TODO
# 1. add support for tls
# 2. make atomix as ts store
clients.connect-timeout-ms=100
clients.read-timeout-ms=40000
clients.write-timeout-ms=40000
clients.active-idle-connections-per-shards=1
clients.timeout-to-close-idle-connections-in-seconds=40
clients.connection-count-per-shards=10
#supported shards: total endpoints/shards
# TODO. config update using admin ui
clients.cluster-shards-count=100
# TODO. add support for the following
clients.cluster-even-endpoint-as-backup-store=false
clients.datacenters=dcntr1
clients.dcntr1.cluster-endpoints=10.180.17.77:9999,10.180.16.207:9999
#metrics flush to servers (we flush metrics to the server if one of the following becomes true)
clients.flush.when-metric-count-in-queue=1000
clients.flush.interval=10000
#threads
clients.read-threads=20
clients.write-threads=10
#TODO retry/retry queue

#shards
#TODO add support for the following properties
shards.multi-datacenter-write=false
shards.write-on-all-hosts-in-same-datacenter=false
--------------------------------------------------------------------------------
/beringeiclient/src/main/resources/log4j.properties:
--------------------------------------------------------------------------------
log4j.rootLogger=INFO,CONSOLE,R
log4j.appender.R=org.apache.log4j.RollingFileAppender
log4j.appender.R.File=app.log
log4j.appender.R.MaxFileSize=50MB
log4j.appender.R.MaxBackupIndex=7
log4j.appender.R.layout=org.apache.log4j.PatternLayout
log4j.appender.R.layout.ConversionPattern=%d{yyyy-MM-dd HH:mm:ss,SSS} %5p %t %c - %m%n

log4j.appender.CONSOLE=org.apache.log4j.ConsoleAppender
log4j.appender.CONSOLE.Target=System.out
log4j.appender.CONSOLE.layout=org.apache.log4j.PatternLayout
log4j.appender.CONSOLE.layout.conversionPattern=%d{yyyy-MM-dd HH:mm:ss,SSS} %5p %t %c - %m%n
--------------------------------------------------------------------------------
/beringeiclient/src/test/java/org/wowcoders/beringeiclient/compression/BlockDecoderTest.java:
--------------------------------------------------------------------------------
package org.wowcoders.beringeiclient.compression;

import static org.junit.Assert.assertEquals;

import java.util.List;

import org.junit.Test;
import org.wowcoders.beringei.compression.BlockDecoder;

import com.facebook.beringei.thriftclient.DataPoint;
import com.facebook.beringei.thriftclient.Key;
import com.facebook.beringei.thriftclient.TimeSeriesBlock;

public class BlockDecoderTest {
    public static byte[] hexStringToByteArray(String s) {
        int len = s.length();
        byte[] data = new byte[len / 2];
        for (int i = 0; i < len; i += 2) {
            data[i / 2] = (byte) ((Character.digit(s.charAt(i), 16) << 4)
                    + Character.digit(s.charAt(i + 1), 16));
        }
        return data;
    }

    @Test
    public void decodeEmptyBlock() {
        Key key = new Key();
        key.key = "testkey";
        key.shardId = 100;
        TimeSeriesBlock tsd = new TimeSeriesBlock();
        byte[] b = new byte[100];
        tsd.setData(b);
        tsd.setCount(0);
        BlockDecoder bd = new BlockDecoder(key, tsd);

        List<DataPoint> dps = bd.readDps();
        assertEquals(0, dps.size());
    }

    @Test
    public void decode4DataPointsBlock() {
        String hex = "B20A83A305080A5C167814D0292468C0";
        Key key = new Key();
        key.key = "testkey";
        key.shardId = 100;
        TimeSeriesBlock tsd = new TimeSeriesBlock();
        byte[] b = hexStringToByteArray(hex);
        tsd.setData(b);
        tsd.setCount(4);
        BlockDecoder bd = new BlockDecoder(key, tsd);

        List<DataPoint> dps = bd.readDps();

        assertEquals(4, dps.size());

        // 1493516753, 75.0
        DataPoint dp = dps.get(0);
        assertEquals(key, dp.key);
        assertEquals(1493516753, dp.value.unixTime);
        assertEquals(75.0, dp.value.value, 0);

        // 1493516754, 79.0
        dp = dps.get(1);
        assertEquals(key, dp.key);
        assertEquals(1493516754, dp.value.unixTime);
        assertEquals(79.0, dp.value.value, 0);

        // 1493516755, 95.0
        dp = dps.get(2);
        assertEquals(key, dp.key);
        assertEquals(1493516755, dp.value.unixTime);
        assertEquals(95.0, dp.value.value, 0);

        // 1493516756, 31.0
        dp = dps.get(3);
        assertEquals(key, dp.key);
        assertEquals(1493516756, dp.value.unixTime);
        assertEquals(31.0, dp.value.value, 0);
    }

    @Test
    public void decode54DataPointsBlock() {
        String hex = "B20C032704F809982CC13E923576A1EF4E0FD344EB3B5A48F9AC91E031B63EC759DBC67D8C759E30F646963694923F6F88EF8DF8C68F84C96C4EC0EF5AF48C32F52FECE5CDE8F98F80FA0C24F44F0CC70CB550C7F2FCAECC8E00EDCF4CF500";
        Key key = new Key();
        key.key = "testkey";
        key.shardId = 100;
        TimeSeriesBlock tsd = new TimeSeriesBlock();
        byte[] b = hexStringToByteArray(hex);
        tsd.setData(b);
        tsd.setCount(54);
        BlockDecoder bd = new BlockDecoder(key, tsd);

        List<DataPoint> dps = bd.readDps();
        assertEquals(54, dps.size());
    }
}
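The same decode path works outside JUnit; a sketch that replays the 4-point block from the test above, reusing the test's public hexStringToByteArray helper:

    // Decode the Gorilla-compressed sample block and print each timestamp/value pair.
    Key key = new Key();
    key.key = "testkey";
    key.shardId = 100;

    TimeSeriesBlock block = new TimeSeriesBlock();
    block.setData(BlockDecoderTest.hexStringToByteArray("B20A83A305080A5C167814D0292468C0"));
    block.setCount(4);

    for (DataPoint dp : new BlockDecoder(key, block).readDps()) {
        System.out.println(dp.value.unixTime + " -> " + dp.value.value); // e.g. 1493516753 -> 75.0
    }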
--------------------------------------------------------------------------------
/config/tspark-demo.properties:
--------------------------------------------------------------------------------
#if the property "listen" is missing, then the instance will act as a client.
#This helps to have a separate atomix cluster for the tags store.
#TODO add support for tls
#cache - ttl for metric and meta (tags, metricshash, tagshashkey, shardid, usersessions)
atomix.cache-tags.ttl-ms=604800000
atomix.cache-tags.enable-tls=false
atomix.cache-tags.listen=0.0.0.0:55000
atomix.cache-tags.cluster=0.0.0.0:55000

#listen port for apis (grafana/bulkupload/ws-metric-sender)
#TODO add support for tls
api.enabled=true
api.enable-tls=false
api.listen=0.0.0.0:58080
api.threads-cnt=20

##################################
## Beringei client properties
##################################
#clients-by-shards (currently supported async count is 1)
#TODO
# 1. add support for tls
# 2. make atomix as ts store
clients.connect-timeout-ms=100
clients.read-timeout-ms=40000
clients.write-timeout-ms=40000
clients.active-idle-connections-per-shards=1
clients.timeout-to-close-idle-connections-in-seconds=40
clients.connection-count-per-shards=10
clients.count-of-async-operations-allowed-per-connection=1
#supported shards: total endpoints/shards
# TODO. config update using admin ui
clients.cluster-shards-count=100
# TODO. add support for the following
clients.cluster-even-endpoint-as-backup-store=false
clients.datacenters=dcntr1
clients.dcntr1.cluster-endpoints=10.180.17.77:9999,10.180.16.207:9999
#metrics flush to servers (we flush metrics to the server if one of the following becomes true)
clients.flush.when-metric-count-in-queue=1000
clients.flush.interval=10000
#threads
clients.read-threads=20
clients.write-threads=10
#TODO retry/retry queue

#shards
#TODO add support for the following properties
shards.multi-datacenter-write=false
shards.write-on-all-hosts-in-same-datacenter=false
--------------------------------------------------------------------------------
/pom.xml:
--------------------------------------------------------------------------------
<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
  <modelVersion>4.0.0</modelVersion>
  <groupId>com.wowcoders.tspark</groupId>
  <artifactId>TSPark-ALL</artifactId>
  <version>0.0.1-SNAPSHOT</version>
  <packaging>pom</packaging>

  <modules>
    <module>TSPark</module>
    <module>beringeiclient</module>
    <module>dist</module>
  </modules>

  <build>
    <plugins>
      <plugin>
        <groupId>org.apache.maven.plugins</groupId>
        <artifactId>maven-compiler-plugin</artifactId>
        <version>3.6.1</version>
        <configuration>
          <source>1.8</source>
          <target>1.8</target>
        </configuration>
      </plugin>
    </plugins>
  </build>

  <dependencies>
    <dependency>
      <groupId>org.slf4j</groupId>
      <artifactId>slf4j-api</artifactId>
      <version>1.7.1</version>
    </dependency>
    <dependency>
      <groupId>org.slf4j</groupId>
      <artifactId>jcl-over-slf4j</artifactId>
      <version>1.7.1</version>
    </dependency>
    <dependency>
      <groupId>org.slf4j</groupId>
      <artifactId>slf4j-log4j12</artifactId>
      <version>1.7.1</version>
    </dependency>
    <dependency>
      <groupId>log4j</groupId>
      <artifactId>log4j</artifactId>
      <version>1.2.14</version>
    </dependency>
    <dependency>
      <groupId>cn.danielw</groupId>
      <artifactId>fast-object-pool</artifactId>
      <version>2.0.0</version>
    </dependency>
  </dependencies>

  <repositories>
    <repository>
      <id>akka</id>
      <url>http://repo.akka.io/releases/</url>
    </repository>
  </repositories>
</project>
--------------------------------------------------------------------------------
/quick-start.md:
--------------------------------------------------------------------------------
## Prerequisites - Download and deploy the Beringei server from Facebook
https://github.com/facebookincubator/beringei

## Start the Beringei Server
./beringei_main -beringei_configuration_path /beringeidata/beringei.json -data_directory /beringeidata/data -port 9999 -create_directories -sleep_between_bucket_finalization_secs 60 -allowed_timestamp_behind 900 -bucket_size 1200 -buckets 144 -mintimestampdelta 0 -logtostderr -v=1

## Building the Beringei Java Client
mvn clean package
Dependencies: com.facebook thrift-1.0-SNAPSHOT

## Samples
Send & retrieve metrics data using the Java client

## Grafana dashboards
### Setting up Grafana
Add data source - sample http://127.0.0.1:58080/tsdb
### Installing the dashboards
### Accessing the data from Grafana
### Importing the dashboard from the sample json file

## Features
### Store and Retrieve Metrics to/from Beringei
### Aggregations

## QueryService APIs
http://<host>:<port>/tsdb/api/query
http://<host>:<port>/tsdb/api/aggregators
http://<host>:<port>/tsdb/api/suggest

(Default sample)
http://127.0.0.1:58080/tsdb/api/query
http://127.0.0.1:58080/tsdb/api/aggregators
http://127.0.0.1:58080/tsdb/api/suggest
--------------------------------------------------------------------------------
/scripts/startqs.sh:
--------------------------------------------------------------------------------
java -cp ../TSPark/target/TSPark-0.0.1-SNAPSHOT-jar-with-dependencies.jar org.wowcoders.tspark.qs.Server -c ../config/tspark-demo.properties
--------------------------------------------------------------------------------
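Once the query service is running (see startqs.sh above), the documented aggregators endpoint makes a quick smoke test. A hypothetical helper class (the name QsSmokeTest is ours), assuming the default api.listen address from config/tspark-demo.properties:

    import java.io.BufferedReader;
    import java.io.InputStreamReader;
    import java.net.URL;

    public class QsSmokeTest {
        public static void main(String[] args) throws Exception {
            // Default listen address from config/tspark-demo.properties (api.listen=0.0.0.0:58080).
            URL url = new URL("http://127.0.0.1:58080/tsdb/api/aggregators");
            try (BufferedReader in = new BufferedReader(new InputStreamReader(url.openStream()))) {
                String line;
                while ((line = in.readLine()) != null) {
                    System.out.println(line); // expect a JSON list of supported aggregators
                }
            }
        }
    }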