├── .gitignore ├── LICENSE ├── README.markdown ├── cache.go ├── cache_test.go ├── cubism └── seriesism.js ├── database.go ├── database_test.go ├── debug.go ├── handlers.go ├── handlers_test.go ├── main.go ├── mcserver.go ├── query.go ├── query_test.go ├── sample.json ├── serieslyclient ├── client.go ├── db.go └── query.go ├── timelib ├── time.go └── time_test.go ├── tools ├── compact │ └── compact.go ├── dump │ ├── dump.go │ ├── format.go │ └── format_test.go ├── load │ └── load.go ├── sample │ └── sample.go └── serieslyinfo │ └── serieslyinfo.go ├── util.go └── util_test.go /.gitignore: -------------------------------------------------------------------------------- 1 | #* 2 | *.gz 3 | *~ 4 | /db/ 5 | /seriesly 6 | /tools/compact/compact 7 | /tools/dump/dump 8 | /tools/load/load 9 | /tools/sample/sample 10 | /tools/serieslyinfo/serieslyinfo 11 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Copyright (c) 2012-2015 Dustin Sallings 2 | 3 | Permission is hereby granted, free of charge, to any person obtaining a copy 4 | of this software and associated documentation files (the "Software"), to deal 5 | in the Software without restriction, including without limitation the rights 6 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 7 | copies of the Software, and to permit persons to whom the Software is 8 | furnished to do so, subject to the following conditions: 9 | 10 | The above copyright notice and this permission notice shall be included in 11 | all copies or substantial portions of the Software. 12 | 13 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 14 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 15 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 16 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 17 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 18 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 19 | SOFTWARE. 20 | 21 | 22 | -------------------------------------------------------------------------------- /README.markdown: -------------------------------------------------------------------------------- 1 | # A Timeseries Database 2 | 3 | seriesly is a database for storing and querying time series data. 4 | Unlike databases like RRDtool, it's schemaless so you can just lob 5 | data into it and start hacking. However, it also doesn't use a finite 6 | amount of space. Tradeoffs. 7 | 8 | Detailed docs are in [the wiki][wiki]. 9 | 10 | # Quick Start 11 | 12 | ## Prereqs: 13 | 14 | To build the software, you need a [go][go] runtime installed. 15 | Preferably a recent one. 16 | 17 | ## Installation 18 | 19 | go get github.com/dustin/seriesly 20 | 21 | ## Running 22 | 23 | Seriesly will use as many cores as you have to offer. Set the 24 | `GOMAXPROCS` environment variable to the number of cores (or hardware 25 | threads) you've got. There's a lot of fine-grained tuning you can 26 | employ to specify how to map the software needs to your hardware, but 27 | the defaults should work quite well. 28 | 29 | Then just start blasting data into it. See the [protocol docs][wiki] 30 | for details on this. 31 | 32 | # More Info 33 | 34 | My [blog post][blog] provides an overview of the why and a little bit 35 | of the what. 
There's also a [youtube video][youtube] of me demoing 36 | the software a few days after the initial version hit github. It's 37 | matured a lot since then, but the core concepts haven't changed. 38 | 39 | [blog]: http://dustin.github.com/2012/09/09/seriesly.html 40 | [youtube]: http://youtu.be/8b-8NTCyFQQ 41 | [wiki]: https://github.com/dustin/seriesly/wiki 42 | [couchstore]: https://github.com/couchbase/couchstore 43 | [go]: http://golang.org/ 44 | -------------------------------------------------------------------------------- /cache.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "hash/fnv" 5 | "log" 6 | "math" 7 | "strconv" 8 | "time" 9 | 10 | "github.com/dustin/gojson" 11 | "github.com/dustin/gomemcached" 12 | "github.com/dustin/gomemcached/client" 13 | ) 14 | 15 | var cacheInput chan *processIn 16 | var cacheInputSet chan *processOut 17 | 18 | func cacheReceiveLoop(client *memcached.Client, out chan *processOut) { 19 | for { 20 | res, err := memcached.UnwrapMemcachedError(client.Receive()) 21 | if err != nil { 22 | log.Printf("Error receiving from memcache: %v", err) 23 | return 24 | } 25 | 26 | po := processOut{cacheOpaque: res.Opaque} 27 | 28 | if res.Opcode == gomemcached.GET && res.Status == gomemcached.SUCCESS { 29 | rv := map[string][]interface{}{} 30 | err = json.Unmarshal(res.Body, &rv) 31 | if err == nil { 32 | po.value = rv["v"] 33 | } else { 34 | log.Printf("Decoding error: %v\n%s", err, res.Body) 35 | po.err = err 36 | } 37 | } else { 38 | po.err = res 39 | } 40 | 41 | out <- &po 42 | } 43 | } 44 | 45 | func cacheOpener(ch chan<- *memcached.Client) { 46 | log.Printf("Connecting to memcached") 47 | client, err := memcached.Connect("tcp", *cacheAddr) 48 | if err != nil { 49 | log.Printf("Failed to connect to memcached: %v", err) 50 | } 51 | ch <- client 52 | } 53 | 54 | func cacheProcessor(ch <-chan *processIn, chset <-chan *processOut) { 55 | omap := map[uint32]*processIn{} 56 | opaque := uint32(100) 57 | 58 | out := make(chan *processOut) 59 | connch := make(chan *memcached.Client) 60 | 61 | go cacheOpener(connch) 62 | 63 | var client *memcached.Client 64 | 65 | for { 66 | select { 67 | case client = <-connch: 68 | if client == nil { 69 | time.AfterFunc(time.Second, func() { 70 | cacheOpener(connch) 71 | }) 72 | } else { 73 | go cacheReceiveLoop(client, out) 74 | } 75 | case pi := <-ch: 76 | switch { 77 | case client == nil: 78 | // No connection, pass through 79 | processorInput <- pi 80 | case time.Now().Before(pi.before): 81 | newOpaque := opaque 82 | opaque++ 83 | if opaque == math.MaxUint32 { 84 | opaque = 100 85 | } 86 | omap[newOpaque] = pi 87 | pi.cacheKey = cacheKey(pi) 88 | 89 | req := &gomemcached.MCRequest{ 90 | Opcode: gomemcached.GET, 91 | Opaque: newOpaque, 92 | Key: []byte(pi.cacheKey), 93 | } 94 | 95 | err := client.Transmit(req) 96 | if err != nil { 97 | log.Printf("Error transmitting!: %v", err) 98 | client.Close() 99 | client = nil 100 | go cacheOpener(connch) 101 | for _, v := range omap { 102 | processorInput <- v 103 | } 104 | omap = map[uint32]*processIn{} 105 | } 106 | default: 107 | // Too old. 
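// The deadline (pi.before) on this request has already passed, so
// answer with errTimeout immediately instead of spending a memcached
// round trip on a result nobody is waiting for; docProcessor applies
// the same cutoff on the cache-miss path.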
108 | pi.out <- &processOut{"", pi.key, nil, errTimeout, 0} 109 | } 110 | case po := <-out: 111 | pi, ok := omap[po.cacheOpaque] 112 | if ok { 113 | delete(omap, po.cacheOpaque) 114 | po.key = pi.key 115 | if po.err == nil { 116 | po.cacheKey = pi.cacheKey 117 | pi.out <- po 118 | } else { 119 | processorInput <- pi 120 | } 121 | } else { 122 | if po.cacheOpaque > 10 { 123 | log.Printf("Unknown opaque: %v", po.cacheOpaque) 124 | } 125 | } 126 | case po := <-chset: 127 | if client != nil { 128 | bytes, err := json.Marshal(po) 129 | if err == nil { 130 | req := &gomemcached.MCRequest{ 131 | Opcode: gomemcached.SETQ, 132 | Key: []byte(po.cacheKey), 133 | Cas: 0, 134 | Opaque: 1, 135 | Extras: []byte{0, 0, 0, 0, 0, 0, 0, 0}, 136 | Body: bytes} 137 | 138 | err = client.Transmit(req) 139 | if err != nil { 140 | log.Printf("Error transmitting!: %v", err) 141 | client.Close() 142 | client = nil 143 | go cacheOpener(connch) 144 | for _, v := range omap { 145 | processorInput <- v 146 | } 147 | omap = map[uint32]*processIn{} 148 | } 149 | } else { 150 | log.Fatalf("Error marshaling %v: %v", po, err) 151 | } 152 | } // has client, will cache 153 | } 154 | } 155 | } 156 | 157 | func cacheKey(p *processIn) string { 158 | h := fnv.New64() 159 | for _, i := range p.infos { 160 | i.WriteIDTo(h) 161 | } 162 | for i := range p.ptrs { 163 | h.Write([]byte(p.ptrs[i])) 164 | h.Write([]byte(p.reds[i])) 165 | } 166 | for i := range p.filters { 167 | h.Write([]byte(p.filters[i])) 168 | h.Write([]byte(p.filtervals[i])) 169 | } 170 | return p.dbname + "#" + strconv.FormatInt(p.key, 10) + 171 | "#" + strconv.FormatUint(h.Sum64(), 10) 172 | } 173 | -------------------------------------------------------------------------------- /cache_test.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "testing" 5 | "time" 6 | 7 | "github.com/mschoch/gouchstore" 8 | ) 9 | 10 | func benchCacheSize(b *testing.B, num int) { 11 | p := processIn{ 12 | dbname: "mydatabase", 13 | key: 817492945, 14 | ptrs: []string{"/some/pointer", "/other/pointer"}, 15 | reds: []string{"min", "max"}, 16 | } 17 | 18 | startTime := time.Now() 19 | for i := 0; i < num; i++ { 20 | di := gouchstore.NewDocumentInfo(startTime.Format(time.RFC3339Nano)) 21 | p.infos = append(p.infos, di) 22 | } 23 | 24 | b.ResetTimer() 25 | for i := 0; i < b.N; i++ { 26 | cacheKey(&p) 27 | } 28 | } 29 | 30 | func BenchmarkCacheKeying1440(b *testing.B) { 31 | benchCacheSize(b, 1440) 32 | } 33 | 34 | func BenchmarkCacheKeying10(b *testing.B) { 35 | benchCacheSize(b, 10) 36 | } 37 | -------------------------------------------------------------------------------- /cubism/seriesism.js: -------------------------------------------------------------------------------- 1 | // Seriesly plugin for cubism. 2 | // 3 | // Load this file and use seriesly.context() 4 | // in place of your normal cubism.context() 5 | 6 | var seriesly = { 7 | context: function() { 8 | var context = cubism.context(); 9 | 10 | function seriesly(baseUrl) { 11 | var source = {}; 12 | 13 | source.metric = function(dbname, ptr, reducer, lbl) { 14 | return context.metric(function(start, stop, step, callback) { 15 | d3.json(baseUrl + dbname + 16 | "/_query?ptr=" + ptr + 17 | "&reducer=" + reducer + 18 | "&from=" + (+start) + 19 | "&to=" + (+stop) + 20 | "&group=" + step, 21 | function(data) { 22 | if (data) { 23 | var rv = []; 24 | for (var i = (+start); i < (+stop); i+=step) { 25 | rv.push(data[i] ? 
data[i][0] : NaN); 26 | } 27 | callback(null, rv); 28 | } 29 | }); 30 | }, lbl || (dbname + " - " + reducer + "@" + ptr)); 31 | }; 32 | 33 | source.toString = function() { 34 | return baseUrl; 35 | }; 36 | 37 | return source; 38 | } 39 | 40 | context.seriesly = seriesly; 41 | 42 | return context; 43 | } 44 | }; 45 | -------------------------------------------------------------------------------- /database.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "errors" 5 | "log" 6 | "os" 7 | "path/filepath" 8 | "strings" 9 | "sync" 10 | "sync/atomic" 11 | "time" 12 | 13 | "github.com/dustin/seriesly/timelib" 14 | "github.com/mschoch/gouchstore" 15 | ) 16 | 17 | type dbOperation uint8 18 | 19 | const ( 20 | opStoreItem = dbOperation(iota) 21 | opDeleteItem 22 | opCompact 23 | ) 24 | 25 | const dbExt = ".couch" 26 | 27 | type dbqitem struct { 28 | dbname string 29 | k string 30 | data []byte 31 | op dbOperation 32 | cherr chan error 33 | } 34 | 35 | type dbWriter struct { 36 | dbname string 37 | ch chan dbqitem 38 | quit chan bool 39 | db *gouchstore.Gouchstore 40 | } 41 | 42 | var errClosed = errors.New("closed") 43 | 44 | func (w *dbWriter) Close() error { 45 | select { 46 | case <-w.quit: 47 | return errClosed 48 | default: 49 | } 50 | close(w.quit) 51 | return nil 52 | } 53 | 54 | var dbLock = sync.Mutex{} 55 | var dbConns = map[string]*dbWriter{} 56 | 57 | func dbPath(n string) string { 58 | return filepath.Join(*dbRoot, n) + dbExt 59 | } 60 | 61 | func dbBase(n string) string { 62 | left := 0 63 | right := len(n) 64 | if strings.HasPrefix(n, *dbRoot) { 65 | left = len(*dbRoot) 66 | if n[left] == '/' { 67 | left++ 68 | } 69 | } 70 | if strings.HasSuffix(n, dbExt) { 71 | right = len(n) - len(dbExt) 72 | } 73 | return n[left:right] 74 | } 75 | 76 | func dbopen(name string) (*gouchstore.Gouchstore, error) { 77 | path := dbPath(name) 78 | db, err := gouchstore.Open(dbPath(name), 0) 79 | if err == nil { 80 | recordDBConn(path, db) 81 | } 82 | return db, err 83 | } 84 | 85 | func dbcreate(path string) error { 86 | db, err := gouchstore.Open(path, gouchstore.OPEN_CREATE) 87 | if err != nil { 88 | return err 89 | } 90 | recordDBConn(path, db) 91 | closeDBConn(db) 92 | return nil 93 | } 94 | 95 | func dbRemoveConn(dbname string) { 96 | dbLock.Lock() 97 | defer dbLock.Unlock() 98 | 99 | writer := dbConns[dbname] 100 | if writer != nil && writer.quit != nil { 101 | writer.Close() 102 | } 103 | delete(dbConns, dbname) 104 | } 105 | 106 | func dbCloseAll() { 107 | dbLock.Lock() 108 | defer dbLock.Unlock() 109 | 110 | for n, c := range dbConns { 111 | log.Printf("Shutting down open conn %v", n) 112 | c.Close() 113 | } 114 | } 115 | 116 | func dbdelete(dbname string) error { 117 | dbRemoveConn(dbname) 118 | return os.Remove(dbPath(dbname)) 119 | } 120 | 121 | func dblist(root string) []string { 122 | rv := []string{} 123 | filepath.Walk(root, func(p string, info os.FileInfo, err error) error { 124 | if err == nil { 125 | if !info.IsDir() && strings.HasSuffix(p, dbExt) { 126 | rv = append(rv, dbBase(p)) 127 | } 128 | } else { 129 | log.Printf("Error on %#v: %v", p, err) 130 | } 131 | return nil 132 | }) 133 | return rv 134 | } 135 | 136 | func dbCompact(dq *dbWriter, bulk gouchstore.BulkWriter, queued int, 137 | qi dbqitem) (gouchstore.BulkWriter, error) { 138 | start := time.Now() 139 | if queued > 0 { 140 | bulk.Commit() 141 | if *verbose { 142 | log.Printf("Flushed %d items in %v for pre-compact", 143 | queued, time.Since(start)) 144 
| } 145 | bulk.Close() 146 | } 147 | dbn := dbPath(dq.dbname) 148 | queued = 0 149 | start = time.Now() 150 | err := dq.db.Compact(dbn + ".compact") 151 | if err != nil { 152 | log.Printf("Error compacting: %v", err) 153 | return dq.db.Bulk(), err 154 | } 155 | log.Printf("Finished compaction of %v in %v", dq.dbname, 156 | time.Since(start)) 157 | err = os.Rename(dbn+".compact", dbn) 158 | if err != nil { 159 | log.Printf("Error putting compacted data back") 160 | return dq.db.Bulk(), err 161 | } 162 | 163 | log.Printf("Reopening post-compact") 164 | closeDBConn(dq.db) 165 | 166 | dq.db, err = dbopen(dq.dbname) 167 | if err != nil { 168 | log.Fatalf("Error reopening DB after compaction: %v", err) 169 | } 170 | return dq.db.Bulk(), nil 171 | } 172 | 173 | var dbWg = sync.WaitGroup{} 174 | 175 | func dbWriteLoop(dq *dbWriter) { 176 | defer dbWg.Done() 177 | 178 | queued := 0 179 | bulk := dq.db.Bulk() 180 | 181 | t := time.NewTimer(*flushTime) 182 | defer t.Stop() 183 | liveTracker := time.NewTicker(*liveTime) 184 | defer liveTracker.Stop() 185 | liveOps := 0 186 | 187 | dbst := dbStats.getOrCreate(dq.dbname) 188 | defer atomic.StoreUint32(&dbst.qlen, 0) 189 | defer atomic.AddUint32(&dbst.closes, 1) 190 | 191 | for { 192 | atomic.StoreUint32(&dbst.qlen, uint32(queued)) 193 | 194 | select { 195 | case <-dq.quit: 196 | sdt := time.Now() 197 | bulk.Commit() 198 | bulk.Close() 199 | closeDBConn(dq.db) 200 | dbRemoveConn(dq.dbname) 201 | log.Printf("Closed %v with %v items in %v", 202 | dq.dbname, queued, time.Since(sdt)) 203 | return 204 | case <-liveTracker.C: 205 | if queued == 0 && liveOps == 0 { 206 | log.Printf("Closing idle DB: %v", dq.dbname) 207 | close(dq.quit) 208 | } 209 | liveOps = 0 210 | case qi := <-dq.ch: 211 | liveOps++ 212 | switch qi.op { 213 | case opStoreItem: 214 | bulk.Set(gouchstore.NewDocumentInfo(qi.k), 215 | gouchstore.NewDocument(qi.k, qi.data)) 216 | queued++ 217 | case opDeleteItem: 218 | queued++ 219 | bulk.Delete(gouchstore.NewDocumentInfo(qi.k)) 220 | case opCompact: 221 | var err error 222 | bulk, err = dbCompact(dq, bulk, queued, qi) 223 | qi.cherr <- err 224 | atomic.AddUint64(&dbst.written, uint64(queued)) 225 | queued = 0 226 | default: 227 | log.Panicf("Unhandled case: %v", qi.op) 228 | } 229 | if queued >= *maxOpQueue { 230 | start := time.Now() 231 | bulk.Commit() 232 | if *verbose { 233 | log.Printf("Flush of %d items took %v", 234 | queued, time.Since(start)) 235 | } 236 | atomic.AddUint64(&dbst.written, uint64(queued)) 237 | queued = 0 238 | t.Reset(*flushTime) 239 | } 240 | case <-t.C: 241 | if queued > 0 { 242 | start := time.Now() 243 | bulk.Commit() 244 | if *verbose { 245 | log.Printf("Flush of %d items from timer took %v", 246 | queued, time.Since(start)) 247 | } 248 | atomic.AddUint64(&dbst.written, uint64(queued)) 249 | queued = 0 250 | } 251 | t.Reset(*flushTime) 252 | } 253 | } 254 | } 255 | 256 | func dbWriteFun(dbname string) (*dbWriter, error) { 257 | db, err := dbopen(dbname) 258 | if err != nil { 259 | return nil, err 260 | } 261 | 262 | writer := &dbWriter{ 263 | dbname, 264 | make(chan dbqitem, *maxOpQueue), 265 | make(chan bool), 266 | db, 267 | } 268 | 269 | dbWg.Add(1) 270 | go dbWriteLoop(writer) 271 | 272 | return writer, nil 273 | } 274 | 275 | func getOrCreateDB(dbname string) (*dbWriter, bool, error) { 276 | dbLock.Lock() 277 | defer dbLock.Unlock() 278 | 279 | writer := dbConns[dbname] 280 | var err error 281 | opened := false 282 | if writer == nil { 283 | writer, err = dbWriteFun(dbname) 284 | if err != nil { 285 | return 
nil, false, err 286 | } 287 | dbConns[dbname] = writer 288 | opened = true 289 | } 290 | return writer, opened, nil 291 | } 292 | 293 | func dbstore(dbname string, k string, body []byte) error { 294 | writer, _, err := getOrCreateDB(dbname) 295 | if err != nil { 296 | return err 297 | } 298 | 299 | writer.ch <- dbqitem{dbname, k, body, opStoreItem, nil} 300 | 301 | return nil 302 | } 303 | 304 | func dbcompact(dbname string) error { 305 | writer, opened, err := getOrCreateDB(dbname) 306 | if err != nil { 307 | return err 308 | } 309 | if opened { 310 | log.Printf("Requesting post-compaction close of %v", dbname) 311 | defer writer.Close() 312 | } 313 | 314 | cherr := make(chan error) 315 | defer close(cherr) 316 | writer.ch <- dbqitem{dbname: dbname, 317 | op: opCompact, 318 | cherr: cherr, 319 | } 320 | 321 | return <-cherr 322 | } 323 | 324 | func dbGetDoc(dbname, id string) ([]byte, error) { 325 | db, err := dbopen(dbname) 326 | if err != nil { 327 | log.Printf("Error opening db: %v - %v", dbname, err) 328 | return nil, err 329 | } 330 | defer closeDBConn(db) 331 | 332 | doc, err := db.DocumentById(id) 333 | if err != nil { 334 | return nil, err 335 | } 336 | return doc.Body, err 337 | } 338 | 339 | func dbwalk(dbname, from, to string, f func(k string, v []byte) error) error { 340 | db, err := dbopen(dbname) 341 | if err != nil { 342 | log.Printf("Error opening db: %v - %v", dbname, err) 343 | return err 344 | } 345 | defer closeDBConn(db) 346 | 347 | return db.WalkDocs(from, to, func(d *gouchstore.Gouchstore, 348 | di *gouchstore.DocumentInfo, doc *gouchstore.Document) error { 349 | return f(di.ID, doc.Body) 350 | }) 351 | } 352 | 353 | func dbwalkKeys(dbname, from, to string, f func(k string) error) error { 354 | db, err := dbopen(dbname) 355 | if err != nil { 356 | log.Printf("Error opening db: %v - %v", dbname, err) 357 | return err 358 | } 359 | defer closeDBConn(db) 360 | 361 | return db.AllDocuments(from, to, func(db *gouchstore.Gouchstore, documentInfo *gouchstore.DocumentInfo, userContext interface{}) error { 362 | return f(documentInfo.ID) 363 | }, nil) 364 | } 365 | 366 | func parseKey(s string) int64 { 367 | t, err := timelib.ParseCanonicalTime(s) 368 | if err != nil { 369 | return -1 370 | } 371 | return t.UnixNano() 372 | } 373 | -------------------------------------------------------------------------------- /database_test.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "testing" 5 | ) 6 | 7 | func TestKeyParsing(t *testing.T) { 8 | tests := map[string]int64{ 9 | "2012-08-26T20:46:01.911627314Z": 1346013961911627314, 10 | "82488858158": -1, 11 | } 12 | 13 | for input, exp := range tests { 14 | got := parseKey(input) 15 | if got != exp { 16 | t.Errorf("Expected %v, got %v", exp, got) 17 | } 18 | } 19 | } 20 | 21 | func TestDBWClose(t *testing.T) { 22 | w := dbWriter{"test", make(chan dbqitem), make(chan bool), nil} 23 | err := w.Close() 24 | if err != nil { 25 | t.Errorf("First close expected success, got %v", err) 26 | } 27 | err = w.Close() 28 | if err != errClosed { 29 | t.Errorf("Expected second close to fail, got %v", err) 30 | } 31 | } 32 | 33 | func BenchmarkKeyParsing(b *testing.B) { 34 | input := "2012-08-26T20:46:01.911627314Z" 35 | 36 | for i := 0; i < b.N; i++ { 37 | parseKey(input) 38 | } 39 | } 40 | -------------------------------------------------------------------------------- /debug.go: -------------------------------------------------------------------------------- 1 | package main 
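// debug.go tracks every open gouchstore handle together with a snapshot
// of the call stack that opened it, so the /_debug/open endpoint can show
// which code paths are holding databases open. Per-database write
// statistics are published through expvar under the "dbs" key.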
2 | 3 | import ( 4 | "encoding/json" 5 | "expvar" 6 | "fmt" 7 | "log" 8 | "net/http" 9 | "runtime" 10 | "strings" 11 | "sync" 12 | "sync/atomic" 13 | 14 | "github.com/mschoch/gouchstore" 15 | ) 16 | 17 | type frameSnap []uintptr 18 | 19 | func (f frameSnap) MarshalJSON() ([]byte, error) { 20 | frames := []string{} 21 | for _, pc := range f { 22 | frame := runtime.FuncForPC(pc) 23 | fn, line := frame.FileLine(frame.Entry()) 24 | frames = append(frames, fmt.Sprintf("%v() - %v:%v", 25 | frame.Name(), fn, line)) 26 | } 27 | return json.Marshal(frames) 28 | } 29 | 30 | type dbOpenState struct { 31 | path string 32 | funcs frameSnap 33 | } 34 | 35 | var openConnLock = sync.Mutex{} 36 | var openConns = map[*gouchstore.Gouchstore]dbOpenState{} 37 | 38 | func recordDBConn(path string, db *gouchstore.Gouchstore) { 39 | callers := make([]uintptr, 32) 40 | n := runtime.Callers(2, callers) 41 | openConnLock.Lock() 42 | openConns[db] = dbOpenState{path, frameSnap(callers[:n-1])} 43 | openConnLock.Unlock() 44 | } 45 | 46 | func closeDBConn(db *gouchstore.Gouchstore) { 47 | db.Close() 48 | openConnLock.Lock() 49 | _, ok := openConns[db] 50 | delete(openConns, db) 51 | openConnLock.Unlock() 52 | if !ok { 53 | log.Printf("Closing untracked DB: %p", db) 54 | } 55 | } 56 | 57 | func debugListOpenDBs(parts []string, w http.ResponseWriter, req *http.Request) { 58 | openConnLock.Lock() 59 | snap := map[string][]frameSnap{} 60 | for _, st := range openConns { 61 | snap[st.path] = append(snap[st.path], st.funcs) 62 | } 63 | openConnLock.Unlock() 64 | 65 | mustEncode(200, w, snap) 66 | } 67 | 68 | type dbStat struct { 69 | written uint64 70 | qlen, opens, closes uint32 71 | } 72 | 73 | func (d *dbStat) MarshalJSON() ([]byte, error) { 74 | m := map[string]interface{}{} 75 | m["written"] = atomic.LoadUint64(&d.written) 76 | m["qlen"] = atomic.LoadUint32(&d.qlen) 77 | m["opens"] = atomic.LoadUint32(&d.opens) 78 | m["closes"] = atomic.LoadUint32(&d.closes) 79 | return json.Marshal(m) 80 | } 81 | 82 | type databaseStats struct { 83 | m map[string]*dbStat 84 | mu sync.Mutex 85 | } 86 | 87 | func newQueueMap() *databaseStats { 88 | return &databaseStats{m: map[string]*dbStat{}} 89 | } 90 | 91 | func (q *databaseStats) getOrCreate(name string) *dbStat { 92 | q.mu.Lock() 93 | defer q.mu.Unlock() 94 | rv, ok := q.m[name] 95 | if !ok { 96 | rv = &dbStat{} 97 | q.m[name] = rv 98 | } 99 | atomic.AddUint32(&rv.opens, 1) 100 | return rv 101 | } 102 | 103 | func (q *databaseStats) String() string { 104 | q.mu.Lock() 105 | defer q.mu.Unlock() 106 | d, err := json.Marshal(q.m) 107 | if err != nil { 108 | log.Fatalf("Error marshaling queueMap: %v", err) 109 | } 110 | return string(d) 111 | } 112 | 113 | var dbStats = newQueueMap() 114 | 115 | func init() { 116 | expvar.Publish("dbs", dbStats) 117 | } 118 | 119 | func debugVars(parts []string, w http.ResponseWriter, req *http.Request) { 120 | req.URL.Path = strings.Replace(req.URL.Path, "/_debug/vars", "/debug/vars", 1) 121 | http.DefaultServeMux.ServeHTTP(w, req) 122 | } 123 | -------------------------------------------------------------------------------- /handlers.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "compress/gzip" 5 | "fmt" 6 | "io" 7 | "io/ioutil" 8 | "log" 9 | "net/http" 10 | "net/url" 11 | "strconv" 12 | "strings" 13 | "time" 14 | 15 | "github.com/dustin/go-humanize" 16 | "github.com/dustin/gojson" 17 | "github.com/dustin/seriesly/timelib" 18 | "github.com/mschoch/gouchstore" 19 | ) 20 | 21 | 
func serverInfo(parts []string, w http.ResponseWriter, req *http.Request) { 22 | sinfo := map[string]string{ 23 | "seriesly": "Why so series?", "version": "seriesly 0.0", 24 | } 25 | mustEncode(200, w, sinfo) 26 | } 27 | 28 | func listDatabases(parts []string, w http.ResponseWriter, req *http.Request) { 29 | mustEncode(200, w, dblist(*dbRoot)) 30 | } 31 | 32 | func notImplemented(parts []string, w http.ResponseWriter, req *http.Request) { 33 | emitError(501, w, "not implemented", "TODO") 34 | } 35 | 36 | func createDB(parts []string, w http.ResponseWriter, req *http.Request) { 37 | path := dbPath(parts[0]) 38 | err := dbcreate(path) 39 | if err == nil { 40 | w.WriteHeader(201) 41 | } else { 42 | emitError(500, w, "Server Error", err.Error()) 43 | } 44 | } 45 | 46 | func checkDB(args []string, w http.ResponseWriter, req *http.Request) { 47 | dbname := args[0] 48 | if db, err := dbopen(dbname); err == nil { 49 | closeDBConn(db) 50 | w.WriteHeader(200) 51 | } else { 52 | w.WriteHeader(404) 53 | } 54 | } 55 | 56 | func newDocument(args []string, w http.ResponseWriter, req *http.Request) { 57 | var k, fk string 58 | form, err := url.ParseQuery(req.URL.RawQuery) 59 | if err == nil { 60 | fk = form.Get("ts") 61 | } 62 | 63 | if fk == "" { 64 | k = time.Now().UTC().Format(time.RFC3339Nano) 65 | } else { 66 | t, err := timelib.ParseTime(fk) 67 | if err != nil { 68 | emitError(400, w, "Bad time format", err.Error()) 69 | return 70 | } 71 | k = t.UTC().Format(time.RFC3339Nano) 72 | } 73 | putDocument([]string{args[0], k}, w, req) 74 | } 75 | 76 | func putDocument(args []string, w http.ResponseWriter, req *http.Request) { 77 | dbname := args[0] 78 | k := args[1] 79 | defer req.Body.Close() 80 | body, err := ioutil.ReadAll(req.Body) 81 | if err != nil { 82 | emitError(400, w, "Bad Request", 83 | fmt.Sprintf("Error reading body: %v", err)) 84 | return 85 | } 86 | 87 | err = json.Validate(body) 88 | if err != nil { 89 | emitError(400, w, "Error parsing JSON data", err.Error()) 90 | return 91 | } 92 | 93 | err = dbstore(dbname, k, body) 94 | 95 | if err == nil { 96 | w.WriteHeader(201) 97 | } else { 98 | emitError(500, w, "Error storing data", err.Error()) 99 | } 100 | } 101 | 102 | func cleanupRangeParam(in, def string) (string, error) { 103 | if in == "" { 104 | return def, nil 105 | } 106 | t, err := timelib.ParseTime(in) 107 | if err != nil { 108 | return in, err 109 | } 110 | return t.UTC().Format(time.RFC3339Nano), nil 111 | } 112 | 113 | func query(args []string, w http.ResponseWriter, req *http.Request) { 114 | // Parse the params 115 | 116 | req.ParseForm() 117 | 118 | group, err := strconv.Atoi(req.FormValue("group")) 119 | if err != nil { 120 | emitError(400, w, "Bad group value", err.Error()) 121 | return 122 | } 123 | 124 | from, err := cleanupRangeParam(req.FormValue("from"), "") 125 | if err != nil { 126 | emitError(400, w, "Bad from value", err.Error()) 127 | return 128 | } 129 | to, err := cleanupRangeParam(req.FormValue("to"), "") 130 | if err != nil { 131 | emitError(400, w, "Bad to value", err.Error()) 132 | return 133 | } 134 | 135 | ptrs := req.Form["ptr"] 136 | reds := make([]string, 0, len(ptrs)) 137 | for _, r := range req.Form["reducer"] { 138 | _, ok := reducers[r] 139 | if !ok { 140 | emitError(400, w, "No such reducer", r) 141 | return 142 | } 143 | reds = append(reds, r) 144 | } 145 | 146 | if len(ptrs) < 1 { 147 | emitError(400, w, "Pointer required", 148 | "At least one ptr argument is required") 149 | return 150 | } 151 | 152 | if len(ptrs) != len(reds) { 153 | 
emitError(400, w, "Parameter mismatch", 154 | "Must supply the same number of pointers and reducers") 155 | return 156 | } 157 | 158 | filters := req.Form["f"] 159 | filtervals := req.Form["fv"] 160 | if len(filters) != len(filtervals) { 161 | emitError(400, w, "Parameter mismatch", 162 | "Must supply the same number of filters and filter values") 163 | return 164 | } 165 | 166 | q := executeQuery(args[0], from, to, group, ptrs, reds, filters, filtervals) 167 | defer close(q.out) 168 | defer close(q.cherr) 169 | 170 | output := io.Writer(newGzippingWriter(w, req)) 171 | defer output.(io.Closer).Close() 172 | 173 | going := true 174 | finished := int32(0) 175 | started := false 176 | walkComplete := false 177 | for going { 178 | select { 179 | case po := <-q.out: 180 | if !started { 181 | started = true 182 | w.WriteHeader(200) 183 | output.Write([]byte{'{'}) 184 | } 185 | if finished != 0 { 186 | output.Write([]byte{',', '\n'}) 187 | } 188 | finished++ 189 | 190 | _, err := fmt.Fprintf(output, `"%d": `, po.key/1e6) 191 | if err == nil { 192 | var d []byte 193 | d, err = json.Marshal(po.value) 194 | if err == nil { 195 | _, err = output.Write(d) 196 | } 197 | } 198 | if err != nil { 199 | log.Printf("Error sending item: %v", err) 200 | output = ioutil.Discard 201 | q.before = time.Time{} 202 | } 203 | case err = <-q.cherr: 204 | if err != nil { 205 | if !started { 206 | w.WriteHeader(500) 207 | w.Header().Set("Content-Type", "text/plain") 208 | fmt.Fprintf(output, "Error beginning traversal: %v", err) 209 | } 210 | log.Printf("Walk completed with err: %v", err) 211 | going = false 212 | } 213 | walkComplete = true 214 | } 215 | going = (q.started-finished > 0) || !walkComplete 216 | } 217 | 218 | if started { 219 | output.Write([]byte{'}'}) 220 | } 221 | 222 | duration := time.Since(q.start) 223 | if duration > *minQueryLogDuration { 224 | log.Printf("Completed query processing in %v, %v keys, %v chunks", 225 | duration, humanize.Comma(int64(q.totalKeys)), 226 | humanize.Comma(int64(q.started))) 227 | } 228 | } 229 | 230 | func deleteBulk(args []string, w http.ResponseWriter, req *http.Request) { 231 | // Parse the params 232 | 233 | req.ParseForm() 234 | 235 | compactAfter := strings.ToLower(req.FormValue("compact")) 236 | 237 | from, err := cleanupRangeParam(req.FormValue("from"), "") 238 | if err != nil { 239 | emitError(400, w, "Bad from value", err.Error()) 240 | return 241 | } 242 | to, err := cleanupRangeParam(req.FormValue("to"), "") 243 | if err != nil { 244 | emitError(400, w, "Bad to value", err.Error()) 245 | return 246 | } 247 | 248 | db, err := dbopen(args[0]) 249 | if err != nil { 250 | emitError(400, w, "Unable to open Database", err.Error()) 251 | return 252 | } 253 | 254 | bulk := db.Bulk() 255 | deleteCount := 0 256 | commitThreshold := 10000 257 | 258 | err = dbwalkKeys(args[0], from, to, func(k string) error { 259 | 260 | bulk.Delete(gouchstore.NewDocumentInfo(k)) 261 | deleteCount++ 262 | if deleteCount >= commitThreshold { 263 | bulk.Commit() 264 | deleteCount = 0 265 | } 266 | return err 267 | }) 268 | 269 | if deleteCount > 0 { 270 | bulk.Commit() 271 | } 272 | 273 | bulk.Close() 274 | 275 | if compactAfter == "true" { 276 | err = dbcompact(args[0]) 277 | } 278 | 279 | w.WriteHeader(201) 280 | 281 | } 282 | func deleteDB(parts []string, w http.ResponseWriter, req *http.Request) { 283 | err := dbdelete(parts[0]) 284 | if err == nil { 285 | mustEncode(200, w, map[string]interface{}{"ok": true}) 286 | } else { 287 | emitError(500, w, "Error deleting DB", err.Error()) 
288 | } 289 | } 290 | 291 | func compact(parts []string, w http.ResponseWriter, req *http.Request) { 292 | err := dbcompact(parts[0]) 293 | if err == nil { 294 | mustEncode(200, w, map[string]interface{}{"ok": true}) 295 | } else { 296 | emitError(500, w, "Error compacting DB", err.Error()) 297 | } 298 | } 299 | 300 | func canGzip(req *http.Request) bool { 301 | acceptable := req.Header.Get("accept-encoding") 302 | return strings.Contains(acceptable, "gzip") 303 | } 304 | 305 | type gzippingWriter struct { 306 | gz *gzip.Writer 307 | output io.Writer 308 | } 309 | 310 | func newGzippingWriter(w http.ResponseWriter, req *http.Request) *gzippingWriter { 311 | rv := &gzippingWriter{output: w} 312 | if canGzip(req) { 313 | w.Header().Set("Content-Encoding", "gzip") 314 | rv.gz = gzip.NewWriter(w) 315 | rv.output = rv.gz 316 | } 317 | return rv 318 | } 319 | 320 | func (g *gzippingWriter) Write(b []byte) (int, error) { 321 | return g.output.Write(b) 322 | } 323 | 324 | func (g *gzippingWriter) Close() error { 325 | if g.gz != nil { 326 | return g.gz.Close() 327 | } 328 | return nil 329 | } 330 | 331 | func allDocs(args []string, w http.ResponseWriter, req *http.Request) { 332 | // Parse the params 333 | 334 | req.ParseForm() 335 | 336 | from, err := cleanupRangeParam(req.FormValue("from"), "") 337 | if err != nil { 338 | emitError(400, w, "Bad from value", err.Error()) 339 | return 340 | } 341 | to, err := cleanupRangeParam(req.FormValue("to"), "") 342 | if err != nil { 343 | emitError(400, w, "Bad to value", err.Error()) 344 | return 345 | } 346 | 347 | limit, err := strconv.Atoi(req.FormValue("limit")) 348 | if err != nil { 349 | limit = 2000000000 350 | } 351 | 352 | output := newGzippingWriter(w, req) 353 | defer output.Close() 354 | w.WriteHeader(200) 355 | 356 | output.Write([]byte{'{'}) 357 | defer output.Write([]byte{'}'}) 358 | 359 | seenOne := false 360 | 361 | walked := 0 362 | err = dbwalk(args[0], from, to, func(k string, v []byte) error { 363 | if walked > limit { 364 | return io.EOF 365 | } 366 | walked++ 367 | if seenOne { 368 | output.Write([]byte(",\n")) 369 | } else { 370 | seenOne = true 371 | } 372 | _, err := fmt.Fprintf(output, `"%s": `, k) 373 | if err != nil { 374 | return err 375 | } 376 | _, err = output.Write(v) 377 | return err 378 | }) 379 | } 380 | 381 | func dumpDocs(args []string, w http.ResponseWriter, req *http.Request) { 382 | // Parse the params 383 | 384 | req.ParseForm() 385 | 386 | from, err := cleanupRangeParam(req.FormValue("from"), "") 387 | if err != nil { 388 | emitError(400, w, "Bad from value", err.Error()) 389 | return 390 | } 391 | to, err := cleanupRangeParam(req.FormValue("to"), "") 392 | if err != nil { 393 | emitError(400, w, "Bad to value", err.Error()) 394 | return 395 | } 396 | 397 | limit, err := strconv.Atoi(req.FormValue("limit")) 398 | if err != nil { 399 | limit = 2000000000 400 | } 401 | 402 | output := newGzippingWriter(w, req) 403 | defer output.Close() 404 | w.WriteHeader(200) 405 | 406 | walked := 0 407 | err = dbwalk(args[0], from, to, func(k string, v []byte) error { 408 | if walked > limit { 409 | return io.EOF 410 | } 411 | walked++ 412 | _, err := fmt.Fprintf(output, `{"%s": `, k) 413 | if err != nil { 414 | return err 415 | } 416 | _, err = output.Write(v) 417 | output.Write([]byte{'}', '\n'}) 418 | return err 419 | }) 420 | } 421 | 422 | func getDocument(parts []string, w http.ResponseWriter, req *http.Request) { 423 | d, err := dbGetDoc(parts[0], parts[1]) 424 | if err == nil { 425 | w.Write(d) 426 | } else { 427 | 
emitError(404, w, "Error retrieving value", err.Error()) 428 | } 429 | } 430 | 431 | func dbInfo(args []string, w http.ResponseWriter, req *http.Request) { 432 | db, err := dbopen(args[0]) 433 | if err != nil { 434 | emitError(500, w, "Error opening DB", err.Error()) 435 | return 436 | } 437 | defer closeDBConn(db) 438 | 439 | inf, err := db.DatabaseInfo() 440 | if err == nil { 441 | mustEncode(200, w, map[string]interface{}{ 442 | "last_seq": inf.LastSeq, 443 | "doc_count": inf.DocumentCount, 444 | "deleted_count": inf.DeletedCount, 445 | "space_used": inf.SpaceUsed, 446 | "header_pos": inf.HeaderPosition, 447 | }) 448 | } else { 449 | emitError(500, w, "Error getting db info", err.Error()) 450 | } 451 | } 452 | 453 | // TODO: 454 | 455 | func dbChanges(parts []string, w http.ResponseWriter, req *http.Request) { 456 | notImplemented(parts, w, req) 457 | } 458 | 459 | func rmDocument(parts []string, w http.ResponseWriter, req *http.Request) { 460 | notImplemented(parts, w, req) 461 | } 462 | -------------------------------------------------------------------------------- /handlers_test.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "net/http" 5 | "testing" 6 | ) 7 | 8 | func TestCanGZip(t *testing.T) { 9 | tests := []struct { 10 | headers http.Header 11 | can bool 12 | }{ 13 | {http.Header{}, false}, 14 | {http.Header{"Accept-Encoding": nil}, false}, 15 | {http.Header{"Accept-Encoding": []string{"x"}}, false}, 16 | // Should this one be true? 17 | {http.Header{"Accept-Encoding": []string{"x", "gzip"}}, false}, 18 | {http.Header{"Accept-Encoding": []string{"gzip"}}, true}, 19 | } 20 | 21 | for _, test := range tests { 22 | req := &http.Request{Header: test.headers} 23 | if canGzip(req) != test.can { 24 | t.Errorf("Expected %v with headers %v", 25 | test.can, test.headers) 26 | } 27 | } 28 | } 29 | -------------------------------------------------------------------------------- /main.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "flag" 5 | "fmt" 6 | "log" 7 | "log/syslog" 8 | "net" 9 | "net/http" 10 | "os" 11 | "os/signal" 12 | "regexp" 13 | "runtime" 14 | "runtime/pprof" 15 | "strings" 16 | "syscall" 17 | "time" 18 | 19 | "github.com/dustin/gojson" 20 | "github.com/dustin/yellow" 21 | ) 22 | 23 | var dbRoot = flag.String("root", "db", "Root directory for database files.") 24 | var flushTime = flag.Duration("flushDelay", time.Second*5, 25 | "Maximum amount of time to wait before flushing") 26 | var liveTime = flag.Duration("liveTime", time.Minute*5, 27 | "How long to keep an idle DB open") 28 | var maxOpQueue = flag.Int("maxOpQueue", 1000, 29 | "Maximum number of queued items before flushing") 30 | var staticPath = flag.String("static", "static", "Path to static data") 31 | var queryTimeout = flag.Duration("maxQueryTime", time.Minute*5, 32 | "Maximum amount of time a query is allowed to process.") 33 | var queryBacklog = flag.Int("queryBacklog", 0, "Query scan/group backlog size") 34 | var docBacklog = flag.Int("docBacklog", 0, "MR group request backlog size") 35 | var cacheAddr = flag.String("memcache", "", "Memcached server to connect to") 36 | var cacheBacklog = flag.Int("cacheBacklog", 1000, "Cache backlog size") 37 | var cacheWorkers = flag.Int("cacheWorkers", 4, "Number of cache workers") 38 | var verbose = flag.Bool("v", false, "Verbose logging") 39 | var logAccess = flag.Bool("logAccess", false, "Log HTTP Requests") 40 | var 
useSyslog = flag.Bool("syslog", false, "Log to syslog") 41 | var minQueryLogDuration = flag.Duration("minQueryLogDuration", 42 | time.Millisecond*100, "minimum query duration to log") 43 | 44 | // Profiling 45 | var pprofFile = flag.String("proFile", "", "File to write profiling info into") 46 | var pprofStart = flag.Duration("proStart", 5*time.Second, 47 | "How long after startup to start profiling") 48 | var pprofDuration = flag.Duration("proDuration", 5*time.Minute, 49 | "How long to run the profiler before shutting it down") 50 | 51 | type routeHandler func(parts []string, w http.ResponseWriter, req *http.Request) 52 | 53 | type routingEntry struct { 54 | Method string 55 | Path *regexp.Regexp 56 | Handler routeHandler 57 | Deadline time.Duration 58 | } 59 | 60 | const dbMatch = "[-%+()$_a-zA-Z0-9]+" 61 | 62 | var defaultDeadline = time.Millisecond * 50 63 | 64 | var routingTable []routingEntry 65 | 66 | func init() { 67 | routingTable = []routingEntry{ 68 | routingEntry{"GET", regexp.MustCompile("^/$"), 69 | serverInfo, defaultDeadline}, 70 | routingEntry{"GET", regexp.MustCompile("^/_static/(.*)"), 71 | staticHandler, defaultDeadline}, 72 | routingEntry{"GET", regexp.MustCompile("^/_debug/open$"), 73 | debugListOpenDBs, defaultDeadline}, 74 | routingEntry{"GET", regexp.MustCompile("^/_debug/vars"), 75 | debugVars, defaultDeadline}, 76 | // Database stuff 77 | routingEntry{"GET", regexp.MustCompile("^/_all_dbs$"), 78 | listDatabases, defaultDeadline}, 79 | routingEntry{"GET", regexp.MustCompile("^/_(.*)"), 80 | reservedHandler, defaultDeadline}, 81 | routingEntry{"GET", regexp.MustCompile("^/(" + dbMatch + ")/?$"), 82 | dbInfo, defaultDeadline}, 83 | routingEntry{"HEAD", regexp.MustCompile("^/(" + dbMatch + ")/?$"), 84 | checkDB, defaultDeadline}, 85 | routingEntry{"GET", regexp.MustCompile("^/(" + dbMatch + ")/_changes$"), 86 | dbChanges, defaultDeadline}, 87 | routingEntry{"GET", regexp.MustCompile("^/(" + dbMatch + ")/_query$"), 88 | query, *queryTimeout}, 89 | routingEntry{"DELETE", regexp.MustCompile("^/(" + dbMatch + ")/_bulk$"), 90 | deleteBulk, *queryTimeout}, 91 | routingEntry{"GET", regexp.MustCompile("^/(" + dbMatch + ")/_all"), 92 | allDocs, *queryTimeout}, 93 | routingEntry{"GET", regexp.MustCompile("^/(" + dbMatch + ")/_dump"), 94 | dumpDocs, *queryTimeout}, 95 | routingEntry{"POST", regexp.MustCompile("^/(" + dbMatch + ")/_compact"), 96 | compact, time.Second * 30}, 97 | routingEntry{"PUT", regexp.MustCompile("^/(" + dbMatch + ")/?$"), 98 | createDB, defaultDeadline}, 99 | routingEntry{"DELETE", regexp.MustCompile("^/(" + dbMatch + ")/?$"), 100 | deleteDB, defaultDeadline}, 101 | routingEntry{"POST", regexp.MustCompile("^/(" + dbMatch + ")/?$"), 102 | newDocument, defaultDeadline}, 103 | // Document stuff 104 | routingEntry{"PUT", regexp.MustCompile("^/(" + dbMatch + ")/([^/]+)$"), 105 | putDocument, defaultDeadline}, 106 | routingEntry{"GET", regexp.MustCompile("^/(" + dbMatch + ")/([^/]+)$"), 107 | getDocument, defaultDeadline}, 108 | routingEntry{"DELETE", regexp.MustCompile("^/(" + dbMatch + ")/([^/]+)$"), 109 | rmDocument, defaultDeadline}, 110 | // Pre-flight goodness 111 | routingEntry{"OPTIONS", regexp.MustCompile(".*"), 112 | handleOptions, defaultDeadline}, 113 | } 114 | } 115 | 116 | func mustEncode(status int, w http.ResponseWriter, ob interface{}) { 117 | b, err := json.Marshal(ob) 118 | if err != nil { 119 | log.Fatalf("Error encoding %v.", ob) 120 | } 121 | w.Header().Set("Content-Length", fmt.Sprintf("%d", len(b))) 122 | w.WriteHeader(status) 123 | 
w.Write(b) 124 | } 125 | 126 | func emitError(status int, w http.ResponseWriter, e, reason string) { 127 | m := map[string]string{"error": e, "reason": reason} 128 | mustEncode(status, w, m) 129 | } 130 | 131 | func staticHandler(parts []string, w http.ResponseWriter, req *http.Request) { 132 | w.Header().Del("Content-type") 133 | http.StripPrefix("/_static/", 134 | http.FileServer(http.Dir(*staticPath))).ServeHTTP(w, req) 135 | } 136 | 137 | func reservedHandler(parts []string, w http.ResponseWriter, req *http.Request) { 138 | emitError(400, 139 | w, "illegal_database_name", 140 | "Only lowercase characters (a-z), digits (0-9), "+ 141 | "and any of the characters _, $, (, ), +, -, and / are allowed. "+ 142 | "Must begin with a letter.") 143 | 144 | } 145 | 146 | func defaultHandler(parts []string, w http.ResponseWriter, req *http.Request) { 147 | emitError(400, w, "no_handler", 148 | fmt.Sprintf("Can't handle %v to %v\n", req.Method, req.URL.Path)) 149 | 150 | } 151 | 152 | func handleOptions(parts []string, w http.ResponseWriter, req *http.Request) { 153 | methods := []string{} 154 | for _, r := range routingTable { 155 | if len(r.Path.FindAllStringSubmatch(req.URL.Path, 1)) > 0 { 156 | methods = append(methods, r.Method) 157 | } 158 | } 159 | w.Header().Set("Access-Control-Allow-Origin", "*") 160 | w.Header().Set("Access-Control-Allow-Methods", strings.Join(methods, ", ")) 161 | w.WriteHeader(204) 162 | } 163 | 164 | func findHandler(method, path string) (routingEntry, []string) { 165 | for _, r := range routingTable { 166 | if r.Method == method { 167 | matches := r.Path.FindAllStringSubmatch(path, 1) 168 | if len(matches) > 0 { 169 | return r, matches[0][1:] 170 | } 171 | } 172 | } 173 | return routingEntry{"DEFAULT", nil, defaultHandler, defaultDeadline}, 174 | []string{} 175 | } 176 | 177 | func handler(w http.ResponseWriter, req *http.Request) { 178 | if *logAccess { 179 | log.Printf("%s %s %s", req.RemoteAddr, req.Method, req.URL) 180 | } 181 | route, hparts := findHandler(req.Method, req.URL.Path) 182 | defer yellow.DeadlineLog(route.Deadline, "%v:%v deadlined at %v", 183 | req.Method, req.URL.Path, route.Deadline).Done() 184 | 185 | w.Header().Set("Access-Control-Allow-Origin", "*") 186 | w.Header().Set("Content-type", "application/json") 187 | route.Handler(hparts, w, req) 188 | } 189 | 190 | func startProfiler() { 191 | time.Sleep(*pprofStart) 192 | log.Printf("Starting profiler") 193 | f, err := os.OpenFile(*pprofFile, os.O_WRONLY|os.O_CREATE, 0666) 194 | if err == nil { 195 | err = pprof.StartCPUProfile(f) 196 | if err != nil { 197 | log.Fatalf("Can't start profiler: %v", err) 198 | } 199 | time.AfterFunc(*pprofDuration, func() { 200 | log.Printf("Shutting down profiler") 201 | pprof.StopCPUProfile() 202 | f.Close() 203 | }) 204 | } else { 205 | log.Printf("Can't open profile file: %v", err) 206 | } 207 | } 208 | 209 | // globalShutdownChan is closed when it's time to shut down.
210 | var globalShutdownChan = make(chan bool) 211 | 212 | func listener(addr string) net.Listener { 213 | if addr == "" { 214 | addr = ":http" 215 | } 216 | l, err := net.Listen("tcp", addr) 217 | if err != nil { 218 | log.Fatalf("Error setting up listener: %v", err) 219 | } 220 | return l 221 | } 222 | 223 | func shutdownHandler(ls []net.Listener, ch <-chan os.Signal) { 224 | s := <-ch 225 | log.Printf("Shutting down on sig %v", s) 226 | for _, l := range ls { 227 | l.Close() 228 | } 229 | dbCloseAll() 230 | time.AfterFunc(time.Minute, func() { 231 | log.Fatalf("Timed out waiting for connections to close.") 232 | }) 233 | } 234 | 235 | func main() { 236 | halfProcs := runtime.GOMAXPROCS(0) / 2 237 | if halfProcs < 1 { 238 | halfProcs = 1 239 | } 240 | queryWorkers := flag.Int("queryWorkers", halfProcs, 241 | "Number of query tree walkers.") 242 | docWorkers := flag.Int("docWorkers", halfProcs, 243 | "Number of document mapreduce workers.") 244 | 245 | addr := flag.String("addr", ":3133", "Address to bind to") 246 | mcaddr := flag.String("memcbind", "", "Memcached server bind address") 247 | flag.Parse() 248 | 249 | if *useSyslog { 250 | sl, err := syslog.New(syslog.LOG_INFO, "seriesly") 251 | if err != nil { 252 | log.Fatalf("Can't initialize syslog: %v", err) 253 | } 254 | log.SetOutput(sl) 255 | log.SetFlags(0) 256 | } 257 | 258 | if err := os.MkdirAll(*dbRoot, 0777); err != nil { 259 | log.Fatalf("Could not create %v: %v", *dbRoot, err) 260 | } 261 | 262 | // Update the query handler deadline to the query timeout 263 | found := false 264 | for i := range routingTable { 265 | matches := routingTable[i].Path.FindAllStringSubmatch("/x/_query", 1) 266 | if len(matches) > 0 { 267 | routingTable[i].Deadline = *queryTimeout 268 | found = true 269 | break 270 | } 271 | } 272 | if !found { 273 | log.Fatalf("Programming error: Could not find query handler") 274 | } 275 | 276 | processorInput = make(chan *processIn, *docBacklog) 277 | for i := 0; i < *docWorkers; i++ { 278 | go docProcessor(processorInput) 279 | } 280 | 281 | if *cacheAddr == "" { 282 | cacheInput = processorInput 283 | // Note: cacheInputSet will be null here, there should be no caching 284 | } else { 285 | cacheInput = make(chan *processIn, *cacheBacklog) 286 | cacheInputSet = make(chan *processOut, *cacheBacklog) 287 | for i := 0; i < *cacheWorkers; i++ { 288 | go cacheProcessor(cacheInput, cacheInputSet) 289 | } 290 | } 291 | 292 | queryInput = make(chan *queryIn, *queryBacklog) 293 | for i := 0; i < *queryWorkers; i++ { 294 | go queryExecutor() 295 | } 296 | 297 | if *pprofFile != "" { 298 | go startProfiler() 299 | } 300 | 301 | listeners := []net.Listener{} 302 | if *mcaddr != "" { 303 | listeners = append(listeners, listenMC(*mcaddr)) 304 | } 305 | 306 | s := &http.Server{ 307 | Addr: *addr, 308 | Handler: http.HandlerFunc(handler), 309 | ReadTimeout: 5 * time.Second, 310 | } 311 | log.Printf("Listening to web requests on %s", *addr) 312 | l := listener(*addr) 313 | listeners = append(listeners, l) 314 | 315 | // Need signal handler to shut down listeners 316 | sigch := make(chan os.Signal, 1) 317 | signal.Notify(sigch, syscall.SIGINT, syscall.SIGTERM) 318 | go shutdownHandler(listeners, sigch) 319 | 320 | err := s.Serve(l) 321 | log.Printf("Web server finished with %v", err) 322 | 323 | log.Printf("Waiting for databases to finish") 324 | dbWg.Wait() 325 | } 326 | -------------------------------------------------------------------------------- /mcserver.go: 
-------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "io" 5 | "log" 6 | "net" 7 | "time" 8 | 9 | "github.com/dustin/gomemcached" 10 | "github.com/dustin/gomemcached/server" 11 | "github.com/dustin/seriesly/timelib" 12 | ) 13 | 14 | const ( 15 | CREATE_BUCKET = gomemcached.CommandCode(0x85) 16 | DELETE_BUCKET = gomemcached.CommandCode(0x86) 17 | LIST_BUCKETS = gomemcached.CommandCode(0x87) 18 | SELECT_BUCKET = gomemcached.CommandCode(0x89) 19 | ) 20 | 21 | type mcSession struct { 22 | dbname string 23 | } 24 | 25 | func (sess *mcSession) HandleMessage( 26 | w io.Writer, req *gomemcached.MCRequest) *gomemcached.MCResponse { 27 | 28 | switch req.Opcode { 29 | case SELECT_BUCKET: 30 | log.Printf("Selecting bucket %s", req.Key) 31 | sess.dbname = string(req.Key) 32 | case gomemcached.SETQ, gomemcached.SET: 33 | fk := string(req.Key) 34 | var k string 35 | if fk == "" { 36 | k = time.Now().UTC().Format(time.RFC3339Nano) 37 | } else { 38 | t, err := timelib.ParseTime(fk) 39 | if err != nil { 40 | return &gomemcached.MCResponse{ 41 | Status: gomemcached.EINVAL, 42 | Body: []byte("Invalid key"), 43 | } 44 | } 45 | k = t.UTC().Format(time.RFC3339Nano) 46 | } 47 | 48 | err := dbstore(sess.dbname, k, req.Body) 49 | if err != nil { 50 | return &gomemcached.MCResponse{ 51 | Status: gomemcached.NOT_STORED, 52 | Body: []byte(err.Error()), 53 | } 54 | } 55 | 56 | if req.Opcode == gomemcached.SETQ { 57 | return nil 58 | } 59 | case gomemcached.NOOP: 60 | default: 61 | return &gomemcached.MCResponse{Status: gomemcached.UNKNOWN_COMMAND} 62 | } 63 | 64 | return &gomemcached.MCResponse{} 65 | } 66 | 67 | func waitForMCConnections(ls net.Listener) { 68 | for { 69 | s, e := ls.Accept() 70 | if e == nil { 71 | log.Printf("Got a connection from %s", s.RemoteAddr()) 72 | go memcached.HandleIO(s, &mcSession{}) 73 | } else { 74 | log.Printf("Error accepting from %s", ls) 75 | } 76 | } 77 | } 78 | 79 | func listenMC(bindaddr string) net.Listener { 80 | ls, e := net.Listen("tcp", bindaddr) 81 | if e != nil { 82 | log.Fatalf("Error binding to memcached socket: %s", e) 83 | } 84 | 85 | log.Printf("Listening for memcached connections on %v", bindaddr) 86 | 87 | go waitForMCConnections(ls) 88 | return ls 89 | } 90 | -------------------------------------------------------------------------------- /query.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "errors" 5 | "fmt" 6 | "log" 7 | "math" 8 | "strconv" 9 | "sync/atomic" 10 | "time" 11 | 12 | "github.com/dustin/go-jsonpointer" 13 | "github.com/dustin/gojson" 14 | "github.com/mschoch/gouchstore" 15 | ) 16 | 17 | var errTimeout = errors.New("query timed out") 18 | 19 | type ptrval struct { 20 | di *gouchstore.DocumentInfo 21 | val interface{} 22 | included bool 23 | } 24 | 25 | type reducer func(input chan ptrval) interface{} 26 | 27 | type processOut struct { 28 | cacheKey string 29 | key int64 30 | value []interface{} 31 | err error 32 | cacheOpaque uint32 33 | } 34 | 35 | func (p processOut) MarshalJSON() ([]byte, error) { 36 | return json.Marshal(map[string]interface{}{"v": p.value}) 37 | } 38 | 39 | type processIn struct { 40 | cacheKey string 41 | dbname string 42 | key int64 43 | infos []*gouchstore.DocumentInfo 44 | nextInfo *gouchstore.DocumentInfo 45 | ptrs []string 46 | reds []string 47 | before time.Time 48 | filters []string 49 | filtervals []string 50 | out chan<- *processOut 51 | } 52 | 53 | type queryIn struct { 54 | 
dbname string 55 | from string 56 | to string 57 | group int 58 | ptrs []string 59 | reds []string 60 | start time.Time 61 | before time.Time 62 | filters []string 63 | filtervals []string 64 | started int32 65 | totalKeys int32 66 | out chan *processOut 67 | cherr chan error 68 | } 69 | 70 | func resolveFetch(j []byte, keys []string) map[string]interface{} { 71 | rv := map[string]interface{}{} 72 | found, err := jsonpointer.FindMany(j, keys) 73 | if err != nil { 74 | return rv 75 | } 76 | for k, v := range found { 77 | var val interface{} 78 | err = json.Unmarshal(v, &val) 79 | if err == nil { 80 | rv[k] = val 81 | } 82 | } 83 | return rv 84 | } 85 | 86 | func processDoc(di *gouchstore.DocumentInfo, chs []chan ptrval, 87 | doc []byte, ptrs []string, 88 | filters []string, filtervals []string, 89 | included bool) { 90 | 91 | pv := ptrval{di, nil, included} 92 | 93 | // Find all keys for filters and comparisons so we can do a 94 | // single pass through the document. 95 | keys := make([]string, 0, len(filters)+len(ptrs)) 96 | seen := map[string]bool{} 97 | for _, f := range filters { 98 | if !seen[f] { 99 | keys = append(keys, f) 100 | seen[f] = true 101 | } 102 | } 103 | for _, f := range ptrs { 104 | if !seen[f] { 105 | keys = append(keys, f) 106 | seen[f] = true 107 | } 108 | } 109 | 110 | fetched := resolveFetch(doc, keys) 111 | 112 | for i, p := range filters { 113 | val := fetched[p] 114 | checkVal := filtervals[i] 115 | switch val.(type) { 116 | case string: 117 | if val != checkVal { 118 | return 119 | } 120 | case int, uint, int64, float64, uint64, bool: 121 | v := fmt.Sprintf("%v", val) 122 | if v != checkVal { 123 | return 124 | } 125 | default: 126 | return 127 | } 128 | } 129 | 130 | for i, p := range ptrs { 131 | val := fetched[p] 132 | if p == "_id" { 133 | val = di.ID 134 | } 135 | switch x := val.(type) { 136 | case int, uint, int64, float64, uint64, bool: 137 | v := fmt.Sprintf("%v", val) 138 | pv.val = v 139 | chs[i] <- pv 140 | default: 141 | pv.val = x 142 | chs[i] <- pv 143 | } 144 | } 145 | } 146 | 147 | func processDocs(pi *processIn) { 148 | 149 | result := processOut{pi.cacheKey, pi.key, nil, nil, 0} 150 | 151 | if len(pi.ptrs) == 0 { 152 | log.Panicf("No pointers specified in query: %#v", *pi) 153 | } 154 | 155 | db, err := dbopen(pi.dbname) 156 | if err != nil { 157 | result.err = err 158 | pi.out <- &result 159 | return 160 | } 161 | defer closeDBConn(db) 162 | 163 | chans := make([]chan ptrval, 0, len(pi.ptrs)) 164 | resultchs := make([]chan interface{}, 0, len(pi.ptrs)) 165 | for i, r := range pi.reds { 166 | chans = append(chans, make(chan ptrval)) 167 | resultchs = append(resultchs, make(chan interface{})) 168 | 169 | go func(fi int, fr string) { 170 | resultchs[fi] <- reducers[fr](chans[fi]) 171 | }(i, r) 172 | } 173 | 174 | go func() { 175 | defer closeAll(chans) 176 | 177 | dodoc := func(di *gouchstore.DocumentInfo, included bool) { 178 | doc, err := db.DocumentByDocumentInfo(di) 179 | if err == nil { 180 | processDoc(di, chans, doc.Body, pi.ptrs, 181 | pi.filters, pi.filtervals, included) 182 | } else { 183 | for i := range pi.ptrs { 184 | chans[i] <- ptrval{di, nil, included} 185 | } 186 | } 187 | } 188 | 189 | for _, di := range pi.infos { 190 | dodoc(di, true) 191 | } 192 | if pi.nextInfo != nil { 193 | dodoc(pi.nextInfo, false) 194 | } 195 | }() 196 | 197 | results := make([]interface{}, len(pi.ptrs)) 198 | for i := 0; i < len(pi.ptrs); i++ { 199 | results[i] = <-resultchs[i] 200 | if f, fok := results[i].(float64); fok && 201 | (math.IsNaN(f) || 
math.IsInf(f, 0)) { 202 | results[i] = nil 203 | } 204 | } 205 | result.value = results 206 | 207 | if result.cacheOpaque == 0 && result.cacheKey != "" { 208 | // It's OK if we can't store our newly pulled item in 209 | // the cache, but it's most definitely not OK to stop 210 | // here because of this. 211 | select { 212 | case cacheInputSet <- &result: 213 | default: 214 | } 215 | } 216 | pi.out <- &result 217 | } 218 | 219 | func docProcessor(ch <-chan *processIn) { 220 | for pi := range ch { 221 | if time.Now().Before(pi.before) { 222 | processDocs(pi) 223 | } else { 224 | pi.out <- &processOut{"", pi.key, nil, errTimeout, 0} 225 | } 226 | } 227 | } 228 | 229 | func fetchDocs(dbname string, key int64, infos []*gouchstore.DocumentInfo, 230 | nextInfo *gouchstore.DocumentInfo, ptrs []string, reds []string, 231 | filters []string, filtervals []string, 232 | before time.Time, out chan<- *processOut) { 233 | 234 | i := processIn{"", dbname, key, infos, nextInfo, 235 | ptrs, reds, before, filters, filtervals, out} 236 | 237 | cacheInput <- &i 238 | } 239 | 240 | func runQuery(q *queryIn) { 241 | if len(q.ptrs) == 0 { 242 | q.cherr <- fmt.Errorf("at least one pointer is required") 243 | return 244 | } 245 | if q.group == 0 { 246 | q.cherr <- fmt.Errorf("group level cannot be zero") 247 | return 248 | } 249 | 250 | db, err := dbopen(q.dbname) 251 | if err != nil { 252 | log.Printf("Error opening db: %v - %v", q.dbname, err) 253 | q.cherr <- err 254 | return 255 | } 256 | defer closeDBConn(db) 257 | 258 | chunk := int64(time.Duration(q.group) * time.Millisecond) 259 | 260 | infos := []*gouchstore.DocumentInfo{} 261 | g := int64(0) 262 | nextg := "" 263 | 264 | err = db.AllDocuments(q.from, q.to, func(db *gouchstore.Gouchstore, di *gouchstore.DocumentInfo, userContext interface{}) error { 265 | kstr := di.ID 266 | var err error 267 | 268 | atomic.AddInt32(&q.totalKeys, 1) 269 | 270 | if kstr >= nextg { 271 | if len(infos) > 0 { 272 | atomic.AddInt32(&q.started, 1) 273 | fetchDocs(q.dbname, g, infos, di, 274 | q.ptrs, q.reds, q.filters, q.filtervals, 275 | q.before, q.out) 276 | 277 | infos = make([]*gouchstore.DocumentInfo, 0, len(infos)) 278 | } 279 | 280 | k := parseKey(kstr) 281 | g = (k / chunk) * chunk 282 | nextgi := g + chunk 283 | nextgt := time.Unix(nextgi/1e9, nextgi%1e9).UTC() 284 | nextg = nextgt.Format(time.RFC3339Nano) 285 | } 286 | infos = append(infos, di) 287 | 288 | return err 289 | }, nil) 290 | 291 | if err == nil && len(infos) > 0 { 292 | atomic.AddInt32(&q.started, 1) 293 | fetchDocs(q.dbname, g, infos, nil, 294 | q.ptrs, q.reds, q.filters, q.filtervals, 295 | q.before, q.out) 296 | } 297 | 298 | q.cherr <- err 299 | } 300 | 301 | func queryExecutor() { 302 | for q := range queryInput { 303 | if time.Now().Before(q.before) { 304 | runQuery(q) 305 | } else { 306 | log.Printf("Timed out query that's %v late", 307 | time.Since(q.before)) 308 | q.cherr <- errTimeout 309 | } 310 | } 311 | } 312 | 313 | func executeQuery(dbname, from, to string, group int, 314 | ptrs, reds, filters, filtervals []string) *queryIn { 315 | now := time.Now() 316 | 317 | rv := &queryIn{ 318 | dbname: dbname, 319 | from: from, 320 | to: to, 321 | group: group, 322 | ptrs: ptrs, 323 | reds: reds, 324 | start: now, 325 | before: now.Add(*queryTimeout), 326 | filters: filters, 327 | filtervals: filtervals, 328 | out: make(chan *processOut), 329 | cherr: make(chan error), 330 | } 331 | queryInput <- rv 332 | return rv 333 | } 334 | 335 | var processorInput chan *processIn 336 | var queryInput chan *queryIn 
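// Pipeline overview: executeQuery enqueues a *queryIn on queryInput; a
// queryExecutor goroutine walks the database's keys, buckets document
// infos into group-sized (millisecond) windows, and hands each bucket to
// fetchDocs as a *processIn. cacheInput either answers from memcached or
// falls through to processorInput, where docProcessor fetches the bodies
// and runs each pointer's values through the named reducer. For example,
// against a hypothetical database "metrics" with a numeric field at /cpu,
//
//	GET /metrics/_query?group=60000&ptr=/cpu&reducer=avg
//
// produces one {"<window start in ms>": [avg]} entry per 60-second bucket.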
337 | 338 | func convertTofloat64(in chan ptrval) chan float64 { 339 | ch := make(chan float64) 340 | go func() { 341 | defer close(ch) 342 | for v := range in { 343 | if v.included && v.val != nil { 344 | switch value := v.val.(type) { 345 | case string: 346 | x, err := strconv.ParseFloat(value, 64) 347 | if err == nil { 348 | ch <- x 349 | } 350 | } 351 | } 352 | } 353 | }() 354 | 355 | return ch 356 | } 357 | 358 | func convertTofloat64Rate(in chan ptrval) chan float64 { 359 | ch := make(chan float64) 360 | go func() { 361 | defer close(ch) 362 | var prevts int64 363 | var preval float64 364 | 365 | // First, find a part of the stream that has usable data. 366 | FIND_USABLE: 367 | for v := range in { 368 | if v.di != nil && v.val != nil { 369 | switch value := v.val.(type) { 370 | case string: 371 | x, err := strconv.ParseFloat(value, 64) 372 | if err == nil { 373 | prevts = parseKey(v.di.ID) 374 | preval = x 375 | break FIND_USABLE 376 | } 377 | } 378 | } 379 | } 380 | // Then emit floats based on deltas from previous values. 381 | for v := range in { 382 | if v.di != nil && v.val != nil { 383 | switch value := v.val.(type) { 384 | case string: 385 | x, err := strconv.ParseFloat(value, 64) 386 | if err == nil { 387 | thists := parseKey(v.di.ID) 388 | 389 | val := ((x - preval) / 390 | (float64(thists-prevts) / 1e9)) 391 | 392 | if !(math.IsNaN(val) || math.IsInf(val, 0)) { 393 | ch <- val 394 | } 395 | 396 | prevts = thists 397 | preval = x 398 | } 399 | } 400 | } 401 | } 402 | }() 403 | 404 | return ch 405 | } 406 | 407 | var reducers = map[string]reducer{ 408 | "identity": func(input chan ptrval) interface{} { 409 | rv := []interface{}{} 410 | for s := range input { 411 | if s.included { 412 | rv = append(rv, s.val) 413 | } 414 | } 415 | return rv 416 | }, 417 | "any": func(input chan ptrval) interface{} { 418 | var rv interface{} 419 | for v := range input { 420 | if rv == nil && v.included && v.val != nil { 421 | rv = v.val 422 | } 423 | } 424 | return rv 425 | }, 426 | "distinct": func(input chan ptrval) interface{} { 427 | uvm := map[interface{}]bool{} 428 | for v := range input { 429 | if v.included { 430 | 431 | switch value := v.val.(type) { 432 | case map[string]interface{}: 433 | case []interface{}: 434 | //unhashable 435 | continue 436 | default: 437 | uvm[value] = true 438 | } 439 | 440 | } 441 | } 442 | rv := make([]interface{}, 0, len(uvm)) 443 | for k := range uvm { 444 | rv = append(rv, k) 445 | } 446 | return rv 447 | }, 448 | "count": func(input chan ptrval) interface{} { 449 | rv := 0 450 | for v := range input { 451 | if v.included && v.val != nil { 452 | rv++ 453 | } 454 | } 455 | return rv 456 | }, 457 | "sum": func(input chan ptrval) interface{} { 458 | rv := float64(0) 459 | for v := range convertTofloat64(input) { 460 | rv += v 461 | } 462 | return rv 463 | }, 464 | "sumsq": func(input chan ptrval) interface{} { 465 | rv := float64(0) 466 | for v := range convertTofloat64(input) { 467 | rv += (v * v) 468 | } 469 | return rv 470 | }, 471 | "max": func(input chan ptrval) interface{} { 472 | rv := math.NaN() 473 | for v := range convertTofloat64(input) { 474 | if v > rv || math.IsNaN(rv) || math.IsInf(rv, 0) { 475 | rv = v 476 | } 477 | } 478 | return rv 479 | }, 480 | "min": func(input chan ptrval) interface{} { 481 | rv := math.NaN() 482 | for v := range convertTofloat64(input) { 483 | if v < rv || math.IsNaN(rv) || math.IsInf(rv, 0) { 484 | rv = v 485 | } 486 | } 487 | return rv 488 | }, 489 | "avg": func(input chan ptrval) interface{} { 490 | nums := 
float64(0) 491 | sum := float64(0) 492 | for v := range convertTofloat64(input) { 493 | nums++ 494 | sum += v 495 | } 496 | if nums > 0 { 497 | return sum / nums 498 | } 499 | return math.NaN() 500 | }, 501 | "c": func(input chan ptrval) interface{} { 502 | sum := float64(0) 503 | for v := range convertTofloat64Rate(input) { 504 | sum += v 505 | } 506 | return sum 507 | }, 508 | "c_min": func(input chan ptrval) interface{} { 509 | rv := math.NaN() 510 | for v := range convertTofloat64Rate(input) { 511 | if v < rv || math.IsNaN(rv) || math.IsInf(rv, 0) { 512 | rv = v 513 | } 514 | } 515 | return rv 516 | }, 517 | "c_avg": func(input chan ptrval) interface{} { 518 | nums := float64(0) 519 | sum := float64(0) 520 | for v := range convertTofloat64Rate(input) { 521 | nums++ 522 | sum += v 523 | } 524 | if nums > 0 { 525 | return sum / nums 526 | } 527 | return math.NaN() 528 | }, 529 | "c_max": func(input chan ptrval) interface{} { 530 | rv := math.NaN() 531 | for v := range convertTofloat64Rate(input) { 532 | if v > rv || math.IsNaN(rv) || math.IsInf(rv, 0) { 533 | rv = v 534 | } 535 | } 536 | return rv 537 | }, 538 | "obj_keys": func(input chan ptrval) interface{} { 539 | rv := []string{} 540 | for v := range input { 541 | if v.included { 542 | switch value := v.val.(type) { 543 | case map[string]interface{}: 544 | for mapk := range value { 545 | rv = append(rv, mapk) 546 | } 547 | } 548 | } 549 | } 550 | return rv 551 | }, 552 | "obj_distinct_keys": func(input chan ptrval) interface{} { 553 | ukm := map[string]bool{} 554 | for v := range input { 555 | if v.included { 556 | switch value := v.val.(type) { 557 | case map[string]interface{}: 558 | for mapk := range value { 559 | ukm[mapk] = true 560 | } 561 | } 562 | } 563 | } 564 | rv := make([]string, 0, len(ukm)) 565 | for k := range ukm { 566 | rv = append(rv, k) 567 | } 568 | return rv 569 | }, 570 | } 571 | -------------------------------------------------------------------------------- /query_test.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "io/ioutil" 5 | "math" 6 | "reflect" 7 | "testing" 8 | "time" 9 | 10 | "github.com/mschoch/gouchstore" 11 | ) 12 | 13 | var testInput = []interface{}{} 14 | var nextValue = "29" 15 | 16 | var bigInput []byte 17 | 18 | func init() { 19 | s := []interface{}{"31", "63", "foo", "17", 20 | "foo", "foo", "foo", "foo", 21 | map[string]interface{}{"key": "value1"}, 22 | map[string]interface{}{"key": "value2"}, 23 | map[string]interface{}{"key": "value3"}} 24 | for i := range s { 25 | testInput = append(testInput, s[i]) 26 | } 27 | 28 | var err error 29 | bigInput, err = ioutil.ReadFile("sample.json") 30 | if err != nil { 31 | panic("Couldn't read sample.json") 32 | } 33 | } 34 | 35 | func streamCollection(s []interface{}) chan ptrval { 36 | ch := make(chan ptrval) 37 | go func() { 38 | defer close(ch) 39 | t := time.Unix(1347255646, 418514126).UTC() 40 | for _, r := range s { 41 | t = t.Add(time.Second) 42 | ts := t.Format(time.RFC3339Nano) 43 | ch <- ptrval{gouchstore.NewDocumentInfo(ts), r, true} 44 | } 45 | t = t.Add(time.Second) 46 | ts := t.Format(time.RFC3339Nano) 47 | ch <- ptrval{gouchstore.NewDocumentInfo(ts), nextValue, false} 48 | }() 49 | return ch 50 | } 51 | 52 | func TestEmptyRateConversion(t *testing.T) { 53 | ch := make(chan ptrval) 54 | rch := convertTofloat64Rate(ch) 55 | close(ch) 56 | val, got := <-rch 57 | if got { 58 | t.Fatalf("Expected empty channel, got %v", val) 59 | } 60 | } 61 | 62 | func 
TestSingleRateConversion(t *testing.T) { 63 | ch := make(chan ptrval, 1) 64 | rch := convertTofloat64Rate(ch) 65 | ch <- ptrval{nil, &nextValue, true} 66 | close(ch) 67 | val, got := <-rch 68 | if got { 69 | t.Fatalf("Expected empty channel, got %v", val) 70 | } 71 | } 72 | 73 | func TestPairRateConversion(t *testing.T) { 74 | ch := make(chan ptrval, 2) 75 | rch := convertTofloat64Rate(ch) 76 | 77 | tm := time.Now().UTC() 78 | val1 := "20" 79 | ch <- ptrval{gouchstore.NewDocumentInfo(tm.Format(time.RFC3339Nano)), 80 | val1, true} 81 | 82 | tm = tm.Add(5 * time.Second) 83 | val2 := "25" 84 | ch <- ptrval{gouchstore.NewDocumentInfo(tm.Format(time.RFC3339Nano)), 85 | val2, false} 86 | 87 | close(ch) 88 | exp := 1.0 89 | val, got := <-rch 90 | if !got { 91 | t.Fatalf("Expected value, got empty channel") 92 | } 93 | if val != exp { 94 | t.Fatalf("Expected %v, got %v", exp, val) 95 | } 96 | } 97 | 98 | func TestReducers(t *testing.T) { 99 | tests := []struct { 100 | reducer string 101 | exp interface{} 102 | }{ 103 | {"any", "31"}, 104 | {"count", 11}, 105 | {"sum", float64(111)}, 106 | {"sumsq", float64(5219)}, 107 | {"max", float64(63)}, 108 | {"min", float64(17)}, 109 | {"avg", float64(37)}, 110 | {"c", float64(10.5)}, 111 | {"c_min", float64(-23)}, 112 | {"c_avg", float64(3.5)}, 113 | {"c_max", float64(32)}, 114 | {"identity", testInput}, 115 | {"obj_keys", []string{"key", "key", "key"}}, 116 | {"obj_distinct_keys", []string{"key"}}, 117 | // distinct reducer tested separately 118 | } 119 | 120 | for _, test := range tests { 121 | got := reducers[test.reducer](streamCollection(testInput)) 122 | if !reflect.DeepEqual(got, test.exp) { 123 | t.Errorf("Expected %v for %v, got %v", 124 | test.exp, test.reducer, got) 125 | t.Fail() 126 | } 127 | } 128 | } 129 | 130 | // the order of items returned by distinct is not guaranteed, 131 | // which makes using TestReducers problematic as-is; 132 | // also, numeric values become strings due to some internal behavior. 133 | // this could change over time; for now it's primarily intended 134 | // to work with string values 135 | func TestDistinctReducer(t *testing.T) { 136 | test := struct { 137 | reducer string 138 | exp []interface{} 139 | }{"distinct", []interface{}{"foo", "17", "31", "63"}} 140 | 141 | got := reducers[test.reducer](streamCollection(testInput)).([]interface{}) 142 | if len(got) != len(test.exp) { 143 | t.Errorf("Expected length of result to be %v for %v, got %v", 144 | len(test.exp), test.reducer, len(got)) 145 | } 146 | OUTER: 147 | for _, expval := range test.exp { 148 | for _, gotval := range got { 149 | if reflect.DeepEqual(expval, gotval) { 150 | continue OUTER 151 | } 152 | } 153 | t.Errorf("Expected result to contain %v for %v, got %v", 154 | expval, test.reducer, got) 155 | } 156 | } 157 | 158 | func TestEmptyReducers(t *testing.T) { 159 | emptyInput := []interface{}{} 160 | tests := []struct { 161 | reducer string 162 | exp interface{} 163 | }{ 164 | {"any", nil}, 165 | {"count", 0}, 166 | {"sum", 0.0}, 167 | {"sumsq", 0.0}, 168 | {"max", math.NaN()}, 169 | {"min", math.NaN()}, 170 | {"avg", math.NaN()}, 171 | {"c", 0.0}, 172 | {"c_min", math.NaN()}, 173 | {"c_avg", math.NaN()}, 174 | {"c_max", math.NaN()}, 175 | {"identity", emptyInput}, 176 | {"obj_keys", []string{}}, 177 | {"obj_distinct_keys", []string{}}, 178 | {"distinct", emptyInput}, 179 | } 180 | 181 | eq := func(a, b interface{}) bool { 182 | if !reflect.DeepEqual(a, b) { 183 | af, aok := a.(float64) 184 | bf, bok := b.(float64) 185 | return aok && bok && (math.IsNaN(af) == 
math.IsNaN(bf)) 186 | } 187 | return true 188 | } 189 | 190 | for _, test := range tests { 191 | got := reducers[test.reducer](streamCollection(emptyInput)) 192 | if !eq(got, test.exp) { 193 | t.Errorf("Expected %v for %v, got %v", 194 | test.exp, test.reducer, got) 195 | t.Fail() 196 | } 197 | } 198 | } 199 | 200 | func TestNilReducers(t *testing.T) { 201 | emptyInput := []interface{}{nil} 202 | tests := []struct { 203 | reducer string 204 | exp interface{} 205 | }{ 206 | {"any", nil}, 207 | {"count", 0}, 208 | {"sum", 0.0}, 209 | {"sumsq", 0.0}, 210 | {"max", math.NaN()}, 211 | {"min", math.NaN()}, 212 | {"avg", math.NaN()}, 213 | {"c", 0.0}, 214 | {"c_min", math.NaN()}, 215 | {"c_avg", math.NaN()}, 216 | {"c_max", math.NaN()}, 217 | {"identity", emptyInput}, 218 | {"obj_keys", []string{}}, 219 | {"obj_distinct_keys", []string{}}, 220 | {"distinct", emptyInput}, 221 | } 222 | 223 | eq := func(a, b interface{}) bool { 224 | if !reflect.DeepEqual(a, b) { 225 | af, aok := a.(float64) 226 | bf, bok := b.(float64) 227 | return aok && bok && (math.IsNaN(af) == math.IsNaN(bf)) 228 | } 229 | return true 230 | } 231 | 232 | for _, test := range tests { 233 | got := reducers[test.reducer](streamCollection(emptyInput)) 234 | if !eq(got, test.exp) { 235 | t.Errorf("Expected %v for %v, got %v", 236 | test.exp, test.reducer, got) 237 | t.Fail() 238 | } 239 | } 240 | } 241 | 242 | func TestPointers(t *testing.T) { 243 | docID := "2013-02-22T16:29:19.750264Z" 244 | di := gouchstore.NewDocumentInfo(docID) 245 | tests := []struct { 246 | pointer string 247 | exp interface{} 248 | }{ 249 | {"/kind", "Listing"}, 250 | {"_id", docID}, 251 | } 252 | 253 | for _, test := range tests { 254 | chans := make([]chan ptrval, 0, 1) 255 | chans = append(chans, make(chan ptrval)) 256 | go processDoc(di, chans, bigInput, []string{test.pointer}, []string{}, []string{}, true) 257 | got := <-chans[0] 258 | if test.exp != got.val { 259 | t.Errorf("Expected %v for %v, got %v", 260 | test.exp, test.pointer, got.val) 261 | } 262 | } 263 | 264 | } 265 | -------------------------------------------------------------------------------- /sample.json: -------------------------------------------------------------------------------- 1 | {"kind": "Listing", "data": {"modhash": "", "children": [{"kind": "t3", "data": {"domain": "imgur.com", "banned_by": null, "media_embed": {}, "subreddit": "pics", "selftext_html": null, "selftext": "", "likes": null, "link_flair_text": null, "id": "w568e", "clicked": false, "title": "It's 103+ out, so my daughter and niece waited for our garbage men to come by so they could run out and give them some ice cold Gatorades ", "num_comments": 1232, "score": 2675, "approved_by": null, "over_18": false, "hidden": false, "thumbnail": "http://a.thumbs.redditmedia.com/OoulK88mGUsZMp-B.jpg", "subreddit_id": "t5_2qh0u", "edited": false, "link_flair_css_class": null, "author_flair_css_class": null, "downs": 22897, "saved": false, "is_self": false, "permalink": "/r/pics/comments/w568e/its_103_out_so_my_daughter_and_niece_waited_for/", "name": "t3_w568e", "created": 1341628133.0, "url": "http://imgur.com/bW1mm", "author_flair_text": null, "author": "shellykidd", "created_utc": 1341602933.0, "media": null, "num_reports": null, "ups": 25572}}, {"kind": "t3", "data": {"domain": "i.imgur.com", "banned_by": null, "media_embed": {}, "subreddit": "pics", "selftext_html": null, "selftext": "", "likes": null, "link_flair_text": null, "id": "w58hj", "clicked": false, "title": "My roommate after winning over $18million in the 
biggest buy-in poker tournament in history a few days ago.", "num_comments": 598, "score": 1827, "approved_by": null, "over_18": false, "hidden": false, "thumbnail": "http://f.thumbs.redditmedia.com/vZK4yn6NhliqFIrG.jpg", "subreddit_id": "t5_2qh0u", "edited": false, "link_flair_css_class": null, "author_flair_css_class": null, "downs": 3533, "saved": false, "is_self": false, "permalink": "/r/pics/comments/w58hj/my_roommate_after_winning_over_18million_in_the/", "name": "t3_w58hj", "created": 1341630384.0, "url": "http://i.imgur.com/qBlai.jpg", "author_flair_text": null, "author": "executiveproducer", "created_utc": 1341605184.0, "media": null, "num_reports": null, "ups": 5360}}, {"kind": "t3", "data": {"domain": "imgur.com", "banned_by": null, "media_embed": {}, "subreddit": "pics", "selftext_html": null, "selftext": "", "likes": null, "link_flair_text": null, "id": "w5aor", "clicked": false, "title": "I'm a garbage man, and this made my day!", "num_comments": 156, "score": 1555, "approved_by": null, "over_18": false, "hidden": false, "thumbnail": "http://e.thumbs.redditmedia.com/KC0hWtJEM6ziiCG4.jpg", "subreddit_id": "t5_2qh0u", "edited": false, "link_flair_css_class": null, "author_flair_css_class": null, "downs": 1556, "saved": false, "is_self": false, "permalink": "/r/pics/comments/w5aor/im_a_garbage_man_and_this_made_my_day/", "name": "t3_w5aor", "created": 1341632510.0, "url": "http://imgur.com/a/RqGYh", "author_flair_text": null, "author": "birdcanfly", "created_utc": 1341607310.0, "media": null, "num_reports": null, "ups": 3111}}, {"kind": "t3", "data": {"domain": "i.imgur.com", "banned_by": null, "media_embed": {}, "subreddit": "pics", "selftext_html": null, "selftext": "", "likes": null, "link_flair_text": null, "id": "w53xc", "clicked": false, "title": "The never ending story", "num_comments": 93, "score": 1728, "approved_by": null, "over_18": false, "hidden": false, "thumbnail": "http://a.thumbs.redditmedia.com/EO-SkS2UTxuINEgZ.jpg", "subreddit_id": "t5_2qh0u", "edited": false, "link_flair_css_class": null, "author_flair_css_class": null, "downs": 3033, "saved": false, "is_self": false, "permalink": "/r/pics/comments/w53xc/the_never_ending_story/", "name": "t3_w53xc", "created": 1341626000.0, "url": "http://i.imgur.com/We17d.jpg", "author_flair_text": null, "author": "BabblingCrap", "created_utc": 1341600800.0, "media": null, "num_reports": null, "ups": 4761}}, {"kind": "t3", "data": {"domain": "i.imgur.com", "banned_by": null, "media_embed": {}, "subreddit": "pics", "selftext_html": null, "selftext": "", "likes": null, "link_flair_text": null, "id": "w59nw", "clicked": false, "title": "So nervous I could vomit...", "num_comments": 290, "score": 972, "approved_by": null, "over_18": false, "hidden": false, "thumbnail": "http://c.thumbs.redditmedia.com/TyWXYiL_u6bXJVp8.jpg", "subreddit_id": "t5_2qh0u", "edited": false, "link_flair_css_class": null, "author_flair_css_class": null, "downs": 1212, "saved": false, "is_self": false, "permalink": "/r/pics/comments/w59nw/so_nervous_i_could_vomit/", "name": "t3_w59nw", "created": 1341631540.0, "url": "http://i.imgur.com/i5k1g.jpg", "author_flair_text": null, "author": "fknbwdwn", "created_utc": 1341606340.0, "media": null, "num_reports": null, "ups": 2184}}, {"kind": "t3", "data": {"domain": "i.imgur.com", "banned_by": null, "media_embed": {}, "subreddit": "pics", "selftext_html": null, "selftext": "", "likes": null, "link_flair_text": null, "id": "w52ue", "clicked": false, "title": "Grandparents' TV stand", "num_comments": 80, "score": 
1304, "approved_by": null, "over_18": false, "hidden": false, "thumbnail": "http://c.thumbs.redditmedia.com/Gdu6Y4IjEpjxIF4J.jpg", "subreddit_id": "t5_2qh0u", "edited": false, "link_flair_css_class": null, "author_flair_css_class": null, "downs": 766, "saved": false, "is_self": false, "permalink": "/r/pics/comments/w52ue/grandparents_tv_stand/", "name": "t3_w52ue", "created": 1341625007.0, "url": "http://i.imgur.com/PVwS7.jpg", "author_flair_text": null, "author": "CelestialAvatar", "created_utc": 1341599807.0, "media": null, "num_reports": null, "ups": 2070}}, {"kind": "t3", "data": {"domain": "i.imgur.com", "banned_by": null, "media_embed": {}, "subreddit": "pics", "selftext_html": null, "selftext": "", "likes": null, "link_flair_text": null, "id": "w4oq1", "clicked": false, "title": "Shot of a lifetime on the 4th of July. Perfect timing!! ", "num_comments": 899, "score": 2760, "approved_by": null, "over_18": false, "hidden": false, "thumbnail": "http://b.thumbs.redditmedia.com/brb289xCEnh41MuC.jpg", "subreddit_id": "t5_2qh0u", "edited": false, "link_flair_css_class": null, "author_flair_css_class": null, "downs": 31678, "saved": false, "is_self": false, "permalink": "/r/pics/comments/w4oq1/shot_of_a_lifetime_on_the_4th_of_july_perfect/", "name": "t3_w4oq1", "created": 1341610291.0, "url": "http://i.imgur.com/9txJd.jpg", "author_flair_text": null, "author": "littlefish90", "created_utc": 1341585091.0, "media": null, "num_reports": null, "ups": 34438}}, {"kind": "t3", "data": {"domain": "imgur.com", "banned_by": null, "media_embed": {}, "subreddit": "pics", "selftext_html": null, "selftext": "", "likes": null, "link_flair_text": null, "id": "w4yfo", "clicked": false, "title": "My 11 year old cousins face when he came 2nd in the National Pokemon Championships in Canada, he won himself and his dad a trip to Hawaii!", "num_comments": 308, "score": 1562, "approved_by": null, "over_18": false, "hidden": false, "thumbnail": "http://f.thumbs.redditmedia.com/EEegOt7uSxmwf0Kw.jpg", "subreddit_id": "t5_2qh0u", "edited": false, "link_flair_css_class": null, "author_flair_css_class": null, "downs": 4559, "saved": false, "is_self": false, "permalink": "/r/pics/comments/w4yfo/my_11_year_old_cousins_face_when_he_came_2nd_in/", "name": "t3_w4yfo", "created": 1341620761.0, "url": "http://imgur.com/Ub094", "author_flair_text": null, "author": "sweetchilli", "created_utc": 1341595561.0, "media": null, "num_reports": null, "ups": 6121}}, {"kind": "t3", "data": {"domain": "i.imgur.com", "banned_by": null, "media_embed": {}, "subreddit": "pics", "selftext_html": null, "selftext": "", "likes": null, "link_flair_text": null, "id": "w5e3b", "clicked": false, "title": "Ron Perlman giving a terminally ill child his dream. 
It's so nice to see him go out of his way to get into character for a child's request.", "num_comments": 26, "score": 657, "approved_by": null, "over_18": false, "hidden": false, "thumbnail": "http://b.thumbs.redditmedia.com/09rxnolBvOFceknC.jpg", "subreddit_id": "t5_2qh0u", "edited": false, "link_flair_css_class": null, "author_flair_css_class": null, "downs": 171, "saved": false, "is_self": false, "permalink": "/r/pics/comments/w5e3b/ron_perlman_giving_a_terminally_ill_child_his/", "name": "t3_w5e3b", "created": 1341636072.0, "url": "http://i.imgur.com/yXUuP.jpg", "author_flair_text": null, "author": "Shiftyze", "created_utc": 1341610872.0, "media": null, "num_reports": null, "ups": 828}}, {"kind": "t3", "data": {"domain": "imgur.com", "banned_by": null, "media_embed": {}, "subreddit": "pics", "selftext_html": null, "selftext": "", "likes": null, "link_flair_text": null, "id": "w4vuy", "clicked": false, "title": "You go little guy...", "num_comments": 93, "score": 1615, "approved_by": null, "over_18": false, "hidden": false, "thumbnail": "http://b.thumbs.redditmedia.com/T6GOYcmATXhpv2Sy.jpg", "subreddit_id": "t5_2qh0u", "edited": false, "link_flair_css_class": null, "author_flair_css_class": null, "downs": 4098, "saved": false, "is_self": false, "permalink": "/r/pics/comments/w4vuy/you_go_little_guy/", "name": "t3_w4vuy", "created": 1341618076.0, "url": "http://www.imgur.com/xT4mp.jpeg", "author_flair_text": null, "author": "ksmith22", "created_utc": 1341592876.0, "media": null, "num_reports": null, "ups": 5713}}, {"kind": "t3", "data": {"domain": "i.imgur.com", "banned_by": null, "media_embed": {}, "subreddit": "pics", "selftext_html": null, "selftext": "", "likes": null, "link_flair_text": null, "id": "w4qqn", "clicked": false, "title": "A clash of two worlds in London; skinheads and hippies. Circa 1969.", "num_comments": 690, "score": 1803, "approved_by": null, "over_18": false, "hidden": false, "thumbnail": "http://b.thumbs.redditmedia.com/Lg5SeruPj0a3dnZy.jpg", "subreddit_id": "t5_2qh0u", "edited": false, "link_flair_css_class": null, "author_flair_css_class": null, "downs": 5903, "saved": false, "is_self": false, "permalink": "/r/pics/comments/w4qqn/a_clash_of_two_worlds_in_london_skinheads_and/", "name": "t3_w4qqn", "created": 1341612670.0, "url": "http://i.imgur.com/TcuZs.jpg", "author_flair_text": null, "author": "RarneyBubble", "created_utc": 1341587470.0, "media": null, "num_reports": null, "ups": 7706}}, {"kind": "t3", "data": {"domain": "imgur.com", "banned_by": null, "media_embed": {}, "subreddit": "pics", "selftext_html": null, "selftext": "", "likes": null, "link_flair_text": null, "id": "w4qq2", "clicked": false, "title": "No Flash vs. 
Flash", "num_comments": 233, "score": 1742, "approved_by": null, "over_18": false, "hidden": false, "thumbnail": "http://c.thumbs.redditmedia.com/ooWLthXrnYWvaF-V.jpg", "subreddit_id": "t5_2qh0u", "edited": false, "link_flair_css_class": null, "author_flair_css_class": null, "downs": 9154, "saved": false, "is_self": false, "permalink": "/r/pics/comments/w4qq2/no_flash_vs_flash/", "name": "t3_w4qq2", "created": 1341612655.0, "url": "http://imgur.com/a/JhN3l", "author_flair_text": null, "author": "nessaleigh", "created_utc": 1341587455.0, "media": null, "num_reports": null, "ups": 10896}}, {"kind": "t3", "data": {"domain": "imgur.com", "banned_by": null, "media_embed": {}, "subreddit": "pics", "selftext_html": null, "selftext": "", "likes": null, "link_flair_text": null, "id": "w4q71", "clicked": false, "title": "Meanwhile in Lithuania", "num_comments": 304, "score": 1710, "approved_by": null, "over_18": false, "hidden": false, "thumbnail": "http://c.thumbs.redditmedia.com/Q3YogQ70Rp1CxIEb.jpg", "subreddit_id": "t5_2qh0u", "edited": false, "link_flair_css_class": null, "author_flair_css_class": null, "downs": 2601, "saved": false, "is_self": false, "permalink": "/r/pics/comments/w4q71/meanwhile_in_lithuania/", "name": "t3_w4q71", "created": 1341612021.0, "url": "http://imgur.com/a/10wfb", "author_flair_text": null, "author": "c00lwhip", "created_utc": 1341586821.0, "media": null, "num_reports": null, "ups": 4311}}, {"kind": "t3", "data": {"domain": "i.imgur.com", "banned_by": null, "media_embed": {}, "subreddit": "pics", "selftext_html": null, "selftext": "", "likes": null, "link_flair_text": null, "id": "w4pxv", "clicked": false, "title": "My stepson handed me playdough and said, \"Make a puppy.\" He now thinks I'm a god. [X-Post from r/Parenting]", "num_comments": 178, "score": 1671, "approved_by": null, "over_18": false, "hidden": false, "thumbnail": "http://d.thumbs.redditmedia.com/d53B2HCbt33qDfe9.jpg", "subreddit_id": "t5_2qh0u", "edited": false, "link_flair_css_class": null, "author_flair_css_class": null, "downs": 6652, "saved": false, "is_self": false, "permalink": "/r/pics/comments/w4pxv/my_stepson_handed_me_playdough_and_said_make_a/", "name": "t3_w4pxv", "created": 1341611712.0, "url": "http://i.imgur.com/IJWwq.jpg", "author_flair_text": null, "author": "scruffy01", "created_utc": 1341586512.0, "media": null, "num_reports": null, "ups": 8323}}, {"kind": "t3", "data": {"domain": "i.imgur.com", "banned_by": null, "media_embed": {}, "subreddit": "pics", "selftext_html": null, "selftext": "", "likes": null, "link_flair_text": null, "id": "w4qa5", "clicked": false, "title": "My friend Doug is running across america to raise awareness of Diabetes. He will be the first diabetic person to do this. It'd be great if we can support him! 
(x-post from r/diabetes)", "num_comments": 453, "score": 1567, "approved_by": null, "over_18": false, "hidden": false, "thumbnail": "http://d.thumbs.redditmedia.com/ye4arY4fzaACkGMW.jpg", "subreddit_id": "t5_2qh0u", "edited": false, "link_flair_css_class": null, "author_flair_css_class": null, "downs": 6360, "saved": false, "is_self": false, "permalink": "/r/pics/comments/w4qa5/my_friend_doug_is_running_across_america_to_raise/", "name": "t3_w4qa5", "created": 1341612130.0, "url": "http://i.imgur.com/MQeJB.jpg", "author_flair_text": null, "author": "recoverelapse", "created_utc": 1341586930.0, "media": null, "num_reports": null, "ups": 7927}}, {"kind": "t3", "data": {"domain": "i.imgur.com", "banned_by": null, "media_embed": {}, "subreddit": "pics", "selftext_html": null, "selftext": "", "likes": null, "link_flair_text": null, "id": "w5288", "clicked": false, "title": "Ingenious ", "num_comments": 35, "score": 724, "approved_by": null, "over_18": false, "hidden": false, "thumbnail": "http://d.thumbs.redditmedia.com/hleJ97Ozk9E5ZPJW.jpg", "subreddit_id": "t5_2qh0u", "edited": false, "link_flair_css_class": null, "author_flair_css_class": null, "downs": 327, "saved": false, "is_self": false, "permalink": "/r/pics/comments/w5288/ingenious/", "name": "t3_w5288", "created": 1341624424.0, "url": "http://i.imgur.com/Rja4x.jpg", "author_flair_text": null, "author": "influenza", "created_utc": 1341599224.0, "media": null, "num_reports": null, "ups": 1051}}, {"kind": "t3", "data": {"domain": "i.imgur.com", "banned_by": null, "media_embed": {}, "subreddit": "pics", "selftext_html": null, "selftext": "", "likes": null, "link_flair_text": null, "id": "w55cj", "clicked": false, "title": "Reddit, it has happened. Adult sized capri suns!", "num_comments": 47, "score": 579, "approved_by": null, "over_18": false, "hidden": false, "thumbnail": "http://d.thumbs.redditmedia.com/M8alPpk32nx2Ulc3.jpg", "subreddit_id": "t5_2qh0u", "edited": false, "link_flair_css_class": null, "author_flair_css_class": null, "downs": 181, "saved": false, "is_self": false, "permalink": "/r/pics/comments/w55cj/reddit_it_has_happened_adult_sized_capri_suns/", "name": "t3_w55cj", "created": 1341627317.0, "url": "http://i.imgur.com/2goMA.jpg", "author_flair_text": null, "author": "talljewishkid", "created_utc": 1341602117.0, "media": null, "num_reports": null, "ups": 760}}, {"kind": "t3", "data": {"domain": "i.imgur.com", "banned_by": null, "media_embed": {}, "subreddit": "pics", "selftext_html": null, "selftext": "", "likes": null, "link_flair_text": null, "id": "w4qdk", "clicked": false, "title": "Strickland moved to my town.", "num_comments": 61, "score": 1254, "approved_by": null, "over_18": false, "hidden": false, "thumbnail": "http://b.thumbs.redditmedia.com/gKAzS0WMZ9Y145DU.jpg", "subreddit_id": "t5_2qh0u", "edited": false, "link_flair_css_class": null, "author_flair_css_class": null, "downs": 1146, "saved": false, "is_self": false, "permalink": "/r/pics/comments/w4qdk/strickland_moved_to_my_town/", "name": "t3_w4qdk", "created": 1341612230.0, "url": "http://i.imgur.com/hTYDB.jpg", "author_flair_text": null, "author": "RedBeardedOwl", "created_utc": 1341587030.0, "media": null, "num_reports": null, "ups": 2400}}, {"kind": "t3", "data": {"domain": "imgur.com", "banned_by": null, "media_embed": {}, "subreddit": "pics", "selftext_html": null, "selftext": "", "likes": null, "link_flair_text": null, "id": "w4nyz", "clicked": false, "title": "This is a 150 million year old fossil of Sciurumimus albersdoerferi, recently discovered in 
a Bavarian limestone quarry.", "num_comments": 133, "score": 1346, "approved_by": null, "over_18": false, "hidden": false, "thumbnail": "http://f.thumbs.redditmedia.com/zqK6gMUkOGyV2GfY.jpg", "subreddit_id": "t5_2qh0u", "edited": false, "link_flair_css_class": null, "author_flair_css_class": null, "downs": 1004, "saved": false, "is_self": false, "permalink": "/r/pics/comments/w4nyz/this_is_a_150_million_year_old_fossil_of/", "name": "t3_w4nyz", "created": 1341609405.0, "url": "http://imgur.com/7t9Nu", "author_flair_text": null, "author": "gloon", "created_utc": 1341584205.0, "media": null, "num_reports": null, "ups": 2350}}, {"kind": "t3", "data": {"domain": "i.imgur.com", "banned_by": null, "media_embed": {}, "subreddit": "pics", "selftext_html": null, "selftext": "", "likes": null, "link_flair_text": null, "id": "w4jxa", "clicked": false, "title": "Raw Titanium", "num_comments": 237, "score": 1799, "approved_by": null, "over_18": false, "hidden": false, "thumbnail": "http://f.thumbs.redditmedia.com/IFqSt2IC9FEhUYAY.jpg", "subreddit_id": "t5_2qh0u", "edited": false, "link_flair_css_class": null, "author_flair_css_class": null, "downs": 4435, "saved": false, "is_self": false, "permalink": "/r/pics/comments/w4jxa/raw_titanium/", "name": "t3_w4jxa", "created": 1341603294.0, "url": "http://i.imgur.com/EgZtw.jpg", "author_flair_text": null, "author": "Scopolamina", "created_utc": 1341578094.0, "media": null, "num_reports": null, "ups": 6234}}, {"kind": "t3", "data": {"domain": "i.imgur.com", "banned_by": null, "media_embed": {}, "subreddit": "pics", "selftext_html": null, "selftext": "", "likes": null, "link_flair_text": null, "id": "w4lok", "clicked": false, "title": "Grafitti becomes art when the artist becomes famous enough. Our city council framed this piece behind plexiglass.", "num_comments": 166, "score": 1444, "approved_by": null, "over_18": false, "hidden": false, "thumbnail": "http://c.thumbs.redditmedia.com/VevdHZYhEqNBwg1h.jpg", "subreddit_id": "t5_2qh0u", "edited": false, "link_flair_css_class": null, "author_flair_css_class": null, "downs": 1556, "saved": false, "is_self": false, "permalink": "/r/pics/comments/w4lok/grafitti_becomes_art_when_the_artist_becomes/", "name": "t3_w4lok", "created": 1341606211.0, "url": "http://i.imgur.com/XMZKa.jpg", "author_flair_text": null, "author": "Malicious78", "created_utc": 1341581011.0, "media": null, "num_reports": null, "ups": 3000}}, {"kind": "t3", "data": {"domain": "imgur.com", "banned_by": null, "media_embed": {}, "subreddit": "pics", "selftext_html": null, "selftext": "", "likes": null, "link_flair_text": null, "id": "w4jnl", "clicked": false, "title": "The Dragon Gate of Harlech House, Dublin.", "num_comments": 102, "score": 1677, "approved_by": null, "over_18": false, "hidden": false, "thumbnail": "http://f.thumbs.redditmedia.com/HdTgBfok-QuAhMUG.jpg", "subreddit_id": "t5_2qh0u", "edited": false, "link_flair_css_class": null, "author_flair_css_class": null, "downs": 3875, "saved": false, "is_self": false, "permalink": "/r/pics/comments/w4jnl/the_dragon_gate_of_harlech_house_dublin/", "name": "t3_w4jnl", "created": 1341602845.0, "url": "http://imgur.com/7v9WP", "author_flair_text": null, "author": "boomboomhead", "created_utc": 1341577645.0, "media": null, "num_reports": null, "ups": 5552}}, {"kind": "t3", "data": {"domain": "i.imgur.com", "banned_by": null, "media_embed": {}, "subreddit": "pics", "selftext_html": null, "selftext": "", "likes": null, "link_flair_text": null, "id": "w4uta", "clicked": false, "title": "Found an 
alligator in my watch strap!", "num_comments": 23, "score": 773, "approved_by": null, "over_18": false, "hidden": false, "thumbnail": "http://b.thumbs.redditmedia.com/1PIgC5hSvTb0-Gsa.jpg", "subreddit_id": "t5_2qh0u", "edited": false, "link_flair_css_class": null, "author_flair_css_class": null, "downs": 534, "saved": false, "is_self": false, "permalink": "/r/pics/comments/w4uta/found_an_alligator_in_my_watch_strap/", "name": "t3_w4uta", "created": 1341617017.0, "url": "http://i.imgur.com/ziRoX.jpg", "author_flair_text": null, "author": "remarkedvial", "created_utc": 1341591817.0, "media": null, "num_reports": null, "ups": 1307}}, {"kind": "t3", "data": {"domain": "imgur.com", "banned_by": null, "media_embed": {}, "subreddit": "pics", "selftext_html": null, "selftext": "", "likes": null, "link_flair_text": null, "id": "w4s2t", "clicked": false, "title": "\"Life, uh, finds a way.\"", "num_comments": 22, "score": 757, "approved_by": null, "over_18": false, "hidden": false, "thumbnail": "http://b.thumbs.redditmedia.com/5behq6xV50b_n6y7.jpg", "subreddit_id": "t5_2qh0u", "edited": false, "link_flair_css_class": null, "author_flair_css_class": null, "downs": 263, "saved": false, "is_self": false, "permalink": "/r/pics/comments/w4s2t/life_uh_finds_a_way/", "name": "t3_w4s2t", "created": 1341614129.0, "url": "http://imgur.com/UYA2B", "author_flair_text": null, "author": "bessiemucho", "created_utc": 1341588929.0, "media": null, "num_reports": null, "ups": 1020}}, {"kind": "t3", "data": {"domain": "i.imgur.com", "banned_by": null, "media_embed": {}, "subreddit": "pics", "selftext_html": null, "selftext": "", "likes": null, "link_flair_text": null, "id": "w4nqz", "clicked": false, "title": "How I put parmesan on my food when I'm at home away from the judgmental eyes of the wait staff.", "num_comments": 72, "score": 957, "approved_by": null, "over_18": false, "hidden": false, "thumbnail": "http://a.thumbs.redditmedia.com/5ZhWE3fXdqjqaY-l.jpg", "subreddit_id": "t5_2qh0u", "edited": false, "link_flair_css_class": null, "author_flair_css_class": null, "downs": 621, "saved": false, "is_self": false, "permalink": "/r/pics/comments/w4nqz/how_i_put_parmesan_on_my_food_when_im_at_home/", "name": "t3_w4nqz", "created": 1341609102.0, "url": "http://i.imgur.com/D8CT4.gif", "author_flair_text": null, "author": "MartialFur", "created_utc": 1341583902.0, "media": null, "num_reports": null, "ups": 1578}}], "after": "t3_w4nqz", "before": null}} -------------------------------------------------------------------------------- /serieslyclient/client.go: -------------------------------------------------------------------------------- 1 | // Package serieslyclient provides access to a seriesly instance. 2 | package serieslyclient 3 | 4 | import ( 5 | "encoding/json" 6 | "net/http" 7 | "net/url" 8 | 9 | "github.com/dustin/httputil" 10 | ) 11 | 12 | // A Seriesly DB. 13 | type Seriesly struct { 14 | u *url.URL 15 | client *http.Client 16 | } 17 | 18 | // New creates a new Seriesly instance for the given URL. 19 | func New(u string) (*Seriesly, error) { 20 | return NewWithClient(u, http.DefaultClient) 21 | } 22 | 23 | // NewWithClient creates a new Seriesly instance with the specified 24 | // HTTP client. 25 | func NewWithClient(u string, client *http.Client) (*Seriesly, error) { 26 | rvu, err := url.Parse(u) 27 | if err != nil { 28 | return nil, err 29 | } 30 | return &Seriesly{rvu, client}, nil 31 | } 32 | 33 | // DBInfo represents database info. 
34 | type DBInfo struct { 35 | DBName string 36 | SpaceUsed json.Number `json:"space_used"` 37 | LastSeq json.Number `json:"last_seq"` 38 | HeaderPos json.Number `json:"header_pos"` 39 | DocCount json.Number `json:"doc_count"` 40 | DeletedCount json.Number `json:"deleted_count"` 41 | Error string 42 | } 43 | 44 | // URL returns a copy of the Seriesly client's URL. 45 | func (s *Seriesly) URL() *url.URL { 46 | rv := *s.u 47 | return &rv 48 | } 49 | 50 | // List all databases. 51 | func (s *Seriesly) List() ([]string, error) { 52 | u := *s.u 53 | u.Path = "/_all_dbs" 54 | res, err := s.client.Get(u.String()) 55 | if err != nil { 56 | return nil, err 57 | } 58 | defer res.Body.Close() 59 | if res.StatusCode != 200 { 60 | return nil, httputil.HTTPError(res) 61 | } 62 | 63 | rv := []string{} 64 | err = json.NewDecoder(res.Body).Decode(&rv) 65 | return rv, err 66 | } 67 | 68 | // DB returns a SerieslyDB instance for the given DB. 69 | func (s *Seriesly) DB(db string) *SerieslyDB { 70 | return &SerieslyDB{s, db} 71 | } 72 | 73 | // Create creates a new database. 74 | func (s *Seriesly) Create(db string) error { 75 | u := *s.u 76 | u.Path = "/" + db 77 | req, err := http.NewRequest("PUT", u.String(), nil) 78 | if err != nil { 79 | return err 80 | } 81 | res, err := s.client.Do(req) 82 | if err != nil { 83 | return err 84 | } 85 | defer res.Body.Close() 86 | if res.StatusCode != 201 { 87 | return httputil.HTTPErrorf(res, "Error creating DB: %S -- %B") 88 | } 89 | return nil 90 | } 91 | 92 | // Delete destroys a database. 93 | func (s *Seriesly) Delete(db string) error { 94 | u := *s.u 95 | u.Path = "/" + db 96 | req, err := http.NewRequest("DELETE", u.String(), nil) 97 | if err != nil { 98 | return err 99 | } 100 | res, err := s.client.Do(req) 101 | if err != nil { 102 | return err 103 | } 104 | defer res.Body.Close() 105 | if res.StatusCode != 200 { 106 | return httputil.HTTPErrorf(res, "Error deleting DB: %S -- %B") 107 | } 108 | return nil 109 | } 110 | -------------------------------------------------------------------------------- /serieslyclient/db.go: -------------------------------------------------------------------------------- 1 | package serieslyclient 2 | 3 | import ( 4 | "encoding/json" 5 | "io" 6 | "net/http" 7 | "net/url" 8 | "strconv" 9 | 10 | "github.com/dustin/httputil" 11 | "github.com/dustin/seriesly/timelib" 12 | ) 13 | 14 | // SerieslyDB provides all the DB-specific operations. 15 | type SerieslyDB struct { 16 | s *Seriesly 17 | db string 18 | } 19 | 20 | // Name returns the name of this database. 21 | func (s *SerieslyDB) Name() string { 22 | return s.db 23 | } 24 | 25 | // URL returns a copy of the Seriesly client's URL pointed at a given DB 26 | func (s *SerieslyDB) URL() *url.URL { 27 | rv := *s.s.u 28 | rv.Path = "/" + s.db 29 | return &rv 30 | } 31 | 32 | // Info returns the info for a database. 33 | func (s *SerieslyDB) Info() (*DBInfo, error) { 34 | rv := &DBInfo{} 35 | res, err := s.s.client.Get(s.URL().String()) 36 | if err != nil { 37 | return rv, err 38 | } 39 | if res.StatusCode != 200 { 40 | return rv, httputil.HTTPError(res) 41 | } 42 | defer res.Body.Close() 43 | 44 | err = json.NewDecoder(res.Body).Decode(rv) 45 | return rv, err 46 | } 47 | 48 | // Compact the given database. 
49 | func (s *SerieslyDB) Compact() error { 50 | u := s.URL().String() + "/_compact" 51 | req, err := http.NewRequest("POST", u, nil) 52 | if err != nil { 53 | return err 54 | } 55 | res, err := s.s.client.Do(req) 56 | if err != nil { 57 | return err 58 | } 59 | res.Body.Close() 60 | if res.StatusCode != 200 { 61 | return httputil.HTTPErrorf(res, "error compacting: %S -- %B") 62 | } 63 | return nil 64 | } 65 | 66 | func setTimeParam(uv url.Values, name, val string) error { 67 | if val == "" { 68 | return nil 69 | } 70 | t, err := timelib.ParseTime(val) 71 | if err != nil { 72 | return err 73 | } 74 | uv.Set(name, strconv.FormatInt(t.UnixNano(), 10)) 75 | return nil 76 | } 77 | 78 | // Dump a database or range of a database to a Writer. 79 | // 80 | // w is the destination for the dump output. 81 | // from and to are both optional and will be parsed as a seriesly 82 | // timestamp. 83 | func (s *SerieslyDB) Dump(w io.Writer, from, to string) (int64, error) { 84 | u := s.URL() 85 | u.Path += "/_dump" 86 | params := url.Values{} 87 | if err := setTimeParam(params, "from", from); err != nil { 88 | return 0, err 89 | } 90 | if err := setTimeParam(params, "to", to); err != nil { 91 | return 0, err 92 | } 93 | 94 | u.RawQuery = params.Encode() 95 | 96 | res, err := s.s.client.Get(u.String()) 97 | if err != nil { 98 | return 0, err 99 | } 100 | defer res.Body.Close() 101 | 102 | if res.StatusCode != 200 { 103 | return 0, httputil.HTTPError(res) 104 | } 105 | 106 | return io.Copy(w, res.Body) 107 | } 108 | -------------------------------------------------------------------------------- /serieslyclient/query.go: -------------------------------------------------------------------------------- 1 | package serieslyclient 2 | 3 | import ( 4 | "fmt" 5 | "net/url" 6 | "strconv" 7 | "time" 8 | ) 9 | 10 | // Field represents a JSON pointer field and reducer for a query. 11 | type Field struct { 12 | Pointer, Reducer string 13 | } 14 | 15 | // Filter represents an exact-match filter in a query. 16 | type Filter struct { 17 | Pointer, Match string 18 | } 19 | 20 | // Query represents a seriesly query. 21 | type Query struct { 22 | From, To time.Time 23 | Group time.Duration 24 | Fields []Field 25 | Filters []Filter 26 | } 27 | 28 | func (q *Query) validate() error { 29 | if q.Group < 1 { 30 | return fmt.Errorf("Grouping value must be >0, was %v", q.Group) 31 | } 32 | if len(q.Fields) == 0 { 33 | return fmt.Errorf("Need at least one field, have %v", 34 | len(q.Fields)) 35 | } 36 | for _, f := range q.Fields { 37 | if !validReducer(f.Reducer) { 38 | return fmt.Errorf("Invalid reducer: %v", f.Reducer) 39 | } 40 | } 41 | return nil 42 | } 43 | 44 | func validReducer(r string) bool { 45 | return true 46 | } 47 | 48 | // Params converts this Query to query parameters. 
49 | func (q *Query) Params() url.Values { 50 | rv := url.Values{} 51 | rv.Set("group", strconv.FormatUint(uint64(q.Group/time.Millisecond), 10)) 52 | if !q.From.IsZero() { 53 | rv.Set("from", q.From.Format(time.RFC3339Nano)) 54 | } 55 | if !q.To.IsZero() { 56 | rv.Set("to", q.To.Format(time.RFC3339Nano)) 57 | } 58 | for _, f := range q.Fields { 59 | rv["ptr"] = append(rv["ptr"], f.Pointer) 60 | rv["reducer"] = append(rv["reducer"], f.Reducer) 61 | } 62 | for _, f := range q.Filters { 63 | rv["f"] = append(rv["f"], f.Pointer) 64 | rv["fv"] = append(rv["fv"], f.Match) 65 | } 66 | return rv 67 | } 68 | -------------------------------------------------------------------------------- /timelib/time.go: -------------------------------------------------------------------------------- 1 | // Package timelib provides time parsing functions. 2 | package timelib 3 | 4 | import ( 5 | "errors" 6 | "fmt" 7 | "math" 8 | "strconv" 9 | "time" 10 | ) 11 | 12 | var timeFormats = []string{ 13 | time.RFC3339Nano, 14 | time.RFC3339, 15 | time.RFC1123Z, 16 | time.RFC1123, 17 | time.UnixDate, 18 | time.ANSIC, 19 | time.RubyDate, 20 | "2006-01-02T15:04", 21 | "2006-01-02T15", 22 | "2006-01-02", 23 | "2006-01", 24 | "2006", 25 | } 26 | 27 | var errUnparseableTimestamp = errors.New("unparsable timestamp") 28 | 29 | var powTable = []int{ 30 | 10e8, 31 | 10e7, 32 | 10e6, 33 | 10e5, 34 | 10e4, 35 | 10e3, 36 | 10e2, 37 | 10e1, 38 | 10, 39 | 1, 40 | } 41 | 42 | // ParseCanonicalTime parses the canonical seriesly time format. 43 | // 44 | // This is a hand crafted parser since it's a really common path and I 45 | // could make it faster this way. 46 | func ParseCanonicalTime(in string) (time.Time, error) { 47 | if len(in) < 20 || in[len(in)-1] != 'Z' { 48 | return time.Time{}, errUnparseableTimestamp 49 | } 50 | 51 | if !(in[4] == '-' && in[7] == '-' && in[10] == 'T' && 52 | in[13] == ':' && in[16] == ':' && (in[19] == '.' 
|| in[19] == 'Z')) { 53 | return time.Time{}, fmt.Errorf("positionally incorrect: %v", in) 54 | } 55 | 56 | // 2012-08-28T21:24:35.37465188Z 57 | // 4 7 10 13 16 19 58 | // ----------------------------- 59 | // 0-4 5 8 11 14 17 20 60 | 61 | year, err := strconv.Atoi(in[0:4]) 62 | if err != nil { 63 | return time.Time{}, fmt.Errorf("error parsing year: %v", err) 64 | } 65 | 66 | month, err := strconv.Atoi(in[5:7]) 67 | if err != nil { 68 | return time.Time{}, fmt.Errorf("error parsing month: %v", err) 69 | } 70 | 71 | day, err := strconv.Atoi(in[8:10]) 72 | if err != nil { 73 | return time.Time{}, fmt.Errorf("error parsing day: %v", err) 74 | } 75 | 76 | hour, err := strconv.Atoi(in[11:13]) 77 | if err != nil { 78 | return time.Time{}, fmt.Errorf("error parsing hour: %v", err) 79 | } 80 | 81 | minute, err := strconv.Atoi(in[14:16]) 82 | if err != nil { 83 | return time.Time{}, fmt.Errorf("error parsing minute: %v", err) 84 | } 85 | 86 | second, err := strconv.Atoi(in[17:19]) 87 | if err != nil { 88 | return time.Time{}, fmt.Errorf("error parsing second: %v", err) 89 | } 90 | 91 | var nsecstr string 92 | if in[19] != 'Z' { 93 | nsecstr = in[20 : len(in)-1] 94 | } 95 | var nsec int 96 | 97 | if nsecstr != "" { 98 | nsec, err = strconv.Atoi(nsecstr) 99 | if err != nil { 100 | return time.Time{}, fmt.Errorf("error parsing nanoseconds: %v", err) 101 | } 102 | } 103 | 104 | nsec *= powTable[len(nsecstr)] 105 | 106 | return time.Date(year, time.Month(month), day, 107 | hour, minute, second, nsec, time.UTC), nil 108 | } 109 | 110 | // ParseTime parses any time format supported by seriesly quickly and 111 | // easily. 112 | // 113 | // The following timestamps are supported: 114 | // 115 | // 2012-08-28T21:24:35.37465188Z - RFC3339 (this is the canonical format) 116 | // 1346189075374651880 - nanoseconds since 1970-1-1 117 | // 1346189075374 - milliseconds since 1970-1-1, common in java 118 | // 1346189075 - seconds since 1970-1-1, common in unix 119 | // 2012-08-28T21:24:35Z - RFC3339 120 | // Tue, 28 Aug 2012 21:24:35 +0000 - RFC1123 + numeric timezone 121 | // Tue, 28 Aug 2012 21:24:35 UTC RFC1123 122 | // Tue Aug 28 21:24:35 UTC 2012 - Unix date 123 | // Tue Aug 28 21:24:35 2012 - ansi C timestamp 124 | // Tue Aug 28 21:24:35 +0000 2012 - ruby datestamp 125 | // 2012-08-28T21:24 - parsed as 2012-08-28T21:24:00Z 126 | // 2012-08-28T21 - parsed as 2012-08-28T21:00:00Z 127 | // 2012-08-28 - parsed as 2012-08-28T00:00:00Z 128 | // 2012-08 - parsed as 2012-08-01T00:00:00Z 129 | // 2012 - parsed as 2012-01-01T00:00:00Z 130 | func ParseTime(in string) (time.Time, error) { 131 | // First, try a few numerics 132 | n, err := strconv.ParseInt(in, 10, 64) 133 | if err == nil { 134 | switch { 135 | case n > int64(math.MaxInt32)*1000: 136 | // nanosecond timestamps 137 | return time.Unix(n/1e9, n%1e9), nil 138 | case n > int64(math.MaxInt32): 139 | // millisecond timestamps 140 | return time.Unix(n/1000, (n%1000)*1e6), nil 141 | case n > 10000: 142 | // second timestamps 143 | return time.Unix(n, 0), nil 144 | } 145 | } 146 | rv, err := ParseCanonicalTime(in) 147 | if err == nil { 148 | return rv, nil 149 | } 150 | for _, f := range timeFormats { 151 | parsed, err := time.Parse(f, in) 152 | if err == nil { 153 | return parsed, nil 154 | } 155 | } 156 | return time.Time{}, errUnparseableTimestamp 157 | } 158 | -------------------------------------------------------------------------------- /timelib/time_test.go: -------------------------------------------------------------------------------- 1 | package 
timelib 2 | 3 | import ( 4 | "log" 5 | "testing" 6 | "time" 7 | ) 8 | 9 | const exampleTimeString = "2012-08-28T21:24:35.37465188Z" 10 | const milliAccuracy = "2012-08-28T21:24:35.374Z" 11 | const secondAccuracy = "2012-08-28T21:24:35Z" 12 | 13 | var exampleTime time.Time 14 | 15 | func init() { 16 | var err error 17 | exampleTime, err = time.Parse(time.RFC3339, exampleTimeString) 18 | if err != nil { 19 | panic(err) 20 | } 21 | if exampleTimeString != exampleTime.UTC().Format(time.RFC3339Nano) { 22 | log.Panicf("Expected %v, got %v", exampleTimeString, 23 | exampleTime.UTC().Format(time.RFC3339Nano)) 24 | } 25 | } 26 | 27 | func TestTimeParsing(t *testing.T) { 28 | tests := []struct { 29 | input string 30 | exp string 31 | }{ 32 | {"1346189075374651880", exampleTimeString}, 33 | {"1346189075374", milliAccuracy}, 34 | {"1346189075", secondAccuracy}, 35 | {"2012-08-28T21:24:35.37465188Z", exampleTimeString}, 36 | {secondAccuracy, secondAccuracy}, 37 | {"Tue, 28 Aug 2012 21:24:35 +0000", secondAccuracy}, 38 | {"Tue, 28 Aug 2012 21:24:35 UTC", secondAccuracy}, 39 | {"Tue Aug 28 21:24:35 UTC 2012", secondAccuracy}, 40 | {"Tue Aug 28 21:24:35 2012", secondAccuracy}, 41 | {"Tue Aug 28 21:24:35 +0000 2012", secondAccuracy}, 42 | {"2012-08-28T21:24", "2012-08-28T21:24:00Z"}, 43 | {"2012-08-28T21", "2012-08-28T21:00:00Z"}, 44 | {"2012-08-28", "2012-08-28T00:00:00Z"}, 45 | {"2012-08", "2012-08-01T00:00:00Z"}, 46 | {"2012", "2012-01-01T00:00:00Z"}, 47 | } 48 | 49 | for _, x := range tests { 50 | tm, err := ParseTime(x.input) 51 | if err != nil { 52 | t.Errorf("Error on %v - %v", x.input, err) 53 | t.Fail() 54 | } 55 | got := tm.UTC().Format(time.RFC3339Nano) 56 | if x.exp != got { 57 | t.Errorf("Expected %v for %v, got %v", x.exp, x.input, got) 58 | t.Fail() 59 | } 60 | } 61 | } 62 | 63 | func TestCanonicalParser(t *testing.T) { 64 | tests := []struct { 65 | input string 66 | exp string 67 | }{ 68 | {"2012-08-28T21:24:35.374651883Z", ""}, 69 | {"2012-08-28T21:24:35.37465188Z", ""}, 70 | {"2012-08-28T21:24:35.3746518Z", ""}, 71 | {"2012-08-28T21:24:35.374651Z", ""}, 72 | {"2012-08-28T21:24:35.37465Z", ""}, 73 | {"2012-08-28T21:24:35.3746Z", ""}, 74 | {"2012-08-28T21:24:35.374Z", ""}, 75 | {"2012-08-28T21:24:35.37Z", ""}, 76 | {"2012-08-28T21:24:35.3Z", ""}, 77 | {"2012-08-28T21:24:35.0Z", "2012-08-28T21:24:35Z"}, 78 | {"2012-08-28T21:24:35.Z", "2012-08-28T21:24:35Z"}, 79 | {"2012-08-28T21:24:35Z", ""}, 80 | } 81 | 82 | for _, x := range tests { 83 | tm, err := ParseCanonicalTime(x.input) 84 | if err != nil { 85 | t.Errorf("Error on %v - %v", x.input, err) 86 | t.Fail() 87 | } 88 | got := tm.UTC().Format(time.RFC3339Nano) 89 | exp := x.exp 90 | if exp == "" { 91 | exp = x.input 92 | } 93 | if exp != got { 94 | t.Errorf("Expected %v for %v, got %v", x.exp, x.input, got) 95 | t.Fail() 96 | } 97 | } 98 | } 99 | 100 | func TestCanonicalParsingErrors(t *testing.T) { 101 | tests := []string{ 102 | "ZZZZZZZZZZZZZZZZZZZZ", 103 | "ZZZZ-ZZ-ZZTZZ:ZZ:ZZZ", 104 | "2014-ZZ-ZZTZZ:ZZ:ZZZ", 105 | "2014-03-ZZTZZ:ZZ:ZZZ", 106 | "2014-03-14TZZ:ZZ:ZZZ", 107 | "2014-03-14T15:ZZ:ZZZ", 108 | "2014-03-14T15:09:ZZZ", 109 | "2014-03-14T15:09:26.S35897Z", 110 | } 111 | 112 | for _, test := range tests { 113 | tm, err := ParseCanonicalTime(test) 114 | if err == nil { 115 | t.Errorf("No error on %q, got %v", test, tm) 116 | } 117 | } 118 | } 119 | 120 | func TestUnparseable(t *testing.T) { 121 | tm, err := ParseTime("an hour ago") 122 | if err != errUnparseableTimestamp { 123 | t.Fatalf("Expected unparseable, got %v/%v", tm, err) 
124 | } 125 | } 126 | 127 | func benchTimeParsing(b *testing.B, input string) { 128 | for i := 0; i < b.N; i++ { 129 | _, err := ParseTime(input) 130 | if err != nil { 131 | b.Fatalf("Error on %v - %v", input, err) 132 | } 133 | } 134 | } 135 | 136 | func BenchmarkParseTimeCanonicalDirect(b *testing.B) { 137 | input := "2012-08-28T21:24:35.37465188Z" 138 | for i := 0; i < b.N; i++ { 139 | _, err := ParseCanonicalTime(input) 140 | if err != nil { 141 | b.Fatalf("Error on %v - %v", input, err) 142 | } 143 | } 144 | } 145 | 146 | func BenchmarkParseTimeCanonicalStdlib(b *testing.B) { 147 | input := "2012-08-28T21:24:35.37465188Z" 148 | for i := 0; i < b.N; i++ { 149 | _, err := time.Parse(time.RFC3339, input) 150 | if err != nil { 151 | b.Fatalf("Error on %v - %v", input, err) 152 | } 153 | } 154 | } 155 | 156 | func BenchmarkParseTimeCanonical(b *testing.B) { 157 | benchTimeParsing(b, "2012-08-28T21:24:35.37465188Z") 158 | } 159 | 160 | func BenchmarkParseTimeMisc(b *testing.B) { 161 | benchTimeParsing(b, "Tue, 28 Aug 2012 21:24:35 +0000") 162 | } 163 | 164 | func BenchmarkParseTimeIntNano(b *testing.B) { 165 | benchTimeParsing(b, "1346189075374651880") 166 | } 167 | 168 | func BenchmarkParseTimeIntMillis(b *testing.B) { 169 | benchTimeParsing(b, "1346189075374") 170 | } 171 | 172 | func BenchmarkParseTimeIntSecs(b *testing.B) { 173 | benchTimeParsing(b, "1346189075") 174 | } 175 | -------------------------------------------------------------------------------- /tools/compact/compact.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "flag" 5 | "log" 6 | "sync" 7 | "time" 8 | 9 | "github.com/dustin/seriesly/serieslyclient" 10 | ) 11 | 12 | var verbose = flag.Bool("v", false, "verbosity") 13 | var noop = flag.Bool("n", false, "don't actually compact") 14 | var concurrency = flag.Int("j", 2, 15 | "number of concurrent compactions") 16 | 17 | func init() { 18 | log.SetFlags(log.Lmicroseconds) 19 | } 20 | 21 | func maybeFatal(err error, fmt string, args ...interface{}) { 22 | if err != nil { 23 | log.Fatalf(fmt, args...) 24 | } 25 | } 26 | 27 | func vlog(s string, a ...interface{}) { 28 | if *verbose { 29 | log.Printf(s, a...) 
30 | } 31 | } 32 | 33 | func compact(wg *sync.WaitGroup, s *serieslyclient.Seriesly, ch <-chan string) { 34 | defer wg.Done() 35 | 36 | for db := range ch { 37 | start := time.Now() 38 | vlog("Compacting %v", db) 39 | if !*noop { 40 | s.DB(db).Compact() 41 | } 42 | vlog("Finished compacting %v in %v", db, time.Since(start)) 43 | } 44 | } 45 | 46 | func main() { 47 | flag.Parse() 48 | 49 | if flag.NArg() == 0 { 50 | log.Fatalf("Seriesly URL required") 51 | } 52 | 53 | s, err := serieslyclient.New(flag.Arg(0)) 54 | maybeFatal(err, "Parsing %v: %v", flag.Arg(0), err) 55 | 56 | wg := &sync.WaitGroup{} 57 | ch := make(chan string) 58 | 59 | for i := 0; i < *concurrency; i++ { 60 | wg.Add(1) 61 | go compact(wg, s, ch) 62 | } 63 | 64 | dbs := flag.Args()[1:] 65 | if len(dbs) == 0 { 66 | dbs, err = s.List() 67 | maybeFatal(err, "Error listing DBs: %v", err) 68 | } 69 | 70 | for _, db := range dbs { 71 | ch <- db 72 | } 73 | close(ch) 74 | 75 | wg.Wait() 76 | } 77 | -------------------------------------------------------------------------------- /tools/dump/dump.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "compress/gzip" 5 | "flag" 6 | "io" 7 | "log" 8 | "os" 9 | "sync" 10 | "syscall" 11 | "time" 12 | 13 | "github.com/dustin/go-humanize" 14 | "github.com/dustin/httputil" 15 | "github.com/dustin/seriesly/serieslyclient" 16 | ) 17 | 18 | var ( 19 | verbose = flag.Bool("v", false, "verbosity") 20 | concurrency = flag.Int("j", 2, "number of concurrent dumps") 21 | dbName = flag.String("db", "", "which db to dump (default: all)") 22 | noop = flag.Bool("n", false, "if true, don't actually write dumps") 23 | from = flag.String("from", "", "oldest key to dump") 24 | to = flag.String("to", "", "newest key to dump") 25 | formatStr = flag.String("format", "%n.json.gz", "dump name format") 26 | ) 27 | 28 | const sigInfo = syscall.Signal(29) 29 | 30 | func init() { 31 | log.SetFlags(log.Lmicroseconds) 32 | } 33 | 34 | func maybeFatal(err error, fmt string, args ...interface{}) { 35 | if err != nil { 36 | log.Fatalf(fmt, args...) 37 | } 38 | } 39 | 40 | func vlog(s string, a ...interface{}) { 41 | if *verbose { 42 | log.Printf(s, a...) 
43 | } 44 | } 45 | 46 | func compress(w io.Writer) io.WriteCloser { 47 | z, err := gzip.NewWriterLevel(w, gzip.BestCompression) 48 | maybeFatal(err, "NewWriterLevel: %v", err) 49 | return z 50 | } 51 | 52 | func dumpOne(s *serieslyclient.SerieslyDB, t time.Time) (int64, error) { 53 | fn := format(*formatStr, s.Name(), t) 54 | outf, err := os.Create(fn) 55 | if err != nil { 56 | return 0, err 57 | } 58 | defer outf.Close() 59 | 60 | z := compress(outf) 61 | defer z.Close() 62 | 63 | return s.Dump(z, *from, *to) 64 | } 65 | 66 | func dump(wg *sync.WaitGroup, s *serieslyclient.Seriesly, ch <-chan string) { 67 | defer wg.Done() 68 | 69 | t := time.Now() 70 | for db := range ch { 71 | start := time.Now() 72 | vlog("Dumping %v", db) 73 | if *noop { // honor -n: don't actually write the dump 74 | continue 75 | } 76 | n, err := dumpOne(s.DB(db), t) 77 | maybeFatal(err, "Error dumping %v: %v", db, err) 78 | vlog("Dumped %v of %v in %v", 79 | humanize.Bytes(uint64(n)), db, time.Since(start)) 80 | } 81 | } 82 | 83 | func main() { 84 | flag.Parse() 85 | 86 | httputil.InitHTTPTracker(false) 87 | 88 | if flag.NArg() == 0 { 89 | log.Fatalf("Seriesly URL required") 90 | } 91 | 92 | s, err := serieslyclient.New(flag.Arg(0)) 93 | maybeFatal(err, "Parsing %v: %v", flag.Arg(0), err) 94 | 95 | wg := &sync.WaitGroup{} 96 | ch := make(chan string) 97 | 98 | for i := 0; i < *concurrency; i++ { 99 | wg.Add(1) 100 | go dump(wg, s, ch) 101 | } 102 | 103 | if *dbName == "" { 104 | dbs, err := s.List() 105 | maybeFatal(err, "Error listing: %v", err) 106 | for _, db := range dbs { 107 | ch <- db 108 | } 109 | } else { 110 | ch <- *dbName 111 | } 112 | close(ch) 113 | 114 | wg.Wait() 115 | } 116 | -------------------------------------------------------------------------------- /tools/dump/format.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "bytes" 5 | "strconv" 6 | "strings" 7 | "text/scanner" 8 | "time" 9 | ) 10 | 11 | func twodig(s string) string { 12 | if len(s) == 1 { 13 | return "0" + s 14 | } 15 | return s 16 | } 17 | 18 | func format(str, dbname string, tm time.Time) string { 19 | buf := bytes.Buffer{} 20 | 21 | var s scanner.Scanner 22 | s.Init(strings.NewReader(str)) 23 | s.Mode = 0 24 | s.Whitespace = 0 25 | for tok := s.Scan(); tok != scanner.EOF; tok = s.Scan() { 26 | if tok != '%' { 27 | buf.WriteRune(tok) 28 | continue 29 | } 30 | 31 | switch s := s.Scan(); s { 32 | case '%': 33 | buf.WriteRune('%') 34 | case 'n': 35 | buf.WriteString(dbname) 36 | case 'Y', 'y': 37 | buf.WriteString(strconv.Itoa(tm.Year())) 38 | case 'm': 39 | buf.WriteString(strconv.Itoa(int(tm.Month()))) 40 | case 'd': 41 | buf.WriteString(strconv.Itoa(tm.Day())) 42 | case 'H': 43 | buf.WriteString(twodig(strconv.Itoa(tm.Hour()))) 44 | case 'M': 45 | buf.WriteString(twodig(strconv.Itoa(tm.Minute()))) 46 | case 'S': 47 | buf.WriteString(twodig(strconv.Itoa(tm.Second()))) 48 | } 49 | } 50 | 51 | return buf.String() 52 | } 53 | -------------------------------------------------------------------------------- /tools/dump/format_test.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "testing" 5 | "time" 6 | ) 7 | 8 | var testTime = time.Date(2014, 3, 16, 23, 9, 11, 82859, time.UTC) 9 | 10 | func TestFormats(t *testing.T) { 11 | tests := map[string]string{ 12 | "": "", 13 | "plain": "plain", 14 | "per%%cent": "per%cent", 15 | "%n-thing": "dbname-thing", 16 | "dump/%y/%m/%d-%H-%M-%S-%n": "dump/2014/3/16-23-09-11-dbname", 17 | } 18 | 19 | for in, 
/tools/load/load.go:
--------------------------------------------------------------------------------
1 | package main
2 | 
3 | import (
4 | 	"bytes"
5 | 	"encoding/json"
6 | 	"flag"
7 | 	"io"
8 | 	"log"
9 | 	"net/http"
10 | 	"os"
11 | 	"time"
12 | 
13 | 	"github.com/dustin/httputil"
14 | 	"github.com/dustin/seriesly/timelib"
15 | )
16 | 
17 | var min = flag.String("min", "", "minimum timestamp (RFC3339)")
18 | 
19 | func maybeFatal(err error) {
20 | 	if err != nil {
21 | 		log.Fatal(err)
22 | 	}
23 | }
24 | 
25 | func setupDb(u string) {
26 | 	req, err := http.NewRequest("PUT", u, nil)
27 | 	maybeFatal(err)
28 | 	res, err := http.DefaultClient.Do(req)
29 | 	maybeFatal(err)
30 | 	res.Body.Close()
31 | }
32 | 
33 | func sendOne(u, k string, body []byte) {
34 | 	resp, err := http.DefaultClient.Post(u+"?ts="+k,
35 | 		"application/json", bytes.NewReader(body))
36 | 	maybeFatal(err)
37 | 	defer resp.Body.Close()
38 | 	if resp.StatusCode >= 300 || resp.StatusCode < 200 {
39 | 		log.Fatal(httputil.HTTPErrorf(resp, "error on %v: %S -- %B", k))
40 | 	}
41 | }
42 | 
43 | func parseMinTime() time.Time {
44 | 	tm, err := timelib.ParseTime(*min)
45 | 	if err != nil {
46 | 		tm = time.Time{} // -min unset or unparseable: no minimum
47 | 	}
48 | 	return tm
49 | }
50 | 
51 | func main() {
52 | 	flag.Parse()
53 | 
54 | 	if flag.NArg() < 1 {
55 | 		log.Fatalf("Usage: gzip -dc backup.gz | %v http://seriesly:3133/dbname",
56 | 			os.Args[0])
57 | 	}
58 | 
59 | 	httputil.InitHTTPTracker(false)
60 | 
61 | 	u := flag.Arg(0)
62 | 	setupDb(u)
63 | 
64 | 	minTime := parseMinTime()
65 | 
66 | 	t := time.Tick(5 * time.Second)
67 | 	i := 0
68 | 	var latestKey string
69 | 
70 | 	d := json.NewDecoder(os.Stdin)
71 | 	for {
72 | 		kv := map[string]*json.RawMessage{}
73 | 
74 | 		err := d.Decode(&kv)
75 | 		if err == io.EOF {
76 | 			log.Printf("Done! Processed %v items. Last was %v",
77 | 				i, latestKey)
78 | 			break
79 | 		}
80 | 		maybeFatal(err)
81 | 
82 | 		for k, v := range kv {
83 | 			if !minTime.IsZero() {
84 | 				thist, err := timelib.ParseTime(k)
85 | 				if err == nil && minTime.After(thist) {
86 | 					continue // older than -min; skip
87 | 				}
88 | 			}
89 | 			body := []byte(*v)
90 | 			sendOne(u, k, body)
91 | 			latestKey = k
92 | 		}
93 | 
94 | 		i++
95 | 		select {
96 | 		case <-t:
97 | 			log.Printf("Processed %v items, latest was %v", i, latestKey)
98 | 		default:
99 | 		}
100 | 	}
101 | }
102 | 
--------------------------------------------------------------------------------
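[Editor's note: load.go reads stdin as a stream of JSON objects, each mapping one or more timestamp keys to documents; every key is re-posted to the target database as its ts parameter. A dump produced by the dump tool above decompresses to this shape; the values here are illustrative only:]

    {"1396288771413535914": {"status": 200, "latency_ms": 12}}
    {"1396288772420831818": {"status": 200, "latency_ms": 9}}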
Last was %v", 77 | i, latestKey) 78 | break 79 | } 80 | maybeFatal(err) 81 | 82 | for k, v := range kv { 83 | if !minTime.IsZero() { 84 | thist, err := timelib.ParseTime(k) 85 | if err == nil && minTime.After(thist) { 86 | continue 87 | } 88 | } 89 | body := []byte(*v) 90 | sendOne(u, k, body) 91 | latestKey = k 92 | } 93 | 94 | i++ 95 | select { 96 | case <-t: 97 | log.Printf("Processed %v items, latest was %v", i, latestKey) 98 | default: 99 | } 100 | } 101 | } 102 | -------------------------------------------------------------------------------- /tools/sample/sample.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "flag" 5 | "fmt" 6 | "io" 7 | "io/ioutil" 8 | "log" 9 | "net/http" 10 | "net/url" 11 | "os" 12 | "strconv" 13 | "time" 14 | ) 15 | 16 | var ( 17 | pollFreq = flag.Duration("freq", 5*time.Second, 18 | "How often to poll URLs (0 == one-shot)") 19 | responseTimeout = flag.Duration("hdrtimeout", 500*time.Millisecond, 20 | "HTTP response header timeout") 21 | verbose = flag.Bool("v", false, "verbose logging") 22 | ) 23 | 24 | func init() { 25 | flag.Usage = func() { 26 | fmt.Fprintf(os.Stderr, "Usage: %v [flags] fromurl tourl\n", os.Args[0]) 27 | fmt.Fprintf(os.Stderr, "Flags:\n") 28 | flag.PrintDefaults() 29 | } 30 | } 31 | 32 | func httpCopy(dest, src string) error { 33 | sres, err := http.Get(src) 34 | if err != nil { 35 | return err 36 | } 37 | defer sres.Body.Close() 38 | if sres.StatusCode != 200 { 39 | return fmt.Errorf("HTTP error getting src data from %v: %v", src, sres.Status) 40 | } 41 | 42 | dres, err := http.Post(dest, sres.Header.Get("Content-Type"), sres.Body) 43 | if err != nil { 44 | return err 45 | } 46 | defer dres.Body.Close() 47 | 48 | if dres.StatusCode != 201 { 49 | errmsg, _ := ioutil.ReadAll(io.LimitReader(dres.Body, 512)) 50 | return fmt.Errorf("HTTP Error posting result to %v: %v\n%s", 51 | dest, dres.StatusCode, errmsg) 52 | } 53 | return nil 54 | } 55 | 56 | func poll(tourl, fromurl string, t time.Time) { 57 | if *verbose { 58 | log.Printf("Copying from %v to %v", fromurl, tourl) 59 | defer func() { log.Printf("Finished copy in %v", time.Since(t)) }() 60 | } 61 | du := mustParseURL(tourl) 62 | du.RawQuery = "ts=" + strconv.FormatInt(t.UnixNano(), 10) 63 | 64 | if err := httpCopy(du.String(), fromurl); err != nil { 65 | log.Printf("Error copying data: %v", err) 66 | } 67 | } 68 | 69 | func mustParseURL(ustr string) *url.URL { 70 | u, e := url.Parse(ustr) 71 | if e != nil { 72 | log.Fatalf("Error parsing URL %q: %v", ustr, e) 73 | } 74 | return u 75 | } 76 | 77 | func initHTTP() { 78 | http.DefaultClient = &http.Client{ 79 | Transport: &http.Transport{ 80 | DisableKeepAlives: true, 81 | ResponseHeaderTimeout: *responseTimeout, 82 | }, 83 | } 84 | } 85 | 86 | func main() { 87 | flag.Parse() 88 | 89 | if flag.NArg() < 2 { 90 | flag.Usage() 91 | os.Exit(64) 92 | } 93 | 94 | fromurl, tourl := flag.Arg(0), flag.Arg(1) 95 | 96 | initHTTP() 97 | 98 | poll(tourl, fromurl, time.Now()) 99 | if *pollFreq > 0 { 100 | for t := range time.Tick(*pollFreq) { 101 | poll(tourl, fromurl, t) 102 | } 103 | } 104 | } 105 | -------------------------------------------------------------------------------- /tools/serieslyinfo/serieslyinfo.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "encoding/json" 5 | "flag" 6 | "fmt" 7 | "html/template" 8 | "io/ioutil" 9 | "log" 10 | "os" 11 | "text/tabwriter" 12 | 13 | 
"github.com/dustin/go-humanize" 14 | "github.com/dustin/seriesly/serieslyclient" 15 | ) 16 | 17 | var ( 18 | verbose = flag.Bool("v", false, "verbose") 19 | short = flag.Bool("short", false, "short form") 20 | templateSrc = flag.String("t", "", "Display template") 21 | templateFile = flag.String("T", "", "Display template filename") 22 | ) 23 | 24 | const defaultTemplate = `{{range .}} 25 | {{.DBName}}: 26 | Space Used: {{.SpaceUsed|bytes}} 27 | Last Sequence: {{.SpaceUsed|comma}} 28 | Header Position: {{.HeaderPos|comma}} 29 | Document Count: {{.DocCount|comma}} 30 | Deleted Count: {{.DeletedCount|comma}} 31 | {{end}} 32 | ` 33 | 34 | const shortTemplate = `dbname docs space used 35 | ------ ---- ---------- 36 | {{range .}}{{.DBName}} {{.DocCount|comma}} {{.SpaceUsed|bytes}} 37 | {{end}}` 38 | 39 | var funcMap = template.FuncMap{ 40 | "comma": func(n json.Number) string { 41 | nint, err := n.Int64() 42 | maybeFatal(err, "Invalid int64: %v: %v", n, err) 43 | return humanize.Comma(nint) 44 | }, 45 | "bytes": func(n json.Number) string { 46 | nint, err := n.Int64() 47 | maybeFatal(err, "Invalid int64: %v: %v", n, err) 48 | return humanize.Bytes(uint64(nint)) 49 | }} 50 | 51 | func init() { 52 | log.SetFlags(log.Lmicroseconds) 53 | flag.Usage = func() { 54 | fmt.Fprintf(os.Stderr, "Usage: %v [-v] http://serieslyhost:3133/ [dbnames...]\n", 55 | os.Args[0]) 56 | flag.PrintDefaults() 57 | } 58 | } 59 | 60 | func maybeFatal(err error, fmt string, args ...interface{}) { 61 | if err != nil { 62 | log.Fatalf(fmt, args...) 63 | } 64 | } 65 | 66 | func vlog(s string, a ...interface{}) { 67 | if *verbose { 68 | log.Printf(s, a...) 69 | } 70 | } 71 | 72 | func describe(s *serieslyclient.Seriesly, dbs ...string) <-chan *serieslyclient.DBInfo { 73 | rv := make(chan *serieslyclient.DBInfo) 74 | go func() { 75 | defer close(rv) 76 | for _, db := range dbs { 77 | di, err := s.DB(db).Info() 78 | maybeFatal(err, "Couldn't fetch info for %v: %v", db, err) 79 | di.DBName = db 80 | rv <- di 81 | } 82 | }() 83 | return rv 84 | } 85 | 86 | func getTemplate(tdefault string) *template.Template { 87 | tmplstr := *templateSrc 88 | if tmplstr == "" { 89 | switch *templateFile { 90 | case "": 91 | tmplstr = tdefault 92 | case "-": 93 | td, err := ioutil.ReadAll(os.Stdin) 94 | maybeFatal(err, "Error reading template from stdin: %v", err) 95 | tmplstr = string(td) 96 | default: 97 | td, err := ioutil.ReadFile(*templateFile) 98 | maybeFatal(err, "Error reading template file: %v", err) 99 | tmplstr = string(td) 100 | } 101 | } 102 | 103 | tmpl, err := template.New("").Funcs(funcMap).Parse(tmplstr) 104 | maybeFatal(err, "Error parsing template: %v", err) 105 | return tmpl 106 | } 107 | 108 | func main() { 109 | flag.Parse() 110 | 111 | if flag.NArg() < 1 { 112 | flag.Usage() 113 | os.Exit(64) 114 | } 115 | 116 | s, err := serieslyclient.New(flag.Arg(0)) 117 | maybeFatal(err, "Couldn't set up client: %v", err) 118 | 119 | dbs := flag.Args()[1:] 120 | if len(dbs) == 0 { 121 | dbs, err = s.List() 122 | maybeFatal(err, "Error listing DBs: %v", err) 123 | } 124 | 125 | tsrc := defaultTemplate 126 | if *short { 127 | tsrc = shortTemplate 128 | } 129 | 130 | tmpl := getTemplate(tsrc) 131 | 132 | tw := tabwriter.NewWriter(os.Stdout, 8, 8, 2, ' ', 0) 133 | tmpl.Execute(tw, describe(s, dbs...)) 134 | defer tw.Flush() 135 | } 136 | -------------------------------------------------------------------------------- /util.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 
| "reflect" 5 | ) 6 | 7 | // Thanks to remy_o in #go-nuts for this. 8 | func closeAll(chans interface{}) { 9 | v := reflect.ValueOf(chans) 10 | for i, imax := 0, v.Len(); i < imax; i++ { 11 | v.Index(i).Close() 12 | } 13 | } 14 | -------------------------------------------------------------------------------- /util_test.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "testing" 5 | ) 6 | 7 | func TestCloseAllEmpty(t *testing.T) { 8 | closeAll([]chan int{}) 9 | } 10 | 11 | func TestCloseAllTwo(t *testing.T) { 12 | ichan := make(chan int) 13 | i2chan := make(chan int) 14 | closeAll([]chan int{ichan, i2chan}) 15 | _, ok := <-ichan 16 | if ok { 17 | t.Errorf("ichan wasn't closed") 18 | } 19 | _, ok = <-i2chan 20 | if ok { 21 | t.Errorf("bchan wasn't closed") 22 | } 23 | } 24 | 25 | func buildTestChans(n int) []chan int { 26 | s := make([]chan int, 0, n) 27 | for i := 0; i < n; i++ { 28 | s = append(s, make(chan int)) 29 | } 30 | return s 31 | } 32 | 33 | func runBenchmarkCloseRange(b *testing.B, n int) { 34 | b.StopTimer() 35 | for i := 0; i < b.N; i++ { 36 | s := buildTestChans(n) 37 | 38 | b.StartTimer() 39 | for i := range s { 40 | close(s[i]) 41 | } 42 | b.StopTimer() 43 | } 44 | } 45 | 46 | func runBenchmarkCloseAll(b *testing.B, n int) { 47 | b.StopTimer() 48 | for i := 0; i < b.N; i++ { 49 | s := buildTestChans(n) 50 | 51 | b.StartTimer() 52 | closeAll(s) 53 | b.StopTimer() 54 | } 55 | } 56 | 57 | func BenchmarkCloseRange1(b *testing.B) { 58 | runBenchmarkCloseRange(b, 1) 59 | } 60 | 61 | func BenchmarkCloseRange3(b *testing.B) { 62 | runBenchmarkCloseRange(b, 3) 63 | } 64 | 65 | func BenchmarkCloseRange1000(b *testing.B) { 66 | runBenchmarkCloseRange(b, 1000) 67 | } 68 | 69 | func BenchmarkCloseAll1(b *testing.B) { 70 | runBenchmarkCloseAll(b, 1) 71 | } 72 | 73 | func BenchmarkCloseAll3(b *testing.B) { 74 | runBenchmarkCloseAll(b, 3) 75 | } 76 | 77 | func BenchmarkCloseAll1000(b *testing.B) { 78 | runBenchmarkCloseAll(b, 1000) 79 | } 80 | --------------------------------------------------------------------------------