├── LICENSE ├── README.md ├── binarysearch ├── .gitignore ├── README.txt ├── main.go └── main_test.go ├── concurrent ├── map.go └── map_test.go ├── convert ├── README.md ├── hash.go └── hash_test.go ├── cproto ├── README.md ├── plist │ ├── go.capnp │ ├── go.capnp.go │ ├── schema.capnp │ └── schema.capnp.go └── rw │ ├── main.go │ └── rw ├── db ├── .gitignore ├── README.md ├── main.go └── main_test.go ├── etcdRAFT ├── .gitignore ├── doc.go └── raft.go ├── flats ├── flats.go ├── flats_test.go ├── fuids.fb ├── fuids │ └── UidList.go ├── uids.pb.go └── uids.proto ├── gob ├── README.md ├── main.go └── main_test.go ├── goraft ├── .gitignore └── node.go ├── grpc ├── codec │ └── codec.go ├── fb │ ├── fb.pb.go │ └── fb.proto ├── fbclient │ ├── fbclient │ └── main.go └── fbserver │ ├── fbserver │ └── main.go ├── icu └── icudt57l.dat ├── intersects ├── NewVSOld.txt ├── README.txt ├── gnuplot │ ├── 100-0.00.png │ ├── 100-0.80.png │ ├── 1000-0.00.png │ ├── 1000-0.80.png │ ├── 10000-0.00.png │ ├── 10000-0.80.png │ ├── Bin.0.00.100.dat │ ├── Bin.0.00.1000.dat │ ├── Bin.0.00.10000.dat │ ├── Bin.0.80.100.dat │ ├── Bin.0.80.1000.dat │ ├── Bin.0.80.10000.dat │ ├── Cur.0.00.100.dat │ ├── Cur.0.00.1000.dat │ ├── Cur.0.00.10000.dat │ ├── Cur.0.80.100.dat │ ├── Cur.0.80.1000.dat │ ├── Cur.0.80.10000.dat │ ├── Two.0.00.100.dat │ ├── Two.0.00.1000.dat │ ├── Two.0.00.10000.dat │ ├── Two.0.80.100.dat │ ├── Two.0.80.1000.dat │ ├── Two.0.80.10000.dat │ ├── bench.data │ ├── convert.sh │ ├── gnu.plot │ ├── prev │ │ ├── Bin.100.dat │ │ ├── Bin.1000.dat │ │ ├── Bin.10000.dat │ │ ├── Bin.100000.dat │ │ ├── Mer.100.dat │ │ ├── Mer.1000.dat │ │ ├── Mer.10000.dat │ │ ├── Mer.100000.dat │ │ ├── Two.100.dat │ │ ├── Two.1000.dat │ │ ├── Two.10000.dat │ │ ├── Two.100000.dat │ │ ├── sz-hundred.png │ │ ├── sz-tenthousand.png │ │ └── sz-thousand.png │ ├── size.overlap.dat │ ├── sz100.txt │ └── tues-afternoon.bench ├── list.go ├── list.pb.go ├── list.proto ├── list_test.go └── normallist.go ├── jchiu ├── benchhash │ ├── README.md │ ├── benchhash.go │ ├── benchhash_test.go │ ├── gomap.go │ ├── gotomicmap.go │ ├── results │ │ ├── benchhash.q10.txt │ │ ├── benchhash.q100.txt │ │ └── benchhash.q1000.txt │ └── run.sh ├── benchhash2 │ ├── README.md │ ├── benchhash_test.go │ ├── gomap.go │ ├── gotomicmap.go │ ├── plot.png │ ├── plot.py │ └── results.txt ├── chanqueue │ ├── README.md │ ├── chanqueue_test.go │ ├── cqueue.go │ └── queue.go ├── heaporstack │ └── main.go ├── profile_shardedhash │ ├── 20160918_155605.cpu.svg │ ├── 20160918_155605.mem.svg │ ├── README.md │ ├── loader.20160918_155605.cpu.pprof │ ├── loader.20160918_155605.mem.pprof │ ├── loader.20160918_170016.cpu.pprof │ ├── loader.20160918_170016.cpu.svg │ ├── loader.20160918_170016.mem.pprof │ └── loader.20160918_170016.mem.svg └── wstring │ ├── cgo.go │ ├── sample │ ├── main.go │ └── sample │ ├── util.go │ ├── wstring.cc │ ├── wstring.go │ └── wstring.h ├── raft ├── .gitignore └── main.go ├── rdbdemo ├── cpp │ └── main.cc └── main.go ├── rocksdbswig ├── README.md ├── add_cgo_flags.py ├── build.sh ├── cc │ ├── extra.h │ ├── librocksdbswigwrap.a │ ├── rocksdbswig.i │ ├── rocksdbswig_wrap.cxx │ ├── rocksdbswig_wrap.h │ └── rocksdbswig_wrap.o ├── cgo_flags.txt ├── clean.sh ├── rocksdbswig.go └── tmp │ ├── add_cgo_flags.py │ ├── build.sh │ ├── cc │ ├── extra.h │ ├── libtmp.a │ ├── tmp.i │ ├── tmp_wrap.cxx │ ├── tmp_wrap.h │ └── tmp_wrap.o │ ├── cgo_flags.txt │ ├── tmp.go │ └── tmp_test.go ├── rpc ├── .gitignore ├── client.go ├── main.go └── server.go ├── sortedencoding └── 
sortedencoding.go ├── usecgo ├── .gitignore └── main.go ├── vrpc ├── .gitignore ├── cert.pem ├── key.pem ├── server.go ├── server_test.go └── tls │ ├── .gitignore │ └── main.go ├── x.go └── x_test.go /README.md: -------------------------------------------------------------------------------- 1 | # experiments 2 | -------------------------------------------------------------------------------- /binarysearch/.gitignore: -------------------------------------------------------------------------------- 1 | /binarysearch 2 | -------------------------------------------------------------------------------- /binarysearch/README.txt: -------------------------------------------------------------------------------- 1 | Based on benchmarks, Iterative implementation runs ~25% faster than recursive. 2 | 3 | $ go test -v -bench . 4 | testing: warning: no tests to run 5 | PASS 6 | BenchmarkRec_100-6 10000000 139 ns/op 7 | BenchmarkRec_1000-6 10000000 179 ns/op 8 | BenchmarkRec_10000-6 10000000 220 ns/op 9 | BenchmarkIter_100-6 20000000 110 ns/op 10 | BenchmarkIter_1000-6 10000000 138 ns/op 11 | BenchmarkIter_10000-6 10000000 166 ns/op 12 | ok github.com/dgraph-io/experiments/binarysearch 11.642s 13 | 14 | -------------------------------------------------------------------------------- /binarysearch/main.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "flag" 5 | "fmt" 6 | ) 7 | 8 | var fval = flag.Int("val", 100, "Val") 9 | 10 | func findSmallerOrEqualsRecr(ar []int, maxv, left, right int) int { 11 | if left > right { 12 | return -1 13 | } 14 | 15 | pos := (left + right) / 2 16 | val := ar[pos] 17 | if val > maxv { 18 | return findSmallerOrEqualsRecr(ar, maxv, left, pos-1) 19 | } 20 | 21 | if val == maxv { 22 | return pos 23 | } 24 | 25 | tidx := findSmallerOrEqualsRecr(ar, maxv, pos+1, right) 26 | if tidx == -1 { 27 | return pos 28 | } 29 | return tidx 30 | } 31 | 32 | func findSmallerOrEqualsIter(ar []int, maxv int) int { 33 | left, right := 0, len(ar)-1 34 | sofar := -1 35 | for left <= right { 36 | pos := (left + right) / 2 37 | val := ar[pos] 38 | if val > maxv { 39 | right = pos - 1 40 | continue 41 | } 42 | 43 | if val == maxv { 44 | return pos 45 | } 46 | 47 | sofar = pos 48 | left = pos + 1 49 | } 50 | return sofar 51 | } 52 | 53 | func findSmallerOrEqualsLinear(ar []int, maxv int) int { 54 | found := -1 55 | for i := 0; i < len(ar); i++ { 56 | if ar[i] <= maxv { 57 | found = i 58 | } else { 59 | break 60 | } 61 | } 62 | return found 63 | } 64 | 65 | func main() { 66 | flag.Parse() 67 | ar := []int{2, 3, 5} 68 | { 69 | idx := findSmallerOrEqualsRecr(ar, *fval, 0, len(ar)-1) 70 | if idx >= 0 { 71 | fmt.Printf("Idx: %v. Value: %v\n", idx, ar[idx]) 72 | } else { 73 | fmt.Println("On the left bound") 74 | } 75 | } 76 | 77 | { 78 | i := findSmallerOrEqualsIter(ar, *fval) 79 | if i >= 0 { 80 | fmt.Printf("Idx: %v. 
Value: %v\n", i, ar[i]) 81 | } else { 82 | fmt.Println("On the left bound") 83 | } 84 | } 85 | } 86 | -------------------------------------------------------------------------------- /binarysearch/main_test.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "math/rand" 5 | "testing" 6 | ) 7 | 8 | func benchRecr(b *testing.B, sz int) { 9 | ar := make([]int, sz) 10 | for i := 0; i < len(ar); i++ { 11 | ar[i] = 3 * i 12 | } 13 | b.ResetTimer() 14 | for i := 0; i < b.N; i++ { 15 | val := rand.Intn(3 * sz) 16 | findSmallerOrEqualsRecr(ar, val, 0, sz-1) 17 | } 18 | } 19 | 20 | func BenchmarkRec_100(b *testing.B) { benchRecr(b, 100) } 21 | func BenchmarkRec_1000(b *testing.B) { benchRecr(b, 1000) } 22 | func BenchmarkRec_10000(b *testing.B) { benchRecr(b, 10000) } 23 | 24 | func benchIter(b *testing.B, sz int) { 25 | ar := make([]int, sz) 26 | for i := 0; i < len(ar); i++ { 27 | ar[i] = 3 * i 28 | } 29 | b.ResetTimer() 30 | for i := 0; i < b.N; i++ { 31 | val := rand.Intn(3 * sz) 32 | findSmallerOrEqualsIter(ar, val) 33 | } 34 | } 35 | 36 | func BenchmarkIter_100(b *testing.B) { benchIter(b, 100) } 37 | func BenchmarkIter_1000(b *testing.B) { benchIter(b, 1000) } 38 | func BenchmarkIter_10000(b *testing.B) { benchIter(b, 10000) } 39 | 40 | func benchLinear(b *testing.B, sz int) { 41 | ar := make([]int, sz) 42 | for i := 0; i < len(ar); i++ { 43 | ar[i] = 3 * i 44 | } 45 | b.ResetTimer() 46 | for i := 0; i < b.N; i++ { 47 | val := rand.Intn(3 * sz) 48 | findSmallerOrEqualsLinear(ar, val) 49 | } 50 | } 51 | 52 | func BenchmarkLinear_100(b *testing.B) { benchLinear(b, 100) } 53 | func BenchmarkLinear_1000(b *testing.B) { benchLinear(b, 1000) } 54 | func BenchmarkLinear_10000(b *testing.B) { benchLinear(b, 10000) } 55 | -------------------------------------------------------------------------------- /concurrent/map.go: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2015 Manish R Jain 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License. 6 | * You may obtain a copy of the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | * 10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | * See the License for the specific language governing permissions and 14 | * limitations under the License. 
15 | */ 16 | 17 | package concurrent 18 | 19 | import ( 20 | "log" 21 | "math/rand" 22 | "sync/atomic" 23 | "unsafe" 24 | ) 25 | 26 | type kv struct { 27 | k uint64 28 | v unsafe.Pointer 29 | } 30 | 31 | type bucket struct { 32 | elems [8]kv 33 | } 34 | 35 | const ( 36 | MUTABLE = iota 37 | IMMUTABLE 38 | ) 39 | 40 | type container struct { 41 | status int32 42 | sz uint64 43 | list []*bucket 44 | numElems uint32 45 | } 46 | 47 | type Map struct { 48 | cs [2]unsafe.Pointer 49 | size uint32 50 | } 51 | 52 | func powOf2(sz int) bool { 53 | return sz > 0 && (sz&(sz-1)) == 0 54 | } 55 | 56 | func initContainer(cs *container, sz uint64) { 57 | cs.status = MUTABLE 58 | cs.sz = sz 59 | cs.list = make([]*bucket, sz) 60 | for i := range cs.list { 61 | cs.list[i] = new(bucket) 62 | } 63 | } 64 | 65 | func NewMap(sz int) *Map { 66 | if !powOf2(sz) { 67 | log.Fatal("Map can only be created for a power of 2.") 68 | } 69 | 70 | c := new(container) 71 | initContainer(c, uint64(sz)) 72 | 73 | m := new(Map) 74 | m.cs[MUTABLE] = unsafe.Pointer(c) 75 | m.cs[IMMUTABLE] = nil 76 | return m 77 | } 78 | 79 | func (c *container) get(k uint64) unsafe.Pointer { 80 | bi := k & (c.sz - 1) 81 | b := c.list[bi] 82 | for i := range b.elems { 83 | e := &b.elems[i] 84 | if ek := atomic.LoadUint64(&e.k); ek == k { 85 | return e.v 86 | } 87 | } 88 | return nil 89 | } 90 | 91 | func (c *container) getOrInsert(k uint64, v unsafe.Pointer) unsafe.Pointer { 92 | bi := k & (c.sz - 1) 93 | b := c.list[bi] 94 | for i := range b.elems { 95 | e := &b.elems[i] 96 | // Once allocated a valid key, it would never change. So, first check if 97 | // it's allocated. If not, then allocate it. If can't, or not allocated, 98 | // then check if it's k. If it is, then replace value. Otherwise continue. 99 | // This sequence could be problematic, if this happens: 100 | // Main thread runs Step 1. Check 101 | if atomic.CompareAndSwapUint64(&e.k, 0, k) { // Step 1. 102 | atomic.AddUint32(&c.numElems, 1) 103 | if atomic.CompareAndSwapPointer(&e.v, nil, v) { 104 | return v 105 | } 106 | return atomic.LoadPointer(&e.v) 107 | } 108 | 109 | if atomic.LoadUint64(&e.k) == k { 110 | // Swap if previous pointer is nil. 111 | if atomic.CompareAndSwapPointer(&e.v, nil, v) { 112 | return v 113 | } 114 | return atomic.LoadPointer(&e.v) 115 | } 116 | } 117 | return nil 118 | } 119 | 120 | func (m *Map) GetOrInsert(k uint64, v unsafe.Pointer) unsafe.Pointer { 121 | if v == nil { 122 | log.Fatal("GetOrInsert doesn't allow setting nil pointers.") 123 | return nil 124 | } 125 | 126 | // Check immutable first. 127 | cval := atomic.LoadPointer(&m.cs[IMMUTABLE]) 128 | if cval != nil { 129 | c := (*container)(cval) 130 | if pv := c.get(k); pv != nil { 131 | return pv 132 | } 133 | } 134 | 135 | // Okay, deal with mutable container now. 136 | cval = atomic.LoadPointer(&m.cs[MUTABLE]) 137 | if cval == nil { 138 | log.Fatal("This is disruptive in a bad way.") 139 | } 140 | c := (*container)(cval) 141 | if pv := c.getOrInsert(k, v); pv != nil { 142 | return pv 143 | } 144 | 145 | // We still couldn't insert the key. Time to grow. 146 | // TODO: Handle this case. 147 | return nil 148 | } 149 | 150 | func (m *Map) SetNilIfPresent(k uint64) bool { 151 | for _, c := range m.cs { 152 | if atomic.LoadInt32(&c.status) == 0 { 153 | continue 154 | } 155 | bi := k & (c.sz - 1) 156 | b := c.list[bi] 157 | for i := range b.elems { 158 | e := &b.elems[i] 159 | if atomic.LoadUint64(&e.k) == k { 160 | // Set to nil. 
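// Keys in this map are CAS'd from zero exactly once and never removed, so
// clearing the value pointer is how deletion is expressed; the key stays
// behind as a tombstone and readers treat a nil value as absent (getOrInsert
// will swap a fresh value into such a slot).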
161 | atomic.StorePointer(&e.v, nil) 162 | return true 163 | } 164 | } 165 | } 166 | return false 167 | } 168 | 169 | func (m *Map) StreamUntilCap(ch chan uint64) { 170 | for { 171 | ci := rand.Intn(2) 172 | c := m.cs[ci] 173 | if atomic.LoadInt32(&c.status) == 0 { 174 | ci += 1 175 | c = m.cs[ci%2] // use the other. 176 | } 177 | bi := rand.Intn(int(c.sz)) 178 | 179 | for _, e := range c.list[bi].elems { 180 | if len(ch) >= cap(ch) { 181 | return 182 | } 183 | if k := atomic.LoadUint64(&e.k); k > 0 { 184 | ch <- k 185 | } 186 | } 187 | } 188 | } 189 | 190 | func (m *Map) StreamAll(ch chan uint64) { 191 | for _, c := range m.cs { 192 | if atomic.LoadInt32(&c.status) == 0 { 193 | continue 194 | } 195 | for i := 0; i < int(c.sz); i++ { 196 | for _, e := range c.list[i].elems { 197 | if k := atomic.LoadUint64(&e.k); k > 0 { 198 | ch <- k 199 | } 200 | } 201 | } 202 | } 203 | } 204 | -------------------------------------------------------------------------------- /concurrent/map_test.go: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2015 Manish R Jain 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License. 6 | * You may obtain a copy of the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | * 10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | * See the License for the specific language governing permissions and 14 | * limitations under the License. 15 | */ 16 | 17 | package concurrent 18 | 19 | import ( 20 | "math/rand" 21 | "testing" 22 | "unsafe" 23 | 24 | "github.com/dgraph-io/dgraph/posting" 25 | "github.com/zond/gotomic" 26 | ) 27 | 28 | func TestGetAndPut(t *testing.T) { 29 | m := NewMap(1024) 30 | var i uint64 31 | for i = 1; i < 100; i++ { 32 | v := new(uint64) 33 | *v = i 34 | b := unsafe.Pointer(v) 35 | if ok := m.Put(i, b); !ok { 36 | t.Errorf("Couldn't put key: %v", i) 37 | } 38 | } 39 | for i = 1; i < 100; i++ { 40 | p := m.Get(i) 41 | v := (*uint64)(p) 42 | if v == nil { 43 | t.Errorf("Didn't expect nil for i: %v", i) 44 | return 45 | } 46 | if *v != i { 47 | t.Errorf("Expected: %v. Got: %v", i, *v) 48 | } 49 | } 50 | } 51 | 52 | func BenchmarkGetAndPut(b *testing.B) { 53 | m := NewMap(1 << 16) 54 | b.RunParallel(func(pb *testing.PB) { 55 | for pb.Next() { 56 | key := uint64(rand.Int63()) 57 | p := m.Get(key) 58 | if p == nil { 59 | l := posting.NewList() 60 | m.Put(key, unsafe.Pointer(l)) 61 | } 62 | } 63 | }) 64 | } 65 | 66 | func BenchmarkGotomic(b *testing.B) { 67 | h := gotomic.NewHash() 68 | b.RunParallel(func(pb *testing.PB) { 69 | for pb.Next() { 70 | key := uint64(rand.Int63()) 71 | _, has := h.Get(gotomic.IntKey(key)) 72 | if !has { 73 | l := posting.NewList() 74 | h.Put(gotomic.IntKey(key), l) 75 | } 76 | } 77 | }) 78 | } 79 | -------------------------------------------------------------------------------- /convert/README.md: -------------------------------------------------------------------------------- 1 | ### About 2 | 3 | Tested CRC with ISO and ECMA polynomials, and a custom library implementing 4 | murmur hash. None of them showed any collissions for 100 million unique ids. 5 | 6 | ### Test Results 7 | 8 | ``` 9 | # go test -v . 
10 | === RUN TestUseCrc 11 | --- PASS: TestUseCrc (0.00s) 12 | === RUN TestUseCrc_ISOCollissions 13 | --- PASS: TestUseCrc_ISOCollissions (11.55s) 14 | === RUN TestUseCrc_ECMACollissions 15 | --- PASS: TestUseCrc_ECMACollissions (11.07s) 16 | === RUN TestUseMurmur_Collissions 17 | --- PASS: TestUseMurmur_Collissions (11.28s) 18 | PASS 19 | ok github.com/dgraph-io/experiments/convert 34.015s 20 | ``` 21 | 22 | ### Benchmark Results 23 | 24 | ``` 25 | BenchmarkUseCrc_ISO 10000000 168 ns/op 26 | BenchmarkUseCrc_ECMA 10000000 168 ns/op 27 | BenchmarkUseMurmur 10000000 194 ns/op 28 | ``` 29 | -------------------------------------------------------------------------------- /convert/hash.go: -------------------------------------------------------------------------------- 1 | package convert 2 | 3 | // Dummy function to test and benchmark CRC. 4 | func useCrc() { 5 | } 6 | 7 | func useMurmur() { 8 | } 9 | -------------------------------------------------------------------------------- /convert/hash_test.go: -------------------------------------------------------------------------------- 1 | package convert 2 | 3 | import ( 4 | "bytes" 5 | "hash" 6 | "hash/crc64" 7 | "io" 8 | "math/rand" 9 | "testing" 10 | "time" 11 | 12 | "github.com/spaolacci/murmur3" 13 | ) 14 | 15 | func TestUseCrc(t *testing.T) { 16 | table := crc64.MakeTable(crc64.ISO) 17 | h := crc64.New(table) 18 | 19 | input := "someUniqueId" 20 | io.WriteString(h, input) 21 | sum1 := h.Sum64() 22 | 23 | h.Reset() 24 | input = "someOtherId" 25 | io.WriteString(h, input) 26 | sum2 := h.Sum64() 27 | 28 | if sum1 == sum2 { 29 | t.Errorf("Sums shouldn't match [%x] [%x]\n", sum1, sum2) 30 | t.Fail() 31 | return 32 | } 33 | 34 | h.Reset() 35 | input = "someUniqueId" 36 | io.WriteString(h, input) 37 | sum3 := h.Sum64() 38 | 39 | if sum1 != sum3 { 40 | t.Errorf("Sums should match [%x] [%x]\n", sum1, sum3) 41 | t.Fail() 42 | return 43 | } 44 | } 45 | 46 | const alphachars = "0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ" 47 | 48 | var r = rand.New(rand.NewSource(time.Now().UnixNano())) 49 | 50 | // UniqueString generates a unique string only using the characters from 51 | // alphachars constant, with length as specified. 52 | func uniqueString(alpha int) string { 53 | var buf bytes.Buffer 54 | for i := 0; i < alpha; i++ { 55 | idx := r.Intn(len(alphachars)) 56 | buf.WriteByte(alphachars[idx]) 57 | } 58 | return buf.String() 59 | } 60 | 61 | func getUids(num int) []string { 62 | l := make([]string, num) 63 | for i := 0; i < num; i++ { 64 | l[i] = uniqueString(10) 65 | } 66 | return l 67 | } 68 | 69 | const uidSize = 10000000 // At 10M, this would take over 100MB of RAM per test. 
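// Rough birthday-bound sketch (an illustrative helper added here, not part of
// the original tests): hashing n uniformly distributed keys into a 64-bit
// space yields about n*(n-1)/2^65 expected colliding pairs, so for the
// 10 million ids used below the expectation is ~2.7e-6 collisions and a clean
// run is the likely outcome rather than proof of hash quality.
func expectedCollisions(n float64) float64 {
	// n*(n-1)/2 candidate pairs, each colliding with probability 1/2^64.
	return n * (n - 1) / 2 / float64(1<<64)
}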
70 | func testCollissions(t *testing.T, h hash.Hash64) { 71 | uids := getUids(uidSize) 72 | results := make(map[uint64]bool) 73 | cols := 0 74 | 75 | for i := 0; i < uidSize; i++ { 76 | h.Reset() 77 | io.WriteString(h, uids[i]) 78 | s := h.Sum64() 79 | if _, col := results[s]; col { 80 | cols += 1 81 | } else { 82 | results[s] = true 83 | } 84 | } 85 | if cols > 0 { 86 | t.Errorf("Found %v collissions for uidSize %v\n", cols, uidSize) 87 | } 88 | } 89 | 90 | func TestUseCrc_ISOCollissions(t *testing.T) { 91 | table := crc64.MakeTable(crc64.ISO) 92 | h := crc64.New(table) 93 | testCollissions(t, h) 94 | } 95 | 96 | func TestUseCrc_ECMACollissions(t *testing.T) { 97 | table := crc64.MakeTable(crc64.ECMA) 98 | h := crc64.New(table) 99 | testCollissions(t, h) 100 | } 101 | 102 | func TestUseMurmur_Collissions(t *testing.T) { 103 | h := murmur3.New64() 104 | testCollissions(t, h) 105 | } 106 | 107 | var result uint64 108 | 109 | func benchmarkHash(b *testing.B, h hash.Hash64) { 110 | uids := getUids(b.N) 111 | var s uint64 112 | b.ResetTimer() 113 | for i := 0; i < b.N; i++ { 114 | h.Reset() 115 | io.WriteString(h, uids[i]) 116 | s = h.Sum64() 117 | } 118 | result = s 119 | } 120 | 121 | func BenchmarkUseCrc_ISO(b *testing.B) { 122 | table := crc64.MakeTable(crc64.ISO) 123 | h := crc64.New(table) 124 | benchmarkHash(b, h) 125 | } 126 | 127 | func BenchmarkUseCrc_ECMA(b *testing.B) { 128 | table := crc64.MakeTable(crc64.ECMA) 129 | h := crc64.New(table) 130 | benchmarkHash(b, h) 131 | } 132 | 133 | func BenchmarkUseMurmur(b *testing.B) { 134 | h := murmur3.New64() 135 | benchmarkHash(b, h) 136 | } 137 | -------------------------------------------------------------------------------- /cproto/README.md: -------------------------------------------------------------------------------- 1 | # Benchmark of various encoding systems. 2 | ## Generated via https://github.com/cloudflare/goser 3 | 4 | ### Populate 5 | BenchmarkPopulatePb 20000000 1264 ns/op 417 B/op 16 allocs/op 6 | BenchmarkPopulateGogopb 50000000 370 ns/op 48 B/op 3 allocs/op 7 | BenchmarkPopulateCapnp 10000000 2714 ns/op 114 B/op 2 allocs/op 8 | 9 | ### Marshal 10 | BenchmarkMarshalJSON 2000000 9690 ns/op 61.71 MB/s 756 B/op 17 allocs/op 11 | BenchmarkMarshalPb 20000000 1092 ns/op 264.58 MB/s 0 B/op 0 allocs/op 12 | BenchmarkMarshalGogopb 50000000 607 ns/op 475.69 MB/s 320 B/op 1 allocs/op 13 | BenchmarkMarshalCapnp 100000000 167 ns/op 2769.93 MB/s 8 B/op 0 allocs/op 14 | 15 | ### Unmarshal 16 | BenchmarkUnmarshalJSON 500000 28562 ns/op 20.94 MB/s 2275 B/op 26 allocs/op 17 | BenchmarkUnmarshalPb 5000000 3377 ns/op 85.57 MB/s 871 B/op 20 allocs/op 18 | BenchmarkUnmarshalGogopb 20000000 1136 ns/op 254.27 MB/s 266 B/op 7 allocs/op 19 | BenchmarkUnmarshalCapnp 20000000 936 ns/op 495.28 MB/s 263 B/op 5 allocs/op 20 | BenchmarkUnmarshalCapnpZeroCopy 50000000 366 ns/op 1267.18 MB/s 91 B/op 3 allocs/op 21 | go tool pprof --svg goser.test cpu.prof > cpu.svg 22 | 23 | -------------------------------------------------------------------------------- /cproto/plist/go.capnp: -------------------------------------------------------------------------------- 1 | @0xd12a1c51fedd6c88; 2 | 3 | annotation package(file) :Text; 4 | # The Go package name for the generated file. 5 | 6 | annotation import(file) :Text; 7 | # The Go import path that the generated file is accessible from. 8 | # Used to generate import statements and check if two types are in the 9 | # same package. 
10 | 11 | annotation doc(struct, field, enum) :Text; 12 | # Adds a doc comment to the generated code. 13 | 14 | annotation tag(enumerant) :Text; 15 | # Changes the string representation of the enum in the generated code. 16 | 17 | annotation notag(enumerant) :Void; 18 | # Removes the string representation of the enum in the generated code. 19 | 20 | annotation customtype(field) :Text; 21 | # OBSOLETE, not used by code generator. 22 | 23 | annotation name(struct, field, union, enum, enumerant, interface, method, param, annotation, const, group) :Text; 24 | # Used to rename the element in the generated code. 25 | 26 | $package("plist"); 27 | -------------------------------------------------------------------------------- /cproto/plist/go.capnp.go: -------------------------------------------------------------------------------- 1 | package plist 2 | 3 | // AUTO GENERATED - DO NOT EDIT 4 | 5 | const Package = uint64(0xbea97f1023792be0) 6 | const Import = uint64(0xe130b601260e44b5) 7 | const Doc = uint64(0xc58ad6bd519f935e) 8 | const Tag = uint64(0xa574b41924caefc7) 9 | const Notag = uint64(0xc8768679ec52e012) 10 | const Customtype = uint64(0xfa10659ae02f2093) 11 | const Name = uint64(0xc2b96012172f8df1) 12 | -------------------------------------------------------------------------------- /cproto/plist/schema.capnp: -------------------------------------------------------------------------------- 1 | @0xd44fb1ce6b5a1003; 2 | using Go = import "go.capnp"; 3 | $Go.package("plist"); 4 | $Go.import("testpkg"); 5 | 6 | struct PostingList { 7 | ids @0: List(UInt64); 8 | title @1: Text; 9 | } 10 | -------------------------------------------------------------------------------- /cproto/plist/schema.capnp.go: -------------------------------------------------------------------------------- 1 | package plist 2 | 3 | // AUTO GENERATED - DO NOT EDIT 4 | 5 | import ( 6 | "bufio" 7 | "bytes" 8 | "encoding/json" 9 | C "github.com/glycerine/go-capnproto" 10 | "io" 11 | ) 12 | 13 | type PostingList C.Struct 14 | 15 | func NewPostingList(s *C.Segment) PostingList { return PostingList(s.NewStruct(0, 2)) } 16 | func NewRootPostingList(s *C.Segment) PostingList { return PostingList(s.NewRootStruct(0, 2)) } 17 | func AutoNewPostingList(s *C.Segment) PostingList { return PostingList(s.NewStructAR(0, 2)) } 18 | func ReadRootPostingList(s *C.Segment) PostingList { return PostingList(s.Root(0).ToStruct()) } 19 | func (s PostingList) Ids() C.UInt64List { return C.UInt64List(C.Struct(s).GetObject(0)) } 20 | func (s PostingList) SetIds(v C.UInt64List) { C.Struct(s).SetObject(0, C.Object(v)) } 21 | func (s PostingList) Title() string { return C.Struct(s).GetObject(1).ToText() } 22 | func (s PostingList) SetTitle(v string) { C.Struct(s).SetObject(1, s.Segment.NewText(v)) } 23 | func (s PostingList) WriteJSON(w io.Writer) error { 24 | b := bufio.NewWriter(w) 25 | var err error 26 | var buf []byte 27 | _ = buf 28 | err = b.WriteByte('{') 29 | if err != nil { 30 | return err 31 | } 32 | _, err = b.WriteString("\"ids\":") 33 | if err != nil { 34 | return err 35 | } 36 | { 37 | s := s.Ids() 38 | { 39 | err = b.WriteByte('[') 40 | if err != nil { 41 | return err 42 | } 43 | for i, s := range s.ToArray() { 44 | if i != 0 { 45 | _, err = b.WriteString(", ") 46 | } 47 | if err != nil { 48 | return err 49 | } 50 | buf, err = json.Marshal(s) 51 | if err != nil { 52 | return err 53 | } 54 | _, err = b.Write(buf) 55 | if err != nil { 56 | return err 57 | } 58 | } 59 | err = b.WriteByte(']') 60 | } 61 | if err != nil { 62 | return err 63 | 
} 64 | } 65 | err = b.WriteByte(',') 66 | if err != nil { 67 | return err 68 | } 69 | _, err = b.WriteString("\"title\":") 70 | if err != nil { 71 | return err 72 | } 73 | { 74 | s := s.Title() 75 | buf, err = json.Marshal(s) 76 | if err != nil { 77 | return err 78 | } 79 | _, err = b.Write(buf) 80 | if err != nil { 81 | return err 82 | } 83 | } 84 | err = b.WriteByte('}') 85 | if err != nil { 86 | return err 87 | } 88 | err = b.Flush() 89 | return err 90 | } 91 | func (s PostingList) MarshalJSON() ([]byte, error) { 92 | b := bytes.Buffer{} 93 | err := s.WriteJSON(&b) 94 | return b.Bytes(), err 95 | } 96 | func (s PostingList) WriteCapLit(w io.Writer) error { 97 | b := bufio.NewWriter(w) 98 | var err error 99 | var buf []byte 100 | _ = buf 101 | err = b.WriteByte('(') 102 | if err != nil { 103 | return err 104 | } 105 | _, err = b.WriteString("ids = ") 106 | if err != nil { 107 | return err 108 | } 109 | { 110 | s := s.Ids() 111 | { 112 | err = b.WriteByte('[') 113 | if err != nil { 114 | return err 115 | } 116 | for i, s := range s.ToArray() { 117 | if i != 0 { 118 | _, err = b.WriteString(", ") 119 | } 120 | if err != nil { 121 | return err 122 | } 123 | buf, err = json.Marshal(s) 124 | if err != nil { 125 | return err 126 | } 127 | _, err = b.Write(buf) 128 | if err != nil { 129 | return err 130 | } 131 | } 132 | err = b.WriteByte(']') 133 | } 134 | if err != nil { 135 | return err 136 | } 137 | } 138 | _, err = b.WriteString(", ") 139 | if err != nil { 140 | return err 141 | } 142 | _, err = b.WriteString("title = ") 143 | if err != nil { 144 | return err 145 | } 146 | { 147 | s := s.Title() 148 | buf, err = json.Marshal(s) 149 | if err != nil { 150 | return err 151 | } 152 | _, err = b.Write(buf) 153 | if err != nil { 154 | return err 155 | } 156 | } 157 | err = b.WriteByte(')') 158 | if err != nil { 159 | return err 160 | } 161 | err = b.Flush() 162 | return err 163 | } 164 | func (s PostingList) MarshalCapLit() ([]byte, error) { 165 | b := bytes.Buffer{} 166 | err := s.WriteCapLit(&b) 167 | return b.Bytes(), err 168 | } 169 | 170 | type PostingList_List C.PointerList 171 | 172 | func NewPostingListList(s *C.Segment, sz int) PostingList_List { 173 | return PostingList_List(s.NewCompositeList(0, 2, sz)) 174 | } 175 | func (s PostingList_List) Len() int { return C.PointerList(s).Len() } 176 | func (s PostingList_List) At(i int) PostingList { return PostingList(C.PointerList(s).At(i).ToStruct()) } 177 | func (s PostingList_List) ToArray() []PostingList { 178 | n := s.Len() 179 | a := make([]PostingList, n) 180 | for i := 0; i < n; i++ { 181 | a[i] = s.At(i) 182 | } 183 | return a 184 | } 185 | func (s PostingList_List) Set(i int, item PostingList) { C.PointerList(s).Set(i, C.Object(item)) } 186 | -------------------------------------------------------------------------------- /cproto/rw/main.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "bytes" 5 | "fmt" 6 | "log" 7 | "math/rand" 8 | 9 | "github.com/dgraph-io/experiments/zrpc/plist" 10 | C "github.com/glycerine/go-capnproto" 11 | ) 12 | 13 | func write(sz int) (result bytes.Buffer, err error) { 14 | seg := C.NewBuffer(nil) 15 | pl := plist.NewRootPostingList(seg) 16 | 17 | l := seg.NewUInt64List(sz) 18 | for i := 0; i < sz; i++ { 19 | l.Set(i, uint64(rand.Int63())) 20 | } 21 | 22 | pl.SetIds(l) 23 | // pl.SetTitle("capnproto") 24 | 25 | r, err := seg.WriteTo(&result) 26 | if err != nil { 27 | return result, err 28 | } 29 | fmt.Println("Written:", r) 30 | return 
result, nil 31 | } 32 | 33 | func read(buf bytes.Buffer) error { 34 | seg, err := C.ReadFromStream(&buf, nil) 35 | if err != nil { 36 | log.Print("While decoding") 37 | return err 38 | } 39 | 40 | pl := plist.ReadRootPostingList(seg) 41 | ids := pl.Ids() 42 | title := pl.Title() 43 | fmt.Printf("Num ids: [%v] Title: [%v]\n", ids.Len(), title) 44 | return nil 45 | } 46 | 47 | func main() { 48 | sz := 1000 49 | 50 | buf, err := write(sz) 51 | if err != nil { 52 | log.Fatal(err) 53 | return 54 | } 55 | fmt.Println("Buffer len:", buf.Len()) 56 | if err := read(buf); err != nil { 57 | log.Fatal(err) 58 | } 59 | } 60 | -------------------------------------------------------------------------------- /cproto/rw/rw: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/dgraph-io/experiments/9841dc289feb4454304a0d45a05c0555acfb432f/cproto/rw/rw -------------------------------------------------------------------------------- /db/.gitignore: -------------------------------------------------------------------------------- 1 | /db 2 | -------------------------------------------------------------------------------- /db/README.md: -------------------------------------------------------------------------------- 1 | # Benchmarks 2 | 3 | ## BoltDB 4 | 5 | Without copying the resulting byte slice from Bolt. **Unsafe** 6 | ``` 7 | $ go test -bench BenchmarkRead . 8 | testing: warning: no tests to run 9 | PASS 10 | BenchmarkReadBolt_1024 500000 3858 ns/op 11 | BenchmarkReadBolt_10KB 500000 3738 ns/op 12 | BenchmarkReadBolt_500KB 1000000 3141 ns/op 13 | BenchmarkReadBolt_1MB 1000000 3026 ns/op 14 | ok github.com/dgraph-io/experiments/db 102.513s 15 | ``` 16 | 17 | Copying the resulting byte slice. **Safe** 18 | ``` 19 | $ go test -bench BenchmarkRead . 20 | testing: warning: no tests to run 21 | PASS 22 | BenchmarkReadBolt_1024 200000 6760 ns/op 23 | BenchmarkReadBolt_10KB 100000 21249 ns/op 24 | BenchmarkReadBolt_500KB 10000 214449 ns/op 25 | BenchmarkReadBolt_1MB 3000 350712 ns/op 26 | ok github.com/dgraph-io/experiments/db 80.890s 27 | ``` 28 | 29 | ## RocksDB 30 | 31 | ``` 32 | $ go test -bench BenchmarkGet . 
33 | PASS 34 | BenchmarkGet_valsize1024 300000 5715 ns/op 35 | BenchmarkGet_valsize10KB 50000 27619 ns/op 36 | BenchmarkGet_valsize500KB 2000 604185 ns/op 37 | BenchmarkGet_valsize1MB 2000 1064685 ns/op 38 | ok github.com/dgraph-io/dgraph/store 55.029s 39 | ``` 40 | -------------------------------------------------------------------------------- /db/main.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "errors" 5 | "fmt" 6 | "log" 7 | 8 | "github.com/boltdb/bolt" 9 | ) 10 | 11 | func writeNBytes(bdb *bolt.DB, k []byte, N int) error { 12 | buf := make([]byte, N) 13 | return bdb.Update(func(tx *bolt.Tx) error { 14 | bucket, err := tx.CreateBucketIfNotExists([]byte("predicate")) 15 | if err != nil { 16 | return err 17 | } 18 | return bucket.Put(k, buf) 19 | }) 20 | } 21 | 22 | func readValue(bdb *bolt.DB, k []byte) (N int) { 23 | bdb.View(func(tx *bolt.Tx) error { 24 | bucket := tx.Bucket([]byte("predicate")) 25 | if bucket == nil { 26 | return errors.New("Bucket not found") 27 | } 28 | val := bucket.Get(k) 29 | m := make([]byte, len(val)) 30 | copy(m, val) 31 | N = len(m) 32 | return nil 33 | }) 34 | return 35 | } 36 | 37 | func main() { 38 | db, err := bolt.Open("bolt.db", 0600, nil) 39 | if err != nil { 40 | log.Fatal(err) 41 | } 42 | defer db.Close() 43 | k := []byte("key") 44 | N := 512 45 | if err := writeNBytes(db, k, N); err != nil { 46 | log.Fatal(err) 47 | } 48 | fmt.Println(readValue(db, k)) 49 | } 50 | -------------------------------------------------------------------------------- /db/main_test.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "io/ioutil" 5 | "math/rand" 6 | "os" 7 | "strconv" 8 | "testing" 9 | 10 | "github.com/boltdb/bolt" 11 | ) 12 | 13 | func benchWriteBolt(b *testing.B, N int) { 14 | dir, err := ioutil.TempDir("", "bolt") 15 | if err != nil { 16 | b.Error(err) 17 | return 18 | } 19 | defer os.RemoveAll(dir) 20 | 21 | db, err := bolt.Open(dir+"/bolt.db", 0600, nil) 22 | b.ResetTimer() 23 | for i := 0; i < b.N; i++ { 24 | k := []byte(strconv.Itoa(i)) 25 | if err := writeNBytes(db, k, N); err != nil { 26 | b.Error(err) 27 | } 28 | } 29 | } 30 | 31 | func BenchmarkWriteBolt_1024(b *testing.B) { benchWriteBolt(b, 1024) } 32 | func BenchmarkWriteBolt_10KB(b *testing.B) { benchWriteBolt(b, 10240) } 33 | func BenchmarkWriteBolt_500KB(b *testing.B) { benchWriteBolt(b, 1<<19) } 34 | func BenchmarkWriteBolt_1MB(b *testing.B) { benchWriteBolt(b, 1<<20) } 35 | 36 | func benchReadBolt(b *testing.B, N int) { 37 | dir, err := ioutil.TempDir("", "bolt") 38 | if err != nil { 39 | b.Error(err) 40 | return 41 | } 42 | defer os.RemoveAll(dir) 43 | 44 | numKeys := 100 45 | db, err := bolt.Open(dir+"/bolt.db", 0600, nil) 46 | for i := 0; i < numKeys; i++ { 47 | k := []byte(strconv.Itoa(i)) 48 | if err := writeNBytes(db, k, N); err != nil { 49 | b.Error(err) 50 | } 51 | } 52 | 53 | b.ResetTimer() 54 | for i := 0; i < b.N; i++ { 55 | k := rand.Int() % numKeys 56 | key := []byte(strconv.Itoa(k)) 57 | n := readValue(db, key) 58 | if n != N { 59 | b.Errorf("Expected: %v. 
Got: %v", N, n) 60 | } 61 | } 62 | } 63 | 64 | func BenchmarkReadBolt_1024(b *testing.B) { benchReadBolt(b, 1024) } 65 | func BenchmarkReadBolt_10KB(b *testing.B) { benchReadBolt(b, 10240) } 66 | func BenchmarkReadBolt_500KB(b *testing.B) { benchReadBolt(b, 1<<19) } 67 | func BenchmarkReadBolt_1MB(b *testing.B) { benchReadBolt(b, 1<<20) } 68 | -------------------------------------------------------------------------------- /etcdRAFT/.gitignore: -------------------------------------------------------------------------------- 1 | /etcdRAFT 2 | 3 | -------------------------------------------------------------------------------- /etcdRAFT/doc.go: -------------------------------------------------------------------------------- 1 | // Usage instruction : 2 | // ./etcdRAFT --idx 1 --workerport ":12345" 3 | // ./etcdRAFT --idx 2 --workerport ":12346" --clusterIP ":12345" 4 | // ./etcdRAFT --idx 3 --workerport ":12347" --clusterIP ":12345" 5 | // ./etcdRAFT --idx 4 --workerport ":12348" --clusterIP ":12345" 6 | // ./etcdRAFT --idx 5 --workerport ":12349" --clusterIP ":12345" 7 | // 8 | // Each process will propose a different key value pair to be stored. 9 | // The cluster reaches concensus over the proposed values 10 | // 11 | // Can be extended to any number of nodes. 12 | // 13 | package main 14 | -------------------------------------------------------------------------------- /flats/flats.go: -------------------------------------------------------------------------------- 1 | package flats 2 | 3 | import ( 4 | "fmt" 5 | 6 | "github.com/dgraph-io/experiments/flats/fuids" 7 | flatbuffers "github.com/google/flatbuffers/go" 8 | ) 9 | 10 | func ToAndFrom() { 11 | } 12 | 13 | func ToAndFromProto(uids []uint64) (error, int) { 14 | var ul UidList 15 | ul.Uid = make([]uint64, len(uids)) 16 | copy(ul.Uid, uids) 17 | 18 | data, err := ul.Marshal() 19 | if err != nil { 20 | return err, 0 21 | } 22 | var nl UidList 23 | if err := nl.Unmarshal(data); err != nil { 24 | return err, 0 25 | } 26 | if len(nl.Uid) != len(ul.Uid) { 27 | return fmt.Errorf("Length doesn't match"), 0 28 | } 29 | for i := 0; i < len(uids); i++ { 30 | if nl.Uid[i] != uids[i] { 31 | return fmt.Errorf("ID doesn't match at index: %v", i), 0 32 | } 33 | } 34 | return nil, len(data) 35 | } 36 | 37 | func ToAndFromProtoAlt(uids []uint64) (error, int) { 38 | var ul UidListAlt 39 | ul.Uid = make([]uint64, len(uids)) 40 | copy(ul.Uid, uids) 41 | 42 | data, err := ul.Marshal() 43 | if err != nil { 44 | return err, 0 45 | } 46 | var nl UidListAlt 47 | if err := nl.Unmarshal(data); err != nil { 48 | return err, 0 49 | } 50 | if len(nl.Uid) != len(ul.Uid) { 51 | return fmt.Errorf("Length doesn't match"), 0 52 | } 53 | for i := 0; i < len(uids); i++ { 54 | if nl.Uid[i] != uids[i] { 55 | return fmt.Errorf("ID doesn't match at index: %v", i), 0 56 | } 57 | } 58 | return nil, len(data) 59 | } 60 | 61 | func ToAndFromFlat(uids []uint64) (error, int) { 62 | b := flatbuffers.NewBuilder(0) 63 | fuids.UidListStartUidsVector(b, len(uids)) 64 | for i := len(uids) - 1; i >= 0; i-- { 65 | b.PrependUint64(uids[i]) 66 | } 67 | ve := b.EndVector(len(uids)) 68 | fuids.UidListStart(b) 69 | fuids.UidListAddUids(b, ve) 70 | ue := fuids.UidListEnd(b) 71 | b.Finish(ue) 72 | data := b.FinishedBytes() 73 | 74 | nl := fuids.GetRootAsUidList(data, 0) 75 | if nl.UidsLength() != len(uids) { 76 | return fmt.Errorf("Length doesn't match"), 0 77 | } 78 | for i := 0; i < len(uids); i++ { 79 | if nl.Uids(i) != uids[i] { 80 | return fmt.Errorf("ID doesn't match at index: %v 
Expected: %v. Got: %v", 81 | i, uids[i], nl.Uids(i)), 0 82 | } 83 | } 84 | 85 | return nil, len(data) 86 | } 87 | -------------------------------------------------------------------------------- /flats/flats_test.go: -------------------------------------------------------------------------------- 1 | package flats 2 | 3 | import ( 4 | "fmt" 5 | "math/rand" 6 | "testing" 7 | ) 8 | 9 | func BenchmarkToAndFrom(b *testing.B) { 10 | benchmarks := []struct { 11 | k int 12 | }{ 13 | {10}, 14 | {100}, 15 | {1000}, 16 | {10000}, 17 | {100000}, 18 | {1000000}, 19 | {10000000}, 20 | } 21 | 22 | for _, bm := range benchmarks { 23 | for which := 0; which < 3; which++ { 24 | var name string 25 | if which == 0 { 26 | fmt.Println() 27 | name = "Flatb" 28 | } else if which == 1 { 29 | name = "Fixed" 30 | } else if which == 2 { 31 | name = "Proto" 32 | } 33 | 34 | b.Run(fmt.Sprintf("%s-%d", name, bm.k), func(b *testing.B) { 35 | uids := make([]uint64, bm.k) 36 | for i := 0; i < bm.k; i++ { 37 | uids[i] = uint64(rand.Int63()) 38 | } 39 | 40 | var max, sz int 41 | b.ResetTimer() 42 | for i := 0; i < b.N; i++ { 43 | var err error 44 | if which == 0 { 45 | err, sz = ToAndFromFlat(uids) 46 | } else if which == 1 { 47 | err, sz = ToAndFromProtoAlt(uids) 48 | } else if which == 2 { 49 | err, sz = ToAndFromProto(uids) 50 | } 51 | if err != nil { 52 | b.Error(err) 53 | b.Fail() 54 | } 55 | if max < sz { 56 | max = sz 57 | } 58 | // runtime.GC() -- Actually makes FB looks worse. 59 | } 60 | }) 61 | } 62 | } 63 | } 64 | 65 | func TestToAndFrom(t *testing.T) { 66 | ar := []int{10, 100, 1000, 10000, 100000, 1000000, 10000000} 67 | for _, k := range ar { 68 | fmt.Println() 69 | uids := make([]uint64, k) 70 | for i := 0; i < k; i++ { 71 | uids[i] = uint64(rand.Int63()) 72 | } 73 | var err error 74 | var sz int 75 | err, sz = ToAndFromFlat(uids) 76 | if err != nil { 77 | t.Error(err) 78 | t.Fail() 79 | } 80 | fmt.Printf("Flatb k:%d sz:%d\n", k, sz) 81 | 82 | err, sz = ToAndFromProtoAlt(uids) 83 | if err != nil { 84 | t.Error(err) 85 | t.Fail() 86 | } 87 | fmt.Printf("Fixed k:%d sz:%d\n", k, sz) 88 | 89 | err, sz = ToAndFromProto(uids) 90 | if err != nil { 91 | t.Error(err) 92 | t.Fail() 93 | } 94 | fmt.Printf("Proto k:%d sz:%d\n", k, sz) 95 | } 96 | } 97 | -------------------------------------------------------------------------------- /flats/fuids.fb: -------------------------------------------------------------------------------- 1 | namespace fuids; 2 | 3 | table UidList { 4 | uids:[ulong]; 5 | } 6 | -------------------------------------------------------------------------------- /flats/fuids/UidList.go: -------------------------------------------------------------------------------- 1 | // automatically generated by the FlatBuffers compiler, do not modify 2 | 3 | package fuids 4 | 5 | import ( 6 | flatbuffers "github.com/google/flatbuffers/go" 7 | ) 8 | 9 | type UidList struct { 10 | _tab flatbuffers.Table 11 | } 12 | 13 | func GetRootAsUidList(buf []byte, offset flatbuffers.UOffsetT) *UidList { 14 | n := flatbuffers.GetUOffsetT(buf[offset:]) 15 | x := &UidList{} 16 | x.Init(buf, n+offset) 17 | return x 18 | } 19 | 20 | func (rcv *UidList) Init(buf []byte, i flatbuffers.UOffsetT) { 21 | rcv._tab.Bytes = buf 22 | rcv._tab.Pos = i 23 | } 24 | 25 | func (rcv *UidList) Uids(j int) uint64 { 26 | o := flatbuffers.UOffsetT(rcv._tab.Offset(4)) 27 | if o != 0 { 28 | a := rcv._tab.Vector(o) 29 | return rcv._tab.GetUint64(a + flatbuffers.UOffsetT(j*8)) 30 | } 31 | return 0 32 | } 33 | 34 | func (rcv *UidList) UidsLength() int { 
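// Offset(4) resolves the first field's slot in this table's vtable
// (generated accessors use slot 4 for field 0, 6 for field 1, and so on);
// a zero offset means the field was never written, so the default of an
// empty vector is reported.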
35 | o := flatbuffers.UOffsetT(rcv._tab.Offset(4)) 36 | if o != 0 { 37 | return rcv._tab.VectorLen(o) 38 | } 39 | return 0 40 | } 41 | 42 | func UidListStart(builder *flatbuffers.Builder) { 43 | builder.StartObject(1) 44 | } 45 | func UidListAddUids(builder *flatbuffers.Builder, uids flatbuffers.UOffsetT) { 46 | builder.PrependUOffsetTSlot(0, flatbuffers.UOffsetT(uids), 0) 47 | } 48 | func UidListStartUidsVector(builder *flatbuffers.Builder, numElems int) flatbuffers.UOffsetT { 49 | return builder.StartVector(8, numElems, 8) 50 | } 51 | func UidListEnd(builder *flatbuffers.Builder) flatbuffers.UOffsetT { 52 | return builder.EndObject() 53 | } 54 | -------------------------------------------------------------------------------- /flats/uids.proto: -------------------------------------------------------------------------------- 1 | syntax = "proto3"; 2 | 3 | package flats; 4 | 5 | message UidList { 6 | repeated uint64 uid = 1; 7 | } 8 | 9 | message UidListAlt { 10 | repeated fixed64 uid = 1 [packed=true]; 11 | } 12 | -------------------------------------------------------------------------------- /gob/README.md: -------------------------------------------------------------------------------- 1 | Benchmark was to test how much decline in performance we'd get for Gob.Encode, v/s directly writing our binary data to io.Writer. 2 | 3 | These are the results: 4 | 5 | ``` 6 | $ go test -bench . 7 | testing: warning: no tests to run 8 | PASS 9 | BenchmarkGobEncode_1K 200000 11388 ns/op 10 | BenchmarkGobEncode_1M 5000 253726 ns/op 11 | BenchmarkGobEncode_50M 100 18707543 ns/op 12 | BenchmarkEncode_1K 100000000 10.0 ns/op 13 | BenchmarkEncode_1M 200000000 9.84 ns/op 14 | BenchmarkEncode_50M 200000000 9.91 ns/op 15 | ok github.com/dgraph-io/experiments/gob 52.960s 16 | 17 | ``` 18 | -------------------------------------------------------------------------------- /gob/main.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "encoding/gob" 5 | "io" 6 | ) 7 | 8 | type Query struct { 9 | Data []byte 10 | } 11 | 12 | func GobEncode(q *Query, w io.Writer) error { 13 | enc := gob.NewEncoder(w) 14 | return enc.Encode(*q) 15 | } 16 | 17 | func Encode(data []byte, w io.Writer) error { 18 | _, err := w.Write(data) 19 | return err 20 | } 21 | -------------------------------------------------------------------------------- /gob/main_test.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "crypto/rand" 5 | "io/ioutil" 6 | "testing" 7 | ) 8 | 9 | func benchGobEncode(b *testing.B, sz int) { 10 | buf := make([]byte, sz) 11 | _, err := rand.Read(buf) 12 | if err != nil { 13 | b.Error(err) 14 | b.Fail() 15 | } 16 | q := new(Query) 17 | q.Data = buf 18 | b.ResetTimer() 19 | 20 | for i := 0; i < b.N; i++ { 21 | if err := GobEncode(q, ioutil.Discard); err != nil { 22 | b.Error(err) 23 | } 24 | } 25 | } 26 | 27 | func BenchmarkGobEncode_1K(b *testing.B) { benchGobEncode(b, 1000) } 28 | func BenchmarkGobEncode_1M(b *testing.B) { benchGobEncode(b, 1000000) } 29 | func BenchmarkGobEncode_50M(b *testing.B) { benchGobEncode(b, 50000000) } 30 | 31 | func benchEncode(b *testing.B, sz int) { 32 | buf := make([]byte, sz) 33 | _, err := rand.Read(buf) 34 | if err != nil { 35 | b.Error(err) 36 | b.Fail() 37 | } 38 | b.ResetTimer() 39 | 40 | for i := 0; i < b.N; i++ { 41 | if err := Encode(buf, ioutil.Discard); err != nil { 42 | b.Error(err) 43 | } 44 | } 45 | } 46 | 47 | func BenchmarkEncode_1K(b 
*testing.B) { benchEncode(b, 1000) } 48 | func BenchmarkEncode_1M(b *testing.B) { benchEncode(b, 1000000) } 49 | func BenchmarkEncode_50M(b *testing.B) { benchEncode(b, 50000000) } 50 | -------------------------------------------------------------------------------- /goraft/.gitignore: -------------------------------------------------------------------------------- 1 | /goraft 2 | -------------------------------------------------------------------------------- /goraft/node.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "fmt" 5 | "math/rand" 6 | "time" 7 | 8 | "golang.org/x/net/context" 9 | 10 | "github.com/Sirupsen/logrus" 11 | "github.com/coreos/etcd/raft" 12 | "github.com/coreos/etcd/raft/raftpb" 13 | ) 14 | 15 | func save(rd raft.Ready, st *raft.MemoryStorage) error { 16 | if !raft.IsEmptyHardState(rd.HardState) { 17 | if err := st.SetHardState(rd.HardState); err != nil { 18 | return err 19 | } 20 | } 21 | 22 | if len(rd.Entries) > 0 { 23 | if err := st.Append(rd.Entries); err != nil { 24 | return err 25 | } 26 | } 27 | 28 | if !raft.IsEmptySnap(rd.Snapshot) { 29 | if err := st.ApplySnapshot(rd.Snapshot); err != nil { 30 | return err 31 | } 32 | } 33 | return nil 34 | } 35 | 36 | func startNode(id int, chans []chan raftpb.Message) { 37 | l := log.WithField("id", id) 38 | storage := raft.NewMemoryStorage() 39 | c := &raft.Config{ 40 | ID: uint64(id), 41 | ElectionTick: 10, 42 | HeartbeatTick: 1, 43 | Storage: storage, 44 | MaxSizePerMsg: 4096, 45 | MaxInflightMsgs: 256, 46 | } 47 | var peers []raft.Peer 48 | for i := 1; i <= 3; i++ { 49 | if id == i { 50 | continue 51 | } 52 | peer := raft.Peer{ID: uint64(i)} 53 | peers = append(peers, peer) 54 | } 55 | l.WithField("peers", peers).Debug("Peers") 56 | 57 | n := raft.StartNode(c, peers) 58 | tick := time.Tick(3 * time.Second) 59 | for count := 0; ; count++ { 60 | l.Debug("Waiting for something to happen") 61 | select { 62 | case <-tick: 63 | l.Debug("Got a tick") 64 | n.Tick() 65 | { 66 | r := 1 + rand.Intn(3) 67 | l.WithField("r", r).Debugf("Got rand value") 68 | if id == r { 69 | // I should send some data. 70 | data := fmt.Sprintf("This is me: %v at count %v", id, count) 71 | l.WithField("data", data).Debug("Proposing some data") 72 | ctx, _ := context.WithTimeout(context.Background(), 5*time.Second) 73 | go n.Propose(ctx, []byte(data)) 74 | } 75 | } 76 | 77 | case rd := <-n.Ready(): 78 | l.Debug("Got ready") 79 | // Save initial state. 80 | if err := save(rd, storage); err != nil { 81 | log.WithField("error", err).Error("While saving") 82 | return 83 | } 84 | // Send all messages to other nodes. 85 | for i := 1; i <= 3; i++ { 86 | if id == i { 87 | continue 88 | } 89 | for _, msg := range rd.Messages { 90 | chans[i-1] <- msg 91 | } 92 | } 93 | // Apply Snapshot (already done) and CommittedEntries 94 | storage.Append(rd.CommittedEntries) // No config change in test. 
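// Per the etcd/raft contract, a node must persist the Ready's state and
// entries, send its messages, and apply its committed entries before calling
// Advance(), which tells the library it may produce the next Ready. A real
// application would apply rd.CommittedEntries to its state machine here; this
// demo just appends them to the in-memory storage.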
95 | n.Advance() 96 | 97 | case msg := <-chans[id-1]: 98 | l.WithField("msg", msg.String()).Debug("GOT MESSAGE") 99 | ctx, _ := context.WithTimeout(context.Background(), 5*time.Second) 100 | n.Step(ctx, msg) 101 | } 102 | } 103 | } 104 | 105 | var log = logrus.WithField("package", "goraft") 106 | 107 | func main() { 108 | logrus.SetLevel(logrus.DebugLevel) 109 | 110 | chans := make([]chan raftpb.Message, 3) 111 | for i := range chans { 112 | chans[i] = make(chan raftpb.Message, 100) 113 | } 114 | go startNode(1, chans) 115 | go startNode(2, chans) 116 | go startNode(3, chans) 117 | 118 | tick := time.Tick(60 * time.Second) 119 | <-tick 120 | log.Debug("DONE") 121 | } 122 | -------------------------------------------------------------------------------- /grpc/codec/codec.go: -------------------------------------------------------------------------------- 1 | package codec 2 | 3 | import ( 4 | "fmt" 5 | "log" 6 | 7 | "github.com/dgraph-io/experiments/grpc/fb" 8 | ) 9 | 10 | type Buffer struct{} 11 | 12 | func (cb *Buffer) Marshal(v interface{}) ([]byte, error) { 13 | fmt.Println("Marshal") 14 | p, ok := v.(*fb.Payload) 15 | if !ok { 16 | log.Fatal("Invalid type of struct") 17 | } 18 | return p.Data, nil 19 | } 20 | 21 | func (cb *Buffer) Unmarshal(data []byte, v interface{}) error { 22 | fmt.Println("Unmarshal") 23 | p, ok := v.(*fb.Payload) 24 | if !ok { 25 | log.Fatal("Invalid type of struct") 26 | } 27 | p.Data = data 28 | return nil 29 | } 30 | 31 | func (cb *Buffer) String() string { 32 | fmt.Println("String") 33 | return "codec.Buffer" 34 | } 35 | -------------------------------------------------------------------------------- /grpc/fb/fb.pb.go: -------------------------------------------------------------------------------- 1 | // Code generated by protoc-gen-go. 2 | // source: fb.proto 3 | // DO NOT EDIT! 4 | 5 | /* 6 | Package fb is a generated protocol buffer package. 7 | 8 | It is generated from these files: 9 | fb.proto 10 | 11 | It has these top-level messages: 12 | Payload 13 | */ 14 | package fb 15 | 16 | import proto "github.com/golang/protobuf/proto" 17 | import fmt "fmt" 18 | import math "math" 19 | 20 | import ( 21 | context "golang.org/x/net/context" 22 | grpc "google.golang.org/grpc" 23 | ) 24 | 25 | // Reference imports to suppress errors if they are not otherwise used. 26 | var _ = proto.Marshal 27 | var _ = fmt.Errorf 28 | var _ = math.Inf 29 | 30 | // This is a compile-time assertion to ensure that this generated file 31 | // is compatible with the proto package it is being compiled against. 32 | // A compilation error at this line likely means your copy of the 33 | // proto package needs to be updated. 34 | const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package 35 | 36 | type Payload struct { 37 | Data []byte `protobuf:"bytes,1,opt,name=Data,json=data,proto3" json:"Data,omitempty"` 38 | } 39 | 40 | func (m *Payload) Reset() { *m = Payload{} } 41 | func (m *Payload) String() string { return proto.CompactTextString(m) } 42 | func (*Payload) ProtoMessage() {} 43 | func (*Payload) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } 44 | 45 | func init() { 46 | proto.RegisterType((*Payload)(nil), "fb.Payload") 47 | } 48 | 49 | // Reference imports to suppress errors if they are not otherwise used. 50 | var _ context.Context 51 | var _ grpc.ClientConn 52 | 53 | // This is a compile-time assertion to ensure that this generated file 54 | // is compatible with the grpc package it is being compiled against. 
55 | const _ = grpc.SupportPackageIsVersion2 56 | 57 | // Client API for Worker service 58 | 59 | type WorkerClient interface { 60 | Hello(ctx context.Context, in *Payload, opts ...grpc.CallOption) (*Payload, error) 61 | } 62 | 63 | type workerClient struct { 64 | cc *grpc.ClientConn 65 | } 66 | 67 | func NewWorkerClient(cc *grpc.ClientConn) WorkerClient { 68 | return &workerClient{cc} 69 | } 70 | 71 | func (c *workerClient) Hello(ctx context.Context, in *Payload, opts ...grpc.CallOption) (*Payload, error) { 72 | out := new(Payload) 73 | err := grpc.Invoke(ctx, "/fb.Worker/Hello", in, out, c.cc, opts...) 74 | if err != nil { 75 | return nil, err 76 | } 77 | return out, nil 78 | } 79 | 80 | // Server API for Worker service 81 | 82 | type WorkerServer interface { 83 | Hello(context.Context, *Payload) (*Payload, error) 84 | } 85 | 86 | func RegisterWorkerServer(s *grpc.Server, srv WorkerServer) { 87 | s.RegisterService(&_Worker_serviceDesc, srv) 88 | } 89 | 90 | func _Worker_Hello_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { 91 | in := new(Payload) 92 | if err := dec(in); err != nil { 93 | return nil, err 94 | } 95 | if interceptor == nil { 96 | return srv.(WorkerServer).Hello(ctx, in) 97 | } 98 | info := &grpc.UnaryServerInfo{ 99 | Server: srv, 100 | FullMethod: "/fb.Worker/Hello", 101 | } 102 | handler := func(ctx context.Context, req interface{}) (interface{}, error) { 103 | return srv.(WorkerServer).Hello(ctx, req.(*Payload)) 104 | } 105 | return interceptor(ctx, in, info, handler) 106 | } 107 | 108 | var _Worker_serviceDesc = grpc.ServiceDesc{ 109 | ServiceName: "fb.Worker", 110 | HandlerType: (*WorkerServer)(nil), 111 | Methods: []grpc.MethodDesc{ 112 | { 113 | MethodName: "Hello", 114 | Handler: _Worker_Hello_Handler, 115 | }, 116 | }, 117 | Streams: []grpc.StreamDesc{}, 118 | } 119 | 120 | var fileDescriptor0 = []byte{ 121 | // 104 bytes of a gzipped FileDescriptorProto 122 | 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xe2, 0xe2, 0x48, 0x4b, 0xd2, 0x2b, 123 | 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x62, 0x4a, 0x4b, 0x52, 0x92, 0xe5, 0x62, 0x0f, 0x48, 0xac, 0xcc, 124 | 0xc9, 0x4f, 0x4c, 0x11, 0x12, 0xe2, 0x62, 0x71, 0x49, 0x2c, 0x49, 0x94, 0x60, 0x54, 0x60, 0xd4, 125 | 0xe0, 0x09, 0x62, 0x49, 0x01, 0xb2, 0x8d, 0x74, 0xb9, 0xd8, 0xc2, 0xf3, 0x8b, 0xb2, 0x53, 0x8b, 126 | 0x84, 0x94, 0xb9, 0x58, 0x3d, 0x52, 0x73, 0x72, 0xf2, 0x85, 0xb8, 0xf5, 0x80, 0x06, 0x40, 0xf5, 127 | 0x48, 0x21, 0x73, 0x94, 0x18, 0x92, 0xd8, 0xc0, 0x06, 0x1b, 0x03, 0x02, 0x00, 0x00, 0xff, 0xff, 128 | 0x81, 0xdc, 0x6e, 0x22, 0x64, 0x00, 0x00, 0x00, 129 | } 130 | -------------------------------------------------------------------------------- /grpc/fb/fb.proto: -------------------------------------------------------------------------------- 1 | syntax = "proto3"; 2 | 3 | package fb; 4 | 5 | message Payload { 6 | bytes Data = 1; 7 | } 8 | 9 | service Worker { 10 | rpc Hello (Payload) returns (Payload) {} 11 | } 12 | -------------------------------------------------------------------------------- /grpc/fbclient/fbclient: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/dgraph-io/experiments/9841dc289feb4454304a0d45a05c0555acfb432f/grpc/fbclient/fbclient -------------------------------------------------------------------------------- /grpc/fbclient/main.go: -------------------------------------------------------------------------------- 1 | /* 2 | * 3 | * 
Copyright 2015, Google Inc. 4 | * All rights reserved. 5 | * 6 | * Redistribution and use in source and binary forms, with or without 7 | * modification, are permitted provided that the following conditions are 8 | * met: 9 | * 10 | * * Redistributions of source code must retain the above copyright 11 | * notice, this list of conditions and the following disclaimer. 12 | * * Redistributions in binary form must reproduce the above 13 | * copyright notice, this list of conditions and the following disclaimer 14 | * in the documentation and/or other materials provided with the 15 | * distribution. 16 | * * Neither the name of Google Inc. nor the names of its 17 | * contributors may be used to endorse or promote products derived from 18 | * this software without specific prior written permission. 19 | * 20 | * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 21 | * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 22 | * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 23 | * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 24 | * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 25 | * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 26 | * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 27 | * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 28 | * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 29 | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 30 | * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 31 | * 32 | */ 33 | 34 | package main 35 | 36 | import ( 37 | "log" 38 | 39 | "github.com/dgraph-io/experiments/grpc/codec" 40 | "github.com/dgraph-io/experiments/grpc/fb" 41 | "golang.org/x/net/context" 42 | "google.golang.org/grpc" 43 | ) 44 | 45 | const ( 46 | address = "localhost:50051" 47 | ) 48 | 49 | func main() { 50 | // Set up a connection to the server. 51 | conn, err := grpc.Dial(address, grpc.WithInsecure(), grpc.WithCodec(&codec.Buffer{})) 52 | if err != nil { 53 | log.Fatalf("did not connect: %v", err) 54 | } 55 | defer conn.Close() 56 | c := fb.NewWorkerClient(conn) 57 | 58 | // Contact the server and print out its response. 59 | r, err := c.Hello(context.Background(), &fb.Payload{Data: []byte("Client")}) 60 | if err != nil { 61 | log.Fatalf("could not greet: %v", err) 62 | } 63 | log.Printf("Greeting: %s", string(r.Data)) 64 | } 65 | -------------------------------------------------------------------------------- /grpc/fbserver/fbserver: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/dgraph-io/experiments/9841dc289feb4454304a0d45a05c0555acfb432f/grpc/fbserver/fbserver -------------------------------------------------------------------------------- /grpc/fbserver/main.go: -------------------------------------------------------------------------------- 1 | /* 2 | * 3 | * Copyright 2015, Google Inc. 4 | * All rights reserved. 5 | * 6 | * Redistribution and use in source and binary forms, with or without 7 | * modification, are permitted provided that the following conditions are 8 | * met: 9 | * 10 | * * Redistributions of source code must retain the above copyright 11 | * notice, this list of conditions and the following disclaimer. 
12 | * * Redistributions in binary form must reproduce the above 13 | * copyright notice, this list of conditions and the following disclaimer 14 | * in the documentation and/or other materials provided with the 15 | * distribution. 16 | * * Neither the name of Google Inc. nor the names of its 17 | * contributors may be used to endorse or promote products derived from 18 | * this software without specific prior written permission. 19 | * 20 | * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 21 | * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 22 | * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 23 | * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 24 | * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 25 | * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 26 | * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 27 | * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 28 | * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 29 | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 30 | * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 31 | * 32 | */ 33 | 34 | package main 35 | 36 | import ( 37 | "log" 38 | "net" 39 | 40 | "github.com/dgraph-io/experiments/grpc/codec" 41 | "github.com/dgraph-io/experiments/grpc/fb" 42 | 43 | "golang.org/x/net/context" 44 | "google.golang.org/grpc" 45 | ) 46 | 47 | const ( 48 | port = ":50051" 49 | ) 50 | 51 | // server implements the fb.Worker gRPC service. 52 | type server struct{} 53 | 54 | // Hello implements the fb.WorkerServer interface. 55 | func (s *server) Hello(ctx context.Context, in *fb.Payload) (*fb.Payload, error) { 56 | b := []byte("Hello ") 57 | b = append(b, in.Data...)
58 | return &fb.Payload{Data: b}, nil 59 | } 60 | 61 | func main() { 62 | lis, err := net.Listen("tcp", port) 63 | if err != nil { 64 | log.Fatalf("failed to listen: %v", err) 65 | } 66 | s := grpc.NewServer(grpc.CustomCodec(&codec.Buffer{})) 67 | fb.RegisterWorkerServer(s, &server{}) 68 | s.Serve(lis) 69 | } 70 | -------------------------------------------------------------------------------- /icu/icudt57l.dat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/dgraph-io/experiments/9841dc289feb4454304a0d45a05c0555acfb432f/icu/icudt57l.dat -------------------------------------------------------------------------------- /intersects/gnuplot/100-0.00.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/dgraph-io/experiments/9841dc289feb4454304a0d45a05c0555acfb432f/intersects/gnuplot/100-0.00.png -------------------------------------------------------------------------------- /intersects/gnuplot/100-0.80.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/dgraph-io/experiments/9841dc289feb4454304a0d45a05c0555acfb432f/intersects/gnuplot/100-0.80.png -------------------------------------------------------------------------------- /intersects/gnuplot/1000-0.00.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/dgraph-io/experiments/9841dc289feb4454304a0d45a05c0555acfb432f/intersects/gnuplot/1000-0.00.png -------------------------------------------------------------------------------- /intersects/gnuplot/1000-0.80.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/dgraph-io/experiments/9841dc289feb4454304a0d45a05c0555acfb432f/intersects/gnuplot/1000-0.80.png -------------------------------------------------------------------------------- /intersects/gnuplot/10000-0.00.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/dgraph-io/experiments/9841dc289feb4454304a0d45a05c0555acfb432f/intersects/gnuplot/10000-0.00.png -------------------------------------------------------------------------------- /intersects/gnuplot/10000-0.80.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/dgraph-io/experiments/9841dc289feb4454304a0d45a05c0555acfb432f/intersects/gnuplot/10000-0.80.png -------------------------------------------------------------------------------- /intersects/gnuplot/Bin.0.00.100.dat: -------------------------------------------------------------------------------- 1 | Bin.0.00.100 1 2177 2 | Bin.0.00.100 10 3619 3 | Bin.0.00.100 50 4496 4 | Bin.0.00.100 100 4744 5 | Bin.0.00.100 500 6148 6 | Bin.0.00.100 1000 7642 7 | Bin.0.00.100 10000 11102 8 | Bin.0.00.100 100000 15632 9 | -------------------------------------------------------------------------------- /intersects/gnuplot/Bin.0.00.1000.dat: -------------------------------------------------------------------------------- 1 | Bin.0.00.1000 1 26022 2 | Bin.0.00.1000 10 68601 3 | Bin.0.00.1000 50 97607 4 | Bin.0.00.1000 100 116420 5 | Bin.0.00.1000 500 170243 6 | Bin.0.00.1000 1000 201288 7 | Bin.0.00.1000 10000 497427 8 | -------------------------------------------------------------------------------- /intersects/gnuplot/Bin.0.00.10000.dat: 
-------------------------------------------------------------------------------- 1 | Bin.0.00.10000 1 439242 2 | Bin.0.00.10000 10 921371 3 | Bin.0.00.10000 50 1491498 4 | Bin.0.00.10000 100 1790656 5 | Bin.0.00.10000 500 3474005 6 | Bin.0.00.10000 1000 4669962 7 | -------------------------------------------------------------------------------- /intersects/gnuplot/Bin.0.80.100.dat: -------------------------------------------------------------------------------- 1 | Bin.0.80.100 1 2057 2 | Bin.0.80.100 10 3104 3 | Bin.0.80.100 50 3670 4 | Bin.0.80.100 100 4018 5 | Bin.0.80.100 500 5191 6 | Bin.0.80.100 1000 5955 7 | Bin.0.80.100 10000 9627 8 | Bin.0.80.100 100000 12621 9 | -------------------------------------------------------------------------------- /intersects/gnuplot/Bin.0.80.1000.dat: -------------------------------------------------------------------------------- 1 | Bin.0.80.1000 1 21128 2 | Bin.0.80.1000 10 50781 3 | Bin.0.80.1000 50 81000 4 | Bin.0.80.1000 100 139756 5 | Bin.0.80.1000 500 168377 6 | Bin.0.80.1000 1000 198255 7 | Bin.0.80.1000 10000 563448 8 | -------------------------------------------------------------------------------- /intersects/gnuplot/Bin.0.80.10000.dat: -------------------------------------------------------------------------------- 1 | Bin.0.80.10000 1 317512 2 | Bin.0.80.10000 10 673818 3 | Bin.0.80.10000 50 1234523 4 | Bin.0.80.10000 100 1589341 5 | Bin.0.80.10000 500 3265703 6 | Bin.0.80.10000 1000 3627748 7 | -------------------------------------------------------------------------------- /intersects/gnuplot/Cur.0.00.100.dat: -------------------------------------------------------------------------------- 1 | Cur.0.00.100 1 502 2 | Cur.0.00.100 10 1295 3 | Cur.0.00.100 50 4562 4 | Cur.0.00.100 100 2848 5 | Cur.0.00.100 500 10714 6 | Cur.0.00.100 1000 13110 7 | Cur.0.00.100 10000 17724 8 | Cur.0.00.100 100000 22210 9 | -------------------------------------------------------------------------------- /intersects/gnuplot/Cur.0.00.1000.dat: -------------------------------------------------------------------------------- 1 | Cur.0.00.1000 1 5218 2 | Cur.0.00.1000 10 20874 3 | Cur.0.00.1000 50 46390 4 | Cur.0.00.1000 100 53533 5 | Cur.0.00.1000 500 220420 6 | Cur.0.00.1000 1000 246376 7 | Cur.0.00.1000 10000 400010 8 | -------------------------------------------------------------------------------- /intersects/gnuplot/Cur.0.00.10000.dat: -------------------------------------------------------------------------------- 1 | Cur.0.00.10000 1 127870 2 | Cur.0.00.10000 10 232583 3 | Cur.0.00.10000 50 562843 4 | Cur.0.00.10000 100 865578 5 | Cur.0.00.10000 500 3612158 6 | Cur.0.00.10000 1000 4604797 7 | -------------------------------------------------------------------------------- /intersects/gnuplot/Cur.0.80.100.dat: -------------------------------------------------------------------------------- 1 | Cur.0.80.100 1 249 2 | Cur.0.80.100 10 1048 3 | Cur.0.80.100 50 3864 4 | Cur.0.80.100 100 1917 5 | Cur.0.80.100 500 7240 6 | Cur.0.80.100 1000 8352 7 | Cur.0.80.100 10000 14017 8 | Cur.0.80.100 100000 17740 9 | -------------------------------------------------------------------------------- /intersects/gnuplot/Cur.0.80.1000.dat: -------------------------------------------------------------------------------- 1 | Cur.0.80.1000 1 2862 2 | Cur.0.80.1000 10 15553 3 | Cur.0.80.1000 50 42520 4 | Cur.0.80.1000 100 44530 5 | Cur.0.80.1000 500 189259 6 | Cur.0.80.1000 1000 261181 7 | Cur.0.80.1000 10000 525353 8 | 
-------------------------------------------------------------------------------- /intersects/gnuplot/Cur.0.80.10000.dat: -------------------------------------------------------------------------------- 1 | Cur.0.80.10000 1 41263 2 | Cur.0.80.10000 10 188755 3 | Cur.0.80.10000 50 484723 4 | Cur.0.80.10000 100 867529 5 | Cur.0.80.10000 500 4590360 6 | Cur.0.80.10000 1000 5043228 7 | -------------------------------------------------------------------------------- /intersects/gnuplot/Two.0.00.100.dat: -------------------------------------------------------------------------------- 1 | Two.0.00.100 1 346 2 | Two.0.00.100 10 1558 3 | Two.0.00.100 50 2834 4 | Two.0.00.100 100 3416 5 | Two.0.00.100 500 9600 6 | Two.0.00.100 1000 14306 7 | Two.0.00.100 10000 14599 8 | Two.0.00.100 100000 18938 9 | -------------------------------------------------------------------------------- /intersects/gnuplot/Two.0.00.1000.dat: -------------------------------------------------------------------------------- 1 | Two.0.00.1000 1 3664 2 | Two.0.00.1000 10 21383 3 | Two.0.00.1000 50 42757 4 | Two.0.00.1000 100 52907 5 | Two.0.00.1000 500 177488 6 | Two.0.00.1000 1000 194318 7 | Two.0.00.1000 10000 298290 8 | -------------------------------------------------------------------------------- /intersects/gnuplot/Two.0.00.10000.dat: -------------------------------------------------------------------------------- 1 | Two.0.00.10000 1 142286 2 | Two.0.00.10000 10 284922 3 | Two.0.00.10000 50 691488 4 | Two.0.00.10000 100 769356 5 | Two.0.00.10000 500 2625058 6 | Two.0.00.10000 1000 3001428 7 | -------------------------------------------------------------------------------- /intersects/gnuplot/Two.0.80.100.dat: -------------------------------------------------------------------------------- 1 | Two.0.80.100 1 294 2 | Two.0.80.100 10 1416 3 | Two.0.80.100 50 2967 4 | Two.0.80.100 100 4701 5 | Two.0.80.100 500 9850 6 | Two.0.80.100 1000 10711 7 | Two.0.80.100 10000 14331 8 | Two.0.80.100 100000 18746 9 | -------------------------------------------------------------------------------- /intersects/gnuplot/Two.0.80.1000.dat: -------------------------------------------------------------------------------- 1 | Two.0.80.1000 1 3057 2 | Two.0.80.1000 10 21584 3 | Two.0.80.1000 50 44956 4 | Two.0.80.1000 100 56865 5 | Two.0.80.1000 500 204097 6 | Two.0.80.1000 1000 219306 7 | Two.0.80.1000 10000 377139 8 | -------------------------------------------------------------------------------- /intersects/gnuplot/Two.0.80.10000.dat: -------------------------------------------------------------------------------- 1 | Two.0.80.10000 1 70605 2 | Two.0.80.10000 10 306465 3 | Two.0.80.10000 50 620454 4 | Two.0.80.10000 100 825185 5 | Two.0.80.10000 500 2901345 6 | Two.0.80.10000 1000 3006012 7 | -------------------------------------------------------------------------------- /intersects/gnuplot/bench.data: -------------------------------------------------------------------------------- 1 | Cur.100 1 476 2 | Bin.100 1 2011 3 | Mer.100 1 353 4 | Two.100 1 344 5 | Cur.100 10 1373 6 | Bin.100 10 3706 7 | Mer.100 10 1225 8 | Two.100 10 1595 9 | Cur.100 50 4524 10 | Bin.100 50 4426 11 | Mer.100 50 4406 12 | Two.100 50 3059 13 | Cur.100 100 2746 14 | Bin.100 100 4781 15 | Mer.100 100 7443 16 | Two.100 100 4859 17 | Cur.100 500 12009 18 | Bin.100 500 6051 19 | Mer.100 500 30406 20 | Two.100 500 10071 21 | Cur.100 1000 14475 22 | Bin.100 1000 7068 23 | Mer.100 1000 59950 24 | Two.100 1000 11530 25 | Cur.100 10000 18895 26 | Bin.100 10000 11007 27 | 
Mer.100 10000 915484 28 | Two.100 10000 16127 29 | Cur.100 100000 25635 30 | Bin.100 100000 15110 31 | Mer.100 100000 7819191 32 | Two.100 100000 18710 33 | Cur.1000 1 5007 34 | Bin.1000 1 28835 35 | Mer.1000 1 3346 36 | Two.1000 1 3591 37 | Cur.1000 10 19222 38 | Bin.1000 10 64091 39 | Mer.1000 10 18252 40 | Two.1000 10 20959 41 | Cur.1000 50 45496 42 | Bin.1000 50 101820 43 | Mer.1000 50 46817 44 | Two.1000 50 43222 45 | Cur.1000 100 52070 46 | Bin.1000 100 113958 47 | Mer.1000 100 76589 48 | Two.1000 100 53603 49 | Cur.1000 500 253771 50 | Bin.1000 500 172471 51 | Mer.1000 500 400153 52 | Two.1000 500 209355 53 | Cur.1000 1000 287709 54 | Bin.1000 1000 245613 55 | Mer.1000 1000 1010848 56 | Two.1000 1000 266988 57 | Cur.1000 10000 359376 58 | Bin.1000 10000 438286 59 | Mer.1000 10000 8676965 60 | Two.1000 10000 261349 61 | Cur.10000 1 125745 62 | Bin.10000 1 402996 63 | Mer.10000 1 112426 64 | Two.10000 1 132861 65 | Cur.10000 10 236750 66 | Bin.10000 10 843713 67 | Mer.10000 10 221997 68 | Two.10000 10 280024 69 | Cur.10000 50 528143 70 | Bin.10000 50 1283781 71 | Mer.10000 50 499080 72 | Two.10000 50 585025 73 | Cur.10000 100 788320 74 | Bin.10000 100 1856169 75 | Mer.10000 100 848994 76 | Two.10000 100 773841 77 | Cur.10000 500 3776205 78 | Bin.10000 500 4064089 79 | Mer.10000 500 4734256 80 | Two.10000 500 2671240 81 | Cur.10000 1000 4343108 82 | Bin.10000 1000 4335445 83 | Mer.10000 1000 7766084 84 | Two.10000 1000 3127777 85 | Cur.100000 1 1307454 86 | Bin.100000 1 4130650 87 | Mer.100000 1 1200516 88 | Two.100000 1 1320702 89 | Cur.100000 10 1106473 90 | Bin.100000 10 9308957 91 | Mer.100000 10 1087623 92 | Two.100000 10 2960338 93 | Cur.100000 50 4493197 94 | Bin.100000 50 15564096 95 | Mer.100000 50 4233171 96 | Two.100000 50 6049663 97 | Cur.100000 100 11902295 98 | Bin.100000 100 21413719 99 | Mer.100000 100 9407519 100 | Two.100000 100 10227065 101 | -------------------------------------------------------------------------------- /intersects/gnuplot/convert.sh: -------------------------------------------------------------------------------- 1 | cat ./tues-afternoon.bench | awk '{if (length($0) > 0) { print $2"."$4,$8,$10}}' > /tmp/bench.data 2 | cat /tmp/bench.data | awk '{print > $1".dat"}' 3 | -------------------------------------------------------------------------------- /intersects/gnuplot/gnu.plot: -------------------------------------------------------------------------------- 1 | filelist = system("ls *0.80.100.dat") 2 | plot for [fname in filelist] fname using 2:3 title fname with linespoints lw 2; 3 | set logscale x; 4 | pause -1 5 | 6 | -------------------------------------------------------------------------------- /intersects/gnuplot/prev/Bin.100.dat: -------------------------------------------------------------------------------- 1 | Bin.100 1 2011 2 | Bin.100 10 3706 3 | Bin.100 50 4426 4 | Bin.100 100 4781 5 | Bin.100 500 6051 6 | Bin.100 1000 7068 7 | Bin.100 10000 11007 8 | Bin.100 100000 15110 9 | -------------------------------------------------------------------------------- /intersects/gnuplot/prev/Bin.1000.dat: -------------------------------------------------------------------------------- 1 | Bin.1000 1 28835 2 | Bin.1000 10 64091 3 | Bin.1000 50 101820 4 | Bin.1000 100 113958 5 | Bin.1000 500 172471 6 | Bin.1000 1000 245613 7 | Bin.1000 10000 438286 8 | -------------------------------------------------------------------------------- /intersects/gnuplot/prev/Bin.10000.dat: 
-------------------------------------------------------------------------------- 1 | Bin.10000 1 402996 2 | Bin.10000 10 843713 3 | Bin.10000 50 1283781 4 | Bin.10000 100 1856169 5 | Bin.10000 500 4064089 6 | Bin.10000 1000 4335445 7 | -------------------------------------------------------------------------------- /intersects/gnuplot/prev/Bin.100000.dat: -------------------------------------------------------------------------------- 1 | Bin.100000 1 4130650 2 | Bin.100000 10 9308957 3 | Bin.100000 50 15564096 4 | Bin.100000 100 21413719 5 | -------------------------------------------------------------------------------- /intersects/gnuplot/prev/Mer.100.dat: -------------------------------------------------------------------------------- 1 | Mer.100 1 353 2 | Mer.100 10 1225 3 | Mer.100 50 4406 4 | Mer.100 100 7443 5 | Mer.100 500 30406 6 | Mer.100 1000 59950 7 | Mer.100 10000 915484 8 | Mer.100 100000 7819191 9 | -------------------------------------------------------------------------------- /intersects/gnuplot/prev/Mer.1000.dat: -------------------------------------------------------------------------------- 1 | Mer.1000 1 3346 2 | Mer.1000 10 18252 3 | Mer.1000 50 46817 4 | Mer.1000 100 76589 5 | Mer.1000 500 400153 6 | Mer.1000 1000 1010848 7 | Mer.1000 10000 8676965 8 | -------------------------------------------------------------------------------- /intersects/gnuplot/prev/Mer.10000.dat: -------------------------------------------------------------------------------- 1 | Mer.10000 1 112426 2 | Mer.10000 10 221997 3 | Mer.10000 50 499080 4 | Mer.10000 100 848994 5 | Mer.10000 500 4734256 6 | Mer.10000 1000 7766084 7 | -------------------------------------------------------------------------------- /intersects/gnuplot/prev/Mer.100000.dat: -------------------------------------------------------------------------------- 1 | Mer.100000 1 1200516 2 | Mer.100000 10 1087623 3 | Mer.100000 50 4233171 4 | Mer.100000 100 9407519 5 | -------------------------------------------------------------------------------- /intersects/gnuplot/prev/Two.100.dat: -------------------------------------------------------------------------------- 1 | Two.100 1 344 2 | Two.100 10 1595 3 | Two.100 50 3059 4 | Two.100 100 4859 5 | Two.100 500 10071 6 | Two.100 1000 11530 7 | Two.100 10000 16127 8 | Two.100 100000 18710 9 | -------------------------------------------------------------------------------- /intersects/gnuplot/prev/Two.1000.dat: -------------------------------------------------------------------------------- 1 | Two.1000 1 3591 2 | Two.1000 10 20959 3 | Two.1000 50 43222 4 | Two.1000 100 53603 5 | Two.1000 500 209355 6 | Two.1000 1000 266988 7 | Two.1000 10000 261349 8 | -------------------------------------------------------------------------------- /intersects/gnuplot/prev/Two.10000.dat: -------------------------------------------------------------------------------- 1 | Two.10000 1 132861 2 | Two.10000 10 280024 3 | Two.10000 50 585025 4 | Two.10000 100 773841 5 | Two.10000 500 2671240 6 | Two.10000 1000 3127777 7 | -------------------------------------------------------------------------------- /intersects/gnuplot/prev/Two.100000.dat: -------------------------------------------------------------------------------- 1 | Two.100000 1 1320702 2 | Two.100000 10 2960338 3 | Two.100000 50 6049663 4 | Two.100000 100 10227065 5 | -------------------------------------------------------------------------------- /intersects/gnuplot/prev/sz-hundred.png: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/dgraph-io/experiments/9841dc289feb4454304a0d45a05c0555acfb432f/intersects/gnuplot/prev/sz-hundred.png -------------------------------------------------------------------------------- /intersects/gnuplot/prev/sz-tenthousand.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/dgraph-io/experiments/9841dc289feb4454304a0d45a05c0555acfb432f/intersects/gnuplot/prev/sz-tenthousand.png -------------------------------------------------------------------------------- /intersects/gnuplot/prev/sz-thousand.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/dgraph-io/experiments/9841dc289feb4454304a0d45a05c0555acfb432f/intersects/gnuplot/prev/sz-thousand.png -------------------------------------------------------------------------------- /intersects/gnuplot/size.overlap.dat: -------------------------------------------------------------------------------- 1 | size.overlap 3000000 ns/op 2 | size.overlap 500000 ns/op 3 | size.overlap 3000000 ns/op 4 | size.overlap 1000000 ns/op 5 | size.overlap 500000 ns/op 6 | size.overlap 1000000 ns/op 7 | size.overlap 300000 ns/op 8 | size.overlap 300000 ns/op 9 | size.overlap 500000 ns/op 10 | size.overlap 500000 ns/op 11 | size.overlap 300000 ns/op 12 | size.overlap 500000 ns/op 13 | size.overlap 200000 ns/op 14 | size.overlap 200000 ns/op 15 | size.overlap 200000 ns/op 16 | size.overlap 100000 ns/op 17 | size.overlap 200000 ns/op 18 | size.overlap 100000 ns/op 19 | size.overlap 100000 ns/op 20 | size.overlap 200000 ns/op 21 | size.overlap 100000 ns/op 22 | size.overlap 50000 ns/op 23 | size.overlap 100000 ns/op 24 | size.overlap 100000 ns/op 25 | size.overlap 300000 ns/op 26 | size.overlap 50000 ns/op 27 | size.overlap 500000 ns/op 28 | size.overlap 100000 ns/op 29 | size.overlap 20000 ns/op 30 | size.overlap 100000 ns/op 31 | size.overlap 30000 ns/op 32 | size.overlap 20000 ns/op 33 | size.overlap 30000 ns/op 34 | size.overlap 30000 ns/op 35 | size.overlap 10000 ns/op 36 | size.overlap 30000 ns/op 37 | size.overlap 10000 ns/op 38 | size.overlap 10000 ns/op 39 | size.overlap 10000 ns/op 40 | size.overlap 5000 ns/op 41 | size.overlap 10000 ns/op 42 | size.overlap 10000 ns/op 43 | size.overlap 5000 ns/op 44 | size.overlap 3000 ns/op 45 | size.overlap 5000 ns/op 46 | size.overlap 10000 ns/op 47 | size.overlap 3000 ns/op 48 | size.overlap 10000 ns/op 49 | size.overlap 10000 ns/op 50 | size.overlap 2000 ns/op 51 | size.overlap 5000 ns/op 52 | size.overlap 3000 ns/op 53 | size.overlap 1000 ns/op 54 | size.overlap 2000 ns/op 55 | size.overlap 2000 ns/op 56 | size.overlap 1000 ns/op 57 | size.overlap 2000 ns/op 58 | size.overlap 500 ns/op 59 | size.overlap 500 ns/op 60 | size.overlap 500 ns/op 61 | size.overlap 300 ns/op 62 | size.overlap 300 ns/op 63 | size.overlap 500 ns/op 64 | size.overlap 5000000 ns/op 65 | size.overlap 1000000 ns/op 66 | size.overlap 5000000 ns/op 67 | size.overlap 1000000 ns/op 68 | size.overlap 500000 ns/op 69 | size.overlap 1000000 ns/op 70 | size.overlap 500000 ns/op 71 | size.overlap 300000 ns/op 72 | size.overlap 500000 ns/op 73 | size.overlap 1000000 ns/op 74 | size.overlap 300000 ns/op 75 | size.overlap 300000 ns/op 76 | size.overlap 200000 ns/op 77 | size.overlap 300000 ns/op 78 | size.overlap 200000 ns/op 79 | size.overlap 200000 ns/op 80 | size.overlap 200000 ns/op 81 | size.overlap 200000 
ns/op 82 | size.overlap 100000 ns/op 83 | size.overlap 200000 ns/op 84 | size.overlap 100000 ns/op 85 | size.overlap 100000 ns/op 86 | size.overlap 100000 ns/op 87 | size.overlap 100000 ns/op 88 | size.overlap 500000 ns/op 89 | size.overlap 100000 ns/op 90 | size.overlap 500000 ns/op 91 | size.overlap 100000 ns/op 92 | size.overlap 30000 ns/op 93 | size.overlap 100000 ns/op 94 | size.overlap 30000 ns/op 95 | size.overlap 20000 ns/op 96 | size.overlap 30000 ns/op 97 | size.overlap 30000 ns/op 98 | size.overlap 10000 ns/op 99 | size.overlap 20000 ns/op 100 | size.overlap 10000 ns/op 101 | size.overlap 10000 ns/op 102 | size.overlap 10000 ns/op 103 | size.overlap 10000 ns/op 104 | size.overlap 10000 ns/op 105 | size.overlap 10000 ns/op 106 | size.overlap 2000 ns/op 107 | size.overlap 2000 ns/op 108 | size.overlap 3000 ns/op 109 | size.overlap 50000 ns/op 110 | size.overlap 5000 ns/op 111 | size.overlap 20000 ns/op 112 | size.overlap 10000 ns/op 113 | size.overlap 2000 ns/op 114 | size.overlap 5000 ns/op 115 | size.overlap 3000 ns/op 116 | size.overlap 1000 ns/op 117 | size.overlap 3000 ns/op 118 | size.overlap 2000 ns/op 119 | size.overlap 1000 ns/op 120 | size.overlap 2000 ns/op 121 | size.overlap 300 ns/op 122 | size.overlap 500 ns/op 123 | size.overlap 500 ns/op 124 | size.overlap 300 ns/op 125 | size.overlap 500 ns/op 126 | size.overlap 500 ns/op 127 | -------------------------------------------------------------------------------- /intersects/gnuplot/sz100.txt: -------------------------------------------------------------------------------- 1 | BenchmarkListIntersect/ Cur size 100 overlap 0.00 ratio 1 3000000 476 ns/op 2 | BenchmarkListIntersect/ Bin size 100 overlap 0.00 ratio 1 1000000 2011 ns/op 3 | BenchmarkListIntersect/ Mer size 100 overlap 0.00 ratio 1 5000000 353 ns/op 4 | BenchmarkListIntersect/ Two size 100 overlap 0.00 ratio 1 5000000 344 ns/op 5 | 6 | BenchmarkListIntersect/ Cur size 100 overlap 0.00 ratio 10 1000000 1373 ns/op 7 | BenchmarkListIntersect/ Bin size 100 overlap 0.00 ratio 10 300000 3706 ns/op 8 | BenchmarkListIntersect/ Mer size 100 overlap 0.00 ratio 10 1000000 1225 ns/op 9 | BenchmarkListIntersect/ Two size 100 overlap 0.00 ratio 10 1000000 1595 ns/op 10 | 11 | BenchmarkListIntersect/ Cur size 100 overlap 0.00 ratio 50 300000 4524 ns/op 12 | BenchmarkListIntersect/ Bin size 100 overlap 0.00 ratio 50 300000 4426 ns/op 13 | BenchmarkListIntersect/ Mer size 100 overlap 0.00 ratio 50 300000 4406 ns/op 14 | BenchmarkListIntersect/ Two size 100 overlap 0.00 ratio 50 500000 3059 ns/op 15 | 16 | BenchmarkListIntersect/ Cur size 100 overlap 0.00 ratio 100 500000 2746 ns/op 17 | BenchmarkListIntersect/ Bin size 100 overlap 0.00 ratio 100 300000 4781 ns/op 18 | BenchmarkListIntersect/ Mer size 100 overlap 0.00 ratio 100 200000 7443 ns/op 19 | BenchmarkListIntersect/ Two size 100 overlap 0.00 ratio 100 300000 4859 ns/op 20 | 21 | BenchmarkListIntersect/ Cur size 100 overlap 0.00 ratio 500 100000 12009 ns/op 22 | BenchmarkListIntersect/ Bin size 100 overlap 0.00 ratio 500 200000 6051 ns/op 23 | BenchmarkListIntersect/ Mer size 100 overlap 0.00 ratio 500 50000 30406 ns/op 24 | BenchmarkListIntersect/ Two size 100 overlap 0.00 ratio 500 200000 10071 ns/op 25 | 26 | BenchmarkListIntersect/ Cur size 100 overlap 0.00 ratio 1000 100000 14475 ns/op 27 | BenchmarkListIntersect/ Bin size 100 overlap 0.00 ratio 1000 200000 7068 ns/op 28 | BenchmarkListIntersect/ Mer size 100 overlap 0.00 ratio 1000 30000 59950 ns/op 29 | BenchmarkListIntersect/ Two size 100 overlap 0.00 ratio 1000 
200000 11530 ns/op 30 | 31 | BenchmarkListIntersect/ Cur size 100 overlap 0.00 ratio 10000 100000 18895 ns/op 32 | BenchmarkListIntersect/ Bin size 100 overlap 0.00 ratio 10000 200000 11007 ns/op 33 | BenchmarkListIntersect/ Mer size 100 overlap 0.00 ratio 10000 2000 915484 ns/op 34 | BenchmarkListIntersect/ Two size 100 overlap 0.00 ratio 10000 100000 16127 ns/op 35 | 36 | BenchmarkListIntersect/ Cur size 100 overlap 0.00 ratio 100000 50000 25635 ns/op 37 | BenchmarkListIntersect/ Bin size 100 overlap 0.00 ratio 100000 100000 15110 ns/op 38 | BenchmarkListIntersect/ Mer size 100 overlap 0.00 ratio 100000 200 7819191 ns/op 39 | BenchmarkListIntersect/ Two size 100 overlap 0.00 ratio 100000 100000 18710 ns/op 40 | 41 | BenchmarkListIntersect/ Cur size 1000 overlap 0.00 ratio 1 300000 5007 ns/op 42 | BenchmarkListIntersect/ Bin size 1000 overlap 0.00 ratio 1 50000 28835 ns/op 43 | BenchmarkListIntersect/ Mer size 1000 overlap 0.00 ratio 1 500000 3346 ns/op 44 | BenchmarkListIntersect/ Two size 1000 overlap 0.00 ratio 1 300000 3591 ns/op 45 | 46 | BenchmarkListIntersect/ Cur size 1000 overlap 0.00 ratio 10 100000 19222 ns/op 47 | BenchmarkListIntersect/ Bin size 1000 overlap 0.00 ratio 10 20000 64091 ns/op 48 | BenchmarkListIntersect/ Mer size 1000 overlap 0.00 ratio 10 100000 18252 ns/op 49 | BenchmarkListIntersect/ Two size 1000 overlap 0.00 ratio 10 100000 20959 ns/op 50 | 51 | BenchmarkListIntersect/ Cur size 1000 overlap 0.00 ratio 50 30000 45496 ns/op 52 | BenchmarkListIntersect/ Bin size 1000 overlap 0.00 ratio 50 20000 101820 ns/op 53 | BenchmarkListIntersect/ Mer size 1000 overlap 0.00 ratio 50 30000 46817 ns/op 54 | BenchmarkListIntersect/ Two size 1000 overlap 0.00 ratio 50 30000 43222 ns/op 55 | 56 | BenchmarkListIntersect/ Cur size 1000 overlap 0.00 ratio 100 30000 52070 ns/op 57 | BenchmarkListIntersect/ Bin size 1000 overlap 0.00 ratio 100 10000 113958 ns/op 58 | BenchmarkListIntersect/ Mer size 1000 overlap 0.00 ratio 100 20000 76589 ns/op 59 | BenchmarkListIntersect/ Two size 1000 overlap 0.00 ratio 100 30000 53603 ns/op 60 | 61 | BenchmarkListIntersect/ Cur size 1000 overlap 0.00 ratio 500 10000 253771 ns/op 62 | BenchmarkListIntersect/ Bin size 1000 overlap 0.00 ratio 500 10000 172471 ns/op 63 | BenchmarkListIntersect/ Mer size 1000 overlap 0.00 ratio 500 5000 400153 ns/op 64 | BenchmarkListIntersect/ Two size 1000 overlap 0.00 ratio 500 10000 209355 ns/op 65 | 66 | BenchmarkListIntersect/ Cur size 1000 overlap 0.00 ratio 1000 5000 287709 ns/op 67 | BenchmarkListIntersect/ Bin size 1000 overlap 0.00 ratio 1000 5000 245613 ns/op 68 | BenchmarkListIntersect/ Mer size 1000 overlap 0.00 ratio 1000 2000 1010848 ns/op 69 | BenchmarkListIntersect/ Two size 1000 overlap 0.00 ratio 1000 5000 266988 ns/op 70 | 71 | BenchmarkListIntersect/ Cur size 1000 overlap 0.00 ratio 10000 5000 359376 ns/op 72 | BenchmarkListIntersect/ Bin size 1000 overlap 0.00 ratio 10000 3000 438286 ns/op 73 | BenchmarkListIntersect/ Mer size 1000 overlap 0.00 ratio 10000 200 8676965 ns/op 74 | BenchmarkListIntersect/ Two size 1000 overlap 0.00 ratio 10000 5000 261349 ns/op 75 | 76 | BenchmarkListIntersect/ Cur size 10000 overlap 0.00 ratio 1 10000 125745 ns/op 77 | BenchmarkListIntersect/ Bin size 10000 overlap 0.00 ratio 1 3000 402996 ns/op 78 | BenchmarkListIntersect/ Mer size 10000 overlap 0.00 ratio 1 10000 112426 ns/op 79 | BenchmarkListIntersect/ Two size 10000 overlap 0.00 ratio 1 10000 132861 ns/op 80 | 81 | BenchmarkListIntersect/ Cur size 10000 overlap 0.00 ratio 10 5000 236750 ns/op 82 | 
BenchmarkListIntersect/ Bin size 10000 overlap 0.00 ratio 10 2000 843713 ns/op 83 | BenchmarkListIntersect/ Mer size 10000 overlap 0.00 ratio 10 10000 221997 ns/op 84 | BenchmarkListIntersect/ Two size 10000 overlap 0.00 ratio 10 5000 280024 ns/op 85 | 86 | BenchmarkListIntersect/ Cur size 10000 overlap 0.00 ratio 50 3000 528143 ns/op 87 | BenchmarkListIntersect/ Bin size 10000 overlap 0.00 ratio 50 1000 1283781 ns/op 88 | BenchmarkListIntersect/ Mer size 10000 overlap 0.00 ratio 50 3000 499080 ns/op 89 | BenchmarkListIntersect/ Two size 10000 overlap 0.00 ratio 50 3000 585025 ns/op 90 | 91 | BenchmarkListIntersect/ Cur size 10000 overlap 0.00 ratio 100 2000 788320 ns/op 92 | BenchmarkListIntersect/ Bin size 10000 overlap 0.00 ratio 100 1000 1856169 ns/op 93 | BenchmarkListIntersect/ Mer size 10000 overlap 0.00 ratio 100 2000 848994 ns/op 94 | BenchmarkListIntersect/ Two size 10000 overlap 0.00 ratio 100 2000 773841 ns/op 95 | 96 | BenchmarkListIntersect/ Cur size 10000 overlap 0.00 ratio 500 500 3776205 ns/op 97 | BenchmarkListIntersect/ Bin size 10000 overlap 0.00 ratio 500 500 4064089 ns/op 98 | BenchmarkListIntersect/ Mer size 10000 overlap 0.00 ratio 500 300 4734256 ns/op 99 | BenchmarkListIntersect/ Two size 10000 overlap 0.00 ratio 500 500 2671240 ns/op 100 | 101 | BenchmarkListIntersect/ Cur size 10000 overlap 0.00 ratio 1000 300 4343108 ns/op 102 | BenchmarkListIntersect/ Bin size 10000 overlap 0.00 ratio 1000 300 4335445 ns/op 103 | BenchmarkListIntersect/ Mer size 10000 overlap 0.00 ratio 1000 200 7766084 ns/op 104 | BenchmarkListIntersect/ Two size 10000 overlap 0.00 ratio 1000 500 3127777 ns/op 105 | 106 | BenchmarkListIntersect/ Cur size 100000 overlap 0.00 ratio 1 1000 1307454 ns/op 107 | BenchmarkListIntersect/ Bin size 100000 overlap 0.00 ratio 1 300 4130650 ns/op 108 | BenchmarkListIntersect/ Mer size 100000 overlap 0.00 ratio 1 1000 1200516 ns/op 109 | BenchmarkListIntersect/ Two size 100000 overlap 0.00 ratio 1 1000 1320702 ns/op 110 | 111 | BenchmarkListIntersect/ Cur size 100000 overlap 0.00 ratio 10 2000 1106473 ns/op 112 | BenchmarkListIntersect/ Bin size 100000 overlap 0.00 ratio 10 200 9308957 ns/op 113 | BenchmarkListIntersect/ Mer size 100000 overlap 0.00 ratio 10 2000 1087623 ns/op 114 | BenchmarkListIntersect/ Two size 100000 overlap 0.00 ratio 10 500 2960338 ns/op 115 | 116 | BenchmarkListIntersect/ Cur size 100000 overlap 0.00 ratio 50 300 4493197 ns/op 117 | BenchmarkListIntersect/ Bin size 100000 overlap 0.00 ratio 50 100 15564096 ns/op 118 | BenchmarkListIntersect/ Mer size 100000 overlap 0.00 ratio 50 300 4233171 ns/op 119 | BenchmarkListIntersect/ Two size 100000 overlap 0.00 ratio 50 200 6049663 ns/op 120 | 121 | BenchmarkListIntersect/ Cur size 100000 overlap 0.00 ratio 100 200 11902295 ns/op 122 | BenchmarkListIntersect/ Bin size 100000 overlap 0.00 ratio 100 100 21413719 ns/op 123 | BenchmarkListIntersect/ Mer size 100000 overlap 0.00 ratio 100 200 9407519 ns/op 124 | BenchmarkListIntersect/ Two size 100000 overlap 0.00 ratio 100 100 10227065 ns/op 125 | 126 | -------------------------------------------------------------------------------- /intersects/gnuplot/tues-afternoon.bench: -------------------------------------------------------------------------------- 1 | Cur size 100 overlap 0.00 ratio 1 3000000 502 ns/op 2 | Bin size 100 overlap 0.00 ratio 1 500000 2177 ns/op 3 | Two size 100 overlap 0.00 ratio 1 3000000 346 ns/op 4 | 5 | Cur size 100 overlap 0.00 ratio 10 1000000 1295 ns/op 6 | Bin size 100 overlap 0.00 ratio 10 500000 3619 ns/op 7 | Two 
size 100 overlap 0.00 ratio 10 1000000 1558 ns/op 8 | 9 | Cur size 100 overlap 0.00 ratio 50 300000 4562 ns/op 10 | Bin size 100 overlap 0.00 ratio 50 300000 4496 ns/op 11 | Two size 100 overlap 0.00 ratio 50 500000 2834 ns/op 12 | 13 | Cur size 100 overlap 0.00 ratio 100 500000 2848 ns/op 14 | Bin size 100 overlap 0.00 ratio 100 300000 4744 ns/op 15 | Two size 100 overlap 0.00 ratio 100 500000 3416 ns/op 16 | 17 | Cur size 100 overlap 0.00 ratio 500 200000 10714 ns/op 18 | Bin size 100 overlap 0.00 ratio 500 200000 6148 ns/op 19 | Two size 100 overlap 0.00 ratio 500 200000 9600 ns/op 20 | 21 | Cur size 100 overlap 0.00 ratio 1000 100000 13110 ns/op 22 | Bin size 100 overlap 0.00 ratio 1000 200000 7642 ns/op 23 | Two size 100 overlap 0.00 ratio 1000 100000 14306 ns/op 24 | 25 | Cur size 100 overlap 0.00 ratio 10000 100000 17724 ns/op 26 | Bin size 100 overlap 0.00 ratio 10000 200000 11102 ns/op 27 | Two size 100 overlap 0.00 ratio 10000 100000 14599 ns/op 28 | 29 | Cur size 100 overlap 0.00 ratio 100000 50000 22210 ns/op 30 | Bin size 100 overlap 0.00 ratio 100000 100000 15632 ns/op 31 | Two size 100 overlap 0.00 ratio 100000 100000 18938 ns/op 32 | 33 | Cur size 1000 overlap 0.00 ratio 1 300000 5218 ns/op 34 | Bin size 1000 overlap 0.00 ratio 1 50000 26022 ns/op 35 | Two size 1000 overlap 0.00 ratio 1 500000 3664 ns/op 36 | 37 | Cur size 1000 overlap 0.00 ratio 10 100000 20874 ns/op 38 | Bin size 1000 overlap 0.00 ratio 10 20000 68601 ns/op 39 | Two size 1000 overlap 0.00 ratio 10 100000 21383 ns/op 40 | 41 | Cur size 1000 overlap 0.00 ratio 50 30000 46390 ns/op 42 | Bin size 1000 overlap 0.00 ratio 50 20000 97607 ns/op 43 | Two size 1000 overlap 0.00 ratio 50 30000 42757 ns/op 44 | 45 | Cur size 1000 overlap 0.00 ratio 100 30000 53533 ns/op 46 | Bin size 1000 overlap 0.00 ratio 100 10000 116420 ns/op 47 | Two size 1000 overlap 0.00 ratio 100 30000 52907 ns/op 48 | 49 | Cur size 1000 overlap 0.00 ratio 500 10000 220420 ns/op 50 | Bin size 1000 overlap 0.00 ratio 500 10000 170243 ns/op 51 | Two size 1000 overlap 0.00 ratio 500 10000 177488 ns/op 52 | 53 | Cur size 1000 overlap 0.00 ratio 1000 5000 246376 ns/op 54 | Bin size 1000 overlap 0.00 ratio 1000 10000 201288 ns/op 55 | Two size 1000 overlap 0.00 ratio 1000 10000 194318 ns/op 56 | 57 | Cur size 1000 overlap 0.00 ratio 10000 5000 400010 ns/op 58 | Bin size 1000 overlap 0.00 ratio 10000 3000 497427 ns/op 59 | Two size 1000 overlap 0.00 ratio 10000 5000 298290 ns/op 60 | 61 | Cur size 10000 overlap 0.00 ratio 1 10000 127870 ns/op 62 | Bin size 10000 overlap 0.00 ratio 1 3000 439242 ns/op 63 | Two size 10000 overlap 0.00 ratio 1 10000 142286 ns/op 64 | 65 | Cur size 10000 overlap 0.00 ratio 10 10000 232583 ns/op 66 | Bin size 10000 overlap 0.00 ratio 10 2000 921371 ns/op 67 | Two size 10000 overlap 0.00 ratio 10 5000 284922 ns/op 68 | 69 | Cur size 10000 overlap 0.00 ratio 50 3000 562843 ns/op 70 | Bin size 10000 overlap 0.00 ratio 50 1000 1491498 ns/op 71 | Two size 10000 overlap 0.00 ratio 50 2000 691488 ns/op 72 | 73 | Cur size 10000 overlap 0.00 ratio 100 2000 865578 ns/op 74 | Bin size 10000 overlap 0.00 ratio 100 1000 1790656 ns/op 75 | Two size 10000 overlap 0.00 ratio 100 2000 769356 ns/op 76 | 77 | Cur size 10000 overlap 0.00 ratio 500 500 3612158 ns/op 78 | Bin size 10000 overlap 0.00 ratio 500 500 3474005 ns/op 79 | Two size 10000 overlap 0.00 ratio 500 500 2625058 ns/op 80 | 81 | Cur size 10000 overlap 0.00 ratio 1000 300 4604797 ns/op 82 | Bin size 10000 overlap 0.00 ratio 1000 300 4669962 ns/op 83 | Two size 10000 overlap 
0.00 ratio 1000 500 3001428 ns/op 84 | 85 | 86 | Cur size 100 overlap 0.80 ratio 1 5000000 249 ns/op 87 | Bin size 100 overlap 0.80 ratio 1 1000000 2057 ns/op 88 | Two size 100 overlap 0.80 ratio 1 5000000 294 ns/op 89 | 90 | Cur size 100 overlap 0.80 ratio 10 1000000 1048 ns/op 91 | Bin size 100 overlap 0.80 ratio 10 500000 3104 ns/op 92 | Two size 100 overlap 0.80 ratio 10 1000000 1416 ns/op 93 | 94 | Cur size 100 overlap 0.80 ratio 50 500000 3864 ns/op 95 | Bin size 100 overlap 0.80 ratio 50 300000 3670 ns/op 96 | Two size 100 overlap 0.80 ratio 50 500000 2967 ns/op 97 | 98 | Cur size 100 overlap 0.80 ratio 100 1000000 1917 ns/op 99 | Bin size 100 overlap 0.80 ratio 100 300000 4018 ns/op 100 | Two size 100 overlap 0.80 ratio 100 300000 4701 ns/op 101 | 102 | Cur size 100 overlap 0.80 ratio 500 200000 7240 ns/op 103 | Bin size 100 overlap 0.80 ratio 500 300000 5191 ns/op 104 | Two size 100 overlap 0.80 ratio 500 200000 9850 ns/op 105 | 106 | Cur size 100 overlap 0.80 ratio 1000 200000 8352 ns/op 107 | Bin size 100 overlap 0.80 ratio 1000 200000 5955 ns/op 108 | Two size 100 overlap 0.80 ratio 1000 200000 10711 ns/op 109 | 110 | Cur size 100 overlap 0.80 ratio 10000 100000 14017 ns/op 111 | Bin size 100 overlap 0.80 ratio 10000 200000 9627 ns/op 112 | Two size 100 overlap 0.80 ratio 10000 100000 14331 ns/op 113 | 114 | Cur size 100 overlap 0.80 ratio 100000 100000 17740 ns/op 115 | Bin size 100 overlap 0.80 ratio 100000 100000 12621 ns/op 116 | Two size 100 overlap 0.80 ratio 100000 100000 18746 ns/op 117 | 118 | Cur size 1000 overlap 0.80 ratio 1 500000 2862 ns/op 119 | Bin size 1000 overlap 0.80 ratio 1 100000 21128 ns/op 120 | Two size 1000 overlap 0.80 ratio 1 500000 3057 ns/op 121 | 122 | Cur size 1000 overlap 0.80 ratio 10 100000 15553 ns/op 123 | Bin size 1000 overlap 0.80 ratio 10 30000 50781 ns/op 124 | Two size 1000 overlap 0.80 ratio 10 100000 21584 ns/op 125 | 126 | Cur size 1000 overlap 0.80 ratio 50 30000 42520 ns/op 127 | Bin size 1000 overlap 0.80 ratio 50 20000 81000 ns/op 128 | Two size 1000 overlap 0.80 ratio 50 30000 44956 ns/op 129 | 130 | Cur size 1000 overlap 0.80 ratio 100 30000 44530 ns/op 131 | Bin size 1000 overlap 0.80 ratio 100 10000 139756 ns/op 132 | Two size 1000 overlap 0.80 ratio 100 20000 56865 ns/op 133 | 134 | Cur size 1000 overlap 0.80 ratio 500 10000 189259 ns/op 135 | Bin size 1000 overlap 0.80 ratio 500 10000 168377 ns/op 136 | Two size 1000 overlap 0.80 ratio 500 10000 204097 ns/op 137 | 138 | Cur size 1000 overlap 0.80 ratio 1000 10000 261181 ns/op 139 | Bin size 1000 overlap 0.80 ratio 1000 10000 198255 ns/op 140 | Two size 1000 overlap 0.80 ratio 1000 10000 219306 ns/op 141 | 142 | Cur size 1000 overlap 0.80 ratio 10000 2000 525353 ns/op 143 | Bin size 1000 overlap 0.80 ratio 10000 2000 563448 ns/op 144 | Two size 1000 overlap 0.80 ratio 10000 3000 377139 ns/op 145 | 146 | Cur size 10000 overlap 0.80 ratio 1 50000 41263 ns/op 147 | Bin size 10000 overlap 0.80 ratio 1 5000 317512 ns/op 148 | Two size 10000 overlap 0.80 ratio 1 20000 70605 ns/op 149 | 150 | Cur size 10000 overlap 0.80 ratio 10 10000 188755 ns/op 151 | Bin size 10000 overlap 0.80 ratio 10 2000 673818 ns/op 152 | Two size 10000 overlap 0.80 ratio 10 5000 306465 ns/op 153 | 154 | Cur size 10000 overlap 0.80 ratio 50 3000 484723 ns/op 155 | Bin size 10000 overlap 0.80 ratio 50 1000 1234523 ns/op 156 | Two size 10000 overlap 0.80 ratio 50 3000 620454 ns/op 157 | 158 | Cur size 10000 overlap 0.80 ratio 100 2000 867529 ns/op 159 | Bin size 10000 overlap 0.80 ratio 100 1000 1589341 ns/op 
160 | Two size 10000 overlap 0.80 ratio 100 2000 825185 ns/op 161 | 162 | Cur size 10000 overlap 0.80 ratio 500 300 4590360 ns/op 163 | Bin size 10000 overlap 0.80 ratio 500 500 3265703 ns/op 164 | Two size 10000 overlap 0.80 ratio 500 500 2901345 ns/op 165 | 166 | Cur size 10000 overlap 0.80 ratio 1000 300 5043228 ns/op 167 | Bin size 10000 overlap 0.80 ratio 1000 500 3627748 ns/op 168 | Two size 10000 overlap 0.80 ratio 1000 500 3006012 ns/op 169 | 170 | -------------------------------------------------------------------------------- /intersects/list.go: -------------------------------------------------------------------------------- 1 | package intersect 2 | 3 | import "sort" 4 | 5 | func mergeIntersect(a, b []uint64, final *[]uint64) { 6 | ma, mb := len(a), len(b) 7 | i, j := 0, 0 8 | for i < ma && j < mb { 9 | if a[i] == b[j] { 10 | *final = append(*final, a[i]) 11 | i++ 12 | j++ 13 | 14 | } else if a[i] < b[j] { 15 | for i = i + 1; i < ma && a[i] < b[j]; i++ { 16 | } 17 | 18 | } else { 19 | for j = j + 1; j < mb && a[i] > b[j]; j++ { 20 | } 21 | } 22 | } 23 | } 24 | 25 | func twoLevelLinear(a, b *DeltaList, final *[]uint64) { 26 | if len(a.Uids) > len(b.Uids) { 27 | panic("this is wrong") 28 | } 29 | if len(a.Uids) == 0 || len(b.Uids) == 0 { 30 | return 31 | } 32 | 33 | var ea uint64 34 | bsize := int(b.BucketSize) 35 | var eb uint64 = b.Uids[0] 36 | 37 | bucketIdx := 0 38 | numb := len(b.Buckets) 39 | end := bsize 40 | endBucket := b.Buckets[0] 41 | var ai, bi int 42 | 43 | for ai < len(a.Uids) && bi < len(b.Uids) { 44 | ea = a.Uids[ai] 45 | 46 | // find the bucket 47 | for ; endBucket < ea && bucketIdx < numb; bucketIdx++ { 48 | bi = bsize * bucketIdx 49 | endBucket = b.Buckets[bucketIdx] 50 | 51 | end = bsize * (bucketIdx + 1) 52 | if bucketIdx == numb-1 { 53 | end = len(b.Uids) 54 | } 55 | } 56 | if ea > endBucket { 57 | break 58 | } 59 | 60 | // Iterate within the bucket. 61 | for ea <= endBucket && ai < len(a.Uids) && bi < end { 62 | ea = a.Uids[ai] 63 | eb = b.Uids[bi] 64 | if eb < ea { 65 | bi++ 66 | } else if eb > ea { 67 | ai++ 68 | } else { 69 | *final = append(*final, ea) 70 | ai++ 71 | bi++ 72 | } 73 | } 74 | } 75 | } 76 | 77 | func twoLevelBinary(a, b *DeltaList, final *[]uint64) { 78 | if len(a.Uids) > len(b.Uids) { 79 | panic("this is wrong") 80 | } 81 | if len(a.Uids) == 0 || len(b.Uids) == 0 { 82 | return 83 | } 84 | 85 | var ea, eb uint64 86 | eb = b.Uids[0] 87 | var bucketIdx int 88 | bsize := int(b.BucketSize) 89 | var ai, bi int 90 | end := len(b.Uids) 91 | //numb := len(b.Buckets) 92 | endBucket := b.Buckets[0] 93 | bucks := b.Buckets 94 | lastBuck := 0 95 | 96 | for ai < len(a.Uids) { 97 | ea = a.Uids[ai] 98 | 99 | if ea > endBucket { 100 | bucketIdx = sort.Search(len(bucks), func(i int) bool { 101 | return bucks[i] >= ea 102 | }) 103 | if bucketIdx == len(bucks) { 104 | return 105 | } 106 | endBucket = bucks[bucketIdx] 107 | bi = bsize * (lastBuck + bucketIdx) 108 | end = bsize * (lastBuck + bucketIdx + 1) 109 | if bucketIdx == len(bucks)-1 { 110 | end = len(b.Uids) 111 | } 112 | lastBuck += bucketIdx 113 | bucks = bucks[bucketIdx:] 114 | } 115 | if ea > endBucket { 116 | break 117 | } 118 | 119 | // LINEAR search here. 
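// b.Buckets holds the maximum uid of each fixed-size bucket of b.Uids, so the
// sort.Search above jumps directly to the first bucket whose max is >= ea. The
// loop below then merges the two lists element by element, but only within that
// bucket's window [bi, end), and stops once ea moves past the bucket max (endBucket).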
120 | for ea <= endBucket && ai < len(a.Uids) && bi < end { 121 | ea = a.Uids[ai] 122 | eb = b.Uids[bi] 123 | if eb < ea { 124 | bi++ 125 | } else if eb > ea { 126 | ai++ 127 | } else { 128 | *final = append(*final, ea) 129 | ai++ 130 | bi++ 131 | } 132 | } 133 | } 134 | } 135 | 136 | func BinIntersect(d, q []uint64, final *[]uint64) { 137 | ld := len(d) 138 | lq := len(q) 139 | if ld == 0 || lq == 0 || d[ld-1] < q[0] || q[lq-1] < d[0] { 140 | return 141 | } 142 | if ld < lq { 143 | panic("what") 144 | } 145 | 146 | val := d[0] 147 | minq := sort.Search(len(q), func(i int) bool { 148 | return q[i] >= val 149 | }) 150 | 151 | val = d[len(d)-1] 152 | maxq := sort.Search(len(q), func(i int) bool { 153 | return q[i] > val 154 | }) 155 | 156 | binIntersect(d, q[minq:maxq], final) 157 | } 158 | 159 | func binIntersect(d, q []uint64, final *[]uint64) { 160 | if len(d) == 0 || len(q) == 0 { 161 | return 162 | } 163 | midq := len(q) / 2 164 | qval := q[midq] 165 | midd := sort.Search(len(d), func(i int) bool { 166 | return d[i] >= qval 167 | }) 168 | 169 | dd := d[0:midd] 170 | qq := q[0:midq] 171 | if len(dd) > len(qq) { // D > Q 172 | binIntersect(dd, qq, final) 173 | } else { 174 | binIntersect(qq, dd, final) 175 | } 176 | 177 | if midd >= len(d) { 178 | return 179 | } 180 | if d[midd] == qval { 181 | *final = append(*final, qval) 182 | } else { 183 | midd -= 1 184 | } 185 | 186 | dd = d[midd+1:] 187 | qq = q[midq+1:] 188 | if len(dd) > len(qq) { // D > Q 189 | binIntersect(dd, qq, final) 190 | } else { 191 | binIntersect(qq, dd, final) 192 | } 193 | } 194 | 195 | func createBucketList(d []uint64, bucketSize int) []uint64 { 196 | buckets := make([]uint64, 0, len(d)/bucketSize+2) 197 | for i := bucketSize - 1; i < len(d); i += bucketSize { 198 | buckets = append(buckets, d[i]) 199 | } 200 | last := d[len(d)-1] 201 | if len(buckets) == 0 || buckets[len(buckets)-1] != last { 202 | buckets = append(buckets, last) 203 | } 204 | return buckets 205 | } 206 | 207 | func encodeDelta(d []uint64, bucketSize int) *DeltaList { 208 | l := new(DeltaList) 209 | if len(d) == 0 { 210 | return l 211 | } 212 | l.BucketSize = int32(bucketSize) 213 | 214 | var last uint64 215 | for i, cur := range d { 216 | if i%bucketSize == bucketSize-1 { // Store the max of the bucket. 
217 | l.Buckets = append(l.Buckets, cur) 218 | } 219 | l.Uids = append(l.Uids, cur) 220 | last = cur 221 | } 222 | if len(l.Buckets) == 0 || l.Buckets[len(l.Buckets)-1] != last { 223 | l.Buckets = append(l.Buckets, last) 224 | } 225 | 226 | return l 227 | } 228 | 229 | func encodeFixed(d []uint64) *FixedList { 230 | f := new(FixedList) 231 | if len(d) == 0 { 232 | return f 233 | } 234 | for _, cur := range d { 235 | f.Uids = append(f.Uids, cur) 236 | } 237 | return f 238 | } 239 | -------------------------------------------------------------------------------- /intersects/list.proto: -------------------------------------------------------------------------------- 1 | syntax = "proto3"; 2 | package intersect; 3 | 4 | message DeltaList { 5 | int32 bucket_size = 1; 6 | repeated fixed64 buckets = 2; 7 | repeated fixed64 uids = 3; 8 | } 9 | 10 | message FixedList { 11 | repeated fixed64 uids = 1; 12 | } 13 | -------------------------------------------------------------------------------- /intersects/list_test.go: -------------------------------------------------------------------------------- 1 | package intersect 2 | 3 | import ( 4 | "fmt" 5 | "math" 6 | "math/rand" 7 | "sort" 8 | "testing" 9 | "time" 10 | 11 | "github.com/stretchr/testify/require" 12 | ) 13 | 14 | func TestUIDListIntersect2(t *testing.T) { 15 | u := []uint64{1, 2, 3} 16 | v := []uint64{3, 5} 17 | res := make([]uint64, 0, 3) 18 | BinIntersect(u, v, &res) 19 | require.Equal(t, []uint64{3}, res) 20 | } 21 | 22 | func intersect(a, b []uint64) []uint64 { 23 | m := make(map[uint64]struct{}) 24 | for _, i := range a { 25 | m[i] = struct{}{} 26 | } 27 | out := make([]uint64, 0, 100) 28 | for _, j := range b { 29 | if _, ok := m[j]; ok { 30 | out = append(out, j) 31 | } 32 | } 33 | return out 34 | } 35 | 36 | func createArray(sz int, limit int64) []uint64 { 37 | a := make([]uint64, sz) 38 | ma := make(map[uint64]struct{}) 39 | for i := 0; i < sz; i++ { 40 | for { 41 | ei := uint64(rand.Int63n(limit)) 42 | if _, ok := ma[ei]; !ok { 43 | a[i] = ei 44 | ma[ei] = struct{}{} 45 | break 46 | } 47 | } 48 | } 49 | sort.Slice(a, func(i, j int) bool { 50 | return a[i] < a[j] 51 | }) 52 | return a 53 | } 54 | 55 | func TestSize(t *testing.T) { 56 | rand.Seed(time.Now().UnixNano()) 57 | sz := 1 58 | for i := 0; i < 5; i++ { 59 | sz *= 10 60 | a := createArray(sz, math.MaxInt32) 61 | dl := encodeDelta(a, 32) 62 | dd, err := dl.Marshal() 63 | require.Nil(t, err) 64 | 65 | fl := encodeFixed(a) 66 | fd, err := fl.Marshal() 67 | require.Nil(t, err) 68 | fmt.Printf("Size=%d Size of delta: %v fixed: %v\n", sz, len(dd), len(fd)) 69 | 70 | mi := func(d []uint64) uint64 { 71 | var dm uint64 72 | for _, e := range d { 73 | if dm < e { 74 | dm = e 75 | } 76 | } 77 | return dm 78 | } 79 | dm := mi(dl.Uids) 80 | fm := mi(fl.Uids) 81 | fmt.Printf("Max delta: %v. Max fixed: %v. Bits delta: %v. 
Bits fixed: %v\n", 82 | dm, fm, math.Log2(float64(dm)), math.Log2(float64(fm))) 83 | fmt.Println() 84 | } 85 | } 86 | 87 | func TestTwoLevelIntersect(t *testing.T) { 88 | rand.Seed(time.Now().UnixNano()) 89 | a := createArray(100, 1000) 90 | da := encodeDelta(a, 3) 91 | b := createArray(200, 1000) 92 | db := encodeDelta(b, 3) 93 | fmt.Println("a=", da.Buckets, da.Uids) 94 | fmt.Println("b=", db.Buckets, db.Uids) 95 | 96 | fmt.Printf("a=%v\nb=%v\n", a, b) 97 | final := make([]uint64, 0, 100) 98 | twoLevelLinear(da, db, &final) 99 | 100 | exp := make([]uint64, 0, 100) 101 | mergeIntersect(a, b, &exp) 102 | fmt.Printf("exp=%v\n", exp) 103 | require.Equal(t, exp, final) 104 | 105 | final = final[:0] 106 | twoLevelBinary(da, db, &final) 107 | require.Equal(t, exp, final) 108 | } 109 | 110 | func TestIntersect(t *testing.T) { 111 | rand.Seed(time.Now().UnixNano()) 112 | a := createArray(100, 100) 113 | b := createArray(100, 100) 114 | 115 | res1 := make([]uint64, 0, 100) 116 | mergeIntersect(a, b, &res1) 117 | 118 | res2 := make([]uint64, 0, 100) 119 | binIntersect(a, b, &res2) 120 | 121 | //res3 := make([]uint64, 0, 100) 122 | //binIterative(a, b, &res3) 123 | 124 | exp := intersect(a, b) 125 | 126 | require.Equal(t, exp, res1, "merge not working") 127 | require.Equal(t, exp, res2, "binIntersect not working") 128 | //require.Equal(t, exp, res3, "binIterative not working") 129 | } 130 | 131 | func BenchmarkMarshal(b *testing.B) { 132 | rand.Seed(time.Now().UnixNano()) 133 | rs := []int{ /*1, 10, 50, 100,*/ 500, 1000, 10000, 100000, 1000000} 134 | for _, r := range rs { 135 | u := createArray(r, math.MaxInt32) 136 | 137 | var dsize, fsize int 138 | b.Run(fmt.Sprintf("delta-%d", r), 139 | func(b *testing.B) { 140 | var total time.Duration 141 | for i := 0; i < b.N; i++ { 142 | d := encodeDelta(u, 32) 143 | data, err := d.Marshal() 144 | if err != nil { 145 | b.Fatalf("Error: %v", err) 146 | } 147 | // Assuming 128 MB per sec. => 128 bytes per micro. 148 | dur := time.Duration(len(data) / 128) 149 | total += dur 150 | time.Sleep(dur * time.Microsecond) 151 | if i == 0 { 152 | dsize = len(data) 153 | } 154 | 155 | var out DeltaList 156 | if err := out.Unmarshal(data); err != nil { 157 | b.Fatalf("Error: %v", err) 158 | } 159 | } 160 | // fmt.Printf("Total sleep: %v\n", total) 161 | }) 162 | 163 | b.Run(fmt.Sprintf("fixed-%d", r), 164 | func(b *testing.B) { 165 | var total time.Duration 166 | for i := 0; i < b.N; i++ { 167 | d := encodeFixed(u) 168 | data, err := d.Marshal() 169 | if err != nil { 170 | b.Fatalf("Error: %v", err) 171 | } 172 | dur := time.Duration(len(data) / 128) 173 | total += dur 174 | // Assuming 128 MB per sec. => 128 bytes per micro. 
175 | time.Sleep(dur * time.Microsecond) 176 | 177 | if i == 0 { 178 | fsize = len(data) 179 | } 180 | var out FixedList 181 | if err := out.Unmarshal(data); err != nil { 182 | b.Fatalf("Error: %v", err) 183 | } 184 | } 185 | // fmt.Printf("Total fixed sleep: %v\n", total) 186 | }) 187 | fmt.Printf("SIZE Delta: %v Fixed: %v\n", dsize, fsize) 188 | fmt.Println() 189 | } 190 | } 191 | 192 | func BenchmarkListIntersect(b *testing.B) { 193 | randomTests := func(sz int, overlap float64) { 194 | rs := []int{ /*1, 10, 50, 100,*/ 500, 1000, 10000, 100000, 1000000} 195 | 196 | for _, r := range rs { 197 | sz2 := sz * r 198 | if sz2 > 10000000 { 199 | break 200 | } 201 | limit := int64(float64(sz2) / overlap) 202 | 203 | u1 := createArray(sz, limit) 204 | d1 := encodeDelta(u1, 32) 205 | u2 := createArray(sz2, limit) 206 | d2 := encodeDelta(u2, 32) 207 | result := make([]uint64, 0, sz) 208 | 209 | /* 210 | u := &task.List{u1} 211 | v := &task.List{u2} 212 | ucopy := make([]uint64, len(u1), len(u1)) 213 | copy(ucopy, u1) 214 | 215 | b.Run(fmt.Sprintf(":Cur:size=%d:overlap=%.2f:ratio=%d:", sz, overlap, r), 216 | func(b *testing.B) { 217 | for k := 0; k < b.N; k++ { 218 | //u.Uids = u.Uids[:sz] 219 | //copy(u.Uids, ucopy) 220 | IntersectWith(u, v) 221 | } 222 | }) 223 | */ 224 | b.Run(fmt.Sprintf(":Bin:size=%d:overlap=%.2f:ratio=%d:", sz, overlap, r), 225 | func(b *testing.B) { 226 | for k := 0; k < b.N; k++ { 227 | BinIntersect(u2, u1, &result) 228 | result = result[:0] 229 | } 230 | }) 231 | /* 232 | b.Run(fmt.Sprintf(":Mer:size=%d:overlap=%.2f:ratio=%d", sz, overlap, r), 233 | func(b *testing.B) { 234 | for k := 0; k < b.N; k++ { 235 | mergeIntersect(u1, u2, &result) 236 | result = result[:0] 237 | } 238 | }) 239 | */ 240 | b.Run(fmt.Sprintf(":Two:size=%d:overlap=%.2f:ratio=%d", sz, overlap, r), 241 | func(b *testing.B) { 242 | var f func(a, b *DeltaList, final *[]uint64) 243 | if r < 500 { 244 | f = twoLevelLinear 245 | } else { 246 | f = twoLevelBinary 247 | } 248 | for k := 0; k < b.N; k++ { 249 | //_ = createBucketList(u1, 32) 250 | createBucketList(u2, 32) 251 | f(d1, d2, &result) 252 | result = result[:0] 253 | } 254 | }) 255 | fmt.Println() 256 | } 257 | } 258 | //randomTests(10, 0.01) 259 | //randomTests(100, 0.01) 260 | //randomTests(1000, 0.01) 261 | //randomTests(10000, 0.01) 262 | 263 | // Overlap has no effect on Bin numbers. 264 | overlaps := []float64{0.00001, 0.8} 265 | for _, overlap := range overlaps { 266 | // randomTests(10, overlap) 267 | // randomTests(100, overlap) 268 | randomTests(1000, overlap) 269 | randomTests(10000, overlap) 270 | fmt.Println() 271 | } 272 | 273 | //randomTests(10, 0.4) 274 | //randomTests(100, 0.4) 275 | //randomTests(1000, 0.4) 276 | //randomTests(10000, 0.4) 277 | //fmt.Println() 278 | 279 | //randomTests(10, 0.8) 280 | //randomTests(100, 0.8) 281 | //randomTests(1000, 0.8) 282 | //randomTests(10000, 0.8) 283 | //fmt.Println() 284 | } 285 | -------------------------------------------------------------------------------- /intersects/normallist.go: -------------------------------------------------------------------------------- 1 | package intersect 2 | 3 | import ( 4 | "sort" 5 | 6 | "github.com/dgraph-io/dgraph/task" 7 | ) 8 | 9 | func IntersectWith(u, v *task.List) { 10 | n := len(u.Uids) 11 | m := len(v.Uids) 12 | 13 | if n > m { 14 | n, m = m, n 15 | } 16 | if n == 0 { 17 | n += 1 18 | } 19 | // Select appropriate function based on heuristics. 
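// The ratio of the longer list (m) to the shorter one (n) picks the strategy:
// near-equal sizes use a plain linear merge, moderate skew uses the jump-ahead
// scan, and heavy skew binary-searches the longer list for each element of the
// shorter one. The cutoffs of 100 and 500 are heuristic.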
20 | ratio := float64(m) / float64(n) 21 | if ratio < 100 { 22 | IntersectWithLin(u, v) 23 | } else if ratio < 500 { 24 | IntersectWithJump(u, v) 25 | } else { 26 | IntersectWithBin(u, v) 27 | } 28 | } 29 | 30 | // IntersectWith intersects u with v. The update is made to u. 31 | // u, v should be sorted. 32 | func IntersectWithLin(u, v *task.List) { 33 | out := u.Uids[:0] 34 | n := len(u.Uids) 35 | m := len(v.Uids) 36 | for i, k := 0, 0; i < n && k < m; { 37 | uid := u.Uids[i] 38 | vid := v.Uids[k] 39 | if uid > vid { 40 | for k = k + 1; k < m && v.Uids[k] < uid; k++ { 41 | } 42 | } else if uid == vid { 43 | out = append(out, uid) 44 | k++ 45 | i++ 46 | } else { 47 | for i = i + 1; i < n && u.Uids[i] < vid; i++ { 48 | } 49 | } 50 | } 51 | //u.Uids = out 52 | } 53 | 54 | func IntersectWithJump(u, v *task.List) { 55 | out := u.Uids[:0] 56 | n := len(u.Uids) 57 | m := len(v.Uids) 58 | jump := 30 59 | for i, k := 0, 0; i < n && k < m; { 60 | uid := u.Uids[i] 61 | vid := v.Uids[k] 62 | if uid == vid { 63 | out = append(out, uid) 64 | k++ 65 | i++ 66 | } else if k+jump < m && uid > v.Uids[k+jump] { 67 | k = k + jump 68 | } else if i+jump < n && vid > u.Uids[i+jump] { 69 | i = i + jump 70 | } else if uid > vid { 71 | for k = k + 1; k < m && v.Uids[k] < uid; k++ { 72 | } 73 | } else { 74 | for i = i + 1; i < n && u.Uids[i] < vid; i++ { 75 | } 76 | } 77 | } 78 | //u.Uids = out 79 | } 80 | 81 | func IntersectWithBin(u, v *task.List) { 82 | out := u.Uids[:0] 83 | m := len(u.Uids) 84 | n := len(v.Uids) 85 | // We want to do binary search on bigger list. 86 | smallList, bigList := u.Uids, v.Uids 87 | if m > n { 88 | smallList, bigList = bigList, smallList 89 | } 90 | // This is reduce the search space after every match. 91 | searchList := bigList 92 | for _, uid := range smallList { 93 | idx := sort.Search(len(searchList), func(i int) bool { 94 | return searchList[i] >= uid 95 | }) 96 | if idx < len(searchList) && searchList[idx] == uid { 97 | out = append(out, uid) 98 | // The next UID would never be at less than this idx 99 | // as the list is sorted. 100 | searchList = searchList[idx:] 101 | } 102 | } 103 | //u.Uids = out 104 | } 105 | -------------------------------------------------------------------------------- /jchiu/benchhash/README.md: -------------------------------------------------------------------------------- 1 | # General setup 2 | There are three different setups `Read`, `Write`, `ReadWrite`. Here are the main parameters. 3 | 4 | * `b.N`: This is the total number of reps for go bench to do its measurement. 5 | * `n`: For each rep, we will read/write `n` elements to **one single hash** across multiple goroutines. 6 | * `q`: For each rep, we will use `q` goroutines. 7 | 8 | Each of the three setups has the same structure below: 9 | 10 | * Assume `n` is divisible by `q`. 11 | * Create an array `work` of size `n`. This is a list of key-value pairs. 12 | * Start timer. 13 | * For each of the `b.N` reps, do the following: 14 | * Create an empty hash map `h`. 15 | * Create `q` goroutines. 16 | * Each goroutine will either read or write to `h`. 17 | * Each goroutine works on an equal part of the array `work`. 18 | 19 | Note that we do not use channels at all as that will cause some unnecessary blocking. 20 | 21 | For the `ReadWrite` setup, we ask the user for an additional parameter `fracRead`. There will be `fracRead * q` goroutines which do reads and `(1-fracRead) * q` goroutines which do writes. To be clear, each goroutine scans `n/q` elements. 
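For concreteness, a small driver along the following lines could translate `fracRead` into the `qRead`/`qWrite` arguments that the `ReadWrite` helper in `benchhash.go` takes. This is only a sketch: `benchReadWrite` and `newMap` are illustrative names, standing in for whichever map constructor (GoMap, GotomicMap, or a sharded map) the benchmark is exercising.

```
// Sketch only: map the README's fracRead onto ReadWrite's qRead/qWrite arguments.
func benchReadWrite(b *testing.B, n, q int, fracRead float64, newMap func() HashMap) {
	qRead := int(fracRead * float64(q)) // goroutines doing Gets
	qWrite := q - qRead                 // goroutines doing Puts
	ReadWrite(n, qRead, qWrite, newMap, b) // assumes n is divisible by q
}
```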
22 | 23 | We did not use `RunParallel` because it doesn't fit the structure of our test setup. 24 | 25 | Please see `benchhash.go` for the test setups. 26 | 27 | # Results 28 | 29 | Run `run.sh` to run the benchmarks. The results are found in the `results` subdirectory. 30 | 31 | Here is the main line in `run.sh`: 32 | 33 | ``` 34 | go test -cpu $NUMCPU -benchn 100000 -benchq $q -bench=. > results/benchhash.q$q.txt 35 | ``` 36 | 37 | You might want to want to tweak the parameter `NUMCPU` in `run.sh`. 38 | 39 | ## Interpreting results 40 | 41 | Note that `BenchReadWrite7` means `7/10` of the goroutines are doing reads. Generally, reads are cheaper, so `BenchReadWrite9` should take less time than `BenchReadWrite1`. 42 | 43 | Note that `ShardedGoMap16` means we use 16 shards of GoMaps. 44 | 45 | Here are some results for `q=1000`. 46 | 47 | ``` 48 | n=100000 q=1000 49 | BenchmarkRead/GoMap-2 200 6882485 ns/op 50 | BenchmarkRead/GotomicMap-2 100 20416675 ns/op 51 | BenchmarkRead/ShardedGoMap4-2 200 6269036 ns/op 52 | BenchmarkRead/ShardedGoMap8-2 300 5677571 ns/op 53 | BenchmarkRead/ShardedGoMap16-2 300 5333423 ns/op 54 | BenchmarkRead/ShardedGoMap32-2 300 5153572 ns/op 55 | BenchmarkWrite/GoMap-2 50 20158550 ns/op 56 | BenchmarkWrite/GotomicMap-2 10 102329390 ns/op 57 | BenchmarkWrite/ShardedGoMap4-2 50 20343076 ns/op 58 | BenchmarkWrite/ShardedGoMap8-2 100 16701933 ns/op 59 | BenchmarkWrite/ShardedGoMap16-2 100 15416442 ns/op 60 | BenchmarkWrite/ShardedGoMap32-2 100 14433085 ns/op 61 | BenchmarkReadWrite1/GoMap-2 100 17983497 ns/op 62 | BenchmarkReadWrite1/GotomicMap-2 20 101842924 ns/op 63 | BenchmarkReadWrite1/ShardedGoMap4-2 50 20708505 ns/op 64 | BenchmarkReadWrite1/ShardedGoMap8-2 100 17904104 ns/op 65 | BenchmarkReadWrite1/ShardedGoMap16-2 100 15886365 ns/op 66 | BenchmarkReadWrite1/ShardedGoMap32-2 100 14662240 ns/op 67 | BenchmarkReadWrite3/GoMap-2 100 17667777 ns/op 68 | BenchmarkReadWrite3/GotomicMap-2 20 81755781 ns/op 69 | BenchmarkReadWrite3/ShardedGoMap4-2 100 19808166 ns/op 70 | BenchmarkReadWrite3/ShardedGoMap8-2 100 17530502 ns/op 71 | BenchmarkReadWrite3/ShardedGoMap16-2 100 15399216 ns/op 72 | BenchmarkReadWrite3/ShardedGoMap32-2 100 13931323 ns/op 73 | BenchmarkReadWrite5/GoMap-2 100 15603580 ns/op 74 | BenchmarkReadWrite5/GotomicMap-2 30 59283460 ns/op 75 | BenchmarkReadWrite5/ShardedGoMap4-2 100 17145842 ns/op 76 | BenchmarkReadWrite5/ShardedGoMap8-2 100 15334110 ns/op 77 | BenchmarkReadWrite5/ShardedGoMap16-2 100 13167841 ns/op 78 | BenchmarkReadWrite5/ShardedGoMap32-2 100 11573469 ns/op 79 | BenchmarkReadWrite7/GoMap-2 100 15413127 ns/op 80 | BenchmarkReadWrite7/GotomicMap-2 30 45866904 ns/op 81 | BenchmarkReadWrite7/ShardedGoMap4-2 100 15388264 ns/op 82 | BenchmarkReadWrite7/ShardedGoMap8-2 100 13555623 ns/op 83 | BenchmarkReadWrite7/ShardedGoMap16-2 100 11592148 ns/op 84 | BenchmarkReadWrite7/ShardedGoMap32-2 100 10214670 ns/op 85 | BenchmarkReadWrite9/GoMap-2 100 13929774 ns/op 86 | BenchmarkReadWrite9/GotomicMap-2 50 28248601 ns/op 87 | BenchmarkReadWrite9/ShardedGoMap4-2 100 11725283 ns/op 88 | BenchmarkReadWrite9/ShardedGoMap8-2 200 9930883 ns/op 89 | BenchmarkReadWrite9/ShardedGoMap16-2 200 8302751 ns/op 90 | BenchmarkReadWrite9/ShardedGoMap32-2 200 7418499 ns/op 91 | PASS 92 | ok github.com/jchiu0/experimental/benchhash 71.801s 93 | ``` -------------------------------------------------------------------------------- /jchiu/benchhash/benchhash.go: -------------------------------------------------------------------------------- 1 | package benchhash 2 | 3 | 
import ( 4 | "log" 5 | "math/rand" 6 | "sync" 7 | "testing" 8 | ) 9 | 10 | type HashMap interface { 11 | Get(key uint32) (uint32, bool) 12 | Put(key, val uint32) 13 | } 14 | 15 | type KeyValPair struct { 16 | Key, Val uint32 17 | } 18 | 19 | func intArray(n int) []uint32 { 20 | a := make([]uint32, n) 21 | for i := 0; i < n; i++ { 22 | a[i] = rand.Uint32() 23 | } 24 | return a 25 | } 26 | 27 | func intPairArray(n int) []KeyValPair { 28 | a := make([]KeyValPair, n) 29 | for i := 0; i < n; i++ { 30 | a[i] = KeyValPair{rand.Uint32(), rand.Uint32()} 31 | } 32 | return a 33 | } 34 | 35 | func check(n, q int) { 36 | if (n % q) != 0 { 37 | log.Fatalf("%d not divisible by %d", n, q) 38 | } 39 | } 40 | 41 | func workRange(n, q, j int) (int, int) { 42 | return (n / q) * j, (n / q) * (j + 1) 43 | } 44 | 45 | // MultiRead gets n items using q Go routines. Do not use a channel / locking queue. 46 | func MultiRead(n, q int, newFunc func() HashMap, b *testing.B) { 47 | check(n, q) 48 | work := intArray(n) 49 | b.StartTimer() 50 | for i := 0; i < b.N; i++ { // N reps. 51 | h := newFunc() 52 | var wg sync.WaitGroup 53 | for j := 0; j < q; j++ { 54 | wg.Add(1) 55 | go func(j int) { 56 | defer wg.Done() 57 | start, end := workRange(n, q, j) 58 | for k := start; k < end; k++ { 59 | h.Get(work[k]) 60 | } 61 | }(j) 62 | } 63 | wg.Wait() 64 | } 65 | } 66 | 67 | // MultiWrite writes n items using q Go routines. 68 | func MultiWrite(n, q int, newFunc func() HashMap, b *testing.B) { 69 | check(n, q) 70 | work := intPairArray(n) 71 | b.StartTimer() 72 | for i := 0; i < b.N; i++ { // N reps. 73 | h := newFunc() 74 | var wg sync.WaitGroup 75 | for j := 0; j < q; j++ { 76 | wg.Add(1) 77 | go func(j int) { 78 | defer wg.Done() 79 | start, end := workRange(n, q, j) 80 | for k := start; k < end; k++ { 81 | h.Put(work[k].Key, work[k].Val) 82 | } 83 | }(j) 84 | } 85 | wg.Wait() 86 | } 87 | } 88 | 89 | // ReadWrite does read and write in parallel. 90 | // qRead is num goroutines for reading. 91 | // qWrite is num goroutines for writing. 92 | // Assume n divisible by (qRead + qWrite). 93 | func ReadWrite(n, qRead, qWrite int, newFunc func() HashMap, b *testing.B) { 94 | q := qRead + qWrite 95 | check(n, q) 96 | work := intPairArray(n) 97 | b.StartTimer() 98 | for i := 0; i < b.N; i++ { // N reps. 99 | h := newFunc() 100 | var wg sync.WaitGroup 101 | for j := 0; j < qRead; j++ { // Read goroutines. 102 | wg.Add(1) 103 | go func(j int) { 104 | defer wg.Done() 105 | start, end := workRange(n, q, j) 106 | for k := start; k < end; k++ { 107 | h.Get(work[k].Key) 108 | } 109 | }(j) 110 | } 111 | 112 | for j := qRead; j < q; j++ { // Write goroutines. 
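			// Writers reuse the same workRange partition as the readers: goroutines
			// qRead..q-1 cover the remaining (1-fracRead) share of the work slice.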
113 | wg.Add(1) 114 | go func(j int) { 115 | defer wg.Done() 116 | start, end := workRange(n, q, j) 117 | for k := start; k < end; k++ { 118 | h.Put(work[k].Key, work[k].Val) 119 | } 120 | }(j) 121 | } 122 | wg.Wait() 123 | } 124 | } 125 | -------------------------------------------------------------------------------- /jchiu/benchhash/benchhash_test.go: -------------------------------------------------------------------------------- 1 | package benchhash 2 | 3 | import ( 4 | "flag" 5 | "fmt" 6 | "os" 7 | "testing" 8 | ) 9 | 10 | type hashPair struct { 11 | label string 12 | newFunc func() HashMap 13 | } 14 | 15 | var ( 16 | benchn = flag.Int("benchn", 100000, "Number of elements to get/put to hash per rep.") 17 | benchq = flag.Int("benchq", 10, "Number of goroutines per rep.") 18 | hashPairs = []hashPair{ 19 | hashPair{"GoMap", NewGoMap}, 20 | hashPair{"GotomicMap", NewGotomicMap}, 21 | hashPair{"ShardedGoMap4", NewShardedGoMap4}, 22 | hashPair{"ShardedGoMap8", NewShardedGoMap8}, 23 | hashPair{"ShardedGoMap16", NewShardedGoMap16}, 24 | hashPair{"ShardedGoMap32", NewShardedGoMap32}, 25 | } 26 | ) 27 | 28 | func TestMain(m *testing.M) { 29 | flag.Parse() 30 | fmt.Printf("n=%d q=%d\n", *benchn, *benchq) 31 | os.Exit(m.Run()) 32 | } 33 | 34 | func BenchmarkRead(b *testing.B) { 35 | for _, p := range hashPairs { 36 | b.Run(p.label, func(b *testing.B) { 37 | MultiRead(*benchn, *benchq, p.newFunc, b) 38 | }) 39 | } 40 | } 41 | 42 | func BenchmarkWrite(b *testing.B) { 43 | for _, p := range hashPairs { 44 | b.Run(p.label, func(b *testing.B) { 45 | MultiWrite(*benchn, *benchq, p.newFunc, b) 46 | }) 47 | } 48 | } 49 | 50 | func benchmarkReadWrite(b *testing.B, fracRead float64) { 51 | numReadGoRoutines := int(fracRead * float64(*benchq)) 52 | for _, p := range hashPairs { 53 | b.Run(p.label, func(b *testing.B) { 54 | ReadWrite(*benchn, numReadGoRoutines, *benchq-numReadGoRoutines, 55 | p.newFunc, b) 56 | }) 57 | } 58 | } 59 | 60 | func BenchmarkReadWrite1(b *testing.B) { benchmarkReadWrite(b, 0.1) } 61 | func BenchmarkReadWrite3(b *testing.B) { benchmarkReadWrite(b, 0.3) } 62 | func BenchmarkReadWrite5(b *testing.B) { benchmarkReadWrite(b, 0.5) } 63 | func BenchmarkReadWrite7(b *testing.B) { benchmarkReadWrite(b, 0.7) } 64 | func BenchmarkReadWrite9(b *testing.B) { benchmarkReadWrite(b, 0.9) } 65 | -------------------------------------------------------------------------------- /jchiu/benchhash/gomap.go: -------------------------------------------------------------------------------- 1 | package benchhash 2 | 3 | import ( 4 | "sync" 5 | ) 6 | 7 | type GoMap struct { 8 | sync.RWMutex 9 | m map[uint32]uint32 10 | } 11 | 12 | func (s *GoMap) Get(key uint32) (uint32, bool) { 13 | s.RLock() 14 | defer s.RUnlock() 15 | val, found := s.m[key] 16 | return val, found 17 | } 18 | 19 | func (s *GoMap) Put(key, val uint32) { 20 | s.Lock() 21 | defer s.Unlock() 22 | s.m[key] = val 23 | } 24 | 25 | func NewGoMap() HashMap { 26 | return &GoMap{ 27 | m: make(map[uint32]uint32), 28 | } 29 | } 30 | 31 | type ShardedGoMap struct { 32 | numShards int 33 | m []HashMap 34 | } 35 | 36 | func (s *ShardedGoMap) Get(key uint32) (uint32, bool) { 37 | shard := key % uint32(s.numShards) 38 | return s.m[shard].Get(key) 39 | } 40 | 41 | func (s *ShardedGoMap) Put(key, val uint32) { 42 | shard := key % uint32(s.numShards) 43 | s.m[shard].Put(key, val) 44 | } 45 | 46 | func NewShardedGoMap(numShards int) HashMap { 47 | r := &ShardedGoMap{ 48 | numShards: numShards, 49 | m: make([]HashMap, numShards), 50 | } 51 | for i := 0; i < 
numShards; i++ { 52 | r.m[i] = NewGoMap() 53 | } 54 | return r 55 | } 56 | 57 | func NewShardedGoMap4() HashMap { return NewShardedGoMap(4) } 58 | func NewShardedGoMap8() HashMap { return NewShardedGoMap(8) } 59 | func NewShardedGoMap16() HashMap { return NewShardedGoMap(16) } 60 | func NewShardedGoMap32() HashMap { return NewShardedGoMap(32) } 61 | -------------------------------------------------------------------------------- /jchiu/benchhash/gotomicmap.go: -------------------------------------------------------------------------------- 1 | package benchhash 2 | 3 | import ( 4 | "github.com/zond/gotomic" 5 | ) 6 | 7 | type GotomicMap struct { 8 | h *gotomic.Hash 9 | } 10 | 11 | func (s GotomicMap) Get(key uint32) (uint32, bool) { 12 | val, found := s.h.Get(gotomic.IntKey(key)) 13 | if val == nil { 14 | return 0, false 15 | } 16 | return uint32(val.(gotomic.IntKey)), found 17 | } 18 | 19 | func (s GotomicMap) Put(key, val uint32) { 20 | s.h.Put(gotomic.IntKey(key), gotomic.IntKey(val)) 21 | } 22 | 23 | func NewGotomicMap() HashMap { 24 | return GotomicMap{gotomic.NewHash()} 25 | } 26 | -------------------------------------------------------------------------------- /jchiu/benchhash/results/benchhash.q10.txt: -------------------------------------------------------------------------------- 1 | n=100000 q=10 2 | BenchmarkRead/GoMap-5 300 5589791 ns/op 3 | BenchmarkRead/GotomicMap-5 300 4976837 ns/op 4 | BenchmarkRead/ShardedGoMap4-5 300 4499172 ns/op 5 | BenchmarkRead/ShardedGoMap8-5 500 3717594 ns/op 6 | BenchmarkRead/ShardedGoMap16-5 500 3267158 ns/op 7 | BenchmarkRead/ShardedGoMap32-5 500 3015675 ns/op 8 | BenchmarkWrite/GoMap-5 100 19453609 ns/op 9 | BenchmarkWrite/GotomicMap-5 20 61459103 ns/op 10 | BenchmarkWrite/ShardedGoMap4-5 100 18971213 ns/op 11 | BenchmarkWrite/ShardedGoMap8-5 100 13347944 ns/op 12 | BenchmarkWrite/ShardedGoMap16-5 200 9870744 ns/op 13 | BenchmarkWrite/ShardedGoMap32-5 200 8182650 ns/op 14 | BenchmarkReadWrite1/GoMap-5 50 24472100 ns/op 15 | BenchmarkReadWrite1/GotomicMap-5 20 60752247 ns/op 16 | BenchmarkReadWrite1/ShardedGoMap4-5 100 19835708 ns/op 17 | BenchmarkReadWrite1/ShardedGoMap8-5 100 14622956 ns/op 18 | BenchmarkReadWrite1/ShardedGoMap16-5 100 11507319 ns/op 19 | BenchmarkReadWrite1/ShardedGoMap32-5 200 9596301 ns/op 20 | BenchmarkReadWrite3/GoMap-5 50 24141725 ns/op 21 | BenchmarkReadWrite3/GotomicMap-5 30 47403548 ns/op 22 | BenchmarkReadWrite3/ShardedGoMap4-5 100 19279487 ns/op 23 | BenchmarkReadWrite3/ShardedGoMap8-5 100 14832221 ns/op 24 | BenchmarkReadWrite3/ShardedGoMap16-5 100 11878726 ns/op 25 | BenchmarkReadWrite3/ShardedGoMap32-5 200 9642627 ns/op 26 | BenchmarkReadWrite5/GoMap-5 50 21987261 ns/op 27 | BenchmarkReadWrite5/GotomicMap-5 30 33544144 ns/op 28 | BenchmarkReadWrite5/ShardedGoMap4-5 100 17242246 ns/op 29 | BenchmarkReadWrite5/ShardedGoMap8-5 100 13764286 ns/op 30 | BenchmarkReadWrite5/ShardedGoMap16-5 100 10918797 ns/op 31 | BenchmarkReadWrite5/ShardedGoMap32-5 200 8873411 ns/op 32 | BenchmarkReadWrite7/GoMap-5 50 21416701 ns/op 33 | BenchmarkReadWrite7/GotomicMap-5 50 23854505 ns/op 34 | BenchmarkReadWrite7/ShardedGoMap4-5 100 15887691 ns/op 35 | BenchmarkReadWrite7/ShardedGoMap8-5 100 12953256 ns/op 36 | BenchmarkReadWrite7/ShardedGoMap16-5 100 10646643 ns/op 37 | BenchmarkReadWrite7/ShardedGoMap32-5 200 8780141 ns/op 38 | BenchmarkReadWrite9/GoMap-5 100 17650610 ns/op 39 | BenchmarkReadWrite9/GotomicMap-5 100 14876646 ns/op 40 | BenchmarkReadWrite9/ShardedGoMap4-5 100 12731669 ns/op 41 | BenchmarkReadWrite9/ShardedGoMap8-5 100 
10399190 ns/op 42 | BenchmarkReadWrite9/ShardedGoMap16-5 200 8682727 ns/op 43 | BenchmarkReadWrite9/ShardedGoMap32-5 200 7270254 ns/op 44 | PASS 45 | ok github.com/jchiu0/experimental/benchhash 73.961s 46 | -------------------------------------------------------------------------------- /jchiu/benchhash/results/benchhash.q100.txt: -------------------------------------------------------------------------------- 1 | n=100000 q=100 2 | BenchmarkRead/GoMap-5 300 5617427 ns/op 3 | BenchmarkRead/GotomicMap-5 300 4903716 ns/op 4 | BenchmarkRead/ShardedGoMap4-5 300 4358324 ns/op 5 | BenchmarkRead/ShardedGoMap8-5 500 3614421 ns/op 6 | BenchmarkRead/ShardedGoMap16-5 500 3062921 ns/op 7 | BenchmarkRead/ShardedGoMap32-5 500 2754674 ns/op 8 | BenchmarkWrite/GoMap-5 50 21267048 ns/op 9 | BenchmarkWrite/GotomicMap-5 20 65863052 ns/op 10 | BenchmarkWrite/ShardedGoMap4-5 100 19057885 ns/op 11 | BenchmarkWrite/ShardedGoMap8-5 100 13041923 ns/op 12 | BenchmarkWrite/ShardedGoMap16-5 100 10296803 ns/op 13 | BenchmarkWrite/ShardedGoMap32-5 200 8747596 ns/op 14 | BenchmarkReadWrite1/GoMap-5 50 21160101 ns/op 15 | BenchmarkReadWrite1/GotomicMap-5 20 59116730 ns/op 16 | BenchmarkReadWrite1/ShardedGoMap4-5 100 19020403 ns/op 17 | BenchmarkReadWrite1/ShardedGoMap8-5 100 13358916 ns/op 18 | BenchmarkReadWrite1/ShardedGoMap16-5 100 10563749 ns/op 19 | BenchmarkReadWrite1/ShardedGoMap32-5 200 8633253 ns/op 20 | BenchmarkReadWrite3/GoMap-5 100 19662798 ns/op 21 | BenchmarkReadWrite3/GotomicMap-5 30 48625038 ns/op 22 | BenchmarkReadWrite3/ShardedGoMap4-5 100 17696082 ns/op 23 | BenchmarkReadWrite3/ShardedGoMap8-5 100 12801071 ns/op 24 | BenchmarkReadWrite3/ShardedGoMap16-5 200 9722693 ns/op 25 | BenchmarkReadWrite3/ShardedGoMap32-5 200 8807224 ns/op 26 | BenchmarkReadWrite5/GoMap-5 100 17979407 ns/op 27 | BenchmarkReadWrite5/GotomicMap-5 30 34629919 ns/op 28 | BenchmarkReadWrite5/ShardedGoMap4-5 100 15228276 ns/op 29 | BenchmarkReadWrite5/ShardedGoMap8-5 100 11211539 ns/op 30 | BenchmarkReadWrite5/ShardedGoMap16-5 200 8611909 ns/op 31 | BenchmarkReadWrite5/ShardedGoMap32-5 200 6795434 ns/op 32 | BenchmarkReadWrite7/GoMap-5 100 14393297 ns/op 33 | BenchmarkReadWrite7/GotomicMap-5 50 21908303 ns/op 34 | BenchmarkReadWrite7/ShardedGoMap4-5 100 13504974 ns/op 35 | BenchmarkReadWrite7/ShardedGoMap8-5 200 10031043 ns/op 36 | BenchmarkReadWrite7/ShardedGoMap16-5 200 7489030 ns/op 37 | BenchmarkReadWrite7/ShardedGoMap32-5 200 6185025 ns/op 38 | BenchmarkReadWrite9/GoMap-5 100 11891825 ns/op 39 | BenchmarkReadWrite9/GotomicMap-5 100 14139431 ns/op 40 | BenchmarkReadWrite9/ShardedGoMap4-5 200 10041826 ns/op 41 | BenchmarkReadWrite9/ShardedGoMap8-5 200 7342043 ns/op 42 | BenchmarkReadWrite9/ShardedGoMap16-5 300 5475161 ns/op 43 | BenchmarkReadWrite9/ShardedGoMap32-5 300 4474781 ns/op 44 | PASS 45 | ok github.com/jchiu0/experimental/benchhash 77.559s 46 | -------------------------------------------------------------------------------- /jchiu/benchhash/results/benchhash.q1000.txt: -------------------------------------------------------------------------------- 1 | n=100000 q=1000 2 | BenchmarkRead/GoMap-5 300 5707945 ns/op 3 | BenchmarkRead/GotomicMap-5 200 7660483 ns/op 4 | BenchmarkRead/ShardedGoMap4-5 300 4334152 ns/op 5 | BenchmarkRead/ShardedGoMap8-5 500 3650517 ns/op 6 | BenchmarkRead/ShardedGoMap16-5 500 3092442 ns/op 7 | BenchmarkRead/ShardedGoMap32-5 500 2798083 ns/op 8 | BenchmarkWrite/GoMap-5 100 19248951 ns/op 9 | BenchmarkWrite/GotomicMap-5 20 68380840 ns/op 10 | BenchmarkWrite/ShardedGoMap4-5 50 22946415 ns/op 11 | 
BenchmarkWrite/ShardedGoMap8-5 100 16433576 ns/op 12 | BenchmarkWrite/ShardedGoMap16-5 100 12968701 ns/op 13 | BenchmarkWrite/ShardedGoMap32-5 200 9640420 ns/op 14 | BenchmarkReadWrite1/GoMap-5 100 18141103 ns/op 15 | BenchmarkReadWrite1/GotomicMap-5 20 60256483 ns/op 16 | BenchmarkReadWrite1/ShardedGoMap4-5 100 19090418 ns/op 17 | BenchmarkReadWrite1/ShardedGoMap8-5 100 14518800 ns/op 18 | BenchmarkReadWrite1/ShardedGoMap16-5 100 12004291 ns/op 19 | BenchmarkReadWrite1/ShardedGoMap32-5 200 9683979 ns/op 20 | BenchmarkReadWrite3/GoMap-5 100 16905139 ns/op 21 | BenchmarkReadWrite3/GotomicMap-5 20 56369865 ns/op 22 | BenchmarkReadWrite3/ShardedGoMap4-5 100 16938464 ns/op 23 | BenchmarkReadWrite3/ShardedGoMap8-5 100 13184556 ns/op 24 | BenchmarkReadWrite3/ShardedGoMap16-5 100 11029215 ns/op 25 | BenchmarkReadWrite3/ShardedGoMap32-5 200 9105477 ns/op 26 | BenchmarkReadWrite5/GoMap-5 100 14234250 ns/op 27 | BenchmarkReadWrite5/GotomicMap-5 50 39185591 ns/op 28 | BenchmarkReadWrite5/ShardedGoMap4-5 100 14866024 ns/op 29 | BenchmarkReadWrite5/ShardedGoMap8-5 100 11986903 ns/op 30 | BenchmarkReadWrite5/ShardedGoMap16-5 200 9779242 ns/op 31 | BenchmarkReadWrite5/ShardedGoMap32-5 200 7886684 ns/op 32 | BenchmarkReadWrite7/GoMap-5 100 13249330 ns/op 33 | BenchmarkReadWrite7/GotomicMap-5 50 25227074 ns/op 34 | BenchmarkReadWrite7/ShardedGoMap4-5 100 12934799 ns/op 35 | BenchmarkReadWrite7/ShardedGoMap8-5 100 10830646 ns/op 36 | BenchmarkReadWrite7/ShardedGoMap16-5 200 8841664 ns/op 37 | BenchmarkReadWrite7/ShardedGoMap32-5 200 6996564 ns/op 38 | BenchmarkReadWrite9/GoMap-5 100 10887325 ns/op 39 | BenchmarkReadWrite9/GotomicMap-5 100 17298887 ns/op 40 | BenchmarkReadWrite9/ShardedGoMap4-5 200 9841916 ns/op 41 | BenchmarkReadWrite9/ShardedGoMap8-5 200 7776165 ns/op 42 | BenchmarkReadWrite9/ShardedGoMap16-5 300 5729450 ns/op 43 | BenchmarkReadWrite9/ShardedGoMap32-5 300 4559855 ns/op 44 | PASS 45 | ok github.com/jchiu0/experimental/benchhash 77.788s 46 | -------------------------------------------------------------------------------- /jchiu/benchhash/run.sh: -------------------------------------------------------------------------------- 1 | set -e 2 | 3 | NUMCPU=5 4 | 5 | mkdir -p results 6 | for q in 10 100 1000; do 7 | go test -cpu $NUMCPU -benchn 100000 -benchq $q -bench=. > results/benchhash.q$q.txt 8 | done 9 | 10 | -------------------------------------------------------------------------------- /jchiu/benchhash2/README.md: -------------------------------------------------------------------------------- 1 | Note that `ShardedGoMap64` means we have 64 shards of standard Go maps. 2 | 3 | Here is what each setup is trying to measure. 4 | 5 | * Control2: Two `rand.Uint32` calls. 6 | * Control5: Five `rand.Uint32` calls. 7 | * BenchmarkRead: Concurrent read. Subtract timings by Control. 8 | * BenchmarkWrite: Concurrent write. Subtract timings by Control2. 9 | * BenchmarkReadWrite: Concurrent read/write. Probability of reading is readFrac. 10 | 11 | 12 | To run the test, do 13 | 14 | ``` 15 | go test -timeout 100m -benchtime 5s -bench=. | tee results.txt 16 | ``` 17 | 18 | # Sample results 19 | 20 | In theory, you should subtract the time taken to compute the random integers. 21 | 22 | For example, for `BenchmarkRead`, GoMap takes 131-59.4ns while GotomicMap takes 23 | 163-59.4ns, which is 1.45X slower. (2X slower means it takes twice as long.) 24 | 25 | For example, for `BenchmarkWrite`, GoMap takes 320-116ns while GotomicMap takes 26 | 867-116ns, which is 3.68X slower. 
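Spelling out that arithmetic: read slowdown ≈ (163 − 59.4) / (131 − 59.4) ≈ 1.45, and write slowdown ≈ (867 − 116) / (320 − 116) = 751 / 204 ≈ 3.68.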
27 | 28 | ``` 29 | BenchmarkReadControl-8 30000000 59.4 ns/op 30 | BenchmarkWriteControl-8 20000000 116 ns/op 31 | BenchmarkRead/GoMap-8 10000000 131 ns/op 32 | BenchmarkRead/GotomicMap-8 10000000 163 ns/op 33 | BenchmarkRead/ShardedGoMap8-8 10000000 125 ns/op 34 | BenchmarkRead/ShardedGoMap16-8 10000000 126 ns/op 35 | BenchmarkRead/ShardedGoMap32-8 10000000 124 ns/op 36 | BenchmarkRead/ShardedGoMap64-8 10000000 124 ns/op 37 | BenchmarkWrite/GoMap-8 5000000 320 ns/op 38 | BenchmarkWrite/GotomicMap-8 2000000 867 ns/op 39 | BenchmarkWrite/ShardedGoMap8-8 5000000 300 ns/op 40 | BenchmarkWrite/ShardedGoMap16-8 5000000 291 ns/op 41 | BenchmarkWrite/ShardedGoMap32-8 5000000 286 ns/op 42 | BenchmarkWrite/ShardedGoMap64-8 5000000 283 ns/op 43 | ``` 44 | 45 | # Plot 46 | 47 | Consider BenchmarkReadWrite. Vary `readFrac` and plot the time per op (subtracted 48 | by control value). 49 | 50 | ![plot](plot.png "Plot for BenchmarkReadWrite") 51 | -------------------------------------------------------------------------------- /jchiu/benchhash2/benchhash_test.go: -------------------------------------------------------------------------------- 1 | package benchhash2 2 | 3 | import ( 4 | "math/rand" 5 | "strconv" 6 | "testing" 7 | ) 8 | 9 | type experiment struct { 10 | label string 11 | newFunc func() HashMap 12 | } 13 | 14 | var ( 15 | experiments = []experiment{ 16 | experiment{"GoMap", NewGoMap}, 17 | experiment{"GotomicMap", NewGotomicMap}, 18 | experiment{"ShardedGoMap16", NewShardedGoMap8}, 19 | experiment{"ShardedGoMap64", NewShardedGoMap64}, 20 | } 21 | ) 22 | 23 | func BenchmarkReadControl(b *testing.B) { 24 | b.RunParallel(func(pb *testing.PB) { 25 | for pb.Next() { 26 | rand.Uint32() 27 | } 28 | }) 29 | } 30 | 31 | func BenchmarkWriteControl(b *testing.B) { 32 | b.RunParallel(func(pb *testing.PB) { 33 | for pb.Next() { 34 | rand.Uint32() 35 | rand.Uint32() 36 | } 37 | }) 38 | } 39 | 40 | func BenchmarkReadWriteControl(b *testing.B) { 41 | for i := 0; i <= 20; i++ { 42 | readFrac := float32(i) / 20 43 | b.Run(strconv.Itoa(i), func(b *testing.B) { 44 | b.RunParallel(func(pb *testing.PB) { 45 | for pb.Next() { 46 | if rand.Float32() < readFrac { 47 | rand.Uint32() 48 | } else { 49 | rand.Uint32() 50 | rand.Uint32() 51 | } 52 | } 53 | }) 54 | }) 55 | } 56 | } 57 | 58 | func BenchmarkRead(b *testing.B) { 59 | for _, p := range experiments { 60 | b.Run(p.label, func(b *testing.B) { 61 | h := p.newFunc() 62 | b.StartTimer() 63 | b.RunParallel(func(pb *testing.PB) { 64 | for pb.Next() { 65 | h.Get(rand.Uint32()) 66 | } 67 | }) 68 | }) 69 | } 70 | } 71 | 72 | func BenchmarkWrite(b *testing.B) { 73 | for _, p := range experiments { 74 | b.Run(p.label, func(b *testing.B) { 75 | h := p.newFunc() 76 | b.StartTimer() 77 | b.RunParallel(func(pb *testing.PB) { 78 | for pb.Next() { 79 | h.Put(rand.Uint32(), rand.Uint32()) 80 | } 81 | }) 82 | }) 83 | } 84 | } 85 | 86 | // readFrac is fraction of operations that are reads. 
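// Sub-benchmark i uses readFrac = i/20, mirroring BenchmarkReadWriteControl,
// so the control timing at the same i can be subtracted from each result.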
87 | func BenchmarkReadWrite(b *testing.B) { 88 | for i := 0; i <= 20; i++ { 89 | readFrac := float32(i) / 20.0 90 | b.Run(strconv.Itoa(i), func(b *testing.B) { 91 | for _, p := range experiments { 92 | b.Run(p.label, func(b *testing.B) { 93 | h := p.newFunc() 94 | b.StartTimer() 95 | b.RunParallel(func(pb *testing.PB) { 96 | for pb.Next() { 97 | if rand.Float32() < readFrac { 98 | h.Get(rand.Uint32()) 99 | } else { 100 | h.Put(rand.Uint32(), rand.Uint32()) 101 | } 102 | } 103 | }) 104 | }) 105 | } 106 | }) 107 | } 108 | } 109 | -------------------------------------------------------------------------------- /jchiu/benchhash2/gomap.go: -------------------------------------------------------------------------------- 1 | package benchhash2 2 | 3 | import ( 4 | "sync" 5 | ) 6 | 7 | type HashMap interface { 8 | Get(key uint32) (uint32, bool) 9 | Put(key, val uint32) 10 | } 11 | 12 | type GoMap struct { 13 | sync.RWMutex 14 | m map[uint32]uint32 15 | } 16 | 17 | func (s *GoMap) Get(key uint32) (uint32, bool) { 18 | s.RLock() 19 | defer s.RUnlock() 20 | val, found := s.m[key] 21 | return val, found 22 | } 23 | 24 | func (s *GoMap) Put(key, val uint32) { 25 | s.Lock() 26 | defer s.Unlock() 27 | s.m[key] = val 28 | } 29 | 30 | func NewGoMap() HashMap { 31 | return &GoMap{ 32 | m: make(map[uint32]uint32), 33 | } 34 | } 35 | 36 | type ShardedGoMap struct { 37 | numShards int 38 | m []HashMap 39 | } 40 | 41 | func (s *ShardedGoMap) Get(key uint32) (uint32, bool) { 42 | shard := key % uint32(s.numShards) 43 | return s.m[shard].Get(key) 44 | } 45 | 46 | func (s *ShardedGoMap) Put(key, val uint32) { 47 | shard := key % uint32(s.numShards) 48 | s.m[shard].Put(key, val) 49 | } 50 | 51 | func NewShardedGoMap(numShards int) HashMap { 52 | r := &ShardedGoMap{ 53 | numShards: numShards, 54 | m: make([]HashMap, numShards), 55 | } 56 | for i := 0; i < numShards; i++ { 57 | r.m[i] = NewGoMap() 58 | } 59 | return r 60 | } 61 | 62 | func NewShardedGoMap4() HashMap { return NewShardedGoMap(4) } 63 | func NewShardedGoMap8() HashMap { return NewShardedGoMap(8) } 64 | func NewShardedGoMap16() HashMap { return NewShardedGoMap(16) } 65 | func NewShardedGoMap32() HashMap { return NewShardedGoMap(32) } 66 | func NewShardedGoMap64() HashMap { return NewShardedGoMap(64) } 67 | -------------------------------------------------------------------------------- /jchiu/benchhash2/gotomicmap.go: -------------------------------------------------------------------------------- 1 | package benchhash2 2 | 3 | import ( 4 | "github.com/zond/gotomic" 5 | ) 6 | 7 | type GotomicMap struct { 8 | h *gotomic.Hash 9 | } 10 | 11 | func (s GotomicMap) Get(key uint32) (uint32, bool) { 12 | val, found := s.h.Get(gotomic.IntKey(key)) 13 | if val == nil { 14 | return 0, false 15 | } 16 | return uint32(val.(gotomic.IntKey)), found 17 | } 18 | 19 | func (s GotomicMap) Put(key, val uint32) { 20 | s.h.Put(gotomic.IntKey(key), gotomic.IntKey(val)) 21 | } 22 | 23 | func NewGotomicMap() HashMap { 24 | return GotomicMap{gotomic.NewHash()} 25 | } 26 | -------------------------------------------------------------------------------- /jchiu/benchhash2/plot.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/dgraph-io/experiments/9841dc289feb4454304a0d45a05c0555acfb432f/jchiu/benchhash2/plot.png -------------------------------------------------------------------------------- /jchiu/benchhash2/plot.py: -------------------------------------------------------------------------------- 1 | import 
matplotlib.pyplot as plt 2 | import numpy as np 3 | 4 | with open('results.txt') as f: 5 | a = f.read().splitlines() 6 | 7 | a = [s.split() for s in a] 8 | a = {s[0].split('-')[0]: s for s in a} 9 | 10 | n = 20 11 | 12 | for mapType in ('GoMap', 'GotomicMap', 'ShardedGoMap16', 'ShardedGoMap64'): 13 | results = [] 14 | for i in range(n + 1): 15 | key = 'BenchmarkReadWrite/%d/%s' % (i, mapType) 16 | key2 = 'BenchmarkReadWriteControl/%d' % i 17 | assert key in a 18 | assert key2 in a 19 | v = float(a[key][2]) 20 | v2 = float(a[key2][2]) 21 | results.append(v - v2) 22 | 23 | x = np.linspace(0, 1, n + 1) 24 | plt.plot(x, results, label=mapType) 25 | plt.ylabel('ns/op') 26 | plt.xlabel('readFrac') 27 | 28 | plt.legend() 29 | plt.savefig('plot.png') -------------------------------------------------------------------------------- /jchiu/benchhash2/results.txt: -------------------------------------------------------------------------------- 1 | BenchmarkReadControl-8 100000000 57.9 ns/op 2 | BenchmarkWriteControl-8 100000000 111 ns/op 3 | BenchmarkReadWriteControl/0-8 50000000 172 ns/op 4 | BenchmarkReadWriteControl/1-8 50000000 169 ns/op 5 | BenchmarkReadWriteControl/2-8 50000000 166 ns/op 6 | BenchmarkReadWriteControl/3-8 50000000 165 ns/op 7 | BenchmarkReadWriteControl/4-8 50000000 163 ns/op 8 | BenchmarkReadWriteControl/5-8 50000000 160 ns/op 9 | BenchmarkReadWriteControl/6-8 50000000 157 ns/op 10 | BenchmarkReadWriteControl/7-8 50000000 155 ns/op 11 | BenchmarkReadWriteControl/8-8 50000000 153 ns/op 12 | BenchmarkReadWriteControl/9-8 50000000 150 ns/op 13 | BenchmarkReadWriteControl/10-8 50000000 149 ns/op 14 | BenchmarkReadWriteControl/11-8 50000000 146 ns/op 15 | BenchmarkReadWriteControl/12-8 50000000 142 ns/op 16 | BenchmarkReadWriteControl/13-8 50000000 139 ns/op 17 | BenchmarkReadWriteControl/14-8 50000000 137 ns/op 18 | BenchmarkReadWriteControl/15-8 50000000 132 ns/op 19 | BenchmarkReadWriteControl/16-8 50000000 129 ns/op 20 | BenchmarkReadWriteControl/17-8 50000000 126 ns/op 21 | BenchmarkReadWriteControl/18-8 50000000 123 ns/op 22 | BenchmarkReadWriteControl/19-8 50000000 120 ns/op 23 | BenchmarkReadWriteControl/20-8 100000000 115 ns/op 24 | BenchmarkRead/GoMap-8 50000000 129 ns/op 25 | BenchmarkRead/GotomicMap-8 50000000 161 ns/op 26 | BenchmarkRead/ShardedGoMap16-8 50000000 125 ns/op 27 | BenchmarkRead/ShardedGoMap64-8 50000000 122 ns/op 28 | BenchmarkWrite/GoMap-8 20000000 328 ns/op 29 | BenchmarkWrite/GotomicMap-8 10000000 1126 ns/op 30 | BenchmarkWrite/ShardedGoMap16-8 20000000 303 ns/op 31 | BenchmarkWrite/ShardedGoMap64-8 20000000 286 ns/op 32 | BenchmarkReadWrite/0/GoMap-8 20000000 358 ns/op 33 | BenchmarkReadWrite/0/GotomicMap-8 10000000 1240 ns/op 34 | BenchmarkReadWrite/0/ShardedGoMap16-8 20000000 379 ns/op 35 | BenchmarkReadWrite/0/ShardedGoMap64-8 20000000 364 ns/op 36 | BenchmarkReadWrite/1/GoMap-8 20000000 352 ns/op 37 | BenchmarkReadWrite/1/GotomicMap-8 10000000 1121 ns/op 38 | BenchmarkReadWrite/1/ShardedGoMap16-8 20000000 402 ns/op 39 | BenchmarkReadWrite/1/ShardedGoMap64-8 20000000 360 ns/op 40 | BenchmarkReadWrite/2/GoMap-8 20000000 343 ns/op 41 | BenchmarkReadWrite/2/GotomicMap-8 10000000 1033 ns/op 42 | BenchmarkReadWrite/2/ShardedGoMap16-8 20000000 370 ns/op 43 | BenchmarkReadWrite/2/ShardedGoMap64-8 20000000 354 ns/op 44 | BenchmarkReadWrite/3/GoMap-8 30000000 318 ns/op 45 | BenchmarkReadWrite/3/GotomicMap-8 10000000 1090 ns/op 46 | BenchmarkReadWrite/3/ShardedGoMap16-8 20000000 369 ns/op 47 | BenchmarkReadWrite/3/ShardedGoMap64-8 20000000 351 ns/op 48 | 
BenchmarkReadWrite/4/GoMap-8 30000000 321 ns/op 49 | BenchmarkReadWrite/4/GotomicMap-8 10000000 1003 ns/op 50 | BenchmarkReadWrite/4/ShardedGoMap16-8 20000000 365 ns/op 51 | BenchmarkReadWrite/4/ShardedGoMap64-8 20000000 347 ns/op 52 | BenchmarkReadWrite/5/GoMap-8 30000000 322 ns/op 53 | BenchmarkReadWrite/5/GotomicMap-8 10000000 935 ns/op 54 | BenchmarkReadWrite/5/ShardedGoMap16-8 20000000 358 ns/op 55 | BenchmarkReadWrite/5/ShardedGoMap64-8 20000000 342 ns/op 56 | BenchmarkReadWrite/6/GoMap-8 30000000 317 ns/op 57 | BenchmarkReadWrite/6/GotomicMap-8 10000000 930 ns/op 58 | BenchmarkReadWrite/6/ShardedGoMap16-8 20000000 347 ns/op 59 | BenchmarkReadWrite/6/ShardedGoMap64-8 20000000 333 ns/op 60 | BenchmarkReadWrite/7/GoMap-8 30000000 317 ns/op 61 | BenchmarkReadWrite/7/GotomicMap-8 10000000 851 ns/op 62 | BenchmarkReadWrite/7/ShardedGoMap16-8 20000000 338 ns/op 63 | BenchmarkReadWrite/7/ShardedGoMap64-8 20000000 326 ns/op 64 | BenchmarkReadWrite/8/GoMap-8 30000000 311 ns/op 65 | BenchmarkReadWrite/8/GotomicMap-8 10000000 853 ns/op 66 | BenchmarkReadWrite/8/ShardedGoMap16-8 20000000 334 ns/op 67 | BenchmarkReadWrite/8/ShardedGoMap64-8 20000000 321 ns/op 68 | BenchmarkReadWrite/9/GoMap-8 30000000 324 ns/op 69 | BenchmarkReadWrite/9/GotomicMap-8 10000000 851 ns/op 70 | BenchmarkReadWrite/9/ShardedGoMap16-8 20000000 332 ns/op 71 | BenchmarkReadWrite/9/ShardedGoMap64-8 20000000 318 ns/op 72 | BenchmarkReadWrite/10/GoMap-8 30000000 324 ns/op 73 | BenchmarkReadWrite/10/GotomicMap-8 10000000 856 ns/op 74 | BenchmarkReadWrite/10/ShardedGoMap16-8 20000000 325 ns/op 75 | BenchmarkReadWrite/10/ShardedGoMap64-8 20000000 314 ns/op 76 | BenchmarkReadWrite/11/GoMap-8 30000000 285 ns/op 77 | BenchmarkReadWrite/11/GotomicMap-8 10000000 760 ns/op 78 | BenchmarkReadWrite/11/ShardedGoMap16-8 20000000 323 ns/op 79 | BenchmarkReadWrite/11/ShardedGoMap64-8 20000000 310 ns/op 80 | BenchmarkReadWrite/12/GoMap-8 30000000 290 ns/op 81 | BenchmarkReadWrite/12/GotomicMap-8 10000000 660 ns/op 82 | BenchmarkReadWrite/12/ShardedGoMap16-8 30000000 310 ns/op 83 | BenchmarkReadWrite/12/ShardedGoMap64-8 30000000 300 ns/op 84 | BenchmarkReadWrite/13/GoMap-8 30000000 273 ns/op 85 | BenchmarkReadWrite/13/GotomicMap-8 10000000 646 ns/op 86 | BenchmarkReadWrite/13/ShardedGoMap16-8 30000000 306 ns/op 87 | BenchmarkReadWrite/13/ShardedGoMap64-8 20000000 296 ns/op 88 | BenchmarkReadWrite/14/GoMap-8 30000000 282 ns/op 89 | BenchmarkReadWrite/14/GotomicMap-8 10000000 609 ns/op 90 | BenchmarkReadWrite/14/ShardedGoMap16-8 30000000 301 ns/op 91 | BenchmarkReadWrite/14/ShardedGoMap64-8 20000000 287 ns/op 92 | BenchmarkReadWrite/15/GoMap-8 30000000 274 ns/op 93 | BenchmarkReadWrite/15/GotomicMap-8 20000000 643 ns/op 94 | BenchmarkReadWrite/15/ShardedGoMap16-8 30000000 297 ns/op 95 | BenchmarkReadWrite/15/ShardedGoMap64-8 30000000 285 ns/op 96 | BenchmarkReadWrite/16/GoMap-8 50000000 258 ns/op 97 | BenchmarkReadWrite/16/GotomicMap-8 20000000 569 ns/op 98 | BenchmarkReadWrite/16/ShardedGoMap16-8 30000000 285 ns/op 99 | BenchmarkReadWrite/16/ShardedGoMap64-8 30000000 274 ns/op 100 | BenchmarkReadWrite/17/GoMap-8 50000000 267 ns/op 101 | BenchmarkReadWrite/17/GotomicMap-8 20000000 558 ns/op 102 | BenchmarkReadWrite/17/ShardedGoMap16-8 30000000 278 ns/op 103 | BenchmarkReadWrite/17/ShardedGoMap64-8 30000000 268 ns/op 104 | BenchmarkReadWrite/18/GoMap-8 50000000 243 ns/op 105 | BenchmarkReadWrite/18/GotomicMap-8 20000000 494 ns/op 106 | BenchmarkReadWrite/18/ShardedGoMap16-8 30000000 264 ns/op 107 | BenchmarkReadWrite/18/ShardedGoMap64-8 30000000 
258 ns/op 108 | BenchmarkReadWrite/19/GoMap-8 50000000 245 ns/op 109 | BenchmarkReadWrite/19/GotomicMap-8 20000000 454 ns/op 110 | BenchmarkReadWrite/19/ShardedGoMap16-8 30000000 250 ns/op 111 | BenchmarkReadWrite/19/ShardedGoMap64-8 30000000 248 ns/op 112 | BenchmarkReadWrite/20/GoMap-8 50000000 200 ns/op 113 | BenchmarkReadWrite/20/GotomicMap-8 20000000 429 ns/op 114 | BenchmarkReadWrite/20/ShardedGoMap16-8 30000000 203 ns/op 115 | BenchmarkReadWrite/20/ShardedGoMap64-8 30000000 205 ns/op 116 | PASS 117 | ok github.com/dgraph-io/experiments/jchiu/benchhash2 1004.815s 118 | -------------------------------------------------------------------------------- /jchiu/chanqueue/README.md: -------------------------------------------------------------------------------- 1 | We have seen numerous articles complaining about how slow channels are. However, 2 | in the following benchmarks, channels seem to do pretty well. 3 | 4 | We will test four different implementations: channels, simple array, circular 5 | buffer and finally Gringo, which is a lock-free ring buffer. You can get it by: 6 | 7 | ``` 8 | go get github.com/textnode/gringo/... 9 | ``` 10 | 11 | Here are the results. 12 | 13 | ``` 14 | BenchmarkChan-8 20000000 67.0 ns/op 15 | BenchmarkQueue-8 10000000 172 ns/op 16 | BenchmarkCQueue-8 5000000 307 ns/op 17 | BenchmarkGringo-8 20000000 81.8 ns/op 18 | ``` 19 | 20 | The tests are constructed such that there is one goroutine pushing stuff into a 21 | queue or channel. The main thread keeps popping until the queue is empty. 22 | 23 | Channel seems to run faster than the other implementations. This might change if 24 | we have more producers or more consumers. -------------------------------------------------------------------------------- /jchiu/chanqueue/chanqueue_test.go: -------------------------------------------------------------------------------- 1 | package chanqueue 2 | 3 | import ( 4 | "sync" 5 | "testing" 6 | 7 | "github.com/textnode/gringo" 8 | ) 9 | 10 | func BenchmarkChan(b *testing.B) { 11 | c := make(chan struct{}, 10) 12 | b.StartTimer() 13 | go func() { 14 | for i := 0; i < b.N; i++ { 15 | c <- struct{}{} 16 | } 17 | close(c) 18 | }() 19 | for _ = range c { 20 | } 21 | } 22 | 23 | func BenchmarkQueue(b *testing.B) { 24 | q := NewQueue() 25 | var wg sync.WaitGroup 26 | wg.Add(1) 27 | b.StartTimer() 28 | go func() { 29 | defer wg.Done() 30 | for i := 0; i < b.N; i++ { 31 | q.Push() 32 | } 33 | }() 34 | for i := 0; i < b.N; i++ { 35 | q.Pop() 36 | } 37 | wg.Wait() 38 | } 39 | 40 | func BenchmarkCQueue(b *testing.B) { 41 | q := NewCQueue() 42 | var wg sync.WaitGroup 43 | wg.Add(1) 44 | b.StartTimer() 45 | go func() { 46 | defer wg.Done() 47 | for i := 0; i < b.N; i++ { 48 | q.Push() 49 | } 50 | }() 51 | for i := 0; i < b.N; i++ { 52 | q.Pop() 53 | } 54 | wg.Wait() 55 | } 56 | 57 | var payload = *gringo.NewPayload(1) 58 | 59 | func BenchmarkGringo(b *testing.B) { 60 | q := gringo.NewGringo() 61 | var wg sync.WaitGroup 62 | wg.Add(1) 63 | b.StartTimer() 64 | go func() { 65 | defer wg.Done() 66 | for i := 0; i < b.N; i++ { 67 | q.Write(payload) 68 | } 69 | }() 70 | for i := 0; i < b.N; i++ { 71 | q.Read() 72 | } 73 | wg.Wait() 74 | } 75 | -------------------------------------------------------------------------------- /jchiu/chanqueue/cqueue.go: -------------------------------------------------------------------------------- 1 | package chanqueue 2 | 3 | import ( 4 | "sync" 5 | ) 6 | 7 | type CQueue struct { 8 | sync.RWMutex 9 | data []struct{} // Size should remain constant 
throughout. 10 | out int // Where to pop. 11 | in int // Where to push. 12 | done bool 13 | } 14 | 15 | func NewCQueue() *CQueue { 16 | return &CQueue{ 17 | data: make([]struct{}, 1000), 18 | } 19 | } 20 | 21 | // Done marks queue as done. 22 | func (q *CQueue) Done() { 23 | q.Lock() 24 | defer q.Unlock() 25 | q.done = true 26 | } 27 | 28 | // Done checks if queue is done. 29 | func (q *CQueue) IsDone() bool { 30 | q.RLock() 31 | defer q.RUnlock() 32 | return q.done && (q.in == q.out) 33 | } 34 | 35 | func (q *CQueue) tryPush() bool { 36 | q.Lock() 37 | defer q.Unlock() 38 | newIn := (q.in + 1) % len(q.data) 39 | if newIn == q.out { 40 | return false 41 | } 42 | // q.data[q.in] = new element. 43 | q.in = newIn 44 | return true 45 | } 46 | 47 | func (q *CQueue) Push() { 48 | for !q.tryPush() { 49 | } 50 | } 51 | 52 | func (q *CQueue) IsEmpty() bool { 53 | q.RLock() 54 | defer q.RUnlock() 55 | return q.in == q.out 56 | } 57 | 58 | func (q *CQueue) tryPop() bool { 59 | q.Lock() 60 | defer q.Unlock() 61 | if q.in == q.out { 62 | // Queue is empty. 63 | return false 64 | } 65 | // Element to return is q.data[q.out]. 66 | q.out = (q.out + 1) % len(q.data) 67 | return true 68 | } 69 | 70 | // Pop returns an item. 71 | func (q *CQueue) Pop() { 72 | for !q.tryPop() { 73 | } 74 | } 75 | -------------------------------------------------------------------------------- /jchiu/chanqueue/queue.go: -------------------------------------------------------------------------------- 1 | package chanqueue 2 | 3 | import ( 4 | "sync" 5 | ) 6 | 7 | type Queue struct { 8 | sync.RWMutex 9 | data []struct{} // Size should remain constant throughout. 10 | idx int // Where to pop. 11 | done bool 12 | } 13 | 14 | func NewQueue() *Queue { 15 | return &Queue{ 16 | data: make([]struct{}, 0, 1000), 17 | } 18 | } 19 | 20 | // Done marks queue as done. 21 | func (q *Queue) Done() { 22 | q.Lock() 23 | defer q.Unlock() 24 | q.done = true 25 | } 26 | 27 | // Done checks if queue is done. 28 | func (q *Queue) IsDone() bool { 29 | q.RLock() 30 | defer q.RUnlock() 31 | return q.done && q.idx == len(q.data) 32 | } 33 | 34 | func (q *Queue) Push() { 35 | q.Lock() 36 | defer q.Unlock() 37 | q.data = append(q.data, struct{}{}) 38 | } 39 | 40 | func (q *Queue) IsEmpty() bool { 41 | q.RLock() 42 | defer q.RUnlock() 43 | return q.idx == len(q.data) 44 | } 45 | 46 | // Pop returns an item. 47 | func (q *Queue) Pop() { 48 | q.Lock() 49 | defer q.Unlock() 50 | if q.idx >= len(q.data) { 51 | return 52 | } 53 | q.idx++ 54 | } 55 | -------------------------------------------------------------------------------- /jchiu/heaporstack/main.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "fmt" 5 | ) 6 | 7 | type S struct{} 8 | 9 | func getNew() *S { 10 | return &S{} 11 | } 12 | 13 | func identity(x *S) *S { return x } 14 | 15 | func main() { 16 | a := getNew() 17 | identity(a) 18 | 19 | var b S 20 | identity(&b) 21 | 22 | c := new(S) 23 | identity(c) 24 | 25 | d := new(S) 26 | fmt.Println(d) 27 | 28 | var e S 29 | fmt.Println(&e) 30 | } 31 | -------------------------------------------------------------------------------- /jchiu/profile_shardedhash/README.md: -------------------------------------------------------------------------------- 1 | 20160918_155605 is the current master. 2 | 3 | 20160918_170016 is using a simple sharded hash instead of gotomic hash. 4 | 5 | Control takes 870s. Took about 8 mins on my machine. 6 | 7 | Treatment (sharded hash) takes 585s. 
Took about 5 mins on my machine. 8 | 9 | If you take a look at memory profile, listMapShard takes about <300M. 10 | On the other hand, gotomic takes >500M. 11 | 12 | It remains to verify that everything is working as desired. Need to add some 13 | tests, but let's check the overall design first. 14 | -------------------------------------------------------------------------------- /jchiu/profile_shardedhash/loader.20160918_155605.cpu.pprof: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/dgraph-io/experiments/9841dc289feb4454304a0d45a05c0555acfb432f/jchiu/profile_shardedhash/loader.20160918_155605.cpu.pprof -------------------------------------------------------------------------------- /jchiu/profile_shardedhash/loader.20160918_170016.cpu.pprof: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/dgraph-io/experiments/9841dc289feb4454304a0d45a05c0555acfb432f/jchiu/profile_shardedhash/loader.20160918_170016.cpu.pprof -------------------------------------------------------------------------------- /jchiu/wstring/cgo.go: -------------------------------------------------------------------------------- 1 | package wstring 2 | 3 | // #cgo CXXFLAGS: -std=c++11 -O2 4 | // #cgo LDFLAGS: -lstdc++ 5 | import "C" 6 | -------------------------------------------------------------------------------- /jchiu/wstring/sample/main.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "fmt" 5 | "runtime" 6 | "time" 7 | 8 | "github.com/jchiu0/experimental/wstring" 9 | ) 10 | 11 | func main() { 12 | s := wstring.NewWString() 13 | s.Set([]byte("helloworld")) // This is a copy. No worries about double freeing. 14 | data := s.Get() 15 | fmt.Printf("[%s]\n", string(data)) 16 | fmt.Printf("Length = %d\n", s.Size()) 17 | 18 | // s is no longer used. We expect it to be destroyed when GC runs. 19 | // We want to make sure that std::string's destructor is run. 20 | // Make sure we see "Destroying wstring" before we see "Exiting". 21 | fmt.Println("GC start") 22 | runtime.GC() 23 | fmt.Println("GC end") 24 | time.Sleep(time.Second) // Give GC a bit of time. 25 | fmt.Println("Exiting") 26 | } 27 | -------------------------------------------------------------------------------- /jchiu/wstring/sample/sample: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/dgraph-io/experiments/9841dc289feb4454304a0d45a05c0555acfb432f/jchiu/wstring/sample/sample -------------------------------------------------------------------------------- /jchiu/wstring/util.go: -------------------------------------------------------------------------------- 1 | package wstring 2 | 3 | import "C" 4 | 5 | import ( 6 | "reflect" 7 | "unsafe" 8 | ) 9 | 10 | // byteToChar returns *C.char from byte slice. 11 | func byteToChar(b []byte) *C.char { 12 | var c *C.char 13 | if len(b) > 0 { 14 | c = (*C.char)(unsafe.Pointer(&b[0])) 15 | } 16 | return c 17 | } 18 | 19 | // charToByte converts a *C.char to a byte slice. 
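// The returned slice aliases the C memory rather than copying it, so it is
// only valid while the underlying C buffer stays alive.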
20 | func charToByte(data *C.char, l C.size_t) []byte { 21 | var value []byte 22 | sH := (*reflect.SliceHeader)(unsafe.Pointer(&value)) 23 | sH.Cap, sH.Len, sH.Data = int(l), int(l), uintptr(unsafe.Pointer(data)) 24 | return value 25 | } 26 | -------------------------------------------------------------------------------- /jchiu/wstring/wstring.cc: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | #include 4 | 5 | #include "wstring.h" 6 | 7 | struct wstring_t { std::string rep; }; 8 | 9 | wstring_t* wstring_new() { 10 | printf("~~Creating wstring\n"); 11 | return new wstring_t; 12 | } 13 | 14 | void wstring_destroy(wstring_t* s) { 15 | printf("~~Destroying wstring\n"); 16 | delete s; 17 | } 18 | 19 | char* wstring_get(wstring_t* s, size_t* len) { 20 | *len = s->rep.length(); 21 | return (char*)(s->rep.data()); 22 | } 23 | 24 | void wstring_set(wstring_t* s, char* data, size_t len) { 25 | s->rep.assign(data, len); 26 | } 27 | 28 | size_t wstring_len(wstring_t* s) { 29 | return s->rep.length(); 30 | } -------------------------------------------------------------------------------- /jchiu/wstring/wstring.go: -------------------------------------------------------------------------------- 1 | package wstring 2 | 3 | // #include 4 | // #include 5 | // #include "wstring.h" 6 | import "C" 7 | 8 | import ( 9 | "runtime" 10 | ) 11 | 12 | type WString struct { 13 | c *C.wstring_t 14 | } 15 | 16 | func NewWString() *WString { 17 | s := &WString{C.wstring_new()} 18 | runtime.SetFinalizer(s, destroyWString) 19 | return s 20 | } 21 | 22 | func destroyWString(s *WString) { 23 | C.wstring_destroy(s.c) 24 | } 25 | 26 | // Get returns a slice of the string data. Note that there is no copying here. 27 | // Do NOT mess with return value. It is a const char* in C++ and is readonly. 28 | func (s *WString) Get() []byte { 29 | var l C.size_t 30 | data := C.wstring_get(s.c, &l) 31 | return charToByte(data, l) 32 | } 33 | 34 | // Set assigns the C++ string to the given data. There is copying here. 
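// wstring_set copies the bytes into the C++ std::string, so the caller may
// modify or discard data after Set returns.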
35 | func (s *WString) Set(data []byte) { 36 | C.wstring_set(s.c, byteToChar(data), C.size_t(len(data))) 37 | } 38 | 39 | func (s *WString) Size() int { 40 | return int(C.wstring_len(s.c)) 41 | } 42 | -------------------------------------------------------------------------------- /jchiu/wstring/wstring.h: -------------------------------------------------------------------------------- 1 | #ifndef __WSTRING__ 2 | #define __WSTRING__ 3 | 4 | #ifdef __cplusplus 5 | extern "C" { 6 | #endif 7 | 8 | typedef struct wstring_t wstring_t; 9 | wstring_t* wstring_new(); 10 | void wstring_destroy(wstring_t* s); 11 | char* wstring_get(wstring_t* s, size_t* len); 12 | void wstring_set(wstring_t* s, char* data, size_t len); 13 | size_t wstring_len(wstring_t* s); 14 | 15 | #ifdef __cplusplus 16 | } /* end extern "C" */ 17 | #endif 18 | 19 | #endif // __WSTRING__ 20 | -------------------------------------------------------------------------------- /raft/.gitignore: -------------------------------------------------------------------------------- 1 | /raft 2 | -------------------------------------------------------------------------------- /raft/main.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "bytes" 5 | "fmt" 6 | "log" 7 | "time" 8 | 9 | "golang.org/x/net/context" 10 | 11 | "github.com/coreos/etcd/raft" 12 | "github.com/coreos/etcd/raft/raftpb" 13 | ) 14 | 15 | type node struct { 16 | cfg *raft.Config 17 | ctx context.Context 18 | data map[string]string 19 | done <-chan struct{} 20 | id uint64 21 | raft raft.Node 22 | store *raft.MemoryStorage 23 | } 24 | 25 | var ( 26 | nodes = make(map[int]*node) 27 | ) 28 | 29 | // HardState contains term, vote and commit. 30 | // Snapshot contains data and snapshot metadata. 31 | func (n *node) saveToStorage(hardState raftpb.HardState, 32 | entries []raftpb.Entry, snapshot raftpb.Snapshot) { 33 | 34 | if !raft.IsEmptySnap(snapshot) { 35 | fmt.Printf("saveToStorage snapshot: %v\n", snapshot.String()) 36 | le, err := n.store.LastIndex() 37 | if err != nil { 38 | log.Fatalf("While retrieving last index: %v\n", err) 39 | } 40 | te, err := n.store.Term(le) 41 | if err != nil { 42 | log.Fatalf("While retrieving term: %v\n", err) 43 | } 44 | fmt.Printf("%d node Term for le: %v is %v\n", n.id, le, te) 45 | if snapshot.Metadata.Index <= le { 46 | fmt.Printf("%d node ignoring snapshot. Last index: %v\n", n.id, le) 47 | return 48 | } 49 | 50 | if err := n.store.ApplySnapshot(snapshot); err != nil { 51 | log.Fatalf("Applying snapshot: %v", err) 52 | } 53 | } 54 | 55 | if !raft.IsEmptyHardState(hardState) { 56 | n.store.SetHardState(hardState) 57 | } 58 | n.store.Append(entries) 59 | } 60 | 61 | // receive a single message. 62 | func (n *node) receive(ctx context.Context, message raftpb.Message) { 63 | n.raft.Step(ctx, message) 64 | } 65 | 66 | // send messages to peers. 67 | func (n *node) send(messages []raftpb.Message) { 68 | for _, m := range messages { 69 | log.Println("SEND: ", raft.DescribeMessage(m, nil)) 70 | 71 | nodes[int(m.To)].receive(context.TODO(), m) 72 | if m.Type == raftpb.MsgSnap { 73 | // n.raft.ReportSnapshot(m.To, raft.SnapshotFinish) 74 | } 75 | } 76 | } 77 | 78 | // processSnapshot applies the snapshot to state machine. 
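// In this demo the snapshot payload created below is an empty byte slice, so
// instead of decoding snapshot data we simply copy the leader's in-memory map.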
79 | func (n *node) applyToStateMachine(snapshot raftpb.Snapshot) { 80 | lead := int(n.raft.Status().Lead) 81 | for k, v := range nodes[lead].data { 82 | n.data[k] = v 83 | } 84 | } 85 | 86 | func (n *node) process(entry raftpb.Entry) { 87 | fmt.Printf("node %v: processing entry", n.id) 88 | if entry.Data == nil { 89 | return 90 | } 91 | if entry.Type == raftpb.EntryConfChange { 92 | fmt.Printf("Configuration change\n") 93 | var cc raftpb.ConfChange 94 | cc.Unmarshal(entry.Data) 95 | n.raft.ApplyConfChange(cc) 96 | return 97 | } 98 | 99 | if entry.Type == raftpb.EntryNormal { 100 | parts := bytes.SplitN(entry.Data, []byte(":"), 2) 101 | k := string(parts[0]) 102 | v := string(parts[1]) 103 | n.data[k] = v 104 | fmt.Printf(" Key: %v Val: %v\n", k, v) 105 | } 106 | } 107 | 108 | func (n *node) run() { 109 | for { 110 | select { 111 | case <-time.Tick(time.Second): 112 | n.raft.Tick() 113 | case rd := <-n.raft.Ready(): 114 | n.saveToStorage(rd.HardState, rd.Entries, rd.Snapshot) 115 | n.send(rd.Messages) 116 | if !raft.IsEmptySnap(rd.Snapshot) { 117 | fmt.Println("Applying snapshot to state machine") 118 | n.applyToStateMachine(rd.Snapshot) 119 | } 120 | if len(rd.CommittedEntries) > 0 { 121 | fmt.Printf("Node: %v. Got %d committed entries\n", n.id, len(rd.CommittedEntries)) 122 | } 123 | for _, entry := range rd.CommittedEntries { 124 | n.process(entry) 125 | } 126 | n.raft.Advance() 127 | case <-n.done: 128 | return 129 | } 130 | } 131 | } 132 | 133 | func newNode(id uint64, peers []raft.Peer) *node { 134 | store := raft.NewMemoryStorage() 135 | n := &node{ 136 | id: id, 137 | store: store, 138 | cfg: &raft.Config{ 139 | ID: uint64(id), 140 | ElectionTick: 3, 141 | HeartbeatTick: 1, 142 | Storage: store, 143 | MaxSizePerMsg: 4096, 144 | MaxInflightMsgs: 256, 145 | }, 146 | data: make(map[string]string), 147 | ctx: context.TODO(), 148 | } 149 | n.raft = raft.StartNode(n.cfg, peers) 150 | return n 151 | } 152 | 153 | func main() { 154 | // start a small cluster 155 | nodes[1] = newNode(1, []raft.Peer{{ID: 1}, {ID: 2}}) 156 | go nodes[1].run() 157 | 158 | nodes[2] = newNode(2, []raft.Peer{{ID: 1}, {ID: 2}}) 159 | go nodes[2].run() 160 | 161 | nodes[1].raft.Campaign(nodes[1].ctx) 162 | 163 | time.Sleep(10 * time.Second) 164 | 165 | fmt.Println("----------------- Adding NODE 3") 166 | nodes[3] = newNode(3, []raft.Peer{}) 167 | go nodes[3].run() 168 | nodes[2].raft.ProposeConfChange(nodes[2].ctx, raftpb.ConfChange{ 169 | ID: 3, 170 | Type: raftpb.ConfChangeAddNode, 171 | NodeID: 3, 172 | Context: []byte(""), 173 | }) 174 | time.Sleep(10 * time.Second) 175 | fmt.Println("------------------ Proposing values") 176 | 177 | // Wait for leader, is there a better way to do this 178 | /* 179 | for nodes[1].raft.Status().Lead != 1 { 180 | fmt.Println("Waiting for 1 to become leader") 181 | time.Sleep(100 * time.Millisecond) 182 | } 183 | */ 184 | 185 | nodes[1].raft.Propose(nodes[2].ctx, []byte("mykey1:myvalue1")) 186 | nodes[2].raft.Propose(nodes[2].ctx, []byte("mykey2:myvalue2")) 187 | nodes[3].raft.Propose(nodes[2].ctx, []byte("mykey3:myvalue3")) 188 | 189 | // Wait for proposed entry to be commited in cluster. 190 | // Apperently when should add an uniq id to the message and wait until it is 191 | // commited in the node. 
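	// This demo does not track per-proposal commits; it just sleeps and then
	// inspects every node's state below.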
192 | fmt.Printf("** Sleeping to visualize heartbeat between nodes **\n") 193 | time.Sleep(5 * time.Second) 194 | 195 | leader := int(nodes[1].raft.Status().Lead) 196 | fmt.Printf("=========== Taking snapshot of leader: %v\n", leader) 197 | le, err := nodes[leader].store.LastIndex() 198 | if err != nil { 199 | log.Fatalf("node leader: %v has error getting last index: err: %v\n.", le, err) 200 | } 201 | _, err = nodes[leader].store.CreateSnapshot(le-1, nil, []byte("")) 202 | if err != nil { 203 | log.Fatalf("node leader: %v has error taking snapshot: %v\n.", le, err) 204 | } 205 | if err := nodes[leader].store.Compact(le - 1); err != nil { 206 | log.Fatalf("node leader: %v has error while compaction: %v\n", le, err) 207 | } 208 | fmt.Println("===================== Snapshot taken") 209 | 210 | // Just check that data has been persited 211 | for i, node := range nodes { 212 | fmt.Printf("** Node %v **\n", i) 213 | for k, v := range node.data { 214 | fmt.Printf("%v = %v\n", k, v) 215 | } 216 | fmt.Printf("*************\n") 217 | } 218 | 219 | fmt.Println("Adding a new 4th node") 220 | nodes[4] = newNode(4, []raft.Peer{{ID: 1}}) 221 | go nodes[4].run() 222 | nodes[1].raft.ProposeConfChange(nodes[1].ctx, raftpb.ConfChange{ 223 | ID: 4, 224 | Type: raftpb.ConfChangeAddNode, 225 | NodeID: 4, 226 | Context: []byte(""), 227 | }) 228 | fmt.Printf("** Sleeping to visualize heartbeat between nodes **\n") 229 | time.Sleep(5 * time.Second) 230 | 231 | // Just check that data has been propagated to 4. 232 | for i, node := range nodes { 233 | fmt.Printf("** Node %v **\n", i) 234 | for k, v := range node.data { 235 | fmt.Printf("%v = %v\n", k, v) 236 | } 237 | fmt.Printf("*************\n") 238 | } 239 | } 240 | -------------------------------------------------------------------------------- /rdbdemo/cpp/main.cc: -------------------------------------------------------------------------------- 1 | /* 2 | RDB=$HOME/rocksdb-4.11.2 3 | DIR=$HOME/rocksdb_demo/data 4 | 5 | g++ main.cc $RDB/librocksdb.a -std=c++11 -O2 -lz -lbz2 -I$RDB/include 6 | 7 | rm -Rf $DIR 8 | mkdir -p $DIR 9 | ./a.out $DIR 10 | */ 11 | 12 | #include 13 | #include 14 | #include 15 | #include 16 | 17 | #include 18 | #include 19 | #include 20 | #include 21 | #include 22 | #include 23 | 24 | using namespace rocksdb; // Lazy here. 25 | 26 | void put_demo(DB* db) { 27 | char key[20]; 28 | char val[5000 * 5 + 20]; 29 | memset(val, 'v', sizeof(val)); 30 | const int key_len = 3 + 9; 31 | const int val_len = 5000 * 5 + 9; 32 | 33 | WriteOptions w_opt; 34 | w_opt.sync = false; 35 | 36 | for (int i = 0; i < 100000000; ++i) { 37 | if ((i % 100000) == 0) { 38 | std::cout << "Added " << i << " keys\n"; 39 | } 40 | sprintf(key, "key%09x", i % 10000); 41 | sprintf(val + 5000 * 5, "%09x", i); 42 | Status status = db->Put(w_opt, Slice(key, key_len), Slice(val, val_len)); 43 | assert(status.ok()); 44 | } 45 | } 46 | 47 | // Doesn't seem to have any memory leak. Htop mem usage remains constant at 2.3%. 
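// Unlike put_demo, this batches 1000 Puts into a WriteBatch before each
// db->Write call, amortizing the per-write overhead.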
48 | void writebatch_demo(DB* db) { 49 | char key[20]; 50 | char val[5000 * 5 + 20]; 51 | memset(val, 'v', sizeof(val)); 52 | const int key_len = 3 + 9; 53 | const int val_len = 5000 * 5 + 9; 54 | 55 | WriteOptions w_opt; 56 | w_opt.sync = false; 57 | 58 | WriteBatch wb; 59 | 60 | for (int i = 0; i < 100000000; ++i) { 61 | if ((i % 100000) == 0) { 62 | std::cout << "Added " << i << " keys\n"; 63 | } 64 | if ((i % 1000) == 0) { 65 | Status status = db->Write(w_opt, &wb); 66 | assert(status.ok()); 67 | wb.Clear(); 68 | } 69 | sprintf(key, "key%09x", i % 10000); 70 | sprintf(val + 5000 * 5, "%09x", i); 71 | 72 | wb.Put(Slice(key, key_len), Slice(val, val_len)); 73 | //db->Put(w_opt, Slice(key, key_len), Slice(val, val_len)); 74 | } 75 | } 76 | 77 | int main(int argc, char* argv[]) { 78 | assert(argc == 2); 79 | std::unique_ptr db; 80 | { 81 | DB* raw; 82 | Options opt; 83 | opt.create_if_missing = true; 84 | Status s = DB::Open(opt, argv[1], &raw); 85 | assert(s.ok()); 86 | db.reset(raw); 87 | } 88 | // put_demo(db.get()); 89 | writebatch_demo(db.get()); 90 | return 0; 91 | } 92 | -------------------------------------------------------------------------------- /rocksdbswig/README.md: -------------------------------------------------------------------------------- 1 | # Motivation 2 | 3 | We have been using GoRocksDB. Some disadvantages are: 4 | 5 | * GoRocksDB is not always compatible with the latest version of RocksDB. 6 | * GoRocksDB uses the C API of RocksDB. To get a new function, you may have to modify both the C API and GoRocksDB. 7 | * I believe there is some unnecessary string copying with Get calls in the C API. See [here](https://github.com/facebook/rocksdb/blob/master/db/c.cc#L722). 8 | Notice the C++ code writes to a temporary string `std::string tmp`. Then we call `CopyString` which does a `malloc` and `memcpy`. Then GoRocksDB receives it as a `[]byte`. And I suppose Go's GC will take care of it. 9 | 10 | # Difficulties 11 | 12 | We wish that SWIG can do a better job, but it is a difficult tool to master. The main problem I encountered is `DB**`. `DB::Open` calls takes in a `DB**` argument and `DB` itself is an abstract class. SWIG has difficulty interpreting it. 13 | 14 | We have tried different ways to fix this. One way is to use a '''typemap'''. 15 | 16 | ``` 17 | %typemap(in) rocksdb::DB ** (rocksdb::DB *temp) { 18 | $1 = &temp; 19 | } 20 | 21 | %typemap(argout) rocksdb::DB ** { 22 | %set_output(SWIG_NewPointerObj(SWIG_as_voidptr(*$1), $*1_descriptor, SWIG_POINTER_OWN)); 23 | } 24 | ``` 25 | 26 | This doesn't seem to work. This creates some DB ptr swig object which is undefined because DB is an abstract class. 27 | 28 | To get around the abstract class problem, we tried using directors. But that seems to require that `DBImpl` is included as well. But the latter is an internal RocksDB object not found in the installed includes. I tried including more and more of these internal headers until I encountered some problem with the file `port/port.h`. Not sure why. For future reference, we tried including the following: 29 | 30 | ```cpp 31 | #include "/home/jchiu/rocksdb-4.6.1/port/port.h" 32 | #include "/home/jchiu/rocksdb-4.6.1/util/coding.h" 33 | #include "/home/jchiu/rocksdb-4.6.1/db/dbformat.h" 34 | #include "/home/jchiu/rocksdb-4.6.1/db/memtable_list.h" 35 | #include "/home/jchiu/rocksdb-4.6.1/db/column_family.h" 36 | #include "/home/jchiu/rocksdb-4.6.1/db/db_impl.h" 37 | ``` 38 | 39 | We have also tried playing around with `cpointers.i` and `pointer_functions`. 
But I failed to massage it to work. 40 | 41 | In the end, I decided to just add some functions for `DB::Open` and `DB::Get`. You can see these in `extra.h`. I am not sure if they work completely fine (I need a key that is in the database) and I am not sure if there is some unnecessary string copying due to SWIG. 42 | 43 | # How everything else works 44 | 45 | We do not use `.swigcxx` as we want more control over the building and linking. See `build.sh` first. We run `swig` with a custom include folder. It generates C++ files in a subdirectory and a Go interface file in the current directory. 46 | 47 | We then run a script `add_cgo_flays.py` to insert a `#cgo LDFLAGS` line into the Go file. One alternative is to set `CGO_LDFLAGS` in shell before you `go build` but this is not nice because any binary that uses that Go lib will need to do the same. The best way is to insert these flags right into the Go file. However, we couldn't find a way to do this in Swig. Most of the code insertion mechanisms in SWIG pertains to the `.cxx` file, not the Go file. 48 | -------------------------------------------------------------------------------- /rocksdbswig/add_cgo_flags.py: -------------------------------------------------------------------------------- 1 | import sys 2 | 3 | assert(len(sys.argv) == 2) 4 | 5 | filename = sys.argv[1] 6 | print 'Opening go interface file: %s' % filename 7 | 8 | with open(filename) as f: 9 | lines = f.read().splitlines() 10 | 11 | for i, s in enumerate(lines): 12 | s = s.strip() 13 | if s == '*/': 14 | i_close = i 15 | elif s == 'import "C"': 16 | i_import = i 17 | break 18 | 19 | # lines[i_close] is the last '*/' before we saw 'import "C"'. 20 | assert(i_import and i_close) 21 | 22 | with open('cgo_flags.txt') as f: 23 | to_add = f.read().splitlines() 24 | 25 | with open(filename, 'w') as f: 26 | f.write('\n'.join(lines[:i_close])) 27 | f.write('\n\n') 28 | f.write('\n'.join(to_add)) 29 | f.write('\n\n') 30 | f.write('\n'.join(lines[i_close:])) 31 | 32 | 33 | print 'Wrote to go interface file: %s' % filename -------------------------------------------------------------------------------- /rocksdbswig/build.sh: -------------------------------------------------------------------------------- 1 | set -e 2 | 3 | cd cc 4 | 5 | swig -I/usr/local/include -go -c++ -cgo -intgosize 64 -outdir ../ rocksdbswig.i 6 | 7 | g++ -c -fPIC -O2 -std=c++11 rocksdbswig_wrap.cxx 8 | 9 | ar -r librocksdbswigwrap.a rocksdbswig_wrap.o 10 | 11 | cd .. 
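# The rocksdbswig.go file that SWIG generated above contains no #cgo directives,
# so patch in the LDFLAGS from cgo_flags.txt (via add_cgo_flags.py) before the
# package can be built or imported.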
12 | 13 | python add_cgo_flags.py rocksdbswig.go 14 | -------------------------------------------------------------------------------- /rocksdbswig/cc/extra.h: -------------------------------------------------------------------------------- 1 | namespace rocksdb { 2 | 3 | typedef struct { 4 | Status status; 5 | DB* db; 6 | } StatusDBPair; 7 | 8 | StatusDBPair MyDBOpen(const Options& options, const std::string& name) { 9 | StatusDBPair out; 10 | out.status = DB::Open(options, name, &out.db); 11 | return out; 12 | } 13 | 14 | typedef struct { 15 | Status status; 16 | std::string value; 17 | } StatusStringPair; 18 | 19 | StatusStringPair MyDBGet(DB* db, const ReadOptions& options, const Slice& key) { 20 | StatusStringPair out; 21 | out.status = db->Get(options, key, &out.value); 22 | return out; 23 | } 24 | 25 | } // namespace rocksdb -------------------------------------------------------------------------------- /rocksdbswig/cc/librocksdbswigwrap.a: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/dgraph-io/experiments/9841dc289feb4454304a0d45a05c0555acfb432f/rocksdbswig/cc/librocksdbswigwrap.a -------------------------------------------------------------------------------- /rocksdbswig/cc/rocksdbswig.i: -------------------------------------------------------------------------------- 1 | %module(directors="1") rocksdbswig 2 | //%module rocksdbswig 3 | 4 | %{ 5 | #include 6 | #include 7 | #include 8 | #include 9 | #include 10 | 11 | #include "extra.h" 12 | 13 | 14 | using namespace rocksdb; 15 | %} 16 | 17 | 18 | // %rename only works for function names. We use a C++ macro to rename "range" 19 | // as myrange so that the Go code can compile. 20 | #define range myrange 21 | 22 | //%feature("director"); 23 | 24 | %include 25 | 26 | // Disable mapping between C++ and Go strings to prevent extra copying. 27 | //%include "std_string.i" 28 | // %include "cpointer.i" 29 | 30 | /*%typemap(in) rocksdb::DB ** (rocksdb::DB *temp) { 31 | $1 = &temp; 32 | } 33 | 34 | %typemap(argout) rocksdb::DB ** { 35 | %set_output(SWIG_NewPointerObj(SWIG_as_voidptr(*$1), $*1_descriptor, SWIG_POINTER_OWN)); 36 | }*/ 37 | 38 | %apply DB **OUTPUT { DB **dbptr }; 39 | 40 | %include "/usr/include/c++/5.4.0/string" 41 | 42 | %include 43 | %include 44 | %include 45 | %include 46 | 47 | %include "extra.h" 48 | 49 | //%pointer_functions(DB, DB_p) -------------------------------------------------------------------------------- /rocksdbswig/cc/rocksdbswig_wrap.h: -------------------------------------------------------------------------------- 1 | /* ---------------------------------------------------------------------------- 2 | * This file was automatically generated by SWIG (http://www.swig.org). 3 | * Version 3.0.8 4 | * 5 | * This file is not intended to be easily readable and contains a number of 6 | * coding conventions designed to improve portability and efficiency. Do not make 7 | * changes to this file unless you know what you are doing--modify the SWIG 8 | * interface file instead. 
9 | * ----------------------------------------------------------------------------- */ 10 | 11 | // source: rocksdbswig.i 12 | 13 | #ifndef SWIG_rocksdbswig_WRAP_H_ 14 | #define SWIG_rocksdbswig_WRAP_H_ 15 | 16 | class Swig_memory; 17 | 18 | #endif 19 | -------------------------------------------------------------------------------- /rocksdbswig/cc/rocksdbswig_wrap.o: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/dgraph-io/experiments/9841dc289feb4454304a0d45a05c0555acfb432f/rocksdbswig/cc/rocksdbswig_wrap.o -------------------------------------------------------------------------------- /rocksdbswig/cgo_flags.txt: -------------------------------------------------------------------------------- 1 | #cgo LDFLAGS: -L${SRCDIR}/cc -lrocksdbswigwrap -lrocksdb -lstdc++ 2 | -------------------------------------------------------------------------------- /rocksdbswig/clean.sh: -------------------------------------------------------------------------------- 1 | set -e 2 | 3 | rm *.go 4 | 5 | cd cc 6 | 7 | rm -f *.cxx *.o *.a 8 | 9 | cd .. -------------------------------------------------------------------------------- /rocksdbswig/tmp/add_cgo_flags.py: -------------------------------------------------------------------------------- 1 | import sys 2 | 3 | assert(len(sys.argv) == 2) 4 | 5 | filename = sys.argv[1] 6 | print 'Opening go interface file: %s' % filename 7 | 8 | with open(filename) as f: 9 | lines = f.read().splitlines() 10 | 11 | for i, s in enumerate(lines): 12 | s = s.strip() 13 | if s == '*/': 14 | i_close = i 15 | elif s == 'import "C"': 16 | i_import = i 17 | break 18 | 19 | # lines[i_close] is the last '*/' before we saw 'import "C"'. 20 | assert(i_import and i_close) 21 | 22 | with open('cgo_flags.txt') as f: 23 | to_add = f.read().splitlines() 24 | 25 | with open(filename, 'w') as f: 26 | f.write('\n'.join(lines[:i_close])) 27 | f.write('\n\n') 28 | f.write('\n'.join(to_add)) 29 | f.write('\n\n') 30 | f.write('\n'.join(lines[i_close:])) 31 | 32 | 33 | print 'Wrote to go interface file: %s' % filename -------------------------------------------------------------------------------- /rocksdbswig/tmp/build.sh: -------------------------------------------------------------------------------- 1 | set -e 2 | 3 | cd cc 4 | 5 | swig -I/usr/local/include -go -c++ -cgo -intgosize 64 -outdir ../ tmp.i 6 | 7 | g++ -c -fPIC -O2 -std=c++11 tmp_wrap.cxx 8 | 9 | ar -r libtmp.a tmp_wrap.o 10 | 11 | cd .. 12 | 13 | python add_cgo_flags.py tmp.go 14 | 15 | go build . 
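# Same generate/compile/archive/patch/build flow as ../build.sh, exercised here
# on the minimal tmp module (just setMyString from extra.h); presumably a small
# test bed for SWIG's std::string handling before touching the RocksDB wrapper.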
16 | -------------------------------------------------------------------------------- /rocksdbswig/tmp/cc/extra.h: -------------------------------------------------------------------------------- 1 | #include 2 | 3 | void setMyString(std::string* s) { 4 | *s = "hello"; 5 | } -------------------------------------------------------------------------------- /rocksdbswig/tmp/cc/libtmp.a: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/dgraph-io/experiments/9841dc289feb4454304a0d45a05c0555acfb432f/rocksdbswig/tmp/cc/libtmp.a -------------------------------------------------------------------------------- /rocksdbswig/tmp/cc/tmp.i: -------------------------------------------------------------------------------- 1 | %module tmp 2 | 3 | %{ 4 | #include "extra.h" 5 | 6 | %} 7 | 8 | 9 | %include "std_string.i" 10 | 11 | %include "/usr/include/c++/5.4.0/string" 12 | 13 | %include "extra.h" 14 | -------------------------------------------------------------------------------- /rocksdbswig/tmp/cc/tmp_wrap.cxx: -------------------------------------------------------------------------------- 1 | /* ---------------------------------------------------------------------------- 2 | * This file was automatically generated by SWIG (http://www.swig.org). 3 | * Version 3.0.8 4 | * 5 | * This file is not intended to be easily readable and contains a number of 6 | * coding conventions designed to improve portability and efficiency. Do not make 7 | * changes to this file unless you know what you are doing--modify the SWIG 8 | * interface file instead. 9 | * ----------------------------------------------------------------------------- */ 10 | 11 | // source: tmp.i 12 | 13 | #define SWIGMODULE tmp 14 | 15 | #ifdef __cplusplus 16 | /* SwigValueWrapper is described in swig.swg */ 17 | template class SwigValueWrapper { 18 | struct SwigMovePointer { 19 | T *ptr; 20 | SwigMovePointer(T *p) : ptr(p) { } 21 | ~SwigMovePointer() { delete ptr; } 22 | SwigMovePointer& operator=(SwigMovePointer& rhs) { T* oldptr = ptr; ptr = 0; delete oldptr; ptr = rhs.ptr; rhs.ptr = 0; return *this; } 23 | } pointer; 24 | SwigValueWrapper& operator=(const SwigValueWrapper& rhs); 25 | SwigValueWrapper(const SwigValueWrapper& rhs); 26 | public: 27 | SwigValueWrapper() : pointer(0) { } 28 | SwigValueWrapper& operator=(const T& t) { SwigMovePointer tmp(new T(t)); pointer = tmp; return *this; } 29 | operator T&() const { return *pointer.ptr; } 30 | T *operator&() { return pointer.ptr; } 31 | }; 32 | 33 | template T SwigValueInit() { 34 | return T(); 35 | } 36 | #endif 37 | 38 | /* ----------------------------------------------------------------------------- 39 | * This section contains generic SWIG labels for method/variable 40 | * declarations/attributes, and other compiler dependent labels. 
41 | * ----------------------------------------------------------------------------- */ 42 | 43 | /* template workaround for compilers that cannot correctly implement the C++ standard */ 44 | #ifndef SWIGTEMPLATEDISAMBIGUATOR 45 | # if defined(__SUNPRO_CC) && (__SUNPRO_CC <= 0x560) 46 | # define SWIGTEMPLATEDISAMBIGUATOR template 47 | # elif defined(__HP_aCC) 48 | /* Needed even with `aCC -AA' when `aCC -V' reports HP ANSI C++ B3910B A.03.55 */ 49 | /* If we find a maximum version that requires this, the test would be __HP_aCC <= 35500 for A.03.55 */ 50 | # define SWIGTEMPLATEDISAMBIGUATOR template 51 | # else 52 | # define SWIGTEMPLATEDISAMBIGUATOR 53 | # endif 54 | #endif 55 | 56 | /* inline attribute */ 57 | #ifndef SWIGINLINE 58 | # if defined(__cplusplus) || (defined(__GNUC__) && !defined(__STRICT_ANSI__)) 59 | # define SWIGINLINE inline 60 | # else 61 | # define SWIGINLINE 62 | # endif 63 | #endif 64 | 65 | /* attribute recognised by some compilers to avoid 'unused' warnings */ 66 | #ifndef SWIGUNUSED 67 | # if defined(__GNUC__) 68 | # if !(defined(__cplusplus)) || (__GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ >= 4)) 69 | # define SWIGUNUSED __attribute__ ((__unused__)) 70 | # else 71 | # define SWIGUNUSED 72 | # endif 73 | # elif defined(__ICC) 74 | # define SWIGUNUSED __attribute__ ((__unused__)) 75 | # else 76 | # define SWIGUNUSED 77 | # endif 78 | #endif 79 | 80 | #ifndef SWIG_MSC_UNSUPPRESS_4505 81 | # if defined(_MSC_VER) 82 | # pragma warning(disable : 4505) /* unreferenced local function has been removed */ 83 | # endif 84 | #endif 85 | 86 | #ifndef SWIGUNUSEDPARM 87 | # ifdef __cplusplus 88 | # define SWIGUNUSEDPARM(p) 89 | # else 90 | # define SWIGUNUSEDPARM(p) p SWIGUNUSED 91 | # endif 92 | #endif 93 | 94 | /* internal SWIG method */ 95 | #ifndef SWIGINTERN 96 | # define SWIGINTERN static SWIGUNUSED 97 | #endif 98 | 99 | /* internal inline SWIG method */ 100 | #ifndef SWIGINTERNINLINE 101 | # define SWIGINTERNINLINE SWIGINTERN SWIGINLINE 102 | #endif 103 | 104 | /* exporting methods */ 105 | #if (__GNUC__ >= 4) || (__GNUC__ == 3 && __GNUC_MINOR__ >= 4) 106 | # ifndef GCC_HASCLASSVISIBILITY 107 | # define GCC_HASCLASSVISIBILITY 108 | # endif 109 | #endif 110 | 111 | #ifndef SWIGEXPORT 112 | # if defined(_WIN32) || defined(__WIN32__) || defined(__CYGWIN__) 113 | # if defined(STATIC_LINKED) 114 | # define SWIGEXPORT 115 | # else 116 | # define SWIGEXPORT __declspec(dllexport) 117 | # endif 118 | # else 119 | # if defined(__GNUC__) && defined(GCC_HASCLASSVISIBILITY) 120 | # define SWIGEXPORT __attribute__ ((visibility("default"))) 121 | # else 122 | # define SWIGEXPORT 123 | # endif 124 | # endif 125 | #endif 126 | 127 | /* calling conventions for Windows */ 128 | #ifndef SWIGSTDCALL 129 | # if defined(_WIN32) || defined(__WIN32__) || defined(__CYGWIN__) 130 | # define SWIGSTDCALL __stdcall 131 | # else 132 | # define SWIGSTDCALL 133 | # endif 134 | #endif 135 | 136 | /* Deal with Microsoft's attempt at deprecating C standard runtime functions */ 137 | #if !defined(SWIG_NO_CRT_SECURE_NO_DEPRECATE) && defined(_MSC_VER) && !defined(_CRT_SECURE_NO_DEPRECATE) 138 | # define _CRT_SECURE_NO_DEPRECATE 139 | #endif 140 | 141 | /* Deal with Microsoft's attempt at deprecating methods in the standard C++ library */ 142 | #if !defined(SWIG_NO_SCL_SECURE_NO_DEPRECATE) && defined(_MSC_VER) && !defined(_SCL_SECURE_NO_DEPRECATE) 143 | # define _SCL_SECURE_NO_DEPRECATE 144 | #endif 145 | 146 | /* Deal with Apple's deprecated 'AssertMacros.h' from Carbon-framework */ 147 | #if 
defined(__APPLE__) && !defined(__ASSERT_MACROS_DEFINE_VERSIONS_WITHOUT_UNDERSCORES) 148 | # define __ASSERT_MACROS_DEFINE_VERSIONS_WITHOUT_UNDERSCORES 0 149 | #endif 150 | 151 | /* Intel's compiler complains if a variable which was never initialised is 152 | * cast to void, which is a common idiom which we use to indicate that we 153 | * are aware a variable isn't used. So we just silence that warning. 154 | * See: https://github.com/swig/swig/issues/192 for more discussion. 155 | */ 156 | #ifdef __INTEL_COMPILER 157 | # pragma warning disable 592 158 | #endif 159 | 160 | 161 | #include 162 | #include 163 | #include 164 | #include 165 | #include 166 | 167 | 168 | 169 | typedef long long intgo; 170 | typedef unsigned long long uintgo; 171 | 172 | 173 | 174 | typedef struct { char *p; intgo n; } _gostring_; 175 | typedef struct { void* array; intgo len; intgo cap; } _goslice_; 176 | 177 | 178 | 179 | 180 | #define swiggo_size_assert_eq(x, y, name) typedef char name[(x-y)*(x-y)*-2+1]; 181 | #define swiggo_size_assert(t, n) swiggo_size_assert_eq(sizeof(t), n, swiggo_sizeof_##t##_is_not_##n) 182 | 183 | swiggo_size_assert(char, 1) 184 | swiggo_size_assert(short, 2) 185 | swiggo_size_assert(int, 4) 186 | typedef long long swiggo_long_long; 187 | swiggo_size_assert(swiggo_long_long, 8) 188 | swiggo_size_assert(float, 4) 189 | swiggo_size_assert(double, 8) 190 | 191 | #ifdef __cplusplus 192 | extern "C" { 193 | #endif 194 | extern void crosscall2(void (*fn)(void *, int), void *, int); 195 | extern char* _cgo_topofstack(void) __attribute__ ((weak)); 196 | extern void _cgo_allocate(void *, int); 197 | extern void _cgo_panic(void *, int); 198 | #ifdef __cplusplus 199 | } 200 | #endif 201 | 202 | static char *_swig_topofstack() { 203 | if (_cgo_topofstack) { 204 | return _cgo_topofstack(); 205 | } else { 206 | return 0; 207 | } 208 | } 209 | 210 | static void _swig_gopanic(const char *p) { 211 | struct { 212 | const char *p; 213 | } a; 214 | a.p = p; 215 | crosscall2(_cgo_panic, &a, (int) sizeof a); 216 | } 217 | 218 | 219 | 220 | 221 | #define SWIG_contract_assert(expr, msg) \ 222 | if (!(expr)) { _swig_gopanic(msg); } else 223 | 224 | 225 | static void Swig_free(void* p) { 226 | free(p); 227 | } 228 | 229 | 230 | #include "extra.h" 231 | 232 | 233 | 234 | #include 235 | 236 | #ifdef __cplusplus 237 | extern "C" { 238 | #endif 239 | 240 | void _wrap_Swig_free_tmp_4231490e6333a8e7(void *_swig_go_0) { 241 | void *arg1 = (void *) 0 ; 242 | 243 | arg1 = *(void **)&_swig_go_0; 244 | 245 | Swig_free(arg1); 246 | 247 | } 248 | 249 | 250 | void _wrap_setMyString_tmp_4231490e6333a8e7(_gostring_* _swig_go_0) { 251 | std::string *arg1 = (std::string *) 0 ; 252 | 253 | arg1 = *(std::string **)&_swig_go_0; 254 | 255 | setMyString(arg1); 256 | 257 | } 258 | 259 | 260 | #ifdef __cplusplus 261 | } 262 | #endif 263 | 264 | -------------------------------------------------------------------------------- /rocksdbswig/tmp/cc/tmp_wrap.h: -------------------------------------------------------------------------------- 1 | /* ---------------------------------------------------------------------------- 2 | * This file was automatically generated by SWIG (http://www.swig.org). 3 | * Version 3.0.8 4 | * 5 | * This file is not intended to be easily readable and contains a number of 6 | * coding conventions designed to improve portability and efficiency. Do not make 7 | * changes to this file unless you know what you are doing--modify the SWIG 8 | * interface file instead. 
9 | * ----------------------------------------------------------------------------- */ 10 | 11 | // source: tmp.i 12 | 13 | #ifndef SWIG_rocksdbswig_WRAP_H_ 14 | #define SWIG_rocksdbswig_WRAP_H_ 15 | 16 | class Swig_memory; 17 | 18 | #endif 19 | -------------------------------------------------------------------------------- /rocksdbswig/tmp/cc/tmp_wrap.o: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/dgraph-io/experiments/9841dc289feb4454304a0d45a05c0555acfb432f/rocksdbswig/tmp/cc/tmp_wrap.o -------------------------------------------------------------------------------- /rocksdbswig/tmp/cgo_flags.txt: -------------------------------------------------------------------------------- 1 | #cgo LDFLAGS: -L${SRCDIR}/cc -ltmp -lstdc++ 2 | -------------------------------------------------------------------------------- /rocksdbswig/tmp/tmp.go: -------------------------------------------------------------------------------- 1 | /* ---------------------------------------------------------------------------- 2 | * This file was automatically generated by SWIG (http://www.swig.org). 3 | * Version 3.0.8 4 | * 5 | * This file is not intended to be easily readable and contains a number of 6 | * coding conventions designed to improve portability and efficiency. Do not make 7 | * changes to this file unless you know what you are doing--modify the SWIG 8 | * interface file instead. 9 | * ----------------------------------------------------------------------------- */ 10 | 11 | // source: tmp.i 12 | 13 | package tmp 14 | 15 | /* 16 | #define intgo swig_intgo 17 | typedef void *swig_voidp; 18 | 19 | #include 20 | 21 | 22 | typedef long long intgo; 23 | typedef unsigned long long uintgo; 24 | 25 | 26 | 27 | typedef struct { char *p; intgo n; } _gostring_; 28 | typedef struct { void* array; intgo len; intgo cap; } _goslice_; 29 | 30 | 31 | extern void _wrap_Swig_free_tmp_4231490e6333a8e7(uintptr_t arg1); 32 | extern void _wrap_setMyString_tmp_4231490e6333a8e7(swig_voidp arg1); 33 | #undef intgo 34 | 35 | #cgo LDFLAGS: -L${SRCDIR}/cc -ltmp -lstdc++ 36 | 37 | */ 38 | import "C" 39 | 40 | import "unsafe" 41 | import _ "runtime/cgo" 42 | import "sync" 43 | 44 | 45 | type _ unsafe.Pointer 46 | 47 | 48 | 49 | var Swig_escape_always_false bool 50 | var Swig_escape_val interface{} 51 | 52 | 53 | type _swig_fnptr *byte 54 | type _swig_memberptr *byte 55 | 56 | 57 | type _ sync.Mutex 58 | 59 | func Swig_free(arg1 uintptr) { 60 | _swig_i_0 := arg1 61 | C._wrap_Swig_free_tmp_4231490e6333a8e7(C.uintptr_t(_swig_i_0)) 62 | } 63 | 64 | const X_GLIBCXX_STRING int = 1 65 | func SetMyString(arg1 *string) { 66 | _swig_i_0 := arg1 67 | C._wrap_setMyString_tmp_4231490e6333a8e7(C.swig_voidp(_swig_i_0)) 68 | } 69 | 70 | -------------------------------------------------------------------------------- /rocksdbswig/tmp/tmp_test.go: -------------------------------------------------------------------------------- 1 | package tmp 2 | 3 | import ( 4 | "fmt" 5 | "testing" 6 | ) 7 | 8 | func Test1(t *testing.T) { 9 | var s string 10 | SetMyString(&s) 11 | fmt.Println(s) 12 | } 13 | -------------------------------------------------------------------------------- /rpc/.gitignore: -------------------------------------------------------------------------------- 1 | /rpc 2 | -------------------------------------------------------------------------------- /rpc/client.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | 
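// The client and server codecs in this package share one wire format (see
// writeHeader/parseHeader below): a little-endian header of an 8-byte sequence
// number, a 4-byte method-name length and a 4-byte payload length, followed by
// the method-name bytes and then the raw payload bytes.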
import ( 4 | "bytes" 5 | "encoding/binary" 6 | "errors" 7 | "fmt" 8 | "io" 9 | "log" 10 | "net/rpc" 11 | ) 12 | 13 | type ccodec struct { 14 | rwc io.ReadWriteCloser 15 | payloadLen int32 16 | } 17 | 18 | func writeHeader(rwc io.ReadWriteCloser, seq uint64, 19 | method string, data []byte) error { 20 | 21 | var bh bytes.Buffer 22 | var rerr error 23 | 24 | setError(&rerr, binary.Write(&bh, binary.LittleEndian, seq)) 25 | setError(&rerr, binary.Write(&bh, binary.LittleEndian, int32(len(method)))) 26 | setError(&rerr, binary.Write(&bh, binary.LittleEndian, int32(len(data)))) 27 | _, err := bh.Write([]byte(method)) 28 | setError(&rerr, err) 29 | if rerr != nil { 30 | return rerr 31 | } 32 | _, err = rwc.Write(bh.Bytes()) 33 | return err 34 | } 35 | 36 | func parseHeader(rwc io.ReadWriteCloser, seq *uint64, method *string, plen *int32) error { 37 | var err error 38 | var sz int32 39 | setError(&err, binary.Read(rwc, binary.LittleEndian, seq)) 40 | setError(&err, binary.Read(rwc, binary.LittleEndian, &sz)) 41 | setError(&err, binary.Read(rwc, binary.LittleEndian, plen)) 42 | if err != nil { 43 | return err 44 | } 45 | buf := make([]byte, sz) 46 | n, err := rwc.Read(buf) 47 | if err != nil { 48 | return err 49 | } 50 | if n != int(sz) { 51 | return fmt.Errorf("Expected: %v. Got: %v\n", sz, n) 52 | } 53 | *method = string(buf) 54 | return nil 55 | } 56 | 57 | func (c *ccodec) WriteRequest(r *rpc.Request, body interface{}) error { 58 | if body == nil { 59 | return errors.New("Nil body") 60 | } 61 | 62 | query := body.(*Query) 63 | if err := writeHeader(c.rwc, r.Seq, r.ServiceMethod, query.d); err != nil { 64 | return err 65 | } 66 | 67 | n, err := c.rwc.Write(query.d) 68 | if n != len(query.d) { 69 | return errors.New("Unable to write payload.") 70 | } 71 | return err 72 | } 73 | 74 | func (c *ccodec) ReadResponseHeader(r *rpc.Response) error { 75 | if len(r.Error) > 0 { 76 | log.Fatal("client got response error: " + r.Error) 77 | } 78 | if err := parseHeader(c.rwc, &r.Seq, 79 | &r.ServiceMethod, &c.payloadLen); err != nil { 80 | return err 81 | } 82 | fmt.Println("Client got response:", r.Seq) 83 | fmt.Println("Client got response:", r.ServiceMethod) 84 | return nil 85 | } 86 | 87 | func (c *ccodec) ReadResponseBody(body interface{}) error { 88 | buf := make([]byte, c.payloadLen) 89 | n, err := c.rwc.Read(buf) 90 | if n != int(c.payloadLen) { 91 | return fmt.Errorf("Client expected: %d. 
Got: %d\n", c.payloadLen, n) 92 | } 93 | reply := body.(*Reply) 94 | reply.d = buf 95 | return err 96 | } 97 | 98 | func (c *ccodec) Close() error { 99 | return c.rwc.Close() 100 | } 101 | -------------------------------------------------------------------------------- /rpc/main.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "bufio" 5 | "fmt" 6 | "io" 7 | "log" 8 | "math/rand" 9 | "net" 10 | "net/rpc" 11 | ) 12 | 13 | type Query struct { 14 | d []byte 15 | } 16 | 17 | type Reply struct { 18 | d []byte 19 | } 20 | 21 | func setError(prev *error, n error) { 22 | if prev == nil { 23 | prev = &n 24 | } 25 | } 26 | 27 | type Worker struct { 28 | } 29 | 30 | func serveIt(conn io.ReadWriteCloser) { 31 | for { 32 | srv := &scodec{ 33 | rwc: conn, 34 | ebuf: bufio.NewWriter(conn), 35 | } 36 | rpc.ServeRequest(srv) 37 | } 38 | } 39 | 40 | func (w *Worker) Receive(query *Query, reply *Reply) error { 41 | fmt.Printf("Worker received: [%s]\n", string(query.d)) 42 | reply.d = []byte("abcdefghij-Hello World!") 43 | return nil 44 | } 45 | 46 | func runServer(address string) error { 47 | ln, err := net.Listen("tcp", address) 48 | if err != nil { 49 | fmt.Printf("listen(%q): %s\n", address, err) 50 | return err 51 | } 52 | fmt.Printf("Worker listening on %s\n", ln.Addr()) 53 | go func() { 54 | for { 55 | cxn, err := ln.Accept() 56 | if err != nil { 57 | log.Fatalf("listen(%q): %s\n", address, err) 58 | return 59 | } 60 | log.Printf("Worker accepted connection to %s from %s\n", 61 | cxn.LocalAddr(), cxn.RemoteAddr()) 62 | go serveIt(cxn) 63 | } 64 | }() 65 | return nil 66 | } 67 | 68 | func main() { 69 | addresses := map[int]string{ 70 | 1: "127.0.0.1:10000", 71 | 2: "127.0.0.1:10001", 72 | 3: "127.0.0.1:10002", 73 | } 74 | 75 | w := new(Worker) 76 | if err := rpc.Register(w); err != nil { 77 | log.Fatal(err) 78 | } 79 | 80 | for _, address := range addresses { 81 | if err := runServer(address); err != nil { 82 | log.Fatal(err) 83 | } 84 | } 85 | 86 | clients := make(map[int]*rpc.Client) 87 | for id, address := range addresses { 88 | conn, err := net.Dial("tcp", address) 89 | if err != nil { 90 | log.Fatal("dial", err) 91 | } 92 | cc := &ccodec{ 93 | rwc: conn, 94 | } 95 | clients[id] = rpc.NewClientWithCodec(cc) 96 | } 97 | 98 | for i := 0; i < 10; i++ { 99 | /* 100 | client := clients[1] 101 | if client == nil { 102 | log.Fatal("Worker is nil") 103 | } 104 | */ 105 | 106 | for id, client := range clients { 107 | query := new(Query) 108 | query.d = []byte(fmt.Sprintf("id:%d Rand: %d", id, rand.Int())) 109 | reply := new(Reply) 110 | if err := client.Call("Worker.Receive", query, reply); err != nil { 111 | log.Fatal("call", err) 112 | } 113 | 114 | fmt.Printf("Returned: %s\n", string(reply.d)) 115 | } 116 | } 117 | } 118 | -------------------------------------------------------------------------------- /rpc/server.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "bufio" 5 | "errors" 6 | "fmt" 7 | "io" 8 | "log" 9 | "net/rpc" 10 | "reflect" 11 | ) 12 | 13 | type scodec struct { 14 | rwc io.ReadWriteCloser 15 | ebuf *bufio.Writer 16 | payloadLen int32 17 | } 18 | 19 | func (c *scodec) ReadRequestHeader(r *rpc.Request) error { 20 | var err error 21 | if err = parseHeader(c.rwc, &r.Seq, 22 | &r.ServiceMethod, &c.payloadLen); err != nil { 23 | return err 24 | } 25 | 26 | fmt.Println("server using custom codec to read header") 27 | fmt.Println("server method 
called:", r.ServiceMethod) 28 | fmt.Println("server method called:", r.Seq) 29 | return nil 30 | } 31 | 32 | func (c *scodec) ReadRequestBody(data interface{}) error { 33 | if data == nil { 34 | log.Fatal("Why is data nil here?") 35 | } 36 | value := reflect.ValueOf(data) 37 | if value.Type().Kind() != reflect.Ptr { 38 | log.Fatal("Should of of type pointer") 39 | } 40 | 41 | b := make([]byte, c.payloadLen) 42 | n, err := c.rwc.Read(b) 43 | fmt.Printf("Worker read n bytes: %v %s\n", n, string(b)) 44 | if err != nil { 45 | log.Fatal("server", err) 46 | } 47 | if n != int(c.payloadLen) { 48 | return errors.New("Server unable to read request.") 49 | } 50 | 51 | query := data.(*Query) 52 | query.d = b 53 | return nil 54 | } 55 | 56 | func (c *scodec) WriteResponse(resp *rpc.Response, data interface{}) error { 57 | if len(resp.Error) > 0 { 58 | log.Fatal("Response has error: " + resp.Error) 59 | } 60 | if data == nil { 61 | log.Fatal("Worker write response data is nil") 62 | } 63 | reply, ok := data.(*Reply) 64 | if !ok { 65 | log.Fatal("Unable to convert to reply") 66 | } 67 | 68 | if err := writeHeader(c.rwc, resp.Seq, 69 | resp.ServiceMethod, reply.d); err != nil { 70 | return err 71 | } 72 | 73 | _, err := c.rwc.Write(reply.d) 74 | return err 75 | } 76 | 77 | func (c *scodec) Close() error { 78 | return c.rwc.Close() 79 | } 80 | -------------------------------------------------------------------------------- /sortedencoding/sortedencoding.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "bytes" 5 | "encoding/binary" 6 | "fmt" 7 | "sort" 8 | ) 9 | 10 | func main() { 11 | a := 2<<24 + 10 12 | b := -2<<24 - 1 13 | arr := []int{a, b, 1, 2, 3, 4, -1, -2, -3, 0, 234, 10000, 123, -1543} 14 | sarr := make([]string, 0) 15 | for _, it := range arr { 16 | buf := new(bytes.Buffer) 17 | //Encode using bigendian. 18 | if it < 0 { 19 | buf.WriteByte(0) 20 | } else { 21 | buf.WriteByte(1) 22 | } 23 | err := binary.Write(buf, binary.BigEndian, int32(it)) 24 | b := buf.Bytes() 25 | // Filp the last but so we can preserve ordering. 
26 | if err != nil { 27 | fmt.Println(err) 28 | } 29 | sarr = append(sarr, string(b)) 30 | } 31 | sort.Sort(sort.IntSlice(arr)) 32 | sort.Sort(sort.StringSlice(sarr)) 33 | fmt.Println(arr) 34 | for _, it := range sarr { 35 | var pi int32 36 | fmt.Printf("%v ", []byte(it)) 37 | itOrig := []byte(it[1:]) 38 | // Flip it back before decoding 39 | buf := bytes.NewReader(itOrig) 40 | // Decode to get original value 41 | err := binary.Read(buf, binary.BigEndian, &pi) 42 | if err != nil { 43 | fmt.Println(err) 44 | } 45 | fmt.Printf("%v\n", pi) 46 | } 47 | } 48 | -------------------------------------------------------------------------------- /usecgo/.gitignore: -------------------------------------------------------------------------------- 1 | /usecgo 2 | -------------------------------------------------------------------------------- /usecgo/main.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | // #cgo LDFLAGS: -lm 4 | // #include 5 | // 6 | // double ps(double a, double b) { 7 | // double result = pow(a, b); 8 | // result = sqrt(result); 9 | // return result; 10 | // } 11 | import "C" 12 | import "fmt" 13 | 14 | func Pow(b, e float64) float64 { 15 | return float64(C.pow(C.double(b), C.double(e))) 16 | } 17 | 18 | func Sqrt(b float64) float64 { 19 | return float64(C.sqrt(C.double(b))) 20 | } 21 | 22 | func main() { 23 | b, e := 5.0, 2.0 24 | fmt.Println("5 ^ 2:", Pow(b, e)) 25 | fmt.Println("Sqrt of 5:", Sqrt(b)) 26 | fmt.Println("sq:", C.ps(C.double(b), C.double(e))) 27 | } 28 | -------------------------------------------------------------------------------- /vrpc/.gitignore: -------------------------------------------------------------------------------- 1 | /vrpc 2 | -------------------------------------------------------------------------------- /vrpc/cert.pem: -------------------------------------------------------------------------------- 1 | -----BEGIN CERTIFICATE----- 2 | MIIC+TCCAeOgAwIBAgIRAI8Ep4koxEk353EOZMNGkcUwCwYJKoZIhvcNAQELMBIx 3 | EDAOBgNVBAoTB0FjbWUgQ28wHhcNMTUwOTAyMTAyNDEzWhcNMTYwOTAxMTAyNDEz 4 | WjASMRAwDgYDVQQKEwdBY21lIENvMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIB 5 | CgKCAQEAyjp64czvRbei8cz1a9Ist3MYPAfiaDK7L40O5jA38sPjSdU2JSXdHNaX 6 | 8FR8Hh4hJ6SPwPmNm2w5b2x4K0G+TVOXrZmk4v53NUdPiwdYu3mzYbn5xpVahBWL 7 | eB20alYLmvFZDGBL/rjdz8nvYeb/vvzNSv2CwzcSP9tOsxDevxYKwXnSCVC8Moyv 8 | 1U27V6pcdAsaTcOWrNYm9EqzG/KyHmgRekLJzknXnrc4JvjERzgfdVh+2GaTPgYQ 9 | OhZI4Av9apRo8Nv+udhVAUpcTwkmvif1K+8zr/+6YwE7w8bbP+blX0tZO+R4iWuE 10 | gd68/o2g7TIEp8TyXIoglCOgntUFXwIDAQABo04wTDAOBgNVHQ8BAf8EBAMCAKQw 11 | EwYDVR0lBAwwCgYIKwYBBQUHAwEwDwYDVR0TAQH/BAUwAwEB/zAUBgNVHREEDTAL 12 | gglsb2NhbGhvc3QwCwYJKoZIhvcNAQELA4IBAQDA2SS77xHmbXaEhpDLwagEwneI 13 | zhA0QDRGYd881xJ5wmbjPu21Hy60A0AplH+9CmwNMGYxc8AiQF+Gdpw7SmHXX00z 14 | 4ntarb3UbpMmwV7r1iQdb+qpPcsNJqU+fb3EhWKmQaPST3osUCduxvUNXgBZFg6c 15 | /5qKnzLhHad9rPfD8YCQpey6rcZXGsJLvLpDZvwnNIyab7mBNSc8/9ueM9uoIqdQ 16 | I562Dv9u/rQYiZo+6rVoTYPUum6DEneDJaUCcnXfFg48o9DE77i+8di8IPioSQBg 17 | awmfPZf/PAPVgMwt7r+sa1ruA+6DjFZ+qW/FWdIJ8zRVFWGMeTnz7I3Z9voT 18 | -----END CERTIFICATE----- 19 | -------------------------------------------------------------------------------- /vrpc/key.pem: -------------------------------------------------------------------------------- 1 | -----BEGIN RSA PRIVATE KEY----- 2 | MIIEpAIBAAKCAQEAyjp64czvRbei8cz1a9Ist3MYPAfiaDK7L40O5jA38sPjSdU2 3 | JSXdHNaX8FR8Hh4hJ6SPwPmNm2w5b2x4K0G+TVOXrZmk4v53NUdPiwdYu3mzYbn5 4 | xpVahBWLeB20alYLmvFZDGBL/rjdz8nvYeb/vvzNSv2CwzcSP9tOsxDevxYKwXnS 5 | 
CVC8Moyv1U27V6pcdAsaTcOWrNYm9EqzG/KyHmgRekLJzknXnrc4JvjERzgfdVh+ 6 | 2GaTPgYQOhZI4Av9apRo8Nv+udhVAUpcTwkmvif1K+8zr/+6YwE7w8bbP+blX0tZ 7 | O+R4iWuEgd68/o2g7TIEp8TyXIoglCOgntUFXwIDAQABAoIBAQDFGmKSmi/L/8Qt 8 | cNrzkBJNazx/ph+h93BCVHXlHbIqR4rMd6ciZdkxNmc4st+Waz9daCNErva6Oa1x 9 | UIN/hcB5lbQMW+g95k9B+pMoqqxtbZzM73rndsKWYnlhRXKHAf6lsf5c9a6cQorO 10 | 778F6SPzfmqJYLWGl7KN0q7OEO9JxfKEiUBTbUj9y8+SaE9d3LaoZwppukyl5KDD 11 | +DzTEKIr90dczwsE2voMAi+fP6Mg6EgYdbP4g9G6uRbggwNzy5sLpXVqN9sf+blT 12 | 1h2gh3shgtc8Rb3yh0ErUamux3XWOxHJlVy3k1vZ4GmOcW5dVxYJQgw5pfs9VNpN 13 | +1vIzqVBAoGBAOO0KPEsRq/Sl4TUG94R+0y5LlBCjVTHEa/rYsi8GJ6sLPy0dyGw 14 | +nn+BxDkQJmLTAWbQHVCFy6MFuv9LFx3/bzXywUoUbDJsykOG3Tpk1MWMUGtsfNd 15 | SmBunQkrsYWqwaHWbh9PjgT6JNTF+OhtcpoqIBmG1wo+q18e3ZGFUagXAoGBAONb 16 | 4pTLGBLnNWIHyLWifMD/NkueqavpACdsEe4EKhT/svbPa6nUiXuT6RCqx6zrjYu3 17 | szdlIK+U/J4E/Wa+h+7A/HXiO6J4dDMX8dXxpb63L26ceE0AqBhMEef71w0Zcj6w 18 | zQqtB96T/PpY3KhHJgKbyybD8LmASNGS8MREGRH5AoGBAMwaGPjUrye6pIYzeic8 19 | Bv1mn9ThUr3DEl+Vph44O/k+a7S4LnDMqjffM4Y4ko8PBT2hQ83XR+A5QfKWJMVd 20 | 6Nr0ss4UreTBZw5Nzt6k36GCKvSsNO3nmVuGyswFlkIyJQ5jnqICNVLQxWLTcaEl 21 | OaxmRg+bhfVw5zuD9ycRUXhRAoGAbbZkU1rh5hVOTFznPMcJHnTJNZp90+ge2+cp 22 | i8rUp8dwC+uxz4UxRsHsYM+YhVgxuK9PuzaAnFnvwS01PsRIgiz8ZsJh9pfbsuAm 23 | IhB2494v9FdrMfVUKE1lAqYjcz3JRHk6qKEwv/Z3yDLjYAzdSbr2QGGjI1DFUuRu 24 | bLdTxbkCgYBi6PCSK7IjSpNW5zkeblVQOqdXs62TZOHmZ1HLGNS+lZ8qQV07hKyh 25 | 5CbbgxXwa1udrFJ0I+3uG7qnxD50XfO+TI9OdjbZG3AQILJP2KKyGh74udW/kYcV 26 | gq/BfRt8SEn3qllU+MXZUIfN/OIqg8xAdetpBWAhOXB43p8EQRsldA== 27 | -----END RSA PRIVATE KEY----- 28 | -------------------------------------------------------------------------------- /vrpc/server.go: -------------------------------------------------------------------------------- 1 | package vrpc 2 | 3 | import ( 4 | "flag" 5 | 6 | "math/rand" 7 | 8 | "github.com/Sirupsen/logrus" 9 | ) 10 | 11 | var port = flag.String("port", "12345", "Port to bind on") 12 | 13 | type Reply struct { 14 | Req interface{} 15 | Hello string 16 | } 17 | 18 | type PostingList struct { 19 | Uids []uint64 20 | } 21 | 22 | var log = logrus.WithField("pkg", "vrpc") 23 | 24 | func NewRequest(sz int) *PostingList { 25 | req := new(PostingList) 26 | req.Uids = make([]uint64, sz) 27 | for i := 0; i < sz; i++ { 28 | req.Uids[i] = uint64(rand.Int63()) 29 | } 30 | return req 31 | } 32 | 33 | func PingPong(addr string, req interface{}) interface{} { 34 | pl := req.(*PostingList) 35 | reply := new(PostingList) 36 | reply.Uids = make([]uint64, len(pl.Uids)) 37 | for i := 0; i < len(pl.Uids); i++ { 38 | reply.Uids[i] = pl.Uids[i] 39 | } 40 | return reply 41 | } 42 | 43 | func (pl *PostingList) PingPong(req *PostingList, reply *PostingList) error { 44 | reply = PingPong("", req).(*PostingList) 45 | return nil 46 | } 47 | -------------------------------------------------------------------------------- /vrpc/server_test.go: -------------------------------------------------------------------------------- 1 | // vrpc package benchmarks the various RPC libraries. 2 | // 1. A custom tcp library by valyala (custom TCP) 3 | // 2. net/rpc standard library (TCP) 4 | // 3. crypto/tls over net/rpc standard libraries. 
(TLS over TCP) 5 | // 6 | // The results as of today, on my desktop are these: 7 | // BenchmarkPingPong_2MB_valyala 2 554987424 ns/op 8 | // BenchmarkPingPong_2MB_tlsrpc 20 77509099 ns/op 9 | // BenchmarkPingPong_2MB_netrpc 100 17450330 ns/op 10 | // BenchmarkPingPong_2KB_valyala 10000 167166 ns/op 11 | // BenchmarkPingPong_2KB_tlsrpc 10000 176208 ns/op 12 | // BenchmarkPingPong_2KB_netrpc 10000 102846 ns/op 13 | // 14 | // So, valyala is consistently slow. 15 | // TLS-TCP vs only TCP ranges from 1.7x to 4.5x performance 16 | // penalty, which is significant. 17 | // For now, it probably makes sense to stick to just TCP, 18 | // and see if we need to worry about encrypting inter-node connections later. 19 | 20 | package vrpc 21 | 22 | import ( 23 | "crypto/tls" 24 | "crypto/x509" 25 | "io/ioutil" 26 | "net" 27 | "net/rpc" 28 | "testing" 29 | "time" 30 | 31 | "github.com/valyala/gorpc" 32 | ) 33 | 34 | var mb2 = 250000 35 | var kb2 = 250 36 | 37 | // Benchmark valyala/gorpc TCP connection for 2MB payload. 38 | func BenchmarkPingPong_2MB_valyala(b *testing.B) { 39 | gorpc.RegisterType(&PostingList{}) 40 | 41 | s := gorpc.NewTCPServer(":12345", PingPong) 42 | if err := s.Start(); err != nil { 43 | b.Fatal("While starting server on port 12345") 44 | return 45 | } 46 | defer s.Stop() 47 | 48 | req := NewRequest(mb2) 49 | c := gorpc.NewTCPClient(":12345") 50 | c.Start() 51 | defer c.Stop() 52 | 53 | b.ResetTimer() 54 | for i := 0; i < b.N; i++ { 55 | _, err := c.Call(req) 56 | if err != nil { 57 | b.Fatalf("While running request: %v", err) 58 | return 59 | } 60 | } 61 | } 62 | 63 | func BenchmarkPingPong_2MB_tlsrpc(b *testing.B) { 64 | ready := make(chan bool) 65 | done := make(chan bool) 66 | go runTlsServer(b, ready, done) 67 | connC := getTlsClientConn(b, ready) 68 | defer connC.Close() 69 | 70 | c := rpc.NewClient(connC) 71 | defer c.Close() 72 | 73 | req := NewRequest(mb2) 74 | b.ResetTimer() 75 | 76 | for i := 0; i < b.N; i++ { 77 | var reply PostingList 78 | if err := c.Call("PostingList.PingPong", req, &reply); err != nil { 79 | b.Fatalf("While running request: %v", err) 80 | return 81 | } 82 | } 83 | b.StopTimer() 84 | done <- true 85 | } 86 | 87 | func BenchmarkPingPong_2MB_netrpc(b *testing.B) { 88 | ready := make(chan bool) 89 | done := make(chan bool) 90 | addr := "127.0.0.1:12346" 91 | go runServer(b, addr, ready, done) 92 | 93 | <-ready // Block until server is ready. 94 | 95 | connC, err := net.DialTimeout("tcp", addr, 10*time.Second) 96 | if err != nil { 97 | b.Fatalf("cannot dial. 
Error: %v", err) 98 | return 99 | } 100 | defer connC.Close() 101 | 102 | c := rpc.NewClient(connC) 103 | defer c.Close() 104 | 105 | req := NewRequest(mb2) 106 | b.ResetTimer() 107 | 108 | for i := 0; i < b.N; i++ { 109 | var reply PostingList 110 | if err := c.Call("PostingList.PingPong", req, &reply); err != nil { 111 | b.Fatalf("While running request: %v", err) 112 | return 113 | } 114 | } 115 | b.StopTimer() 116 | done <- true 117 | } 118 | 119 | /* 120 | func BenchmarkPingPong_2KB_udp(b *testing.B) { 121 | ready := make(chan bool) 122 | done := make(chan bool) 123 | addr := "127.0.0.1:12333" 124 | go runUdpServer(b, addr, ready, done) 125 | 126 | saddr, err := net.ResolveUDPAddr("udp", addr) 127 | if err != nil { 128 | b.Fatalf("While resolving: %v", err) 129 | return 130 | } 131 | 132 | fmt.Println("Waiting for ready") 133 | <-ready 134 | fmt.Println("Server is now ready") 135 | connC, err := net.DialUDP("udp", nil, saddr) 136 | if err != nil { 137 | b.Fatalf("While dialing: %v", err) 138 | return 139 | } 140 | fmt.Println("Dial done") 141 | 142 | c := rpc.NewClient(connC) 143 | defer c.Close() 144 | 145 | w := new(bytes.Buffer) 146 | for i := 0; i < 1000; i++ { 147 | binary.Write(w, binary.LittleEndian, rand.Int63()) 148 | } 149 | req := w.Bytes() 150 | fmt.Printf("Got buffer of size: %v\n", len(req)) 151 | b.ResetTimer() 152 | 153 | for i := 0; i < 10; i++ { 154 | fmt.Println("Sending call") 155 | n, err := connC.Write(req) 156 | if err != nil { 157 | b.Fatalf("While writing: %v", err) 158 | return 159 | } 160 | fmt.Printf("Wrote bytes: %v\n", n) 161 | } 162 | b.StopTimer() 163 | connC.Write([]byte("0")) 164 | fmt.Println("Sending done") 165 | done <- true 166 | } 167 | */ 168 | 169 | func BenchmarkPingPong_2KB_valyala(b *testing.B) { 170 | gorpc.RegisterType(&PostingList{}) 171 | 172 | s := gorpc.NewTCPServer(":12345", PingPong) 173 | if err := s.Start(); err != nil { 174 | b.Fatal("While starting server on port 12345") 175 | return 176 | } 177 | defer s.Stop() 178 | 179 | req := NewRequest(kb2) 180 | c := gorpc.NewTCPClient(":12345") 181 | c.Start() 182 | defer c.Stop() 183 | 184 | b.ResetTimer() 185 | for i := 0; i < b.N; i++ { 186 | _, err := c.Call(req) 187 | if err != nil { 188 | b.Fatalf("While running request: %v", err) 189 | return 190 | } 191 | } 192 | } 193 | 194 | func BenchmarkPingPong_2KB_tlsrpc(b *testing.B) { 195 | ready := make(chan bool) 196 | done := make(chan bool) 197 | go runTlsServer(b, ready, done) 198 | connC := getTlsClientConn(b, ready) 199 | defer connC.Close() 200 | 201 | c := rpc.NewClient(connC) 202 | defer c.Close() 203 | 204 | req := NewRequest(kb2) 205 | b.ResetTimer() 206 | 207 | for i := 0; i < b.N; i++ { 208 | var reply PostingList 209 | if err := c.Call("PostingList.PingPong", req, &reply); err != nil { 210 | b.Fatalf("While running request: %v", err) 211 | return 212 | } 213 | } 214 | b.StopTimer() 215 | done <- true 216 | } 217 | 218 | func BenchmarkPingPong_2KB_netrpc(b *testing.B) { 219 | ready := make(chan bool) 220 | done := make(chan bool) 221 | addr := "127.0.0.1:12348" 222 | go runServer(b, addr, ready, done) 223 | 224 | <-ready // Block until server is ready. 225 | 226 | connC, err := net.DialTimeout("tcp", addr, 10*time.Second) 227 | if err != nil { 228 | b.Fatalf("cannot dial. 
Error: %v", err) 229 | return 230 | } 231 | defer connC.Close() 232 | 233 | c := rpc.NewClient(connC) 234 | defer c.Close() 235 | 236 | req := NewRequest(kb2) 237 | b.ResetTimer() 238 | 239 | for i := 0; i < b.N; i++ { 240 | var reply PostingList 241 | if err := c.Call("PostingList.PingPong", req, &reply); err != nil { 242 | b.Fatalf("While running request: %v", err) 243 | return 244 | } 245 | } 246 | b.StopTimer() 247 | done <- true 248 | } 249 | 250 | func runServer(b *testing.B, addr string, ready, done chan bool) { 251 | ln, err := net.Listen("tcp", addr) 252 | if err != nil { 253 | b.Fatalf("Cannot listen to socket: %s", err) 254 | return 255 | } 256 | defer ln.Close() 257 | 258 | s := rpc.NewServer() 259 | if err := s.Register(&PostingList{}); err != nil { 260 | b.Fatalf("Error when registering rpc server: %v", err) 261 | return 262 | } 263 | 264 | ready <- true 265 | conn, err := ln.Accept() 266 | if err != nil { 267 | b.Fatalf("Cannot accept incoming: %v", err) 268 | return 269 | } 270 | defer conn.Close() 271 | 272 | go s.ServeConn(conn) 273 | <-done 274 | } 275 | 276 | /* 277 | func runUdpServer(b *testing.B, addr string, ready, done chan bool) { 278 | saddr, err := net.ResolveUDPAddr("udp", addr) 279 | if err != nil { 280 | b.Fatalf("While resolving: %v", err) 281 | return 282 | } 283 | 284 | conn, err := net.ListenUDP("udp", saddr) 285 | if err != nil { 286 | b.Fatalf("Cannot listen to socket: %s", err) 287 | return 288 | } 289 | defer conn.Close() 290 | fmt.Println("Listen UDP OK.") 291 | 292 | ready <- true 293 | fmt.Println("serving connection via udp") 294 | buf := make([]byte, 2<<20) 295 | go func() { 296 | for { 297 | fmt.Println("Waiting for UDP packets") 298 | n, addr, err := conn.ReadFromUDP(buf) 299 | if err != nil { 300 | b.Fatalf("While reading from udp: %v", err) 301 | return 302 | } 303 | fmt.Printf("Got n bytes: %v Addr: %v\n", n, addr) 304 | if n == 1 { 305 | break 306 | } 307 | } 308 | }() 309 | // go s.ServeConn(conn) 310 | fmt.Println("Blocking on done") 311 | <-done 312 | fmt.Println("I AM DONE") 313 | } 314 | */ 315 | 316 | // Benchmark TLS over TCP connection for 2MB payload. 
317 | var tlsAddr = "127.0.0.1:12347" 318 | 319 | func runTlsServer(b *testing.B, ready, done chan bool) { 320 | cert, err := tls.LoadX509KeyPair("./cert.pem", "./key.pem") 321 | if err != nil { 322 | b.Fatalf("While loading tls certs: %v", err) 323 | return 324 | } 325 | config := tls.Config{Certificates: []tls.Certificate{cert}} 326 | ln, err := tls.Listen("tcp", tlsAddr, &config) 327 | if err != nil { 328 | b.Fatalf("When listening: %v", err) 329 | return 330 | } 331 | defer ln.Close() 332 | 333 | s := rpc.NewServer() 334 | if err := s.Register(&PostingList{}); err != nil { 335 | b.Fatalf("Error when registering rpc server: %v", err) 336 | return 337 | } 338 | 339 | ready <- true 340 | 341 | conn, err := ln.Accept() 342 | if err != nil { 343 | b.Fatalf("cannot accept incoming tcp conn: %s", err) 344 | return 345 | } 346 | defer conn.Close() 347 | go s.ServeConn(conn) 348 | <-done 349 | } 350 | 351 | func getTlsClientConn(b *testing.B, ready chan bool) *tls.Conn { 352 | <-ready 353 | ca_pool := x509.NewCertPool() 354 | scert, err := ioutil.ReadFile("./cert.pem") 355 | if err != nil { 356 | b.Fatalf("While reading cert.pem: %v", err) 357 | } 358 | ca_pool.AppendCertsFromPEM(scert) 359 | cconf := tls.Config{RootCAs: ca_pool, InsecureSkipVerify: true} 360 | 361 | insConn, err := net.DialTimeout("tcp", tlsAddr, 10*time.Second) 362 | if err != nil { 363 | b.Fatalf("While dialing via net: %v", err) 364 | } 365 | 366 | connC := tls.Client(insConn, &cconf) 367 | if err := connC.Handshake(); err != nil { 368 | b.Fatalf("While handshaking: %v", err) 369 | } 370 | return connC 371 | } 372 | -------------------------------------------------------------------------------- /vrpc/tls/.gitignore: -------------------------------------------------------------------------------- 1 | /tls 2 | -------------------------------------------------------------------------------- /vrpc/tls/main.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "crypto/tls" 5 | "crypto/x509" 6 | "io/ioutil" 7 | "math/rand" 8 | "net/rpc" 9 | 10 | "github.com/Sirupsen/logrus" 11 | ) 12 | 13 | var log = logrus.WithField("pkg", "tls") 14 | 15 | type PostingList struct { 16 | Uids []uint64 17 | } 18 | 19 | func NewRequest() *PostingList { 20 | req := new(PostingList) 21 | sz := 250000 22 | req.Uids = make([]uint64, sz) 23 | // Generate a 2MB request. 
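// (sz = 250000 uint64 values * 8 bytes each, which is ~2 MB, matching the
// 2MB benchmarks in the parent vrpc package.)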
24 | for i := 0; i < sz; i++ { 25 | req.Uids[i] = uint64(rand.Int63()) 26 | } 27 | return req 28 | } 29 | 30 | func (p *PostingList) PingPong(req *PostingList, reply *PostingList) error { 31 | reply.Uids = make([]uint64, len(req.Uids)) 32 | for i := 0; i < len(req.Uids); i++ { 33 | reply.Uids[i] = req.Uids[i] 34 | } 35 | 36 | return nil 37 | } 38 | 39 | var addr = "127.0.0.1:12345" 40 | 41 | func runServer(ch chan bool, done chan bool) { 42 | cert, err := tls.LoadX509KeyPair("../cert.pem", "../key.pem") 43 | if err != nil { 44 | log.Fatalf("While loading tls certs: %v", err) 45 | return 46 | } 47 | config := tls.Config{Certificates: []tls.Certificate{cert}} 48 | ln, err := tls.Listen("tcp", addr, &config) 49 | if err != nil { 50 | log.Fatalf("When listening: %v", err) 51 | return 52 | } 53 | s := rpc.NewServer() 54 | if err := s.Register(&PostingList{}); err != nil { 55 | log.Fatalf("Error when registering rpc server: %v", err) 56 | return 57 | } 58 | 59 | ch <- true 60 | log.Debugln("Ready to accept new connection") 61 | conn, err := ln.Accept() 62 | if err != nil { 63 | log.Fatalf("cannot accept incoming tcp conn: %s", err) 64 | return 65 | } 66 | defer conn.Close() 67 | log.Debugln("Accepted a connection") 68 | go s.ServeConn(conn) 69 | <-done 70 | } 71 | 72 | func getClientConn(ch chan bool) *tls.Conn { 73 | <-ch // Block until server is ready. 74 | ca_pool := x509.NewCertPool() 75 | scert, err := ioutil.ReadFile("../cert.pem") 76 | if err != nil { 77 | log.Fatalf("While reading cert.pem: %v", err) 78 | } 79 | ca_pool.AppendCertsFromPEM(scert) 80 | cconf := tls.Config{RootCAs: ca_pool, InsecureSkipVerify: true} 81 | 82 | /* 83 | insConn, err := net.DialTimeout("tcp", addr, 10*time.Second) 84 | if err != nil { 85 | log.Fatalf("While dialing via net: %v", err) 86 | } 87 | log.Debugln("Got connection via net.Dial") 88 | */ 89 | 90 | connC, err := tls.Dial("tcp", addr, &cconf) 91 | // connC := tls.Client(insConn, &cconf) 92 | log.Debugln("Converted to tls") 93 | if err := connC.Handshake(); err != nil { 94 | log.Fatalf("While handshaking: %v", err) 95 | } 96 | log.Debugln("Shook hands") 97 | return connC 98 | } 99 | 100 | func main() { 101 | logrus.SetLevel(logrus.DebugLevel) 102 | ch := make(chan bool) 103 | done := make(chan bool) 104 | go runServer(ch, done) 105 | connC := getClientConn(ch) 106 | defer connC.Close() 107 | 108 | c := rpc.NewClient(connC) 109 | defer c.Close() 110 | 111 | req := NewRequest() 112 | var reply PostingList 113 | if err := c.Call("PostingList.PingPong", req, &reply); err != nil { 114 | log.Fatalf("While running request: %v", err) 115 | return 116 | } 117 | log.Printf("Got reply of len: %v", len(reply.Uids)) 118 | done <- true 119 | } 120 | -------------------------------------------------------------------------------- /x.go: -------------------------------------------------------------------------------- 1 | package x 2 | 3 | import ( 4 | "bytes" 5 | "fmt" 6 | "math/rand" 7 | "sort" 8 | ) 9 | 10 | const alphachars = "0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ" 11 | 12 | func UniqueString(alpha int) string { 13 | var buf bytes.Buffer 14 | for i := 0; i < alpha; i++ { 15 | idx := rand.Intn(len(alphachars)) 16 | buf.WriteByte(alphachars[idx]) 17 | } 18 | return buf.String() 19 | } 20 | 21 | type int64arr []int64 22 | 23 | func (a int64arr) Len() int { return len(a) } 24 | func (a int64arr) Swap(i, j int) { a[i], a[j] = a[j], a[i] } 25 | func (a int64arr) Less(i, j int) bool { return a[i] < a[j] } 26 | 27 | func SortedInt(alpha int) []int64 { 
28 | result := make([]int64, alpha) 29 | for i := 0; i < alpha; i++ { 30 | result[i] = rand.Int63() 31 | } 32 | sort.Sort(int64arr(result)) 33 | return result 34 | } 35 | 36 | func SortedString(alpha int) []string { 37 | result := make([]string, alpha) 38 | for i := 0; i < alpha; i++ { 39 | result[i] = UniqueString(rand.Intn(11)) 40 | } 41 | sort.Sort(sort.StringSlice(result)) 42 | return result 43 | } 44 | 45 | func linearSearchInt(arr []int64, pos int, n int64) int { 46 | for i := pos; i < len(arr); i++ { 47 | if arr[i] > n { 48 | return i - 1 49 | } 50 | } 51 | return len(arr) - 1 52 | } 53 | 54 | func findIndexInt(arr []int64, n int64) int { 55 | i := 0 56 | j := len(arr) - 1 57 | 58 | for n > arr[i] { 59 | if n > arr[j] { 60 | // The right limit found is already lower than n. 61 | // Let's do linear search here. 62 | return linearSearchInt(arr, j, n) 63 | } 64 | 65 | mid := (i + j) / 2 66 | if mid == i { 67 | return linearSearchInt(arr, i, n) 68 | } else if n > arr[mid] { 69 | i = mid + 1 70 | } else if n < arr[mid] { 71 | j = mid - 1 72 | } else { 73 | return linearSearchInt(arr, mid, n) 74 | } 75 | } 76 | return i 77 | } 78 | 79 | func mergeInt(arr []int64, n int64) []int64 { 80 | arr = append(arr, n) 81 | for i := len(arr) - 1; i > 0; i-- { 82 | if arr[i] < arr[i-1] { 83 | arr[i], arr[i-1] = arr[i-1], arr[i] 84 | } else { 85 | break 86 | } 87 | } 88 | return arr 89 | } 90 | 91 | func linearSearchString(arr []string, pos int, n string) int { 92 | for i := pos; i < len(arr); i++ { 93 | if arr[i] > n { 94 | return i - 1 95 | } 96 | } 97 | return len(arr) - 1 98 | } 99 | 100 | func findIndexString(arr []string, n string) int { 101 | i := 0 102 | j := len(arr) - 1 103 | for n > arr[i] { 104 | if arr[j] < n { 105 | return linearSearchString(arr, j, n) 106 | } 107 | 108 | mid := (i + j) / 2 109 | if n > arr[mid] { 110 | i = mid + 1 111 | } else if n < arr[mid] { 112 | j = mid - 1 113 | } else { 114 | // n == arr[mid] 115 | // linear search 116 | return linearSearchString(arr, mid, n) 117 | } 118 | } 119 | return i 120 | } 121 | 122 | func PrintList(l []int64) { 123 | for i := 0; i < len(l); i++ { 124 | fmt.Printf("pos=[%v] val=[%v]\n", i, l[i]) 125 | } 126 | fmt.Println("=============") 127 | } 128 | 129 | func mergeString(arr []string, n string) []string { 130 | arr = append(arr, n) 131 | for i := len(arr) - 1; i > 0; i-- { 132 | if arr[i] < arr[i-1] { 133 | arr[i], arr[i-1] = arr[i-1], arr[i] 134 | } else { 135 | break 136 | } 137 | } 138 | return arr 139 | } 140 | -------------------------------------------------------------------------------- /x_test.go: -------------------------------------------------------------------------------- 1 | package x 2 | 3 | import ( 4 | "math/rand" 5 | "sort" 6 | "testing" 7 | "time" 8 | ) 9 | 10 | func TestMergeInt(t *testing.T) { 11 | l := SortedInt(10) 12 | if !sort.IsSorted(int64arr(l)) { 13 | t.Errorf("Not sorted: [%v]\n", l) 14 | t.FailNow() 15 | } 16 | for i := 0; i < 50000; i++ { 17 | l = mergeInt(l, rand.Int63()) 18 | if !sort.IsSorted(int64arr(l)) { 19 | t.Errorf("Not sorted: [%v]\n", l) 20 | t.FailNow() 21 | } 22 | } 23 | } 24 | 25 | func TestMergeString(t *testing.T) { 26 | l := SortedString(10) 27 | for !sort.StringsAreSorted(l) { 28 | t.Errorf("Not sorted: [%v]\n", l) 29 | } 30 | for i := 0; i < 5000; i++ { 31 | s := UniqueString(rand.Intn(11)) 32 | l = mergeString(l, s) 33 | if !sort.StringsAreSorted(l) { 34 | t.Error("Strings are not sorted") 35 | t.FailNow() 36 | } 37 | } 38 | } 39 | 40 | // Around 0.37 ns/op on my laptop. 
41 | // This is 25x faster than string comparisons, so iterating over and merging 42 | // lists of ints would be a lot faster than doing the same for strings. 43 | func BenchmarkInt64(b *testing.B) { 44 | rand.Seed(time.Now().UnixNano()) 45 | var m, n int64 46 | m = rand.Int63() 47 | n = rand.Int63() 48 | b.ResetTimer() 49 | 50 | for i := 0; i < b.N; i++ { 51 | _ = m == n 52 | } 53 | } 54 | 55 | // There's no difference between this and int64 benchmarks. 56 | func BenchmarkInt32(b *testing.B) { 57 | rand.Seed(time.Now().UnixNano()) 58 | var m, n int32 59 | m = rand.Int31() 60 | n = rand.Int31() 61 | b.ResetTimer() 62 | 63 | for i := 0; i < b.N; i++ { 64 | _ = m == n 65 | } 66 | } 67 | 68 | func BenchmarkFloat(b *testing.B) { 69 | rand.Seed(time.Now().UnixNano()) 70 | var m, n float64 71 | m = rand.Float64() 72 | n = rand.Float64() 73 | b.ResetTimer() 74 | 75 | for i := 0; i < b.N; i++ { 76 | _ = m == n 77 | } 78 | } 79 | 80 | // Around 8.67 ns/op on my laptop. 81 | func BenchmarkString(b *testing.B) { 82 | rand.Seed(time.Now().UnixNano()) 83 | var m, n string 84 | l := rand.Intn(11) // Num permutations are now 5x +ve vals for int64 85 | m = UniqueString(l) 86 | n = UniqueString(l) 87 | b.ResetTimer() 88 | 89 | for i := 0; i < b.N; i++ { 90 | _ = m == n 91 | } 92 | } 93 | 94 | func BenchmarkMergeSortedInt(b *testing.B) { 95 | rand.Seed(time.Now().UnixNano()) 96 | sz := rand.Intn(50000) + 1000 97 | l := SortedInt(sz) 98 | b.ResetTimer() 99 | 100 | for i := 0; i < b.N; i++ { 101 | l = mergeInt(l, rand.Int63()) 102 | } 103 | } 104 | 105 | func BenchmarkMergeSortedString(b *testing.B) { 106 | rand.Seed(time.Now().UnixNano()) 107 | sz := rand.Intn(50000) + 1000 108 | l := SortedString(sz) 109 | b.ResetTimer() 110 | 111 | for i := 0; i < b.N; i++ { 112 | s := UniqueString(rand.Intn(11)) 113 | l = mergeString(l, s) 114 | } 115 | } 116 | 117 | func BenchmarkFindIndexInt(b *testing.B) { 118 | rand.Seed(time.Now().UnixNano()) 119 | sz := rand.Intn(50000) + 1000 120 | l := SortedInt(sz) 121 | b.ResetTimer() 122 | 123 | for i := 0; i < b.N; i++ { 124 | findIndexInt(l, rand.Int63()) 125 | } 126 | } 127 | 128 | func BenchmarkFindIndexString(b *testing.B) { 129 | rand.Seed(time.Now().UnixNano()) 130 | sz := rand.Intn(50000) + 1000 131 | l := SortedString(sz) 132 | b.ResetTimer() 133 | 134 | for i := 0; i < b.N; i++ { 135 | s := UniqueString(rand.Intn(11)) 136 | findIndexString(l, s) 137 | } 138 | } 139 | --------------------------------------------------------------------------------