├── .gitignore ├── LICENSE.txt ├── README ├── fmath ├── icompare.go ├── integer.go ├── ticompare_int8_test.go ├── ticompare_uint64_test.go └── tinteger_test.go ├── fstrconv ├── README.md ├── fstrconv.go └── tfstrconv_test.go ├── fsync ├── fatomic │ └── lazy.go └── padded │ ├── cachebuffer.go │ ├── const_amd64.go │ ├── int64.go │ └── slice.go ├── ftime ├── ftime.go ├── ftime_amd64.s └── tftime_test.go ├── funsafe ├── README.md ├── convert.go └── convert_test.go ├── go.mod └── queues └── spscq ├── bheader_test.go ├── bpointer_test.go ├── bytechunkq.go ├── bytemsgq.go ├── common.go ├── perf_spscq ├── .gitignore ├── bcqar.go ├── bcqarl.go ├── bmqar.go ├── bmqarl.go ├── main.go ├── pqar.go ├── pqarl.go ├── pqs.go └── pqsl.go ├── pointerq.go └── tcommon_test.go /.gitignore: -------------------------------------------------------------------------------- 1 | *.swp 2 | *.prof 3 | *.6 4 | prof_* 5 | -------------------------------------------------------------------------------- /LICENSE.txt: -------------------------------------------------------------------------------- 1 | Copyright © 2016 Francis Stephens All rights reserved. 2 | 3 | Redistribution and use in source and binary forms, with or without modification, 4 | are permitted provided that the following conditions are met: 5 | * Redistributions of source code must retain the above copyright notice, this 6 | list of conditions and the following disclaimer. 7 | * Redistributions in binary form must reproduce the above copyright notice, this 8 | list of conditions and the following disclaimer in the documentation and/or 9 | other materials provided with the distribution. 10 | * Neither the name of Francis Stephens nor the names of its contributors may be used to 11 | endorse or promote products derived from this software without specific prior 12 | written permission. 13 | 14 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS “AS IS” AND 15 | ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED 16 | WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE 17 | DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR 18 | ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES 19 | (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; 20 | LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON 21 | ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 22 | (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS 23 | SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 24 | -------------------------------------------------------------------------------- /README: -------------------------------------------------------------------------------- 1 | A small extension to the Go standard library to support writing fast multi-threaded applications. 2 | 3 | Most packages mimic the standard library and are fairly straightforward. However, the queues/spscq directory contains a small collection of high-performance in-memory queues. The name spscq stands for 'Single Producer, Single Consumer Queue', which means they are only safe when a single goroutine performs writes (and only writes) and a single goroutine performs reads (and only reads). Although they are somewhat delicate, these queues are very fast. The rest of flib mostly serves the development of these queues.
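A minimal usage sketch (added by the editor, modelled on the perf_spscq programs in this repository): the queue size, pause value, chunk size and message count below are illustrative only, and the busy-wait retry loops mirror the benchmark code. One goroutine produces, one goroutine consumes:

    package main

    import "github.com/fmstephe/flib/queues/spscq"

    func main() {
        // size must be a power of two and must divide evenly by the chunk size
        q, err := spscq.NewByteChunkQ(1024, 10*1000, 64)
        if err != nil {
            panic(err)
        }
        done := make(chan bool)
        // Single producer: acquire a chunk, fill it, release it
        go func() {
            for i := 0; i < 100; i++ {
                buf := q.AcquireWrite()
                for buf == nil {
                    buf = q.AcquireWrite()
                }
                buf[0] = byte(i)
                q.ReleaseWrite()
            }
            done <- true
        }()
        // Single consumer: acquire a chunk, read it, release it
        for i := 0; i < 100; i++ {
            buf := q.AcquireRead()
            for buf == nil {
                buf = q.AcquireRead()
            }
            _ = buf[0]
            q.ReleaseRead()
        }
        <-done
    }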
4 | -------------------------------------------------------------------------------- /fmath/icompare.go: -------------------------------------------------------------------------------- 1 | // Copyright 2016 Francis Stephens. All rights reserved. 2 | // Use of this source code is governed by a BSD 3 | // license which can be found in LICENSE.txt 4 | 5 | package fmath 6 | 7 | // uint64 comparisons 8 | 9 | // Returns 1 if x > y, 0 otherwise 10 | func UIGT(x, y uint64) uint8 { 11 | return uint8((y - x) >> 63) 12 | } 13 | 14 | // Returns 1 if x >= y, 0 otherwise 15 | func UIGTE(x, y uint64) uint8 { 16 | return uint8(((x - y) >> 63) ^ 1) 17 | } 18 | 19 | // Returns 1 if x < y, 0 otherwise 20 | func UILT(x, y uint64) uint8 { 21 | return uint8(((x - y) >> 63)) 22 | } 23 | 24 | // Returns 1 if x <= y, 0 otherwise 25 | func UILTE(x, y uint64) uint8 { 26 | return uint8(((y - x) >> 63) ^ 1) 27 | } 28 | 29 | // Returns 1 if x == 0 30 | // Returns 0 if x == 1 31 | // Undefined for all other inputs 32 | func UINot(x uint64) uint8 { 33 | return uint8(x ^ 1) 34 | } 35 | 36 | // uint8 comparisons 37 | 38 | // Returns 1 if x > y, 0 otherwise 39 | func UI8GT(x, y uint8) uint8 { 40 | return (y - x) >> 7 41 | } 42 | 43 | // Returns 1 if x >= y, 0 otherwise 44 | func UI8GTE(x, y uint8) uint8 { 45 | return ((x - y) >> 7) ^ 1 46 | } 47 | 48 | // Returns 1 if x < y, 0 otherwise 49 | func UI8LT(x, y uint8) uint8 { 50 | return ((x - y) >> 7) 51 | } 52 | 53 | // Returns 1 if x <= y, 0 otherwise 54 | func UI8LTE(x, y uint8) uint8 { 55 | return ((y - x) >> 7) ^ 1 56 | } 57 | 58 | // Returns 1 if x == 0 59 | // Returns 0 if x == 1 60 | // Undefined for all other inputs 61 | func UI8Not(x uint8) uint8 { 62 | return x ^ 1 63 | } 64 | -------------------------------------------------------------------------------- /fmath/integer.go: -------------------------------------------------------------------------------- 1 | // Copyright 2016 Francis Stephens. All rights reserved. 
2 | // Use of this source code is governed by a BSD 3 | // license which can be found in LICENSE.txt 4 | 5 | package fmath 6 | 7 | import "math/bits" 8 | 9 | // Returns true if val is a power of two, otherwise returns false 10 | func PowerOfTwo(val int64) bool { 11 | return val > 0 && val&(val-1) == 0 12 | } 13 | 14 | // Returns the smallest power of two >= val 15 | func NxtPowerOfTwo(val int64) int64 { 16 | if val <= 1 { 17 | return 1 18 | } 19 | if PowerOfTwo(val) { 20 | return val 21 | } 22 | return 1 << bits.Len64(uint64(val)) 23 | } 24 | 25 | // Returns x if x < y, otherwise returns y 26 | // 27 | // NB: Only valid if math.MinInt64 <= x-y <= math.MaxInt64 28 | // In particular, always valid if both arguments are positive 29 | func Min(x, y int64) int64 { 30 | return y + ((x - y) & ((x - y) >> 63)) 31 | } 32 | 33 | // Returns x if x > y, otherwise returns y 34 | // 35 | // NB: Only valid if math.MinInt64 <= x-y <= math.MaxInt64 36 | // In particular, always valid if both arguments are positive 37 | func Max(x, y int64) int64 { 38 | return x ^ ((x ^ y) & ((x - y) >> 63)) 39 | } 40 | 41 | // Combines two int32 values into a single int64 42 | // high occupies bits 32-63 43 | // low occupies bits 0-31 44 | func CombineInt32(high, low int32) int64 { 45 | high64 := int64(uint32(high)) << 32 46 | low64 := int64(uint32(low)) 47 | return high64 | low64 48 | } 49 | 50 | // Returns the highest 32 bits of an int64 51 | func HighInt32(whole int64) int32 { 52 | return int32(whole >> 32) 53 | } 54 | 55 | // Returns the lowest 32 bits of an int64 56 | func LowInt32(whole int64) int32 { 57 | return int32(whole) 58 | } 59 | 60 | func Abs(val int64) int64 { 61 | if val >= 0 { 62 | return val 63 | } 64 | return -val 65 | } 66 | -------------------------------------------------------------------------------- /fmath/ticompare_int8_test.go: -------------------------------------------------------------------------------- 1 | // Copyright 2016 Francis Stephens. All rights reserved. 2 | // Use of this source code is governed by a BSD 3 | // license which can be found in LICENSE.txt 4 | 5 | package fmath 6 | 7 | import ( 8 | "testing" 9 | ) 10 | 11 | func TestUI8All(t *testing.T) { 12 | for x := uint8(0); x <= 127 && x >= 0; x++ { 13 | for y := uint8(0); y <= 127; y++ { 14 | testComparisonsUI8(x, y, t) 15 | testComparisonsUI8(y, x, t) 16 | } 17 | } 18 | } 19 | 20 | func TestUI8Not(t *testing.T) { 21 | if UI8Not(0) != 1 { 22 | t.Errorf("UI8Not(0) returns %d", UI8Not(0)) 23 | } 24 | if UI8Not(1) != 0 { 25 | t.Errorf("UI8Not(1) returns %d", UI8Not(1)) 26 | } 27 | } 28 | 29 | func testComparisonsUI8(x, y uint8, t *testing.T) { 30 | if int64(x) < 0 || int64(y) < 0 { 31 | t.Fatalf("Cannot test numbers with highest order bit set %X, %X", x, y) 32 | } 33 | testGreaterUI8(x, y, t) 34 | testLessUI8(x, y, t) 35 | testEqualUI8(x, y, t) 36 | } 37 | 38 | // TODO is there a way to auto-generate these verbose tests? 
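// One possible answer to the TODO above (an editor-added sketch, not part of
// the original test suite): drive all four uint8 comparison functions from a
// single table, computing the expected 0/1 result directly from the Go
// comparison operators. Like the existing tests, this assumes both inputs
// are in the range 0..127.
func testComparisonsUI8Table(x, y uint8, t *testing.T) {
	checks := []struct {
		name   string
		result uint8
		expect bool
	}{
		{"UI8GT", UI8GT(x, y), x > y},
		{"UI8GTE", UI8GTE(x, y), x >= y},
		{"UI8LT", UI8LT(x, y), x < y},
		{"UI8LTE", UI8LTE(x, y), x <= y},
	}
	for _, c := range checks {
		expected := uint8(0)
		if c.expect {
			expected = 1
		}
		if c.result != expected {
			t.Errorf("%s(%d, %d) = %d, expected %d", c.name, x, y, c.result, expected)
		}
	}
}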
39 | func testGreaterUI8(x, y uint8, t *testing.T) { 40 | if x > y { 41 | resultGT := UI8GT(x, y) 42 | if resultGT != 1 { 43 | println(x, y, y-x) 44 | t.Errorf("uint8 %d > %d but resultGT %d", x, y, resultGT) 45 | } 46 | resultNGT := UI8GT(y, x) 47 | if resultNGT != 0 { 48 | t.Errorf("uint8 %x > %x but resultNGT %x", x, y, resultNGT) 49 | } 50 | resultGTE := UI8GTE(x, y) 51 | if resultGTE != 1 { 52 | t.Errorf("uint8 %d > %d but resultGTE %d", x, y, resultGTE) 53 | } 54 | resultNGTE := UI8GTE(y, x) 55 | if resultNGTE != 0 { 56 | t.Errorf("uint8 %d > %d but resultNGTE %d", x, y, resultNGTE) 57 | } 58 | resultLT := UI8LT(y, x) 59 | if resultLT != 1 { 60 | t.Errorf("uint8 %d < %d but resultLT %d", y, x, resultLT) 61 | } 62 | resultNLT := UI8LT(x, y) 63 | if resultNLT != 0 { 64 | t.Errorf("uint8 %d < %d but resultNLT %d", y, x, resultNLT) 65 | } 66 | resultLTE := UI8LTE(y, x) 67 | if resultLTE != 1 { 68 | t.Errorf("uint8 %d < %d but resultLTE %d", y, x, resultLTE) 69 | } 70 | resultNLTE := UI8LTE(x, y) 71 | if resultNLTE != 0 { 72 | t.Errorf("uint8 %d < %d but resultNLTE %d", y, x, resultNLTE) 73 | } 74 | } 75 | } 76 | 77 | func testLessUI8(x, y uint8, t *testing.T) { 78 | if x < y { 79 | resultGT := UI8GT(x, y) 80 | if resultGT != 0 { 81 | t.Errorf("uint8 %d < %d but resultGT %d", x, y, resultGT) 82 | } 83 | resultNGT := UI8GT(y, x) 84 | if resultNGT != 1 { 85 | t.Errorf("uint8 %d < %d but resultNGT %d", x, y, resultNGT) 86 | } 87 | resultGTE := UI8GTE(x, y) 88 | if resultGTE != 0 { 89 | t.Errorf("uint8 %d < %d but resultGTE %d", x, y, resultGTE) 90 | } 91 | resultNGTE := UI8GTE(y, x) 92 | if resultNGTE != 1 { 93 | t.Errorf("uint8 %d < %d but resultNGTE %d", x, y, resultNGTE) 94 | } 95 | resultLT := UI8LT(y, x) 96 | if resultLT != 0 { 97 | t.Errorf("uint8 %d > %d but resultLT %d", y, x, resultLT) 98 | } 99 | resultNLT := UI8LT(x, y) 100 | if resultNLT != 1 { 101 | t.Errorf("uint8 %d > %d but resultNLT %d", y, x, resultNLT) 102 | } 103 | resultLTE := UI8LTE(y, x) 104 | if resultLTE != 0 { 105 | t.Errorf("uint8 %d > %d but resultLTE %d", y, x, resultLTE) 106 | } 107 | resultNLTE := UI8LTE(x, y) 108 | if resultNLTE != 1 { 109 | t.Errorf("uint8 %d > %d but resultNLTE %d", y, x, resultNLTE) 110 | } 111 | } 112 | } 113 | 114 | func testEqualUI8(x, y uint8, t *testing.T) { 115 | if x == y { 116 | resultGT := UI8GT(x, y) 117 | if resultGT != 0 { 118 | t.Errorf("uint8 %d == %d but resultGT %d", x, y, resultGT) 119 | } 120 | resultNGT := UI8GT(y, x) 121 | if resultNGT != 0 { 122 | t.Errorf("uint8 %d == %d but resultNGT %d", x, y, resultNGT) 123 | } 124 | resultGTE := UI8GTE(x, y) 125 | if resultGTE != 1 { 126 | t.Errorf("uint8 %d == %d but resultGTE %d", x, y, resultGTE) 127 | } 128 | resultNGTE := UI8GTE(y, x) 129 | if resultNGTE != 1 { 130 | t.Errorf("uint8 %d == %d but resultNGTE %d", x, y, resultNGTE) 131 | } 132 | resultLT := UI8LT(y, x) 133 | if resultLT != 0 { 134 | t.Errorf("uint8 %d == %d but resultLT %d", y, x, resultLT) 135 | } 136 | resultNLT := UI8LT(x, y) 137 | if resultNLT != 0 { 138 | t.Errorf("uint8 %d == %d but resultNLT %d", y, x, resultNLT) 139 | } 140 | resultLTE := UI8LTE(y, x) 141 | if resultLTE != 1 { 142 | t.Errorf("uint8 %d == %d but resultLTE %d", y, x, resultLTE) 143 | } 144 | resultNLTE := UI8LTE(x, y) 145 | if resultNLTE != 1 { 146 | t.Errorf("uint8 %d == %d but resultNLTE %d", y, x, resultNLTE) 147 | } 148 | } 149 | } 150 | -------------------------------------------------------------------------------- /fmath/ticompare_uint64_test.go: 
-------------------------------------------------------------------------------- 1 | // Copyright 2016 Francis Stephens. All rights reserved. 2 | // Use of this source code is governed by a BSD 3 | // license which can be found in LICENSE.txt 4 | 5 | package fmath 6 | 7 | import ( 8 | "math/rand" 9 | "testing" 10 | ) 11 | 12 | func TestUIGTLow(t *testing.T) { 13 | for x := uint64(0); x < 1000*1000; x += 1003 { 14 | for y := uint64(0); y <= x; y += 503 { 15 | testComparisons(x, y, t) 16 | testComparisons(y, x, t) 17 | } 18 | } 19 | } 20 | 21 | func TestUIGTHigh(t *testing.T) { 22 | for i := uint64(0); i < 1000*1000; i += 1003 { 23 | for j := uint64(0); j <= i; j += 503 { 24 | x := (0 - j) >> 1 25 | y := (0 - i) >> 1 26 | testComparisons(x, y, t) 27 | testComparisons(y, x, t) 28 | } 29 | } 30 | } 31 | 32 | func TestUIGT(t *testing.T) { 33 | r := rand.New(rand.NewSource(1)) 34 | for i := 0; i < 10*1000*1000; i++ { 35 | x := uint64(r.Int63()) 36 | y := uint64(r.Int63()) 37 | testComparisons(x, y, t) 38 | testComparisons(y, x, t) 39 | } 40 | } 41 | 42 | func TestUINot(t *testing.T) { 43 | if UINot(0) != 1 { 44 | t.Errorf("UINot(0) returns %d", UINot(0)) 45 | } 46 | if UINot(1) != 0 { 47 | t.Errorf("UINot(1) returns %d", UINot(1)) 48 | } 49 | } 50 | 51 | func testComparisons(x, y uint64, t *testing.T) { 52 | if int64(x) < 0 || int64(y) < 0 { 53 | t.Fatalf("Cannot test numbers with highest order bit set %X, %X", x, y) 54 | } 55 | testGreater(x, y, t) 56 | testLess(x, y, t) 57 | testEqual(x, y, t) 58 | } 59 | 60 | // TODO is there a way to auto-generate these verbose tests? 61 | func testGreater(x, y uint64, t *testing.T) { 62 | if x > y { 63 | resultGT := UIGT(x, y) 64 | if resultGT != 1 { 65 | t.Errorf("uint64 %d > %d but resultGT %d", x, y, resultGT) 66 | } 67 | resultNGT := UIGT(y, x) 68 | if resultNGT != 0 { 69 | t.Errorf("uint64 %x > %x but resultNGT %x", x, y, resultNGT) 70 | } 71 | resultGTE := UIGTE(x, y) 72 | if resultGTE != 1 { 73 | t.Errorf("uint64 %d > %d but resultGTE %d", x, y, resultGTE) 74 | } 75 | resultNGTE := UIGTE(y, x) 76 | if resultNGTE != 0 { 77 | t.Errorf("uint64 %d > %d but resultNGTE %d", x, y, resultNGTE) 78 | } 79 | resultLT := UILT(y, x) 80 | if resultLT != 1 { 81 | t.Errorf("uint64 %d < %d but resultLT %d", y, x, resultLT) 82 | } 83 | resultNLT := UILT(x, y) 84 | if resultNLT != 0 { 85 | t.Errorf("uint64 %d < %d but resultNLT %d", y, x, resultNLT) 86 | } 87 | resultLTE := UILTE(y, x) 88 | if resultLTE != 1 { 89 | t.Errorf("uint64 %d < %d but resultLTE %d", y, x, resultLTE) 90 | } 91 | resultNLTE := UILTE(x, y) 92 | if resultNLTE != 0 { 93 | t.Errorf("uint64 %d < %d but resultNLTE %d", y, x, resultNLTE) 94 | } 95 | } 96 | } 97 | 98 | func testLess(x, y uint64, t *testing.T) { 99 | if x < y { 100 | resultGT := UIGT(x, y) 101 | if resultGT != 0 { 102 | t.Errorf("uint64 %d < %d but resultGT %d", x, y, resultGT) 103 | } 104 | resultNGT := UIGT(y, x) 105 | if resultNGT != 1 { 106 | t.Errorf("uint64 %d < %d but resultNGT %d", x, y, resultNGT) 107 | } 108 | resultGTE := UIGTE(x, y) 109 | if resultGTE != 0 { 110 | t.Errorf("uint64 %d < %d but resultGTE %d", x, y, resultGTE) 111 | } 112 | resultNGTE := UIGTE(y, x) 113 | if resultNGTE != 1 { 114 | t.Errorf("uint64 %d < %d but resultNGTE %d", x, y, resultNGTE) 115 | } 116 | resultLT := UILT(y, x) 117 | if resultLT != 0 { 118 | t.Errorf("uint64 %d > %d but resultLT %d", y, x, resultLT) 119 | } 120 | resultNLT := UILT(x, y) 121 | if resultNLT != 1 { 122 | t.Errorf("uint64 %d > %d but resultNLT %d", y, x, resultNLT) 123 | } 124 | 
resultLTE := UILTE(y, x) 125 | if resultLTE != 0 { 126 | t.Errorf("uint64 %d > %d but resultLTE %d", y, x, resultLTE) 127 | } 128 | resultNLTE := UILTE(x, y) 129 | if resultNLTE != 1 { 130 | t.Errorf("uint64 %d > %d but resultNLTE %d", y, x, resultNLTE) 131 | } 132 | } 133 | } 134 | 135 | func testEqual(x, y uint64, t *testing.T) { 136 | if x == y { 137 | resultGT := UIGT(x, y) 138 | if resultGT != 0 { 139 | t.Errorf("uint64 %d == %d but resultGT %d", x, y, resultGT) 140 | } 141 | resultNGT := UIGT(y, x) 142 | if resultNGT != 0 { 143 | t.Errorf("uint64 %d == %d but resultNGT %d", x, y, resultNGT) 144 | } 145 | resultGTE := UIGTE(x, y) 146 | if resultGTE != 1 { 147 | t.Errorf("uint64 %d == %d but resultGTE %d", x, y, resultGTE) 148 | } 149 | resultNGTE := UIGTE(y, x) 150 | if resultNGTE != 1 { 151 | t.Errorf("uint64 %d == %d but resultNGTE %d", x, y, resultNGTE) 152 | } 153 | resultLT := UILT(y, x) 154 | if resultLT != 0 { 155 | t.Errorf("uint64 %d == %d but resultLT %d", y, x, resultLT) 156 | } 157 | resultNLT := UILT(x, y) 158 | if resultNLT != 0 { 159 | t.Errorf("uint64 %d == %d but resultNLT %d", y, x, resultNLT) 160 | } 161 | resultLTE := UILTE(y, x) 162 | if resultLTE != 1 { 163 | t.Errorf("uint64 %d == %d but resultLTE %d", y, x, resultLTE) 164 | } 165 | resultNLTE := UILTE(x, y) 166 | if resultNLTE != 1 { 167 | t.Errorf("uint64 %d == %d but resultNLTE %d", y, x, resultNLTE) 168 | } 169 | } 170 | } 171 | -------------------------------------------------------------------------------- /fmath/tinteger_test.go: -------------------------------------------------------------------------------- 1 | // Copyright 2016 Francis Stephens. All rights reserved. 2 | // Use of this source code is governed by a BSD 3 | // license which can be found in LICENSE.txt 4 | 5 | package fmath 6 | 7 | import ( 8 | "math" 9 | "math/rand" 10 | "testing" 11 | 12 | "github.com/fmstephe/flib/fstrconv" 13 | ) 14 | 15 | var allPowersOfTwo []int64 16 | 17 | func init() { 18 | allPowersOfTwo = make([]int64, 1) 19 | allPowersOfTwo[0] = 1 20 | for i := int64(2); i > 0; i = i << 1 { 21 | allPowersOfTwo = append(allPowersOfTwo, i) 22 | } 23 | } 24 | 25 | // Test fmath.PowerOfTwo(int64) bool 26 | func TestPowerOfTwo(t *testing.T) { 27 | // Test all actual powers of two 28 | for _, i := range allPowersOfTwo { 29 | checkPowerOfTwo(t, i) 30 | } 31 | // Test low numbers for power of two-ness 32 | for i := int64(0); i < 10*1000; i++ { 33 | checkPowerOfTwo(t, i) 34 | } 35 | // Test high numbers for power of two-ness 36 | for i := int64(math.MaxInt64); i > math.MaxInt64-(10*1000); i-- { 37 | checkPowerOfTwo(t, i) 38 | } 39 | // Test small negatives for power of two-ness 40 | for i := int64(0); i > -10*1000; i-- { 41 | checkPowerOfTwo(t, i) 42 | } 43 | // Test large negatives for power of two-ness 44 | for i := int64(math.MinInt64); i < math.MinInt64+(10*1000); i++ { 45 | checkPowerOfTwo(t, i) 46 | } 47 | // Test random numbers for power of two-ness 48 | rand.Seed(1) 49 | for i := 0; i < 10*1000; i++ { 50 | n := rand.Int63() 51 | checkPowerOfTwo(t, n) 52 | } 53 | } 54 | 55 | func checkPowerOfTwo(t *testing.T, i int64) { 56 | r := PowerOfTwo(i) 57 | rs := simplePowerOfTwo(i) 58 | if r != rs { 59 | t.Errorf("PowerOfTwo(%d) returns %v, while simplePowerOfTwo(%d) returns %v", i, r, i, rs) 60 | } 61 | } 62 | 63 | func simplePowerOfTwo(i int64) bool { 64 | for _, j := range allPowersOfTwo { 65 | if i == j { 66 | return true 67 | } 68 | } 69 | return false 70 | } 71 | 72 | // Test that for all positive powers of two n, NxtPowerOfTwo(n) 
returns n 73 | func TestNxtPowerOfTwoPowersOfTwo(t *testing.T) { 74 | for n := int64(1); n > 0; n *= 2 { 75 | nxt := NxtPowerOfTwo(n) 76 | if nxt != n { 77 | t.Errorf("For input %d expected NxtPowerOfTwo = %d, found %d", n, n, nxt) 78 | } 79 | } 80 | } 81 | 82 | // Test that for all positive powers of two n, NxtPowerOfTwo(-n) returns 1 83 | func TestNxtPowerOfTwoPowersOfTwoNegative(t *testing.T) { 84 | for n := int64(1); n > 0; n *= 2 { 85 | nxt := NxtPowerOfTwo(-n) 86 | if nxt != 1 { 87 | t.Errorf("For input %d expected NxtPowerOfTwo = 1, found %d", -n, nxt) 88 | } 89 | } 90 | } 91 | 92 | // Test that for all positive powers of two n, 93 | // if we generate a random number, m, smaller than n 94 | // but larger than the largest power of two less than n 95 | // then NxtPowerOfTwo(m) returns n 96 | // 97 | // A better way to describe this could be that if you take 98 | // a random number, m, sandwiched between two powers of two 99 | // NxtPowerOfTwo(m) returns the larger of the two powers of two 100 | func TestNxtPowerOfTwoRandom(t *testing.T) { 101 | rand.Seed(1) 102 | for i := 0; i < 1000; i++ { 103 | for n := int64(2); n > 0; n *= 2 { 104 | low := n >> 1 105 | m := rand.Int63n(n-low) + low + 1 106 | nxt := NxtPowerOfTwo(m) 107 | if nxt != n { 108 | t.Errorf("For input %d expected NxtPowerOfTwo = %d, found %d", m, n, nxt) 109 | } 110 | } 111 | } 112 | } 113 | 114 | // Test that for any random negative number n 115 | // NxtPowerOfTwo(n) returns 1 116 | func TestNxtPowerOfTwoRandomNegative(t *testing.T) { 117 | rand.Seed(1) 118 | for i := 0; i < 10*1000; i++ { 119 | n := -rand.Int63() 120 | nxt := NxtPowerOfTwo(n) 121 | if nxt != 1 { 122 | t.Errorf("For input %d expected NxtPowerOfTwo = 1, found %d", n, nxt) 123 | } 124 | } 125 | } 126 | 127 | // Test fmath.Min(int64,int64) int64 128 | // TODO test for large positive values and moderate negative values 129 | func TestMin(t *testing.T) { 130 | rand.Seed(1) 131 | for i := 0; i < 1000*1000; i++ { 132 | a := rand.Int63n(1 << 41) 133 | b := rand.Int63n(1 << 41) 134 | m := Min(a, b) 135 | om := simpleMin(a, b) 136 | if m != om { 137 | as := fstrconv.ItoaComma(a) 138 | bs := fstrconv.ItoaComma(b) 139 | ms := fstrconv.ItoaComma(m) 140 | t.Errorf("Problem with min of %s, %s - min returned %s", as, bs, ms) 141 | } 142 | } 143 | } 144 | 145 | func simpleMin(val1, val2 int64) int64 { 146 | if val1 < val2 { 147 | return val1 148 | } 149 | return val2 150 | } 151 | 152 | // Test fmath.Max(int64,int64) int64 153 | // TODO test for large positive values and moderate negative values 154 | func TestMax(t *testing.T) { 155 | rand.Seed(1) 156 | for i := 0; i < 1000*1000; i++ { 157 | a := rand.Int63n(1 << 41) 158 | b := rand.Int63n(1 << 41) 159 | m := Max(a, b) 160 | om := simpleMax(a, b) 161 | if m != om { 162 | as := fstrconv.ItoaComma(a) 163 | bs := fstrconv.ItoaComma(b) 164 | ms := fstrconv.ItoaComma(m) 165 | t.Errorf("Problem with max of %s, %s - max returned %s", as, bs, ms) 166 | } 167 | } 168 | } 169 | 170 | func simpleMax(val1, val2 int64) int64 { 171 | if val1 > val2 { 172 | return val1 173 | } 174 | return val2 175 | } 176 | 177 | // Test with random positive int32 178 | // CombineInt32(int32,int32) int64 179 | // HighInt32(int64) int32 180 | // LowInt32(int64) int32 181 | func TestCombineInt32(t *testing.T) { 182 | r := rand.New(rand.NewSource(1)) 183 | for i := 0; i < 10*1000; i++ { 184 | high := r.Int31() 185 | low := r.Int31() 186 | whole := CombineInt32(high, low) 187 | if high != HighInt32(whole) { 188 | t.Errorf("Expecting '%d' found '%d'", 
high, HighInt32(whole)) 189 | } 190 | if low != LowInt32(whole) { 191 | t.Errorf("Expecting '%d' found '%d'", low, LowInt32(whole)) 192 | } 193 | } 194 | } 195 | 196 | // Test with random negative int32 197 | // CombineInt32(int32,int32) int64 198 | // HighInt32(int64) int32 199 | // LowInt32(int64) int32 200 | func TestGuidFunsWithNegativeInt32(t *testing.T) { 201 | r := rand.New(rand.NewSource(1)) 202 | for i := 0; i < 10*1000; i++ { 203 | high := -r.Int31() 204 | low := -r.Int31() 205 | whole := CombineInt32(high, low) 206 | if high != HighInt32(whole) { 207 | t.Errorf("Expecting '%d' found '%d'", high, HighInt32(whole)) 208 | } 209 | if low != LowInt32(whole) { 210 | t.Errorf("Expecting '%d' found '%d'", low, LowInt32(whole)) 211 | } 212 | } 213 | } 214 | 215 | // Test with random uint32, using int32 casts 216 | // CombineInt32(int32,int32) int64 217 | // HighInt32(int64) int32 218 | // LowInt32(int64) int32 219 | func TestCombineUint32(t *testing.T) { 220 | r := rand.New(rand.NewSource(1)) 221 | for i := 0; i < 10*1000; i++ { 222 | high := r.Uint32() 223 | low := r.Uint32() 224 | whole := CombineInt32(int32(high), int32(low)) 225 | if high != uint32(HighInt32(whole)) { 226 | t.Errorf("Expecting '%d' found '%d'", high, uint32(HighInt32(whole))) 227 | } 228 | if low != uint32(LowInt32(whole)) { 229 | t.Errorf("Expecting '%d' found '%d'", low, uint32(LowInt32(whole))) 230 | } 231 | } 232 | } 233 | 234 | // Test with random uint32 most significant bit set, using int32 casts 235 | // CombineInt32(int32,int32) int64 236 | // HighInt32(int64) int32 237 | // LowInt32(int64) int32 238 | func TestGuidFunsWithLargeUint32(t *testing.T) { 239 | r := rand.New(rand.NewSource(1)) 240 | for i := 0; i < 10*1000; i++ { 241 | high := uint32(-r.Int31()) 242 | low := uint32(-r.Int31()) 243 | whole := CombineInt32(int32(high), int32(low)) 244 | if high != uint32(HighInt32(whole)) { 245 | t.Errorf("Expecting '%d' found '%d'", high, HighInt32(whole)) 246 | } 247 | if low != uint32(LowInt32(whole)) { 248 | t.Errorf("Expecting '%d' found '%d'", low, LowInt32(whole)) 249 | } 250 | } 251 | } 252 | -------------------------------------------------------------------------------- /fstrconv/README.md: -------------------------------------------------------------------------------- 1 | A small package of string conversion utilities similar to strconv 2 | -------------------------------------------------------------------------------- /fstrconv/fstrconv.go: -------------------------------------------------------------------------------- 1 | // Copyright 2016 Francis Stephens. All rights reserved. 
2 | // Use of this source code is governed by a BSD 3 | // license which can be found in LICENSE.txt 4 | 5 | package fstrconv 6 | 7 | import ( 8 | "bytes" 9 | ) 10 | 11 | func ItoaComma(i int64) string { 12 | return ItoaDelim(i, ',') 13 | } 14 | 15 | func ItoaDelim(i int64, delim byte) string { 16 | if i == 0 { 17 | return "0" 18 | } 19 | var b bytes.Buffer 20 | neg := i < 0 21 | if neg { 22 | i = -i 23 | } 24 | for cnt := 0; i != 0; cnt++ { 25 | if cnt == 3 { 26 | b.WriteByte(delim) 27 | cnt = 0 28 | } 29 | r := i % 10 30 | i = i / 10 31 | b.WriteByte(byte(r) + 48) 32 | } 33 | if neg { 34 | b.WriteByte('-') 35 | } 36 | return reverse(b.String()) 37 | } 38 | 39 | func reverse(s string) string { 40 | // With thanks to Russ Cox 41 | n := 0 42 | rune := make([]rune, len(s)) 43 | for _, r := range s { 44 | rune[n] = r 45 | n++ 46 | } 47 | rune = rune[0:n] 48 | // Reverse 49 | for i := 0; i < n/2; i++ { 50 | rune[i], rune[n-1-i] = rune[n-1-i], rune[i] 51 | } 52 | // Convert back to UTF-8. 53 | return string(rune) 54 | } 55 | -------------------------------------------------------------------------------- /fstrconv/tfstrconv_test.go: -------------------------------------------------------------------------------- 1 | // Copyright 2016 Francis Stephens. All rights reserved. 2 | // Use of this source code is governed by a BSD 3 | // license which can be found in LICENSE.txt 4 | 5 | package fstrconv 6 | 7 | import ( 8 | "testing" 9 | ) 10 | 11 | func TestTest(t *testing.T) { 12 | helper(0, "0", t) 13 | helper(1, "1", t) 14 | helper(2, "2", t) 15 | helper(3, "3", t) 16 | helper(7, "7", t) 17 | helper(8, "8", t) 18 | helper(10, "10", t) 19 | helper(33, "33", t) 20 | helper(99, "99", t) 21 | helper(100, "100", t) 22 | helper(123, "123", t) 23 | helper(999, "999", t) 24 | helper(1000, "1,000", t) 25 | helper(10000, "10,000", t) 26 | helper(100000, "100,000", t) 27 | helper(1000000, "1,000,000", t) 28 | helper(10000000, "10,000,000", t) 29 | helper(100000000, "100,000,000", t) 30 | helper(1000000000, "1,000,000,000", t) 31 | 32 | helper(0, "0", t) 33 | helper(0, "0", t) 34 | helper(1, "1", t) 35 | helper(2, "2", t) 36 | helper(3, "3", t) 37 | helper(7, "7", t) 38 | helper(8, "8", t) 39 | helper(10, "10", t) 40 | helper(33, "33", t) 41 | helper(99, "99", t) 42 | helper(100, "100", t) 43 | helper(123, "123", t) 44 | helper(999, "999", t) 45 | helper(1000, "1,000", t) 46 | helper(10000, "10,000", t) 47 | helper(100000, "100,000", t) 48 | helper(1000000, "1,000,000", t) 49 | helper(10000000, "10,000,000", t) 50 | helper(100000000, "100,000,000", t) 51 | helper(1000000000, "1,000,000,000", t) 52 | 53 | helper(-1, "-1", t) 54 | helper(-2, "-2", t) 55 | helper(-3, "-3", t) 56 | helper(-7, "-7", t) 57 | helper(-8, "-8", t) 58 | helper(-10, "-10", t) 59 | helper(-33, "-33", t) 60 | helper(-99, "-99", t) 61 | helper(-100, "-100", t) 62 | helper(-123, "-123", t) 63 | helper(-999, "-999", t) 64 | helper(-1000, "-1,000", t) 65 | helper(-10000, "-10,000", t) 66 | helper(-100000, "-100,000", t) 67 | helper(-1000000, "-1,000,000", t) 68 | helper(-10000000, "-10,000,000", t) 69 | helper(-100000000, "-100,000,000", t) 70 | helper(-1000000000, "-1,000,000,000", t) 71 | } 72 | 73 | func helper(x int64, s string, t *testing.T) { 74 | r := ItoaDelim(x, ',') 75 | if r != s { 76 | t.Errorf("%d not reversed properly, expecting %s got %s instead", x, s, r) 77 | } 78 | } 79 | -------------------------------------------------------------------------------- /fsync/fatomic/lazy.go: 
-------------------------------------------------------------------------------- 1 | // Copyright 2016 Francis Stephens. All rights reserved. 2 | // Use of this source code is governed by a BSD 3 | // license which can be found in LICENSE.txt 4 | 5 | // +build go1.6 6 | // +build amd64 7 | 8 | package fatomic 9 | 10 | //go:nosplit 11 | //go:noinline 12 | func LazyStore(addr *int64, val int64) { 13 | *addr = val 14 | } 15 | -------------------------------------------------------------------------------- /fsync/padded/cachebuffer.go: -------------------------------------------------------------------------------- 1 | // Copyright 2016 Francis Stephens. All rights reserved. 2 | // Use of this source code is governed by a BSD 3 | // license which can be found in LICENSE.txt 4 | 5 | package padded 6 | 7 | type CacheBuffer struct { 8 | Bytes [CacheLineBytes * 2]byte 9 | } 10 | -------------------------------------------------------------------------------- /fsync/padded/const_amd64.go: -------------------------------------------------------------------------------- 1 | // Copyright 2016 Francis Stephens. All rights reserved. 2 | // Use of this source code is governed by a BSD 3 | // license which can be found in LICENSE.txt 4 | 5 | package padded 6 | 7 | const CacheLineBytes = 64 8 | -------------------------------------------------------------------------------- /fsync/padded/int64.go: -------------------------------------------------------------------------------- 1 | // Copyright 2016 Francis Stephens. All rights reserved. 2 | // Use of this source code is governed by a BSD 3 | // license which can be found in LICENSE.txt 4 | 5 | package padded 6 | 7 | type Int64 struct { 8 | before [CacheLineBytes - 8]byte 9 | Value int64 10 | after [CacheLineBytes]byte 11 | } 12 | -------------------------------------------------------------------------------- /fsync/padded/slice.go: -------------------------------------------------------------------------------- 1 | // Copyright 2016 Francis Stephens. All rights reserved. 2 | // Use of this source code is governed by a BSD 3 | // license which can be found in LICENSE.txt 4 | 5 | package padded 6 | 7 | import ( 8 | "unsafe" 9 | ) 10 | 11 | func ByteSlice(size int) []byte { 12 | b := make([]byte, CacheLineBytes+size+CacheLineBytes) 13 | return b[CacheLineBytes : size+CacheLineBytes] 14 | } 15 | 16 | func PointerSlice(size int) []unsafe.Pointer { 17 | b := make([]unsafe.Pointer, CacheLineBytes+size+CacheLineBytes) 18 | return b[CacheLineBytes : size+CacheLineBytes] 19 | } 20 | -------------------------------------------------------------------------------- /ftime/ftime.go: -------------------------------------------------------------------------------- 1 | // Copyright 2016 Francis Stephens. All rights reserved. 2 | // Use of this source code is governed by a BSD 3 | // license which can be found in LICENSE.txt 4 | 5 | package ftime 6 | 7 | // A fast timing counter accessing the cheapest fastest 8 | // counter your cpu can provide. Guaranteed to increase monotonically 9 | // across successive calls on the same CPU core. 10 | // On AMD64 CPUs we use the RDTSC instruction 11 | func Counter() (count int64) 12 | 13 | // Indicates whether the results returned from a call to Counter() 14 | // increase at a uniform rate, independent of the actual clock speed 15 | // of the CPU it is running on. 
16 | // On AMD64 CPUs we test for the 'Invariant TSC' property using CPUID 17 | func IsCounterSteady() bool { 18 | _, _, _, edx := cpuid(0X80000007) 19 | return edx&(1<<8) != 0 20 | } 21 | 22 | // Indicates whether the results returned from a call to Counter() 23 | // are guaranteed to be monotonically increasing per CPU and across 24 | // multiple CPUs on the same socket. No guarantee is made across CPU 25 | // sockets. 26 | // On AMD64 CPUs we test for the 'Invariant TSC' property using CPUID 27 | func IsCounterSMPMonotonic() bool { 28 | _, _, _, edx := cpuid(0X80000007) 29 | return edx&(1<<8) != 0 30 | } 31 | 32 | func cpuid(eaxi uint32) (eax, ebx, ecx, edx uint32) 33 | 34 | // This method will not return until the value returned by Counter() 35 | // has increased by ticks. 36 | // This method is useful as an alternative to time.Sleep() when very short 37 | // pause periods are desired and it is undesirable to have the current 38 | // thread/goroutine descheduled. 39 | func Pause(ticks int64) 40 | -------------------------------------------------------------------------------- /ftime/ftime_amd64.s: -------------------------------------------------------------------------------- 1 | TEXT ·Counter(SB),$0-8 2 | RDTSC 3 | SHLQ $32, DX 4 | ADDQ DX, AX 5 | MOVQ AX, count+0(FP) 6 | RET 7 | 8 | TEXT ·cpuid(SB),$0-24 9 | MOVL eaxi+0(FP), AX 10 | MOVL $0, BX 11 | MOVL $0, CX 12 | MOVL $0, DX 13 | CPUID 14 | MOVL AX, eax+8(FP) 15 | MOVL BX, ebx+12(FP) // Do I need to preserve this? 16 | MOVL CX, ecx+16(FP) 17 | MOVL DX, edx+20(FP) 18 | RET 19 | 20 | TEXT ·Pause(SB),$0-8 21 | MOVQ ticks+0(FP), BX 22 | RDTSC 23 | SHLQ $32, DX 24 | ADDQ DX, AX 25 | ADDQ AX, BX // Target ticks lives in BX 26 | testTick: 27 | PAUSE 28 | RDTSC 29 | SHLQ $32, DX 30 | ADDQ DX, AX 31 | CMPQ BX, AX 32 | JGT testTick 33 | RET 34 | -------------------------------------------------------------------------------- /ftime/tftime_test.go: -------------------------------------------------------------------------------- 1 | // Copyright 2016 Francis Stephens. All rights reserved. 2 | // Use of this source code is governed by a BSD 3 | // license which can be found in LICENSE.txt 4 | 5 | package ftime 6 | 7 | import ( 8 | "testing" 9 | 10 | . "github.com/fmstephe/flib/fstrconv" 11 | ) 12 | 13 | func TestCounter(t *testing.T) { 14 | if !IsCounterSMPMonotonic() { 15 | return // Very hard to guarantee anything 16 | } 17 | c := Counter() 18 | for i := 0; i < 1000*1000; i++ { 19 | newc := Counter() 20 | if newc < c { 21 | t.Errorf("Counter() values reducing. Previous %s New %s", ItoaComma(c), ItoaComma(newc)) 22 | } 23 | } 24 | } 25 | 26 | func TestCounterSMP(t *testing.T) { 27 | if !IsCounterSMPMonotonic() { 28 | return // Very hard to guarantee anything 29 | } 30 | counterChan := make(chan int64) 31 | go func() { 32 | for i := 0; i < 1000*1000; i++ { 33 | counterChan <- Counter() 34 | } 35 | close(counterChan) 36 | }() 37 | go func() { 38 | for c := range counterChan { 39 | newc := Counter() 40 | if newc < c { 41 | t.Errorf("Counter() values reducing. 
Previous %s New %s", ItoaComma(c), ItoaComma(newc)) 42 | } 43 | } 44 | }() 45 | } 46 | 47 | func TestPause(t *testing.T) { 48 | for i := int64(1000); i <= int64(1000*1000*1000); i *= 10 { 49 | c := Counter() 50 | Pause(i) 51 | c = Counter() - c 52 | if c < i { 53 | t.Errorf("Counter ticks elapsed (%s) less than asked for (%s)", ItoaComma(c), ItoaComma(i)) 54 | } 55 | } 56 | } 57 | -------------------------------------------------------------------------------- /funsafe/README.md: -------------------------------------------------------------------------------- 1 | # Unsafe Conversions Between String <-> []byte 2 | 3 | This package is intended to be a clear implementation of the solution suggested by Keith Randall here: 4 | 5 | https://groups.google.com/d/msg/golang-nuts/Zsfk-VMd_fU/WXPjfZwPBAAJ 6 | 7 | This approach avoids the problems associated with using a `uintptr`, and the package's tests will fail if the runtime representation of string or []byte ever changes. 8 | -------------------------------------------------------------------------------- /funsafe/convert.go: -------------------------------------------------------------------------------- 1 | package funsafe 2 | 3 | import ( 4 | "unsafe" 5 | ) 6 | 7 | // stringHeader is the runtime representation of a string. 8 | // It should be identical to reflect.StringHeader 9 | type stringHeader struct { 10 | data unsafe.Pointer 11 | stringLen int 12 | } 13 | 14 | // sliceHeader is the runtime representation of a slice. 15 | // It should be identical to reflect.SliceHeader 16 | type sliceHeader struct { 17 | data unsafe.Pointer 18 | sliceLen int 19 | sliceCap int 20 | } 21 | 22 | // Unsafely converts s into a byte slice. 23 | // If you modify b, then s will also be modified. This violates the 24 | // property that strings are immutable. 25 | func StringToBytes(s string) (b []byte) { 26 | stringHeader := (*stringHeader)(unsafe.Pointer(&s)) 27 | sliceHeader := (*sliceHeader)(unsafe.Pointer(&b)) 28 | sliceHeader.data = stringHeader.data 29 | sliceHeader.sliceLen = len(s) 30 | sliceHeader.sliceCap = len(s) 31 | return b 32 | } 33 | 34 | // Unsafely converts b into a string. 35 | // If you modify b, then s will also be modified. This violates the 36 | // property that strings are immutable. 
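//
// Illustrative usage (an editor-added sketch, not part of the original file):
// a []byte can be used to index a map keyed by string without first copying
// it into a new string, as long as the bytes are not modified while the
// resulting string is in use:
//
//	counts := map[string]int{"hello": 1}
//	key := []byte("hello")
//	n := counts[BytesToString(key)] // looks up "hello" without copying key
//	_ = n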
37 | func BytesToString(b []byte) (s string) { 38 | sliceHeader := (*sliceHeader)(unsafe.Pointer(&b)) 39 | stringHeader := (*stringHeader)(unsafe.Pointer(&s)) 40 | stringHeader.data = sliceHeader.data 41 | stringHeader.stringLen = len(b) 42 | return s 43 | } 44 | -------------------------------------------------------------------------------- /funsafe/convert_test.go: -------------------------------------------------------------------------------- 1 | package funsafe 2 | 3 | import ( 4 | "reflect" 5 | "testing" 6 | "unsafe" 7 | ) 8 | 9 | func TestStringToBytes(t *testing.T) { 10 | s := "string" 11 | b := StringToBytes(s) 12 | // Should have the same length 13 | if len(s) != len(b) { 14 | t.Errorf("Converted bytes have different length (%d) than the string (%d)", len(b), len(s)) 15 | } 16 | if len(s) != cap(b) { 17 | t.Errorf("Converted bytes have capacity (%d) beyond the length of string (%d)", cap(b), len(s)) 18 | } 19 | // Should have same content 20 | if s != string(b) { 21 | t.Errorf("Converted bytes has different value %q than the string %q", string(b), s) 22 | } 23 | // Should point to the same data in memory 24 | sData := (*(*reflect.StringHeader)(unsafe.Pointer(&s))).Data 25 | bData := (*(*reflect.SliceHeader)(unsafe.Pointer(&b))).Data 26 | if sData != bData { 27 | t.Errorf("Converted bytes points to different data %d than the string %d", sData, bData) 28 | } 29 | } 30 | 31 | func TestBytesToString(t *testing.T) { 32 | b := []byte("bytes!") 33 | s := BytesToString(b) 34 | // Should have the same length 35 | if len(s) != len(b) { 36 | t.Errorf("Converted string has a different length (%d) than the bytes (%d)", len(s), len(b)) 37 | } 38 | // Should have same content 39 | if s != string(b) { 40 | t.Errorf("Converted string has a different value %q than the bytes %q", s, string(b)) 41 | } 42 | // Should point to the same data in memory 43 | sData := (*(*reflect.StringHeader)(unsafe.Pointer(&s))).Data 44 | bData := (*(*reflect.SliceHeader)(unsafe.Pointer(&b))).Data 45 | if sData != bData { 46 | t.Errorf("Converted string points to different data %d than the bytes %d", sData, bData) 47 | } 48 | } 49 | 50 | // Check we don't access the entire byte slice's capacity 51 | func TestBytesToString_WithUnusedBytes(t *testing.T) { 52 | // make a long slice of bytes 53 | bLongDontUse := []byte("bytes! and all these other bytes") 54 | // just take the first 6 characters 55 | b := bLongDontUse[:6] 56 | s := BytesToString(b) 57 | // Should have the same length 58 | if len(s) != len(b) { 59 | t.Errorf("Converted string has a different length (%d) than the bytes (%d)", len(s), len(b)) 60 | } 61 | // Should have same content 62 | if s != string(b) { 63 | t.Errorf("Converted string has a different value %q than the bytes %q", s, string(b)) 64 | } 65 | // Should point to the same data in memory 66 | sData := (*(*reflect.StringHeader)(unsafe.Pointer(&s))).Data 67 | bData := (*(*reflect.SliceHeader)(unsafe.Pointer(&b))).Data 68 | if sData != bData { 69 | t.Errorf("Converted string points to different data %d than the bytes %d", sData, bData) 70 | } 71 | } 72 | 73 | func TestStringHeadersCompatible(t *testing.T) { 74 | // Check to make sure string header is what reflect thinks it is. 75 | // They should be the same except for the type of the data field. 
76 | if unsafe.Sizeof(stringHeader{}) != unsafe.Sizeof(reflect.StringHeader{}) { 77 | t.Errorf("stringHeader layout has changed ours %#v theirs %#v", stringHeader{}, reflect.StringHeader{}) 78 | } 79 | x := stringHeader{} 80 | y := reflect.StringHeader{} 81 | x.data = unsafe.Pointer(y.Data) 82 | y.Data = uintptr(x.data) 83 | x.stringLen = y.Len 84 | y.Len = x.stringLen 85 | // If we can do all of that then the two structs are compatible 86 | } 87 | 88 | func TestSliceHeadersCompatible(t *testing.T) { 89 | // Check to make sure string header is what reflect thinks it is. 90 | // They should be the same except for the type of the data field. 91 | if unsafe.Sizeof(sliceHeader{}) != unsafe.Sizeof(reflect.SliceHeader{}) { 92 | t.Errorf("sliceHeader layout has changed ours %#v theirs %#v", sliceHeader{}, reflect.SliceHeader{}) 93 | } 94 | x := sliceHeader{} 95 | y := reflect.SliceHeader{} 96 | x.data = unsafe.Pointer(y.Data) 97 | y.Data = uintptr(x.data) 98 | x.sliceLen = y.Len 99 | y.Len = x.sliceLen 100 | x.sliceCap = y.Cap 101 | y.Cap = x.sliceCap 102 | // If we can do all of that then the two structs are compatible 103 | } 104 | -------------------------------------------------------------------------------- /go.mod: -------------------------------------------------------------------------------- 1 | module github.com/fmstephe/flib 2 | 3 | go 1.19 4 | -------------------------------------------------------------------------------- /queues/spscq/bheader_test.go: -------------------------------------------------------------------------------- 1 | package spscq 2 | 3 | import ( 4 | "encoding/binary" 5 | "testing" 6 | ) 7 | 8 | // These tests are intended to provide a rough guide 9 | // to the performance cost of writing an int64 to a 10 | // byte slice using unsafe. These benchmarks are very 11 | // micro and therefore do not include cache misses or 12 | // other confounding factors. But they are definitely 13 | // interesting. 14 | 15 | func BenchmarkComparisonWriteHeader(b *testing.B) { 16 | x := int64(0) 17 | for i := 0; i < b.N; i++ { 18 | x = int64(i) 19 | } 20 | _ = x 21 | } 22 | 23 | func BenchmarkComparisonReadHeader(b *testing.B) { 24 | x := int64(0) 25 | y := int64(0) 26 | for i := 0; i < b.N; i++ { 27 | y = x 28 | } 29 | _ = x 30 | _ = y 31 | } 32 | 33 | func BenchmarkWriteHeader(b *testing.B) { 34 | x := make([]byte, 8) 35 | for i := 0; i < b.N; i++ { 36 | writeHeader(x, 0, int64(i)) 37 | } 38 | } 39 | 40 | func BenchmarkReadHeader(b *testing.B) { 41 | y := int64(0) 42 | x := make([]byte, 8) 43 | writeHeader(x, 0, 64) 44 | for i := 0; i < b.N; i++ { 45 | y = readHeader(x, 0) 46 | } 47 | _ = y 48 | } 49 | 50 | func BenchmarkBinaryWriteHeader(b *testing.B) { 51 | x := make([]byte, 8) 52 | for i := 0; i < b.N; i++ { 53 | binary.PutVarint(x, int64(i)) 54 | } 55 | } 56 | 57 | func BenchmarkBinaryReadHeader(b *testing.B) { 58 | y := int64(0) 59 | x := make([]byte, 8) 60 | writeHeader(x, 0, 64) 61 | for i := 0; i < b.N; i++ { 62 | y, _ = binary.Varint(x) 63 | } 64 | _ = y 65 | } 66 | -------------------------------------------------------------------------------- /queues/spscq/bpointer_test.go: -------------------------------------------------------------------------------- 1 | // Copyright 2016 Francis Stephens. All rights reserved. 
2 | // Use of this source code is governed by a BSD 3 | // license which can be found in LICENSE.txt 4 | 5 | package spscq 6 | 7 | import ( 8 | "runtime/debug" 9 | "testing" 10 | "unsafe" 11 | ) 12 | 13 | func BenchmarkStrict(b *testing.B) { 14 | println(b.N) 15 | debug.SetGCPercent(-1) 16 | q, _ := NewPointerQ(64*1024, 10*1000) 17 | done := make(chan int64) 18 | b.ResetTimer() 19 | go pqsDequeue(int64(b.N), q, done) 20 | go pqsEnqueue(int64(b.N), q, done) 21 | <-done 22 | <-done 23 | b.StopTimer() 24 | // Clear out all those ridiculous pointers before we garbage collect 25 | for i := range q.ringBuffer { 26 | q.ringBuffer[i] = unsafe.Pointer(uintptr(0)) 27 | } 28 | debug.SetGCPercent(100) 29 | } 30 | 31 | func BenchmarkLazy(b *testing.B) { 32 | println(b.N) 33 | debug.SetGCPercent(-1) 34 | q, _ := NewPointerQ(64*1024, 10*1000) 35 | done := make(chan int64) 36 | b.ResetTimer() 37 | go pqsDequeueLazy(int64(b.N), q, done) 38 | go pqsEnqueueLazy(int64(b.N), q, done) 39 | <-done 40 | <-done 41 | b.StopTimer() 42 | // Clear out all those ridiculous pointers before we garbage collect 43 | for i := range q.ringBuffer { 44 | q.ringBuffer[i] = unsafe.Pointer(uintptr(0)) 45 | } 46 | debug.SetGCPercent(100) 47 | } 48 | 49 | func pqsEnqueue(msgCount int64, q *PointerQ, done chan int64) { 50 | t := 1 51 | var v unsafe.Pointer 52 | for i := int64(0); i < msgCount; i++ { 53 | v = unsafe.Pointer(uintptr(uint64(t))) 54 | w := q.WriteSingle(v) 55 | for w == false { 56 | w = q.WriteSingle(v) 57 | } 58 | t++ 59 | } 60 | done <- -1 61 | } 62 | 63 | func pqsDequeue(msgCount int64, q *PointerQ, done chan int64) { 64 | sum := int64(0) 65 | var v unsafe.Pointer 66 | for i := int64(1); i <= msgCount; i++ { 67 | v = q.ReadSingle() 68 | for v == nil { 69 | v = q.ReadSingle() 70 | } 71 | pv := int64(uintptr(v)) 72 | sum += pv 73 | } 74 | done <- sum 75 | } 76 | 77 | func pqsEnqueueLazy(msgCount int64, q *PointerQ, done chan int64) { 78 | t := 1 79 | var v unsafe.Pointer 80 | for i := int64(0); i < msgCount; i++ { 81 | v = unsafe.Pointer(uintptr(uint64(t))) 82 | w := q.WriteSingleLazy(v) 83 | for w == false { 84 | w = q.WriteSingleLazy(v) 85 | } 86 | t++ 87 | } 88 | done <- -1 89 | } 90 | 91 | func pqsDequeueLazy(msgCount int64, q *PointerQ, done chan int64) { 92 | sum := int64(0) 93 | var v unsafe.Pointer 94 | for i := int64(1); i <= msgCount; i++ { 95 | v = q.ReadSingleLazy() 96 | for v == nil { 97 | v = q.ReadSingleLazy() 98 | } 99 | pv := int64(uintptr(v)) 100 | sum += pv 101 | } 102 | done <- sum 103 | } 104 | -------------------------------------------------------------------------------- /queues/spscq/bytechunkq.go: -------------------------------------------------------------------------------- 1 | // Copyright 2016 Francis Stephens. All rights reserved. 
2 | // Use of this source code is governed by a BSD 3 | // license which can be found in LICENSE.txt 4 | 5 | package spscq 6 | 7 | import ( 8 | "errors" 9 | "fmt" 10 | "sync/atomic" 11 | 12 | "github.com/fmstephe/flib/fsync/fatomic" 13 | "github.com/fmstephe/flib/fsync/padded" 14 | "github.com/fmstephe/flib/ftime" 15 | ) 16 | 17 | type ByteChunkQueue interface { 18 | //Acquire/Release Read 19 | AcquireRead() []byte 20 | ReleaseRead() 21 | ReleaseReadLazy() 22 | //Acquire/Release Write 23 | AcquireWrite() []byte 24 | ReleaseWrite() 25 | ReleaseWriteLazy() 26 | } 27 | 28 | func NewByteChunkQueue(size, pause, chunk int64) (ByteChunkQueue, error) { 29 | return NewByteChunkQ(size, pause, chunk) 30 | } 31 | 32 | type ByteChunkQ struct { 33 | _prebuffer padded.CacheBuffer 34 | commonQ 35 | _midbuffer padded.CacheBuffer 36 | ringBuffer []byte 37 | chunk int64 38 | _postbuffer padded.CacheBuffer 39 | } 40 | 41 | func NewByteChunkQ(size, pause, chunk int64) (*ByteChunkQ, error) { 42 | if size%chunk != 0 { 43 | return nil, errors.New(fmt.Sprintf("Size must divide by chunk, (size) %d rem (chunk) %d = %d", size, chunk, size%chunk)) 44 | } 45 | ringBuffer := padded.ByteSlice(int(size)) 46 | cq, err := newCommonQ(size, pause) 47 | if err != nil { 48 | return nil, err // TODO is that the best error to return? 49 | } 50 | return &ByteChunkQ{ringBuffer: ringBuffer, commonQ: cq, chunk: chunk}, nil 51 | } 52 | 53 | func (q *ByteChunkQ) AcquireWrite() []byte { 54 | chunk := q.chunk 55 | write := q.write.Value 56 | writeTo := write + chunk 57 | readLimit := writeTo - q.size 58 | if readLimit > q.readCache.Value { 59 | q.readCache.Value = atomic.LoadInt64(&q.read.Value) 60 | if readLimit > q.readCache.Value { 61 | q.failedWrites.Value++ 62 | ftime.Pause(q.pause) 63 | return nil 64 | } 65 | } 66 | idx := write & q.mask 67 | nxt := idx + chunk 68 | return q.ringBuffer[idx:nxt] 69 | } 70 | 71 | func (q *ByteChunkQ) ReleaseWrite() { 72 | atomic.AddInt64(&q.write.Value, q.chunk) 73 | } 74 | 75 | func (q *ByteChunkQ) ReleaseWriteLazy() { 76 | fatomic.LazyStore(&q.write.Value, q.write.Value+q.chunk) 77 | } 78 | 79 | func (q *ByteChunkQ) AcquireRead() []byte { 80 | chunk := q.chunk 81 | read := q.read.Value 82 | readTo := read + chunk 83 | if readTo > q.writeCache.Value { 84 | q.writeCache.Value = atomic.LoadInt64(&q.write.Value) 85 | if readTo > q.writeCache.Value { 86 | q.failedReads.Value++ 87 | ftime.Pause(q.pause) 88 | return nil 89 | } 90 | } 91 | idx := read & q.mask 92 | nxt := idx + chunk 93 | return q.ringBuffer[idx:nxt] 94 | } 95 | 96 | func (q *ByteChunkQ) ReleaseRead() { 97 | atomic.AddInt64(&q.read.Value, q.chunk) 98 | } 99 | 100 | func (q *ByteChunkQ) ReleaseReadLazy() { 101 | fatomic.LazyStore(&q.read.Value, q.read.Value+q.chunk) 102 | } 103 | -------------------------------------------------------------------------------- /queues/spscq/bytemsgq.go: -------------------------------------------------------------------------------- 1 | // Copyright 2016 Francis Stephens. All rights reserved. 
2 | // Use of this source code is governed by a BSD 3 | // license which can be found in LICENSE.txt 4 | 5 | package spscq 6 | 7 | import ( 8 | "sync/atomic" 9 | "unsafe" 10 | 11 | "github.com/fmstephe/flib/fsync/padded" 12 | "github.com/fmstephe/flib/ftime" 13 | ) 14 | 15 | const ( 16 | headerSize = 8 17 | ) 18 | 19 | type ByteMsgQueue interface { 20 | //Acquire/Release Read 21 | AcquireRead() []byte 22 | ReleaseRead() 23 | ReleaseReadLazy() 24 | //Acquire/Release Write 25 | AcquireWrite(int64) []byte 26 | ReleaseWrite() 27 | ReleaseWriteLazy() 28 | } 29 | 30 | func NewByteMsgQueue(size, pause int64) (ByteMsgQueue, error) { 31 | return NewByteMsgQ(size, pause) 32 | } 33 | 34 | type ByteMsgQ struct { 35 | _prebuffer padded.CacheBuffer 36 | commonQ 37 | _midbuffer padded.CacheBuffer 38 | ringBuffer []byte 39 | _postbuffer padded.CacheBuffer 40 | } 41 | 42 | func NewByteMsgQ(size, pause int64) (*ByteMsgQ, error) { 43 | // TODO there is an effective minimum queue size - should be enforced 44 | ringBuffer := padded.ByteSlice(int(size)) 45 | cq, err := newCommonQ(size, pause) 46 | if err != nil { 47 | return nil, err // TODO is that the best error to return? 48 | } 49 | return &ByteMsgQ{ringBuffer: ringBuffer, commonQ: cq}, nil 50 | } 51 | 52 | func (q *ByteMsgQ) AcquireWrite(bufferSize int64) []byte { 53 | totalSize := bufferSize + headerSize 54 | initFrom := q.write.Value & q.mask 55 | rem := q.size - initFrom 56 | if rem < totalSize { 57 | if rem >= headerSize { 58 | writeHeader(q.ringBuffer, initFrom, -rem) 59 | } 60 | atomic.AddInt64(&q.write.Value, rem) 61 | } 62 | from, to := q.msgWrite(totalSize) 63 | if from == to { 64 | return nil 65 | } 66 | writeHeader(q.ringBuffer, from, totalSize) 67 | return q.ringBuffer[from+headerSize : to] 68 | } 69 | 70 | func (q *ByteMsgQ) AcquireRead() []byte { 71 | rem := q.size - (q.read.Value & q.mask) 72 | if rem < headerSize { 73 | atomic.AddInt64(&q.read.Value, rem) 74 | } 75 | initFrom := q.read.Value & q.mask 76 | totalSize := readHeader(q.ringBuffer, initFrom) 77 | if totalSize < 0 { 78 | atomic.AddInt64(&q.read.Value, -totalSize) 79 | initFrom = q.read.Value & q.mask 80 | totalSize = readHeader(q.ringBuffer, initFrom) 81 | } 82 | from, to := q.msgRead(totalSize) 83 | if from == to { 84 | return nil 85 | } 86 | return q.ringBuffer[from+headerSize : to] 87 | } 88 | 89 | func (q *ByteMsgQ) msgWrite(bufferSize int64) (from int64, to int64) { 90 | writeTo := q.write.Value + bufferSize 91 | readLimit := writeTo - q.size 92 | if readLimit > q.readCache.Value { 93 | q.readCache.Value = atomic.LoadInt64(&q.read.Value) 94 | if readLimit > q.readCache.Value { 95 | q.failedWrites.Value++ 96 | ftime.Pause(q.pause) 97 | return 0, 0 98 | } 99 | } 100 | from = q.write.Value & q.mask 101 | to = from + bufferSize 102 | q.writeSize.Value = bufferSize 103 | return from, to 104 | } 105 | 106 | func (q *ByteMsgQ) msgRead(bufferSize int64) (from int64, to int64) { 107 | readTo := q.read.Value + bufferSize 108 | if readTo > q.writeCache.Value { 109 | q.writeCache.Value = atomic.LoadInt64(&q.write.Value) 110 | if readTo > q.writeCache.Value { 111 | q.failedReads.Value++ 112 | ftime.Pause(q.pause) 113 | return 0, 0 114 | } 115 | } 116 | from = q.read.Value & q.mask 117 | to = from + bufferSize 118 | q.readSize.Value = bufferSize 119 | return from, to 120 | } 121 | 122 | func writeHeader(buffer []byte, i, val int64) { 123 | *((*int64)(unsafe.Pointer(&buffer[i]))) = val 124 | } 125 | 126 | func readHeader(buffer []byte, i int64) int64 { 127 | return 
*((*int64)(unsafe.Pointer(&buffer[i]))) 128 | } 129 | -------------------------------------------------------------------------------- /queues/spscq/common.go: -------------------------------------------------------------------------------- 1 | // Copyright 2016 Francis Stephens. All rights reserved. 2 | // Use of this source code is governed by a BSD 3 | // license which can be found in LICENSE.txt 4 | 5 | package spscq 6 | 7 | import ( 8 | "errors" 9 | "fmt" 10 | "sync/atomic" 11 | 12 | "github.com/fmstephe/flib/fmath" 13 | "github.com/fmstephe/flib/fsync/fatomic" 14 | "github.com/fmstephe/flib/fsync/padded" 15 | ) 16 | 17 | const maxSize = 1 << 41 18 | 19 | type commonQ struct { 20 | // Readonly Fields 21 | size int64 22 | mask int64 23 | pause int64 24 | // Writer fields 25 | write padded.Int64 26 | writeSize padded.Int64 27 | failedWrites padded.Int64 28 | readCache padded.Int64 29 | // Reader fields 30 | read padded.Int64 31 | readSize padded.Int64 32 | failedReads padded.Int64 33 | writeCache padded.Int64 34 | } 35 | 36 | func newCommonQ(size, pause int64) (commonQ, error) { 37 | var cq commonQ 38 | if !fmath.PowerOfTwo(size) { 39 | return cq, errors.New(fmt.Sprintf("Size (%d) must be a power of two", size)) 40 | } 41 | if size > maxSize { 42 | return cq, errors.New(fmt.Sprintf("Size (%d) must be less than %d", size, maxSize)) 43 | } 44 | return commonQ{size: size, mask: size - 1, pause: pause}, nil 45 | } 46 | 47 | func (q *commonQ) ReleaseWrite() { 48 | atomic.AddInt64(&q.write.Value, q.writeSize.Value) 49 | q.writeSize.Value = 0 50 | } 51 | 52 | func (q *commonQ) ReleaseWriteLazy() { 53 | fatomic.LazyStore(&q.write.Value, q.write.Value+q.writeSize.Value) 54 | q.writeSize.Value = 0 55 | } 56 | 57 | func (q *commonQ) ReleaseRead() { 58 | atomic.AddInt64(&q.read.Value, q.readSize.Value) 59 | q.readSize.Value = 0 60 | } 61 | 62 | func (q *commonQ) ReleaseReadLazy() { 63 | fatomic.LazyStore(&q.read.Value, q.read.Value+q.readSize.Value) 64 | q.readSize.Value = 0 65 | } 66 | 67 | func (q *commonQ) FailedWrites() int64 { 68 | return atomic.LoadInt64(&q.failedWrites.Value) 69 | } 70 | 71 | func (q *commonQ) FailedReads() int64 { 72 | return atomic.LoadInt64(&q.failedReads.Value) 73 | } 74 | 75 | func (q *commonQ) String() string { 76 | size := q.size 77 | mask := q.mask 78 | write := q.write.Value 79 | writeSize := q.writeSize.Value 80 | failedWrites := q.failedWrites.Value 81 | readCache := q.readCache.Value 82 | read := q.read.Value 83 | readSize := q.readSize.Value 84 | failedReads := q.failedReads.Value 85 | writeCache := q.writeCache.Value 86 | return fmt.Sprintf("{Size %d, mask %d, write %d, writeSize %d, failedWrites %d, readCache %d, read %d, readSize %d, failedReads %d, writeCache %d}", size, mask, write, writeSize, failedWrites, readCache, read, readSize, failedReads, writeCache) 87 | } 88 | -------------------------------------------------------------------------------- /queues/spscq/perf_spscq/.gitignore: -------------------------------------------------------------------------------- 1 | perf_spscq 2 | -------------------------------------------------------------------------------- /queues/spscq/perf_spscq/bcqar.go: -------------------------------------------------------------------------------- 1 | // Copyright 2016 Francis Stephens. All rights reserved. 
2 | // Use of this source code is governed by a BSD 3 | // license which can be found in LICENSE.txt 4 | 5 | package main 6 | 7 | import ( 8 | "os" 9 | "runtime" 10 | "runtime/pprof" 11 | "time" 12 | 13 | "github.com/fmstephe/flib/queues/spscq" 14 | ) 15 | 16 | func bcqarTest(msgCount, pause, msgSize, qSize int64, profile bool) { 17 | q, _ := spscq.NewByteChunkQ(qSize, pause, msgSize) 18 | done := make(chan bool) 19 | if profile { 20 | f, err := os.Create("prof_bcqar") 21 | if err != nil { 22 | panic(err.Error()) 23 | } 24 | pprof.StartCPUProfile(f) 25 | defer pprof.StopCPUProfile() 26 | } 27 | go bcqarDequeue(msgCount, q, done) 28 | go bcqarEnqueue(msgCount, q, done) 29 | <-done 30 | <-done 31 | } 32 | 33 | func bcqarEnqueue(msgCount int64, q *spscq.ByteChunkQ, done chan bool) { 34 | runtime.LockOSThread() 35 | for i := int64(0); i < msgCount; i++ { 36 | writeBuffer := q.AcquireWrite() 37 | for writeBuffer == nil { 38 | writeBuffer = q.AcquireWrite() 39 | } 40 | writeBuffer[0] = byte(i) 41 | q.ReleaseWrite() 42 | } 43 | done <- true 44 | } 45 | 46 | func bcqarDequeue(msgCount int64, q *spscq.ByteChunkQ, done chan bool) { 47 | runtime.LockOSThread() 48 | start := time.Now().UnixNano() 49 | sum := int64(0) 50 | checksum := int64(0) 51 | for i := int64(0); i < msgCount; i++ { 52 | readBuffer := q.AcquireRead() 53 | for readBuffer == nil { 54 | readBuffer = q.AcquireRead() 55 | } 56 | sum += int64(readBuffer[0]) 57 | checksum += int64(byte(i)) 58 | q.ReleaseRead() 59 | } 60 | nanos := time.Now().UnixNano() - start 61 | printSummary(msgCount, nanos, q.FailedWrites(), q.FailedReads(), "bcqar") 62 | expect(sum, checksum) 63 | done <- true 64 | } 65 | -------------------------------------------------------------------------------- /queues/spscq/perf_spscq/bcqarl.go: -------------------------------------------------------------------------------- 1 | // Copyright 2016 Francis Stephens. All rights reserved. 
2 | // Use of this source code is governed by a BSD 3 | // license which can be found in LICENSE.txt 4 | 5 | package main 6 | 7 | import ( 8 | "os" 9 | "runtime" 10 | "runtime/pprof" 11 | "time" 12 | 13 | "github.com/fmstephe/flib/queues/spscq" 14 | ) 15 | 16 | func bcqarlTest(msgCount, pause, msgSize, qSize int64, profile bool) { 17 | q, _ := spscq.NewByteChunkQ(qSize, pause, msgSize) 18 | done := make(chan bool) 19 | if profile { 20 | f, err := os.Create("prof_bcqarl") 21 | if err != nil { 22 | panic(err.Error()) 23 | } 24 | pprof.StartCPUProfile(f) 25 | defer pprof.StopCPUProfile() 26 | } 27 | go bcqarlDequeue(msgCount, q, done) 28 | go bcqarlEnqueue(msgCount, q, done) 29 | <-done 30 | <-done 31 | } 32 | 33 | func bcqarlEnqueue(msgCount int64, q *spscq.ByteChunkQ, done chan bool) { 34 | runtime.LockOSThread() 35 | for i := int64(0); i < msgCount; i++ { 36 | writeBuffer := q.AcquireWrite() 37 | for writeBuffer == nil { 38 | writeBuffer = q.AcquireWrite() 39 | } 40 | writeBuffer[0] = byte(i) 41 | q.ReleaseWriteLazy() 42 | } 43 | done <- true 44 | } 45 | 46 | func bcqarlDequeue(msgCount int64, q *spscq.ByteChunkQ, done chan bool) { 47 | runtime.LockOSThread() 48 | start := time.Now().UnixNano() 49 | sum := int64(0) 50 | checksum := int64(0) 51 | for i := int64(0); i < msgCount; i++ { 52 | readBuffer := q.AcquireRead() 53 | for readBuffer == nil { 54 | readBuffer = q.AcquireRead() 55 | } 56 | sum += int64(readBuffer[0]) 57 | checksum += int64(byte(i)) 58 | q.ReleaseReadLazy() 59 | } 60 | nanos := time.Now().UnixNano() - start 61 | printSummary(msgCount, nanos, q.FailedWrites(), q.FailedReads(), "bcqarl") 62 | expect(sum, checksum) 63 | done <- true 64 | } 65 | -------------------------------------------------------------------------------- /queues/spscq/perf_spscq/bmqar.go: -------------------------------------------------------------------------------- 1 | // Copyright 2016 Francis Stephens. All rights reserved. 
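//
// bmqar: benchmarks ByteMsgQ with the Acquire/Release methods. Unlike
// ByteChunkQ, whose chunk size is fixed when the queue is built, ByteMsgQ is
// asked for the message size on every write, so an enqueue looks like this
// (sketch; msgSize comes from the -msgSize flag in this harness):
//
//	buf := q.AcquireWrite(msgSize)
//	for buf == nil {
//		buf = q.AcquireWrite(msgSize)
//	}
//	buf[0] = byte(i) // benchmark probe; real code would fill the whole message
//	q.ReleaseWrite()
//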
2 | // Use of this source code is governed by a BSD 3 | // license which can be found in LICENSE.txt 4 | 5 | package main 6 | 7 | import ( 8 | "os" 9 | "runtime" 10 | "runtime/pprof" 11 | "time" 12 | 13 | "github.com/fmstephe/flib/queues/spscq" 14 | ) 15 | 16 | func bmqarTest(msgCount, pause, msgSize, qSize int64, profile bool) { 17 | q, _ := spscq.NewByteMsgQ(qSize, pause) 18 | done := make(chan bool) 19 | if profile { 20 | f, err := os.Create("prof_bmqar") 21 | if err != nil { 22 | panic(err.Error()) 23 | } 24 | pprof.StartCPUProfile(f) 25 | defer pprof.StopCPUProfile() 26 | } 27 | go bmqarDequeue(msgCount, q, done) 28 | go bmqarEnqueue(msgCount, msgSize, q, done) 29 | <-done 30 | <-done 31 | } 32 | 33 | func bmqarEnqueue(msgCount, msgSize int64, q *spscq.ByteMsgQ, done chan bool) { 34 | runtime.LockOSThread() 35 | for i := int64(0); i < msgCount; i++ { 36 | writeBuffer := q.AcquireWrite(msgSize) 37 | for writeBuffer == nil { 38 | writeBuffer = q.AcquireWrite(msgSize) 39 | } 40 | writeBuffer[0] = byte(i) 41 | q.ReleaseWrite() 42 | } 43 | done <- true 44 | } 45 | 46 | func bmqarDequeue(msgCount int64, q *spscq.ByteMsgQ, done chan bool) { 47 | runtime.LockOSThread() 48 | start := time.Now().UnixNano() 49 | sum := int64(0) 50 | checksum := int64(0) 51 | for i := int64(0); i < msgCount; i++ { 52 | readBuffer := q.AcquireRead() 53 | for readBuffer == nil { 54 | readBuffer = q.AcquireRead() 55 | } 56 | sum += int64(readBuffer[0]) 57 | checksum += int64(byte(i)) 58 | q.ReleaseRead() 59 | } 60 | nanos := time.Now().UnixNano() - start 61 | printSummary(msgCount, nanos, q.FailedWrites(), q.FailedReads(), "bmqar") 62 | expect(sum, checksum) 63 | done <- true 64 | } 65 | -------------------------------------------------------------------------------- /queues/spscq/perf_spscq/bmqarl.go: -------------------------------------------------------------------------------- 1 | // Copyright 2016 Francis Stephens. All rights reserved. 
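//
// bmqarl: the lazy-release variant of bmqar. As in the other benchmarks, both
// goroutines call runtime.LockOSThread first, pinning the single producer and
// the single consumer to their own OS threads so they are not migrated or
// rescheduled mid-run, which keeps the measured timings stable.
//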
2 | // Use of this source code is governed by a BSD 3 | // license which can be found in LICENSE.txt 4 | 5 | package main 6 | 7 | import ( 8 | "os" 9 | "runtime" 10 | "runtime/pprof" 11 | "time" 12 | 13 | "github.com/fmstephe/flib/queues/spscq" 14 | ) 15 | 16 | func bmqarlTest(msgCount, pause, msgSize, qSize int64, profile bool) { 17 | q, _ := spscq.NewByteMsgQ(qSize, pause) 18 | done := make(chan bool) 19 | if profile { 20 | f, err := os.Create("prof_bmqarl") 21 | if err != nil { 22 | panic(err.Error()) 23 | } 24 | pprof.StartCPUProfile(f) 25 | defer pprof.StopCPUProfile() 26 | } 27 | go bmqarlDequeue(msgCount, q, done) 28 | go bmqarlEnqueue(msgCount, msgSize, q, done) 29 | <-done 30 | <-done 31 | } 32 | 33 | func bmqarlEnqueue(msgCount, msgSize int64, q *spscq.ByteMsgQ, done chan bool) { 34 | runtime.LockOSThread() 35 | for i := int64(0); i < msgCount; i++ { 36 | writeBuffer := q.AcquireWrite(msgSize) 37 | for writeBuffer == nil { 38 | writeBuffer = q.AcquireWrite(msgSize) 39 | } 40 | writeBuffer[0] = byte(i) 41 | q.ReleaseWriteLazy() 42 | } 43 | done <- true 44 | } 45 | 46 | func bmqarlDequeue(msgCount int64, q *spscq.ByteMsgQ, done chan bool) { 47 | runtime.LockOSThread() 48 | start := time.Now().UnixNano() 49 | sum := int64(0) 50 | checksum := int64(0) 51 | for i := int64(0); i < msgCount; i++ { 52 | readBuffer := q.AcquireRead() 53 | for readBuffer == nil { 54 | readBuffer = q.AcquireRead() 55 | } 56 | sum += int64(readBuffer[0]) 57 | checksum += int64(byte(i)) 58 | q.ReleaseReadLazy() 59 | } 60 | nanos := time.Now().UnixNano() - start 61 | printSummary(msgCount, nanos, q.FailedWrites(), q.FailedReads(), "bmqarl") 62 | expect(sum, checksum) 63 | done <- true 64 | } 65 | -------------------------------------------------------------------------------- /queues/spscq/perf_spscq/main.go: -------------------------------------------------------------------------------- 1 | // Copyright 2016 Francis Stephens. All rights reserved. 
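//
// main wires each benchmark to a command line flag. Illustrative invocations
// (paths and counts are examples, not taken from the repository):
//
//	go build github.com/fmstephe/flib/queues/spscq/perf_spscq
//	./perf_spscq -all -mm 10      (run every variant with 10 million messages)
//	./perf_spscq -pqs -profile    (single-pointer test, CPU profile in prof_pqs)
//
// GC is disabled via debug.SetGCPercent(-1) and GOMAXPROCS is pinned to 4 so
// the producer/consumer pairs are measured with as little collector and
// scheduler interference as possible.
//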
2 | // Use of this source code is governed by a BSD
3 | // license which can be found in LICENSE.txt
4 | 
5 | package main
6 | 
7 | import (
8 | 	"flag"
9 | 	"fmt"
10 | 	"runtime"
11 | 	"runtime/debug"
12 | 	"unsafe"
13 | 
14 | 	"github.com/fmstephe/flib/fstrconv"
15 | )
16 | 
17 | var (
18 | 	all = flag.Bool("all", false, "Runs all queue tests")
19 | 	// ByteMsgQ
20 | 	bmqar = flag.Bool("bmqar", false, "Runs ByteMsgQ using Acquire/Release methods")
21 | 	bmqarl = flag.Bool("bmqarl", false, "Runs ByteMsgQ using lazy Acquire/Release methods")
22 | 	msgSize = flag.Int64("msgSize", 64, "The size of messages to read/write in ByteMsgQ tests")
23 | 	// ByteChunkQ
24 | 	bcqar = flag.Bool("bcqar", false, "Runs ByteChunkQ using Acquire/Release methods")
25 | 	bcqarl = flag.Bool("bcqarl", false, "Runs ByteChunkQ using lazy Acquire/Release methods")
26 | 	chunkSize = flag.Int64("chunkSize", 64, "The number of bytes to read/write in ByteChunkQ tests")
27 | 	// PointerQ
28 | 	pqar = flag.Bool("pqar", false, "Runs PointerQ using Acquire/Release methods")
29 | 	pqarl = flag.Bool("pqarl", false, "Runs PointerQ using lazy Acquire/Release methods")
30 | 	pqs = flag.Bool("pqs", false, "Runs PointerQ reading and writing a pointer at a time")
31 | 	pqsl = flag.Bool("pqsl", false, "Runs PointerQ lazily reading and writing a pointer at a time")
32 | 	batchSize = flag.Int64("batchSize", 64, "The size of the read/write batches used by PointerQ")
33 | 	// Additional flags
34 | 	millionMsgs = flag.Int64("mm", 100, "The number of messages (in millions) to send")
35 | 	qSize = flag.Int64("qSize", 1024*1024, "The size of the queue's ring-buffer")
36 | 	pause = flag.Int64("pause", 20*1000, "The length of the pause when a read or write fails")
37 | 	profile = flag.Bool("profile", false, "Activates the Go profiler, outputting into a prof_* file.")
38 | )
39 | 
40 | func main() {
41 | 	runtime.GOMAXPROCS(4)
42 | 	flag.Parse()
43 | 	msgCount := (*millionMsgs) * 1e6
44 | 	debug.SetGCPercent(-1)
45 | 	if *bmqar || *all {
46 | 		bmqarTest(msgCount, *pause, *msgSize, *qSize, *profile)
47 | 	}
48 | 	runtime.GC()
49 | 	if *bmqarl || *all {
50 | 		bmqarlTest(msgCount, *pause, *msgSize, *qSize, *profile)
51 | 	}
52 | 	runtime.GC()
53 | 	if *bcqar || *all {
54 | 		bcqarTest(msgCount, *pause, *chunkSize, *qSize, *profile)
55 | 	}
56 | 	runtime.GC()
57 | 	if *bcqarl || *all {
58 | 		bcqarlTest(msgCount, *pause, *chunkSize, *qSize, *profile)
59 | 	}
60 | 	runtime.GC()
61 | 	if *pqar || *all {
62 | 		pqarTest(msgCount, *pause, *batchSize, *qSize, *profile)
63 | 	}
64 | 	runtime.GC()
65 | 	if *pqarl || *all {
66 | 		pqarlTest(msgCount, *pause, *batchSize, *qSize, *profile)
67 | 	}
68 | 	runtime.GC()
69 | 	if *pqs || *all {
70 | 		pqsTest(msgCount, *pause, *qSize, *profile)
71 | 	}
72 | 	runtime.GC()
73 | 	if *pqsl || *all {
74 | 		pqslTest(msgCount, *pause, *qSize, *profile)
75 | 	}
76 | }
77 | 
78 | func printSummary(msgs, nanos, failedWrites, failedReads int64, name string) {
79 | 	sMsgs := fstrconv.ItoaComma(msgs)
80 | 	sNanos := fstrconv.ItoaComma(nanos)
81 | 	sFailedWrites := fstrconv.ItoaComma(failedWrites)
82 | 	sFailedReads := fstrconv.ItoaComma(failedReads)
83 | 	print(fmt.Sprintf("\n%s\nMsgs %s\nNanos %s\nfailedWrites %s\nfailedReads %s\n", name, sMsgs, sNanos, sFailedWrites, sFailedReads))
84 | }
85 | 
86 | func expect(sum, checksum int64) {
87 | 	if sum != checksum {
88 | 		print(fmt.Sprintf("Sum does not match checksum. 
sum = %d, checksum = %d\n", sum, checksum)) 89 | } 90 | } 91 | 92 | func getValidPointers(num int64) (ptrs []unsafe.Pointer, checksum int64) { 93 | ptrs = make([]unsafe.Pointer, num) 94 | for i := range ptrs { 95 | intVal := 0 96 | ptrs[i] = unsafe.Pointer(&intVal) 97 | checksum += int64(uintptr(ptrs[i])) 98 | } 99 | return ptrs, checksum 100 | } 101 | -------------------------------------------------------------------------------- /queues/spscq/perf_spscq/pqar.go: -------------------------------------------------------------------------------- 1 | // Copyright 2016 Francis Stephens. All rights reserved. 2 | // Use of this source code is governed by a BSD 3 | // license which can be found in LICENSE.txt 4 | 5 | package main 6 | 7 | import ( 8 | "os" 9 | "runtime" 10 | "runtime/pprof" 11 | "time" 12 | "unsafe" 13 | 14 | "github.com/fmstephe/flib/queues/spscq" 15 | ) 16 | 17 | func pqarTest(msgCount, pause, batchSize, qSize int64, profile bool) { 18 | ptrs, checksum := getValidPointers(msgCount) 19 | q, _ := spscq.NewPointerQ(qSize, pause) 20 | done := make(chan bool) 21 | if profile { 22 | f, err := os.Create("prof_pqar") 23 | if err != nil { 24 | panic(err.Error()) 25 | } 26 | pprof.StartCPUProfile(f) 27 | defer pprof.StopCPUProfile() 28 | } 29 | go pqarDequeue(msgCount, q, batchSize, checksum, done) 30 | go pqarEnqueue(msgCount, q, batchSize, ptrs, done) 31 | <-done 32 | <-done 33 | } 34 | 35 | func pqarEnqueue(msgCount int64, q *spscq.PointerQ, batchSize int64, ptrs []unsafe.Pointer, done chan bool) { 36 | runtime.LockOSThread() 37 | for t := int64(0); t < msgCount; { 38 | if batchSize > msgCount-t { 39 | batchSize = msgCount - t 40 | } 41 | buffer := q.AcquireWrite(batchSize) 42 | // NB: It cuts ~40% of run time to use copy 43 | copy(buffer, ptrs[t:t+int64(len(buffer))]) 44 | /* 45 | for i := range buffer { 46 | buffer[i] = ptrs[t+int64(i)] 47 | } 48 | */ 49 | q.ReleaseWrite() 50 | t += int64(len(buffer)) 51 | } 52 | done <- true 53 | } 54 | 55 | func pqarDequeue(msgCount int64, q *spscq.PointerQ, batchSize int64, checksum int64, done chan bool) { 56 | runtime.LockOSThread() 57 | start := time.Now().UnixNano() 58 | sum := int64(0) 59 | for t := int64(0); t < msgCount; { 60 | buffer := q.AcquireRead(batchSize) 61 | for i := range buffer { 62 | sum += int64(uintptr(buffer[i])) 63 | } 64 | q.ReleaseRead() 65 | t += int64(len(buffer)) 66 | } 67 | nanos := time.Now().UnixNano() - start 68 | printSummary(msgCount, nanos, q.FailedWrites(), q.FailedReads(), "pqar") 69 | expect(sum, checksum) 70 | done <- true 71 | } 72 | -------------------------------------------------------------------------------- /queues/spscq/perf_spscq/pqarl.go: -------------------------------------------------------------------------------- 1 | // Copyright 2016 Francis Stephens. All rights reserved. 
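//
// pqarl: the lazy-release variant of the batched PointerQ benchmark. Two
// details shared with pqar are easy to miss: AcquireWrite/AcquireRead may hand
// back fewer slots than requested (the returned slice never wraps past the end
// of the ring), which is why the loops advance t by len(buffer) rather than by
// batchSize; and the producer fills the buffer with copy() from a pre-built
// pointer slice, which the in-line note reports as roughly 40% faster than an
// element-by-element loop.
//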
2 | // Use of this source code is governed by a BSD 3 | // license which can be found in LICENSE.txt 4 | 5 | package main 6 | 7 | import ( 8 | "os" 9 | "runtime" 10 | "runtime/pprof" 11 | "time" 12 | "unsafe" 13 | 14 | "github.com/fmstephe/flib/queues/spscq" 15 | ) 16 | 17 | func pqarlTest(msgCount, pause, batchSize, qSize int64, profile bool) { 18 | ptrs, checksum := getValidPointers(msgCount) 19 | q, _ := spscq.NewPointerQ(qSize, pause) 20 | done := make(chan bool) 21 | if profile { 22 | f, err := os.Create("prof_pqarl") 23 | if err != nil { 24 | panic(err.Error()) 25 | } 26 | pprof.StartCPUProfile(f) 27 | defer pprof.StopCPUProfile() 28 | } 29 | go pqarlDequeue(msgCount, q, batchSize, checksum, done) 30 | go pqarlEnqueue(msgCount, q, batchSize, ptrs, done) 31 | <-done 32 | <-done 33 | } 34 | 35 | func pqarlEnqueue(msgCount int64, q *spscq.PointerQ, batchSize int64, ptrs []unsafe.Pointer, done chan bool) { 36 | runtime.LockOSThread() 37 | for t := int64(0); t < msgCount; { 38 | if batchSize > msgCount-t { 39 | batchSize = msgCount - t 40 | } 41 | buffer := q.AcquireWrite(batchSize) 42 | // NB: It cuts ~40% of run time to use copy 43 | copy(buffer, ptrs[t:t+int64(len(buffer))]) 44 | /* 45 | for i := range buffer { 46 | buffer[i] = ptrs[t+int64(i)] 47 | } 48 | */ 49 | q.ReleaseWriteLazy() 50 | t += int64(len(buffer)) 51 | } 52 | done <- true 53 | } 54 | 55 | func pqarlDequeue(msgCount int64, q *spscq.PointerQ, batchSize int64, checksum int64, done chan bool) { 56 | runtime.LockOSThread() 57 | start := time.Now().UnixNano() 58 | sum := int64(0) 59 | for t := int64(0); t < msgCount; { 60 | buffer := q.AcquireRead(batchSize) 61 | for i := range buffer { 62 | sum += int64(uintptr(buffer[i])) 63 | } 64 | q.ReleaseReadLazy() 65 | t += int64(len(buffer)) 66 | } 67 | nanos := time.Now().UnixNano() - start 68 | printSummary(msgCount, nanos, q.FailedWrites(), q.FailedReads(), "pqarl") 69 | expect(sum, checksum) 70 | done <- true 71 | } 72 | -------------------------------------------------------------------------------- /queues/spscq/perf_spscq/pqs.go: -------------------------------------------------------------------------------- 1 | // Copyright 2016 Francis Stephens. All rights reserved. 
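//
// pqs: benchmarks PointerQ one pointer at a time. WriteSingle returns false
// and ReadSingle returns nil when the ring is full or empty respectively, and
// the benchmark simply retries in a tight loop. The PointerQueue interface
// also exposes WriteSingleBlocking and ReadSingleBlocking, which wrap exactly
// this retry loop for callers that have nothing else to do between attempts.
//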
2 | // Use of this source code is governed by a BSD 3 | // license which can be found in LICENSE.txt 4 | 5 | package main 6 | 7 | import ( 8 | "os" 9 | "runtime" 10 | "runtime/pprof" 11 | "time" 12 | "unsafe" 13 | 14 | "github.com/fmstephe/flib/queues/spscq" 15 | ) 16 | 17 | func pqsTest(msgCount, pause, qSize int64, profile bool) { 18 | ptrs, checksum := getValidPointers(msgCount) 19 | q, _ := spscq.NewPointerQ(qSize, pause) 20 | done := make(chan bool) 21 | if profile { 22 | f, err := os.Create("prof_pqs") 23 | if err != nil { 24 | panic(err.Error()) 25 | } 26 | pprof.StartCPUProfile(f) 27 | defer pprof.StopCPUProfile() 28 | } 29 | go pqsDequeue(msgCount, q, checksum, done) 30 | go pqsEnqueue(msgCount, q, ptrs, done) 31 | <-done 32 | <-done 33 | } 34 | 35 | func pqsEnqueue(msgCount int64, q *spscq.PointerQ, ptrs []unsafe.Pointer, done chan bool) { 36 | runtime.LockOSThread() 37 | t := 1 38 | for _, ptr := range ptrs { 39 | w := q.WriteSingle(ptr) 40 | for w == false { 41 | w = q.WriteSingle(ptr) 42 | } 43 | t++ 44 | } 45 | done <- true 46 | } 47 | 48 | func pqsDequeue(msgCount int64, q *spscq.PointerQ, checksum int64, done chan bool) { 49 | runtime.LockOSThread() 50 | start := time.Now().UnixNano() 51 | sum := int64(0) 52 | var v unsafe.Pointer 53 | for i := int64(1); i <= msgCount; i++ { 54 | v = q.ReadSingle() 55 | for v == nil { 56 | v = q.ReadSingle() 57 | } 58 | sum += int64(uintptr(v)) 59 | } 60 | nanos := time.Now().UnixNano() - start 61 | printSummary(msgCount, nanos, q.FailedWrites(), q.FailedReads(), "pqs") 62 | expect(sum, checksum) 63 | done <- true 64 | } 65 | -------------------------------------------------------------------------------- /queues/spscq/perf_spscq/pqsl.go: -------------------------------------------------------------------------------- 1 | // Copyright 2016 Francis Stephens. All rights reserved. 
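//
// pqsl: the lazy variant of pqs, using WriteSingleLazy and ReadSingleLazy so
// the read/write counters are advanced with fatomic.LazyStore instead of an
// atomic add. The ring slot itself is written and cleared exactly as in the
// non-lazy path; only the publication of progress to the other goroutine is
// relaxed.
//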
2 | // Use of this source code is governed by a BSD 3 | // license which can be found in LICENSE.txt 4 | 5 | package main 6 | 7 | import ( 8 | "os" 9 | "runtime" 10 | "runtime/pprof" 11 | "time" 12 | "unsafe" 13 | 14 | "github.com/fmstephe/flib/queues/spscq" 15 | ) 16 | 17 | func pqslTest(msgCount, pause, qSize int64, profile bool) { 18 | ptrs, checksum := getValidPointers(msgCount) 19 | q, _ := spscq.NewPointerQ(qSize, pause) 20 | done := make(chan bool) 21 | if profile { 22 | f, err := os.Create("prof_pqsl") 23 | if err != nil { 24 | panic(err.Error()) 25 | } 26 | pprof.StartCPUProfile(f) 27 | defer pprof.StopCPUProfile() 28 | } 29 | go pqslDequeue(msgCount, q, checksum, done) 30 | go pqslEnqueue(msgCount, q, ptrs, done) 31 | <-done 32 | <-done 33 | } 34 | 35 | func pqslEnqueue(msgCount int64, q *spscq.PointerQ, ptrs []unsafe.Pointer, done chan bool) { 36 | runtime.LockOSThread() 37 | t := 1 38 | for _, ptr := range ptrs { 39 | w := q.WriteSingleLazy(ptr) 40 | for w == false { 41 | w = q.WriteSingleLazy(ptr) 42 | } 43 | t++ 44 | } 45 | done <- true 46 | } 47 | 48 | func pqslDequeue(msgCount int64, q *spscq.PointerQ, checksum int64, done chan bool) { 49 | runtime.LockOSThread() 50 | start := time.Now().UnixNano() 51 | sum := int64(0) 52 | for i := int64(0); i < msgCount; i++ { 53 | v := q.ReadSingleLazy() 54 | for v == nil { 55 | v = q.ReadSingleLazy() 56 | } 57 | sum += int64(uintptr(v)) 58 | } 59 | nanos := time.Now().UnixNano() - start 60 | printSummary(msgCount, nanos, q.FailedWrites(), q.FailedReads(), "pqsl") 61 | expect(sum, checksum) 62 | done <- true 63 | } 64 | -------------------------------------------------------------------------------- /queues/spscq/pointerq.go: -------------------------------------------------------------------------------- 1 | // Copyright 2016 Francis Stephens. All rights reserved. 
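//
// PointerQ is a single-producer single-consumer ring buffer of unsafe.Pointer.
// Its layout places padded.CacheBuffer fields around the embedded commonQ and
// the ring slice, which looks intended to keep the reader's and writer's hot
// fields on separate cache lines and avoid false sharing. The size must be a
// power of two so that write&mask and read&mask index the ring, and each side
// keeps a cached copy of the other side's counter (writeCache/readCache) to
// avoid an atomic load on every call. A minimal batched-write sketch (sizes
// are illustrative, somePointer is a hypothetical caller value):
//
//	q, err := NewPointerQ(1024, 20*1000)
//	if err != nil {
//		panic(err)
//	}
//	if buf := q.AcquireWrite(16); buf != nil { // nil means the queue was full
//		for i := range buf {
//			buf[i] = somePointer
//		}
//		q.ReleaseWrite() // publishes the batch to the reader
//	}
//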
2 | // Use of this source code is governed by a BSD 3 | // license which can be found in LICENSE.txt 4 | 5 | package spscq 6 | 7 | import ( 8 | "sync/atomic" 9 | "unsafe" 10 | 11 | "github.com/fmstephe/flib/fmath" 12 | "github.com/fmstephe/flib/fsync/fatomic" 13 | "github.com/fmstephe/flib/fsync/padded" 14 | "github.com/fmstephe/flib/ftime" 15 | ) 16 | 17 | type PointerQueue interface { 18 | // Batch Read/Write 19 | AcquireRead(int64) []unsafe.Pointer 20 | ReleaseRead() 21 | ReleaseReadLazy() 22 | AcquireWrite(int64) []unsafe.Pointer 23 | ReleaseWrite() 24 | ReleaseWriteLazy() 25 | // Single Read/Write 26 | ReadSingle() unsafe.Pointer 27 | WriteSingle(unsafe.Pointer) bool 28 | ReadSingleBlocking() unsafe.Pointer 29 | WriteSingleBlocking(unsafe.Pointer) 30 | ReadSingleLazy() unsafe.Pointer 31 | WriteSingleLazy(unsafe.Pointer) bool 32 | } 33 | 34 | func NewPointerQueue(size, pause int64) (PointerQueue, error) { 35 | return NewPointerQ(size, pause) 36 | } 37 | 38 | type PointerQ struct { 39 | _prebuffer padded.CacheBuffer 40 | commonQ 41 | _midbuffer padded.CacheBuffer 42 | ringBuffer []unsafe.Pointer 43 | _postbuffer padded.CacheBuffer 44 | } 45 | 46 | func NewPointerQ(size, pause int64) (*PointerQ, error) { 47 | cq, err := newCommonQ(size, pause) 48 | if err != nil { 49 | return nil, err 50 | } 51 | ringBuffer := padded.PointerSlice(int(size)) 52 | return &PointerQ{ringBuffer: ringBuffer, commonQ: cq}, nil 53 | } 54 | 55 | func (q *PointerQ) AcquireRead(bufferSize int64) []unsafe.Pointer { 56 | readTo := q.read.Value + bufferSize 57 | if readTo > q.writeCache.Value { 58 | q.writeCache.Value = atomic.LoadInt64(&q.write.Value) 59 | if readTo > q.writeCache.Value { 60 | bufferSize = q.writeCache.Value - q.read.Value 61 | if bufferSize == 0 { 62 | q.failedReads.Value++ 63 | ftime.Pause(q.pause) 64 | return nil 65 | } 66 | } 67 | } 68 | from := q.read.Value & q.mask 69 | to := fmath.Min(from+bufferSize, q.size) 70 | q.readSize.Value = to - from 71 | return q.ringBuffer[from:to] 72 | } 73 | 74 | func (q *PointerQ) ReleaseRead() { 75 | from := q.read.Value & q.mask 76 | to := from + q.readSize.Value 77 | for i := from; i < to; i++ { 78 | q.ringBuffer[i] = nil 79 | } 80 | atomic.AddInt64(&q.read.Value, q.readSize.Value) 81 | q.readSize.Value = 0 82 | } 83 | 84 | func (q *PointerQ) ReleaseReadLazy() { 85 | from := q.read.Value & q.mask 86 | to := from + q.readSize.Value 87 | for i := from; i < to; i++ { 88 | q.ringBuffer[i] = nil 89 | } 90 | fatomic.LazyStore(&q.read.Value, q.read.Value+q.readSize.Value) 91 | q.readSize.Value = 0 92 | } 93 | 94 | func (q *PointerQ) AcquireWrite(bufferSize int64) []unsafe.Pointer { 95 | writeTo := q.write.Value + bufferSize 96 | readLimit := writeTo - q.size 97 | if readLimit > q.readCache.Value { 98 | q.readCache.Value = atomic.LoadInt64(&q.read.Value) 99 | if readLimit > q.readCache.Value { 100 | q.failedWrites.Value++ 101 | ftime.Pause(q.pause) 102 | return nil 103 | } 104 | } 105 | from := q.write.Value & q.mask 106 | to := fmath.Min(from+bufferSize, q.size) 107 | q.writeSize.Value = to - from 108 | return q.ringBuffer[from:to] 109 | } 110 | 111 | func (q *PointerQ) ReleaseWrite() { 112 | atomic.AddInt64(&q.write.Value, q.writeSize.Value) 113 | q.writeSize.Value = 0 114 | } 115 | 116 | func (q *PointerQ) ReleaseWriteLazy() { 117 | fatomic.LazyStore(&q.write.Value, q.write.Value+q.writeSize.Value) 118 | q.writeSize.Value = 0 119 | } 120 | 121 | func (q *PointerQ) WriteSingle(val unsafe.Pointer) bool { 122 | b := q.writeSingle(val) 123 | if b { 124 | 
atomic.AddInt64(&q.write.Value, 1) 125 | } 126 | return b 127 | } 128 | 129 | func (q *PointerQ) WriteSingleBlocking(val unsafe.Pointer) { 130 | b := q.WriteSingle(val) 131 | for !b { 132 | b = q.WriteSingle(val) 133 | } 134 | } 135 | 136 | func (q *PointerQ) WriteSingleLazy(val unsafe.Pointer) bool { 137 | b := q.writeSingle(val) 138 | if b { 139 | fatomic.LazyStore(&q.write.Value, q.write.Value+1) 140 | } 141 | return b 142 | } 143 | 144 | func (q *PointerQ) writeSingle(val unsafe.Pointer) bool { 145 | write := q.write.Value 146 | readLimit := write - q.size 147 | if readLimit == q.readCache.Value { 148 | q.readCache.Value = atomic.LoadInt64(&q.read.Value) 149 | if readLimit == q.readCache.Value { 150 | q.failedWrites.Value++ 151 | ftime.Pause(q.pause) 152 | return false 153 | } 154 | } 155 | q.ringBuffer[write&q.mask] = val 156 | return true 157 | } 158 | 159 | func (q *PointerQ) ReadSingle() unsafe.Pointer { 160 | val := q.readSingle() 161 | if val != nil { 162 | atomic.AddInt64(&q.read.Value, 1) 163 | } 164 | return val 165 | } 166 | 167 | func (q *PointerQ) ReadSingleBlocking() unsafe.Pointer { 168 | val := q.ReadSingle() 169 | for val == nil { 170 | val = q.ReadSingle() 171 | } 172 | return val 173 | } 174 | 175 | func (q *PointerQ) ReadSingleLazy() unsafe.Pointer { 176 | val := q.readSingle() 177 | if val != nil { 178 | fatomic.LazyStore(&q.read.Value, q.read.Value+1) 179 | } 180 | return val 181 | } 182 | 183 | func (q *PointerQ) readSingle() unsafe.Pointer { 184 | read := q.read.Value 185 | if read == q.writeCache.Value { 186 | q.writeCache.Value = atomic.LoadInt64(&q.write.Value) 187 | if read == q.writeCache.Value { 188 | q.failedReads.Value++ 189 | ftime.Pause(q.pause) 190 | return nil 191 | } 192 | } 193 | val := q.ringBuffer[read&q.mask] 194 | q.ringBuffer[read&q.mask] = nil 195 | return val 196 | } 197 | -------------------------------------------------------------------------------- /queues/spscq/tcommon_test.go: -------------------------------------------------------------------------------- 1 | // Copyright 2016 Francis Stephens. All rights reserved. 2 | // Use of this source code is governed by a BSD 3 | // license which can be found in LICENSE.txt 4 | 5 | package spscq 6 | 7 | import ( 8 | "errors" 9 | "fmt" 10 | "testing" 11 | 12 | "github.com/fmstephe/flib/fmath" 13 | ) 14 | 15 | // Test that we can call newCommonQ(...) for every power of 2 in an int64 16 | func TestNewCommonQPowerOf2(t *testing.T) { 17 | for size := int64(1); size <= maxSize; size *= 2 { 18 | _, err := newCommonQ(size, 0) 19 | if err != nil { 20 | t.Errorf("Error found for size %d", size) 21 | } 22 | } 23 | } 24 | 25 | // Test that we can't call newCommonQ(...) 
with a non-power of 2 size 26 | func TestNewCommonQNotPowerOf2(t *testing.T) { 27 | for size := int64(1); size < 10*1000; size++ { 28 | if !fmath.PowerOfTwo(size) { 29 | makeBadQ(size, t) 30 | } 31 | } 32 | } 33 | 34 | func makeBadQ(size int64, t *testing.T) { 35 | _, err := newCommonQ(size, 0) 36 | if err == nil { 37 | t.Errorf("No error detected for size %d", size) 38 | } 39 | } 40 | 41 | func copyForRead(cq *commonQ) *commonQ { 42 | snap := &commonQ{} 43 | // immutable 44 | snap.size = cq.size 45 | snap.mask = cq.mask 46 | // write 47 | snap.write.Value = -1 48 | snap.writeSize.Value = -1 49 | snap.failedWrites.Value = -1 50 | snap.readCache.Value = -1 51 | // read 52 | snap.read.Value = cq.read.Value 53 | snap.readSize.Value = cq.readSize.Value 54 | snap.failedReads.Value = cq.failedReads.Value 55 | snap.writeCache.Value = cq.writeCache.Value 56 | return snap 57 | } 58 | 59 | func copyForWrite(cq *commonQ) *commonQ { 60 | snap := &commonQ{} 61 | // immutable 62 | snap.size = cq.size 63 | snap.mask = cq.mask 64 | // write 65 | snap.write.Value = cq.write.Value 66 | snap.writeSize.Value = cq.writeSize.Value 67 | snap.failedWrites.Value = cq.failedWrites.Value 68 | snap.readCache.Value = cq.readCache.Value 69 | // read 70 | snap.read.Value = -1 71 | snap.readSize.Value = -1 72 | snap.failedReads.Value = -1 73 | snap.writeCache.Value = -1 74 | return snap 75 | } 76 | 77 | func testAcquireWrite(writeBufferSize, from, to int64, cq, snap *commonQ) error { 78 | actualWriteSize := to - from 79 | if actualWriteSize == 0 && cq.failedWrites.Value != snap.failedWrites.Value+1 { 80 | return errors.New(fmt.Sprintf("failedWrites not incremented. Expected %d, found %d", snap.failedWrites.Value+1, cq.failedWrites.Value)) 81 | } 82 | if actualWriteSize > writeBufferSize { 83 | return errors.New(fmt.Sprintf("Actual write size (%d) larger than requested buffer size (%d)", actualWriteSize, writeBufferSize)) 84 | } 85 | if (actualWriteSize < writeBufferSize) && (cq.write.Value+actualWriteSize) != (cq.readCache.Value+cq.size) { 86 | if (cq.write.Value+cq.writeSize.Value)%cq.size != 0 { 87 | return errors.New(fmt.Sprintf("Actual write size (%d) could have been bigger.\nsnap %s\ncq %s", actualWriteSize, snap.String(), cq.String())) 88 | } 89 | } 90 | if (cq.write.Value + actualWriteSize) > (cq.readCache.Value + cq.size) { 91 | return errors.New(fmt.Sprintf("Actual write size (%d) overwrites unread data.\ncq %s", actualWriteSize, cq.String())) 92 | } 93 | if cq.writeSize.Value != actualWriteSize { 94 | return errors.New(fmt.Sprintf("cq.writeSize (%d) does not equal actual write size (%d)", cq.writeSize.Value, actualWriteSize)) 95 | } 96 | if from > to { 97 | return errors.New(fmt.Sprintf("from (%d) is greater than to (%d)", from, to)) 98 | } 99 | if from >= cq.size || from < 0 { 100 | return errors.New(fmt.Sprintf("from (%d) must be a valid index for an array of size %d", from, cq.size)) 101 | } 102 | if to > cq.size || to < 0 { 103 | return errors.New(fmt.Sprintf("to (%d) must be a valid index for an array of size %d", to, cq.size)) 104 | } 105 | return nil 106 | } 107 | 108 | func testReleaseWrite(cq, snap *commonQ) error { 109 | if cq.writeSize.Value != 0 { 110 | return errors.New(fmt.Sprintf("cq.writeSize was not reset to 0, %d found instead", cq.writeSize.Value)) 111 | } 112 | if cq.write.Value != snap.write.Value+snap.writeSize.Value { 113 | return errors.New(fmt.Sprintf("write has not been advanced by the correct amount.\nsnap %s\ncq %s", snap.String(), cq.String())) 114 | } 115 | return nil 116 | } 117 | 
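// testAcquireRead mirrors testAcquireWrite for the consumer side. Given the
// requested read size, the [from, to) window returned by acquireRead and a
// snapshot of the reader fields taken before the call, it checks that
// failedReads was incremented when nothing could be read, that the window is
// never larger than the request, never runs past the cached write position,
// never wraps past the end of the ring, and that readSize records exactly the
// window handed out.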
118 | func testAcquireRead(readBufferSize, from, to int64, cq, snap *commonQ) error { 119 | actualReadSize := to - from 120 | if actualReadSize == 0 && cq.failedReads.Value != snap.failedReads.Value+1 { 121 | return errors.New(fmt.Sprintf("failedReads not incremented. Expected %d, found %d", snap.failedReads.Value+1, cq.failedReads.Value)) 122 | } 123 | if actualReadSize > readBufferSize { 124 | return errors.New(fmt.Sprintf("Actual read size (%d) larger than requested buffer size (%d)", actualReadSize, readBufferSize)) 125 | } 126 | if (actualReadSize < readBufferSize) && (cq.read.Value+actualReadSize) != (cq.writeCache.Value) { 127 | if (cq.read.Value+cq.readSize.Value)%cq.size != 0 { 128 | return errors.New(fmt.Sprintf("Actual read size (%d) could have been bigger.\nsnap %s\ncq %s", actualReadSize, snap.String(), cq.String())) 129 | } 130 | } 131 | if (cq.read.Value + actualReadSize) > cq.writeCache.Value { 132 | return errors.New(fmt.Sprintf("Actual read size (%d) reads past write position (%d).\ncq %s", actualReadSize, cq.write.Value, cq.String())) 133 | } 134 | if cq.readSize.Value != actualReadSize { 135 | 136 | return errors.New(fmt.Sprintf("cq.readSize (%d) does not equal actual read size (%d)", cq.readSize.Value, actualReadSize)) 137 | } 138 | if from > to { 139 | return errors.New(fmt.Sprintf("from (%d) is greater than to (%d)", from, to)) 140 | } 141 | if from >= cq.size || from < 0 { 142 | return errors.New(fmt.Sprintf("from (%d) must be a valid index for an array of size %d", from, cq.size)) 143 | } 144 | if to > cq.size || to < 0 { 145 | return errors.New(fmt.Sprintf("to (%d) must be a valid index for an array of size %d", to, cq.size)) 146 | } 147 | return nil 148 | } 149 | 150 | func testReleaseRead(cq, snap *commonQ) error { 151 | if cq.readSize.Value != 0 { 152 | return errors.New(fmt.Sprintf("cq.readSize was not reset to 0, %d found instead", cq.readSize.Value)) 153 | } 154 | if cq.read.Value != snap.read.Value+snap.readSize.Value { 155 | return errors.New(fmt.Sprintf("read has not been advanced by the correct amount.\nsnap %s\ncq %s", snap.String(), cq.String())) 156 | } 157 | return nil 158 | } 159 | 160 | func TestEvenWriteRead(t *testing.T) { 161 | for i := uint(0); i <= 41; i += 4 { 162 | size := int64(1 << i) 163 | bufferSize := fmath.Max(size/128, 1) 164 | testSequentialReadWrites(t, size, bufferSize, bufferSize, 512) 165 | } 166 | } 167 | 168 | func TestLightWriteHeavyRead(t *testing.T) { 169 | for i := uint(0); i <= 41; i += 4 { 170 | size := int64(1 << i) 171 | bufferSize := fmath.Max(size/128, 1) 172 | testSequentialReadWrites(t, size, bufferSize, bufferSize*2, 512) 173 | } 174 | } 175 | 176 | func TestHeavyWriteLightRead(t *testing.T) { 177 | for i := uint(0); i <= 41; i += 4 { 178 | size := int64(1 << i) 179 | bufferSize := fmath.Max(size/128, 1) 180 | testSequentialReadWrites(t, size, bufferSize*2, bufferSize, 512) 181 | } 182 | } 183 | 184 | func testSequentialReadWrites(t *testing.T, size int64, writeSize, readSize, iterations int64) { 185 | cqs, err := newCommonQ(size, 0) 186 | if err != nil { 187 | t.Error(err.Error()) 188 | return 189 | } 190 | cq := &cqs 191 | for j := int64(0); j < iterations; j++ { 192 | // write 193 | snap := copyForWrite(cq) 194 | wfrom, wto := cq.acquireWrite(writeSize) 195 | if err := testAcquireWrite(writeSize, wfrom, wto, cq, snap); err != nil { 196 | t.Error(err.Error()) 197 | return 198 | } 199 | snap = copyForWrite(cq) 200 | cq.ReleaseWrite() 201 | if err := testReleaseWrite(cq, snap); err != nil { 202 | 
t.Error(err.Error()) 203 | return 204 | } 205 | // read 206 | snap = copyForRead(cq) 207 | rfrom, rto := cq.acquireRead(readSize) 208 | if err := testAcquireRead(readSize, rfrom, rto, cq, snap); err != nil { 209 | t.Error(err.Error()) 210 | return 211 | } 212 | snap = copyForRead(cq) 213 | cq.ReleaseRead() 214 | if err := testReleaseRead(cq, snap); err != nil { 215 | t.Error(err.Error()) 216 | return 217 | } 218 | } 219 | } 220 | 221 | func TestEvenWriteReadConc(t *testing.T) { 222 | for i := uint(0); i <= 41; i += 4 { 223 | size := int64(1 << i) 224 | bufferSize := fmath.Max(size/128, 1) 225 | testConcurrentReadWrites(t, size, bufferSize, bufferSize, 512) 226 | } 227 | } 228 | 229 | func TestLightWriteHeavyReadConc(t *testing.T) { 230 | for i := uint(0); i <= 41; i += 4 { 231 | size := int64(1 << i) 232 | bufferSize := fmath.Max(size/128, 1) 233 | testConcurrentReadWrites(t, size, bufferSize, bufferSize*2, 512) 234 | } 235 | } 236 | 237 | func TestHeavyWriteLightReadConc(t *testing.T) { 238 | for i := uint(0); i <= 41; i += 4 { 239 | size := int64(1 << i) 240 | bufferSize := fmath.Max(size/128, 1) 241 | testConcurrentReadWrites(t, size, bufferSize*2, bufferSize, 512) 242 | } 243 | } 244 | 245 | func testConcurrentReadWrites(t *testing.T, size int64, writeSize, readSize, iterations int64) { 246 | cqs, err := newCommonQ(size, 0) 247 | if err != nil { 248 | t.Error(err.Error()) 249 | return 250 | } 251 | end := make(chan bool, 2) 252 | go func(cq *commonQ) { 253 | // write 254 | defer func() { 255 | end <- true 256 | }() 257 | for i := int64(0); i < iterations*writeSize; { 258 | snap := copyForWrite(cq) 259 | wfrom, wto := cq.acquireWrite(writeSize) 260 | if err := testAcquireWrite(writeSize, wfrom, wto, cq, snap); err != nil { 261 | t.Error(err.Error()) 262 | return 263 | } 264 | snap = copyForWrite(cq) 265 | cq.ReleaseWrite() 266 | if err := testReleaseWrite(cq, snap); err != nil { 267 | t.Error(err.Error()) 268 | return 269 | } 270 | i += (wto - wfrom) 271 | } 272 | }(&cqs) 273 | go func(cq *commonQ) { 274 | // read 275 | defer func() { 276 | end <- true 277 | }() 278 | for i := int64(0); i < iterations*writeSize; { 279 | snap := copyForRead(cq) 280 | rfrom, rto := cq.acquireRead(readSize) 281 | if err := testAcquireRead(readSize, rfrom, rto, cq, snap); err != nil { 282 | t.Error(err.Error()) 283 | return 284 | } 285 | snap = copyForRead(cq) 286 | cq.ReleaseRead() 287 | if err := testReleaseRead(cq, snap); err != nil { 288 | t.Error(err.Error()) 289 | return 290 | } 291 | i += (rto - rfrom) 292 | } 293 | }(&cqs) 294 | <-end 295 | <-end 296 | } 297 | --------------------------------------------------------------------------------