├── txn
│   ├── output.txt
│   ├── tarjan_test.go
│   ├── chaos.go
│   ├── tarjan.go
│   ├── debug.go
│   ├── dockey_test.go
│   ├── sim_test.go
│   ├── txn.go
│   ├── txn_test.go
│   └── flusher.go
├── syscall_windows_test.go
├── syscall_test.go
├── README.md
└── suite_test.go
/txn/output.txt:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/syscall_windows_test.go:
--------------------------------------------------------------------------------
1 | package mgo_test
2 |
3 | func stop(pid int) (err error) {
4 | panicOnWindows() // Always does.
5 | return nil
6 | }
7 |
8 | func cont(pid int) (err error) {
9 | panicOnWindows() // Always does.
10 | return nil
11 | }
12 |
--------------------------------------------------------------------------------
/syscall_test.go:
--------------------------------------------------------------------------------
1 | // +build !windows
2 |
3 | package mgo_test
4 |
5 | import (
6 | "syscall"
7 | )
8 |
9 | func stop(pid int) (err error) {
10 | return syscall.Kill(pid, syscall.SIGSTOP)
11 | }
12 |
13 | func cont(pid int) (err error) {
14 | return syscall.Kill(pid, syscall.SIGCONT)
15 | }
16 |
--------------------------------------------------------------------------------
/txn/tarjan_test.go:
--------------------------------------------------------------------------------
1 | package txn
2 |
3 | import (
4 | "fmt"
5 | "gopkg.in/mgo.v2/bson"
6 | . "gopkg.in/check.v1"
7 | )
8 |
9 | type TarjanSuite struct{}
10 |
11 | var _ = Suite(TarjanSuite{})
12 |
13 | func bid(n int) bson.ObjectId {
14 | return bson.ObjectId(fmt.Sprintf("%024d", n))
15 | }
16 |
17 | func bids(ns ...int) (ids []bson.ObjectId) {
18 | for _, n := range ns {
19 | ids = append(ids, bid(n))
20 | }
21 | return
22 | }
23 |
24 | func (TarjanSuite) TestExample(c *C) {
25 | successors := map[bson.ObjectId][]bson.ObjectId{
26 | bid(1): bids(2, 3),
27 | bid(2): bids(1, 5),
28 | bid(3): bids(4),
29 | bid(4): bids(3, 5),
30 | bid(5): bids(6),
31 | bid(6): bids(7),
32 | bid(7): bids(8),
33 | bid(8): bids(6, 9),
34 | bid(9): bids(),
35 | }
36 |
37 | c.Assert(tarjanSort(successors), DeepEquals, [][]bson.ObjectId{
38 | bids(9),
39 | bids(6, 7, 8),
40 | bids(5),
41 | bids(3, 4),
42 | bids(1, 2),
43 | })
44 | }
45 |
--------------------------------------------------------------------------------
/txn/chaos.go:
--------------------------------------------------------------------------------
1 | package txn
2 |
3 | import (
4 | mrand "math/rand"
5 | "time"
6 | )
7 |
8 | var chaosEnabled = false
9 | var chaosSetting Chaos
10 |
11 | // Chaos holds parameters for the failure injection mechanism.
12 | type Chaos struct {
13 | // KillChance is the 0.0 to 1.0 chance that a given checkpoint
14 | // within the algorithm will raise an interruption that will
15 | // stop the procedure.
16 | KillChance float64
17 |
18 | // SlowdownChance is the 0.0 to 1.0 chance that a given checkpoint
19 | // within the algorithm will be delayed by Slowdown before
20 | // continuing.
21 | SlowdownChance float64
22 | Slowdown time.Duration
23 |
24 | // If Breakpoint is set, the above settings will only affect the
25 | // named breakpoint.
26 | Breakpoint string
27 | }
28 |
29 | // SetChaos sets the failure injection parameters to c.
30 | func SetChaos(c Chaos) {
31 | chaosSetting = c
32 | chaosEnabled = c.KillChance > 0 || c.SlowdownChance > 0
33 | }
34 |
35 | func chaos(bpname string) {
36 | if !chaosEnabled {
37 | return
38 | }
39 | switch chaosSetting.Breakpoint {
40 | case "", bpname:
41 | kc := chaosSetting.KillChance
42 | if kc > 0 && mrand.Intn(1000) < int(kc*1000) {
43 | panic(chaosError{})
44 | }
45 | if bpname == "insert" {
46 | return
47 | }
48 | sc := chaosSetting.SlowdownChance
49 | if sc > 0 && mrand.Intn(1000) < int(sc*1000) {
50 | time.Sleep(chaosSetting.Slowdown)
51 | }
52 | }
53 | }
54 |
55 | type chaosError struct{}
56 |
57 | func (f *flusher) handleChaos(err *error) {
58 | v := recover()
59 | if v == nil {
60 | return
61 | }
62 | if _, ok := v.(chaosError); ok {
63 | f.debugf("Killed by chaos!")
64 | *err = ErrChaos
65 | return
66 | }
67 | panic(v)
68 | }
69 |
--------------------------------------------------------------------------------
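A minimal sketch of how a test might drive the failure-injection settings documented above. Only SetChaos, Chaos, and ErrChaos come from the package; the enableChaos helper name is an illustrative assumption:

package txn_test

import (
	"time"

	"gopkg.in/mgo.v2/txn"
)

// enableChaos (hypothetical helper) switches failure injection on and
// returns a function that restores the default, disabled settings.
func enableChaos() (restore func()) {
	txn.SetChaos(txn.Chaos{
		KillChance:     0.01,                   // ~1% of checkpoints panic, surfacing as ErrChaos
		SlowdownChance: 0.3,                    // ~30% of checkpoints sleep for Slowdown
		Slowdown:       100 * time.Millisecond, // delay applied on a slowdown hit
		Breakpoint:     "",                     // empty: every checkpoint is affected
	})
	return func() { txn.SetChaos(txn.Chaos{}) }
}

A test would typically call defer enableChaos()() at the top and treat txn.ErrChaos as an expected outcome, much as sim_test.go below does by calling SetChaos directly.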
/txn/tarjan.go:
--------------------------------------------------------------------------------
1 | package txn
2 |
3 | import (
4 | "gopkg.in/mgo.v2/bson"
5 | "sort"
6 | )
7 |
8 | func tarjanSort(successors map[bson.ObjectId][]bson.ObjectId) [][]bson.ObjectId {
9 | // http://en.wikipedia.org/wiki/Tarjan%27s_strongly_connected_components_algorithm
10 | data := &tarjanData{
11 | successors: successors,
12 | nodes: make([]tarjanNode, 0, len(successors)),
13 | index: make(map[bson.ObjectId]int, len(successors)),
14 | }
15 |
16 | for id := range successors {
17 | id := bson.ObjectId(string(id))
18 | if _, seen := data.index[id]; !seen {
19 | data.strongConnect(id)
20 | }
21 | }
22 |
23 | // Sort connected components to stabilize the algorithm.
24 | for _, ids := range data.output {
25 | if len(ids) > 1 {
26 | sort.Sort(idList(ids))
27 | }
28 | }
29 | return data.output
30 | }
31 |
32 | type tarjanData struct {
33 | successors map[bson.ObjectId][]bson.ObjectId
34 | output [][]bson.ObjectId
35 |
36 | nodes []tarjanNode
37 | stack []bson.ObjectId
38 | index map[bson.ObjectId]int
39 | }
40 |
41 | type tarjanNode struct {
42 | lowlink int
43 | stacked bool
44 | }
45 |
46 | type idList []bson.ObjectId
47 |
48 | func (l idList) Len() int { return len(l) }
49 | func (l idList) Swap(i, j int) { l[i], l[j] = l[j], l[i] }
50 | func (l idList) Less(i, j int) bool { return l[i] < l[j] }
51 |
52 | func (data *tarjanData) strongConnect(id bson.ObjectId) *tarjanNode {
53 | index := len(data.nodes)
54 | data.index[id] = index
55 | data.stack = append(data.stack, id)
56 | data.nodes = append(data.nodes, tarjanNode{index, true})
57 | node := &data.nodes[index]
58 |
59 | for _, succid := range data.successors[id] {
60 | succindex, seen := data.index[succid]
61 | if !seen {
62 | succnode := data.strongConnect(succid)
63 | if succnode.lowlink < node.lowlink {
64 | node.lowlink = succnode.lowlink
65 | }
66 | } else if data.nodes[succindex].stacked {
67 | // Part of the current strongly-connected component.
68 | if succindex < node.lowlink {
69 | node.lowlink = succindex
70 | }
71 | }
72 | }
73 |
74 | if node.lowlink == index {
75 | // Root node; pop stack and output new
76 | // strongly-connected component.
77 | var scc []bson.ObjectId
78 | i := len(data.stack) - 1
79 | for {
80 | stackid := data.stack[i]
81 | stackindex := data.index[stackid]
82 | data.nodes[stackindex].stacked = false
83 | scc = append(scc, stackid)
84 | if stackindex == index {
85 | break
86 | }
87 | i--
88 | }
89 | data.stack = data.stack[:i]
90 | data.output = append(data.output, scc)
91 | }
92 |
93 | return node
94 | }
95 |
--------------------------------------------------------------------------------
/txn/debug.go:
--------------------------------------------------------------------------------
1 | package txn
2 |
3 | import (
4 | "bytes"
5 | "fmt"
6 | "sort"
7 | "sync/atomic"
8 |
9 | "gopkg.in/mgo.v2/bson"
10 | )
11 |
12 | var (
13 | debugEnabled bool
14 | logger log_Logger
15 | )
16 |
17 | type log_Logger interface {
18 | Output(calldepth int, s string) error
19 | }
20 |
21 | // SetLogger specifies where logged messages are sent. The standard *log.Logger satisfies the log_Logger interface.
22 | func SetLogger(l log_Logger) {
23 | logger = l
24 | }
25 |
26 | // SetDebug enables or disables debugging.
27 | func SetDebug(debug bool) {
28 | debugEnabled = debug
29 | }
30 |
31 | var ErrChaos = fmt.Errorf("interrupted by chaos")
32 |
33 | var debugId uint32
34 |
35 | func debugPrefix() string {
36 | d := atomic.AddUint32(&debugId, 1) - 1
37 | s := make([]byte, 0, 10)
38 | for i := uint(0); i < 8; i++ {
39 | s = append(s, "abcdefghijklmnop"[(d>>(4*i))&0xf])
40 | if d>>(4*(i+1)) == 0 {
41 | break
42 | }
43 | }
44 | s = append(s, ')', ' ')
45 | return string(s)
46 | }
47 |
48 | func logf(format string, args ...interface{}) {
49 | if logger != nil {
50 | logger.Output(2, fmt.Sprintf(format, argsForLog(args)...))
51 | }
52 | }
53 |
54 | func debugf(format string, args ...interface{}) {
55 | if debugEnabled && logger != nil {
56 | logger.Output(2, fmt.Sprintf(format, argsForLog(args)...))
57 | }
58 | }
59 |
60 | func argsForLog(args []interface{}) []interface{} {
61 | for i, arg := range args {
62 | switch v := arg.(type) {
63 | case bson.ObjectId:
64 | args[i] = v.Hex()
65 | case []bson.ObjectId:
66 | lst := make([]string, len(v))
67 | for j, id := range v {
68 | lst[j] = id.Hex()
69 | }
70 | args[i] = lst
71 | case map[docKey][]bson.ObjectId:
72 | buf := &bytes.Buffer{}
73 | var dkeys docKeys
74 | for dkey := range v {
75 | dkeys = append(dkeys, dkey)
76 | }
77 | sort.Sort(dkeys)
78 | for i, dkey := range dkeys {
79 | if i > 0 {
80 | buf.WriteByte(' ')
81 | }
82 | buf.WriteString(fmt.Sprintf("%v: {", dkey))
83 | for j, id := range v[dkey] {
84 | if j > 0 {
85 | buf.WriteByte(' ')
86 | }
87 | buf.WriteString(id.Hex())
88 | }
89 | buf.WriteByte('}')
90 | }
91 | args[i] = buf.String()
92 | case map[docKey][]int64:
93 | buf := &bytes.Buffer{}
94 | var dkeys docKeys
95 | for dkey := range v {
96 | dkeys = append(dkeys, dkey)
97 | }
98 | sort.Sort(dkeys)
99 | for i, dkey := range dkeys {
100 | if i > 0 {
101 | buf.WriteByte(' ')
102 | }
103 | buf.WriteString(fmt.Sprintf("%v: %v", dkey, v[dkey]))
104 | }
105 | args[i] = buf.String()
106 | }
107 | }
108 | return args
109 | }
110 |
--------------------------------------------------------------------------------
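Because log_Logger only requires the Output(calldepth, s) method, the standard library's *log.Logger can be passed to SetLogger directly. A minimal sketch (the prefix and flags are arbitrary choices):

package main

import (
	"log"
	"os"

	"gopkg.in/mgo.v2/txn"
)

func main() {
	// *log.Logger has Output(calldepth int, s string) error,
	// so it satisfies the log_Logger interface expected by SetLogger.
	txn.SetLogger(log.New(os.Stderr, "txn ", log.LstdFlags))
	txn.SetDebug(true) // also emit the more verbose debugf messages
}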
/txn/dockey_test.go:
--------------------------------------------------------------------------------
1 | package txn
2 |
3 | import (
4 | "sort"
5 |
6 | . "gopkg.in/check.v1"
7 | )
8 |
9 | type DocKeySuite struct{}
10 |
11 | var _ = Suite(&DocKeySuite{})
12 |
13 | type T struct {
14 | A int
15 | B string
16 | }
17 |
18 | type T2 struct {
19 | A int
20 | B string
21 | }
22 |
23 | type T3 struct {
24 | A int
25 | B string
26 | }
27 |
28 | type T4 struct {
29 | A int
30 | B string
31 | }
32 |
33 | type T5 struct {
34 | F int
35 | Q string
36 | }
37 |
38 | type T6 struct {
39 | A int
40 | B string
41 | }
42 |
43 | type T7 struct {
44 | A bool
45 | B float64
46 | }
47 |
48 | type T8 struct {
49 | A int
50 | B string
51 | }
52 |
53 | type T9 struct {
54 | A int
55 | B string
56 | C bool
57 | }
58 |
59 | type T10 struct {
60 | C int `bson:"a"`
61 | D string `bson:"b,omitempty"`
62 | }
63 |
64 | type T11 struct {
65 | C int
66 | D string
67 | }
68 |
69 | type T12 struct {
70 | S string
71 | }
72 |
73 | type T13 struct {
74 | p, q, r bool
75 | S string
76 | }
77 |
78 | var docKeysTests = [][]docKeys{
79 | {{
80 | {"c", 1},
81 | {"c", 5},
82 | {"c", 2},
83 | }, {
84 | {"c", 1},
85 | {"c", 2},
86 | {"c", 5},
87 | }}, {{
88 | {"c", "foo"},
89 | {"c", "bar"},
90 | {"c", "bob"},
91 | }, {
92 | {"c", "bar"},
93 | {"c", "bob"},
94 | {"c", "foo"},
95 | }}, {{
96 | {"c", 0.2},
97 | {"c", 0.07},
98 | {"c", 0.9},
99 | }, {
100 | {"c", 0.07},
101 | {"c", 0.2},
102 | {"c", 0.9},
103 | }}, {{
104 | {"c", true},
105 | {"c", false},
106 | {"c", true},
107 | }, {
108 | {"c", false},
109 | {"c", true},
110 | {"c", true},
111 | }}, {{
112 | {"c", T{1, "b"}},
113 | {"c", T{1, "a"}},
114 | {"c", T{0, "b"}},
115 | {"c", T{0, "a"}},
116 | }, {
117 | {"c", T{0, "a"}},
118 | {"c", T{0, "b"}},
119 | {"c", T{1, "a"}},
120 | {"c", T{1, "b"}},
121 | }}, {{
122 | {"c", T{1, "a"}},
123 | {"c", T{0, "a"}},
124 | }, {
125 | {"c", T{0, "a"}},
126 | {"c", T{1, "a"}},
127 | }}, {{
128 | {"c", T3{0, "b"}},
129 | {"c", T2{1, "b"}},
130 | {"c", T3{1, "a"}},
131 | {"c", T2{0, "a"}},
132 | }, {
133 | {"c", T2{0, "a"}},
134 | {"c", T3{0, "b"}},
135 | {"c", T3{1, "a"}},
136 | {"c", T2{1, "b"}},
137 | }}, {{
138 | {"c", T5{1, "b"}},
139 | {"c", T4{1, "b"}},
140 | {"c", T5{0, "a"}},
141 | {"c", T4{0, "a"}},
142 | }, {
143 | {"c", T4{0, "a"}},
144 | {"c", T5{0, "a"}},
145 | {"c", T4{1, "b"}},
146 | {"c", T5{1, "b"}},
147 | }}, {{
148 | {"c", T6{1, "b"}},
149 | {"c", T7{true, 0.2}},
150 | {"c", T6{0, "a"}},
151 | {"c", T7{false, 0.04}},
152 | }, {
153 | {"c", T6{0, "a"}},
154 | {"c", T6{1, "b"}},
155 | {"c", T7{false, 0.04}},
156 | {"c", T7{true, 0.2}},
157 | }}, {{
158 | {"c", T9{1, "b", true}},
159 | {"c", T8{1, "b"}},
160 | {"c", T9{0, "a", false}},
161 | {"c", T8{0, "a"}},
162 | }, {
163 | {"c", T9{0, "a", false}},
164 | {"c", T8{0, "a"}},
165 | {"c", T9{1, "b", true}},
166 | {"c", T8{1, "b"}},
167 | }}, {{
168 | {"b", 2},
169 | {"a", 5},
170 | {"c", 2},
171 | {"b", 1},
172 | }, {
173 | {"a", 5},
174 | {"b", 1},
175 | {"b", 2},
176 | {"c", 2},
177 | }}, {{
178 | {"c", T11{1, "a"}},
179 | {"c", T11{1, "a"}},
180 | {"c", T10{1, "a"}},
181 | }, {
182 | {"c", T10{1, "a"}},
183 | {"c", T11{1, "a"}},
184 | {"c", T11{1, "a"}},
185 | }}, {{
186 | {"c", T12{"a"}},
187 | {"c", T13{false, true, false, "a"}},
188 | {"c", T12{"b"}},
189 | {"c", T13{false, true, false, "b"}},
190 | }, {
191 | {"c", T12{"a"}},
192 | {"c", T13{false, true, false, "a"}},
193 | {"c", T12{"b"}},
194 | {"c", T13{false, true, false, "b"}},
195 | }},
196 | }
197 |
198 | func (s *DocKeySuite) TestSort(c *C) {
199 | for _, test := range docKeysTests {
200 | keys := test[0]
201 | expected := test[1]
202 | sort.Sort(keys)
203 | c.Check(keys, DeepEquals, expected)
204 | }
205 | }
206 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # A Collection of Proxy / Circumvention Tools #
2 |
3 |
4 | A download roundup of ShadowsocksR, Shadowsocks, ClashR, vmess, Clash, V2ray and other VPN / proxy circumvention tools
5 |
6 |
7 |
8 |
9 |
10 | | Tool | Windows | MacOS | Android | iOS | Notes |
11 | | ------------ | ------------------------------------------------------------ | ------------------------------------------------------------ | ------------------------------------------------------------ | ------------------------------------------------------------ | :----------------------------------------------------------- |
12 | | Shadowsocks | [shadowsocks-win](https://github.com/shadowsocks/shadowsocks-windows/releases) | [ShadowsocksX-NG](https://github.com/shadowsocks/ShadowsocksX-NG/releases/) | [shadowsocks-android](https://github.com/shadowsocks/shadowsocks-android/releases) | **[Shadowrocket](https://github.com/githubvpn007/v2rayNvpn/releases/tag/Shadowrocket_2.1.12)** <br> [QuantumultX](https://apps.apple.com/us/app/id1443988620) | Shadowrocket_2.1.12 can be installed locally <br> iOS tools must be downloaded with a US-region Apple ID |
13 | | ShadowsocksR | [SSR](https://github.com/shadowsocksrr/shadowsocksr-csharp/releases) | Use Shadowsocks | **[SSRR](https://github.com/shadowsocksrr/shadowsocksr-android/releases)** | Use Shadowsocks | |
14 | | Clash | **[Clash](https://github.com/Fndroid/clash_for_windows_pkg/releases)** | **[ClashX](https://github.com/yichengchen/clashX/releases)** <br> [Clash for Windows for Mac](https://github.com/Fndroid/clash_for_windows_pkg/releases) | **[ClashR](https://github.com/BROBIRD/ClashForAndroid/releases)** | | |
15 | | V2ray | [V2rayN](https://github.com/2dust/v2rayN/releases) <br> [Qv2ray](https://github.com/Qv2ray/Qv2ray/releases) <br> [V2rayW](https://github.com/Cenmrev/V2RayW/releases) <br> [V2RayS](https://github.com/Shinlor/V2RayS/releases) <br> [Mellow](https://github.com/mellow-io/mellow/releases) | [V2rayX](https://github.com/Cenmrev/V2RayX/releases) <br> [V2rayU](https://github.com/yanue/V2rayU/releases) <br> [ClashX](https://github.com/yichengchen/clashX/releases) | [V2rayNG](https://github.com/2dust/v2rayNG/releases) <br> [BifrostV](https://github.com/githubvpn007/v2rayNvpn/releases/tag/android-BifrostV0.6.8) <br> [Clash](https://github.com/Kr328/ClashForAndroid/releases) <br> [Kitsunebi](https://github.com/eycorsican/kitsunebi-android/releases) | [kitsunebi](https://apps.apple.com/us/app/kitsunebi-proxy-utility/id1446584073) <br> [Shadowrocket](https://github.com/githubvpn007/v2rayNvpn/releases/tag/Shadowrocket_2.1.12) <br> pepi <br> i2Ray <br> Quantumult <br> QuantumultX <br> Surge 4 | Most iOS tools must be downloaded with a US-region account |
16 | | Xray | [winXray](https://github.com/TheMRLL/winxray/releases) <br> [Qv2ray](https://github.com/Qv2ray/Qv2ray/releases) <br> [V2RayN](https://github.com/2dust/v2rayN/releases) <br> [Clash](https://github.com/Fndroid/clash_for_windows_pkg/releases) | [Qv2ray](https://github.com/Qv2ray/Qv2ray/releases/tag/v2.7.0) | [V2rayNG](https://github.com/2dust/v2rayNG/releases) <br> [Kitsunebi](https://github.com/eycorsican/kitsunebi-android/releases) | [Shadowrocket](https://github.com/githubvpn007/v2rayNvpn/releases/tag/Shadowrocket_2.1.12) | Install Shadowrocket with the i4 assistant (爱思助手) |
17 | | Trojan client | [Official Trojan Windows client](https://github.com/trojan-gfw/trojan/releases) <br> [Clash](https://github.com/Fndroid/clash_for_windows_pkg/releases) <br> [Trojan-Qt5](https://github.com/McDull-GitHub/trojan-qt5/releases) | [Official Trojan Mac client](https://github.com/trojan-gfw/trojan/releases) <br> [ClashX](https://github.com/yichengchen/clashX/releases) <br> [Trojan-Qt5](https://github.com/McDull-GitHub/trojan-qt5/releases) | [igniter](https://github.com/trojan-gfw/igniter/releases) <br> [Clash](https://github.com/BROBIRD/ClashForAndroid/releases) | [Shadowrocket](https://github.com/githubvpn007/v2rayNvpn/releases/tag/Shadowrocket_2.1.12) <br> QuantumultX | QuantumultX must be downloaded with a US-region account |
18 | | Trojan-Go client | [winXray](https://github.com/TheMRLL/winxray/releases) <br> [Qv2ray](https://github.com/Qv2ray/Qv2ray/releases) | [Qv2ray](https://github.com/Qv2ray/Qv2ray/releases) | [Igniter-Go (build from source)](https://codeload.github.com/p4gefau1t/trojan-go-android/zip/v0.1.0-pre-alpha20) | [Shadowrocket](https://github.com/githubvpn007/v2rayNvpn/releases/tag/Shadowrocket_2.1.12) | |
19 |
20 |
21 |
22 |
23 | # 分享一家牛X的机场:九联★★★★★
24 |
25 | 流媒体,ChatGPT等等都完美支持
26 |
27 |
28 | ## 便宜又非常稳定
29 |
30 |
31 |
32 |
33 | [绿灯备用1](https://www.lvdeng.info) [绿灯备用2](https://www.lvdeng.info)
34 |
35 |
36 | 说句心里话,推荐绿灯不光是因为确实好用,还有它们的返米非常高,口罩期间,被公司裁员,作为一个中年猿,可以说生存压力很大,幸亏绿灯,每月稳定返米补贴。感谢!
37 |
38 |
--------------------------------------------------------------------------------
/suite_test.go:
--------------------------------------------------------------------------------
1 | // mgo - MongoDB driver for Go
2 | //
3 | // Copyright (c) 2010-2012 - Gustavo Niemeyer
4 | //
5 | // All rights reserved.
6 | //
7 | // Redistribution and use in source and binary forms, with or without
8 | // modification, are permitted provided that the following conditions are met:
9 | //
10 | // 1. Redistributions of source code must retain the above copyright notice, this
11 | // list of conditions and the following disclaimer.
12 | // 2. Redistributions in binary form must reproduce the above copyright notice,
13 | // this list of conditions and the following disclaimer in the documentation
14 | // and/or other materials provided with the distribution.
15 | //
16 | // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
17 | // ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
18 | // WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
19 | // DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
20 | // ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
21 | // (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
22 | // LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
23 | // ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
24 | // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
25 | // SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
26 |
27 | package mgo_test
28 |
29 | import (
30 | "bytes"
31 | "errors"
32 | "flag"
33 | "fmt"
34 | "net"
35 | "os/exec"
36 | "runtime"
37 | "strconv"
38 | "testing"
39 | "time"
40 |
41 | . "gopkg.in/check.v1"
42 | "gopkg.in/mgo.v2"
43 | "gopkg.in/mgo.v2/bson"
44 | )
45 |
46 | var fast = flag.Bool("fast", false, "Skip slow tests")
47 |
48 | type M bson.M
49 |
50 | type cLogger C
51 |
52 | func (c *cLogger) Output(calldepth int, s string) error {
53 | ns := time.Now().UnixNano()
54 | t := float64(ns%100e9) / 1e9
55 | ((*C)(c)).Logf("[LOG] %.05f %s", t, s)
56 | return nil
57 | }
58 |
59 | func TestAll(t *testing.T) {
60 | TestingT(t)
61 | }
62 |
63 | type S struct {
64 | session *mgo.Session
65 | stopped bool
66 | build mgo.BuildInfo
67 | frozen []string
68 | }
69 |
70 | func (s *S) versionAtLeast(v ...int) (result bool) {
71 | for i := range v {
72 | if i == len(s.build.VersionArray) {
73 | return false
74 | }
75 | if s.build.VersionArray[i] != v[i] {
76 | return s.build.VersionArray[i] >= v[i]
77 | }
78 | }
79 | return true
80 | }
81 |
82 | var _ = Suite(&S{})
83 |
84 | func (s *S) SetUpSuite(c *C) {
85 | mgo.SetDebug(true)
86 | mgo.SetStats(true)
87 | s.StartAll()
88 |
89 | session, err := mgo.Dial("localhost:40001")
90 | c.Assert(err, IsNil)
91 | s.build, err = session.BuildInfo()
92 | c.Check(err, IsNil)
93 | session.Close()
94 | }
95 |
96 | func (s *S) SetUpTest(c *C) {
97 | err := run("mongo --nodb harness/mongojs/dropall.js")
98 | if err != nil {
99 | panic(err.Error())
100 | }
101 | mgo.SetLogger((*cLogger)(c))
102 | mgo.ResetStats()
103 | }
104 |
105 | func (s *S) TearDownTest(c *C) {
106 | if s.stopped {
107 | s.Stop(":40201")
108 | s.Stop(":40202")
109 | s.Stop(":40203")
110 | s.StartAll()
111 | }
112 | for _, host := range s.frozen {
113 | if host != "" {
114 | s.Thaw(host)
115 | }
116 | }
117 | var stats mgo.Stats
118 | for i := 0; ; i++ {
119 | stats = mgo.GetStats()
120 | if stats.SocketsInUse == 0 && stats.SocketsAlive == 0 {
121 | break
122 | }
123 | if i == 20 {
124 | c.Fatal("Test left sockets in a dirty state")
125 | }
126 | c.Logf("Waiting for sockets to die: %d in use, %d alive", stats.SocketsInUse, stats.SocketsAlive)
127 | time.Sleep(500 * time.Millisecond)
128 | }
129 | for i := 0; ; i++ {
130 | stats = mgo.GetStats()
131 | if stats.Clusters == 0 {
132 | break
133 | }
134 | if i == 60 {
135 | c.Fatal("Test left clusters alive")
136 | }
137 | c.Logf("Waiting for clusters to die: %d alive", stats.Clusters)
138 | time.Sleep(1 * time.Second)
139 | }
140 | }
141 |
142 | func (s *S) Stop(host string) {
143 | // Give a moment for slaves to sync and avoid getting rollback issues.
144 | panicOnWindows()
145 | time.Sleep(2 * time.Second)
146 | err := run("svc -d _harness/daemons/" + supvName(host))
147 | if err != nil {
148 | panic(err)
149 | }
150 | s.stopped = true
151 | }
152 |
153 | func (s *S) pid(host string) int {
154 | // Note recent releases of lsof force 'f' to be present in the output (WTF?).
155 | cmd := exec.Command("lsof", "-iTCP:"+hostPort(host), "-sTCP:LISTEN", "-Fpf")
156 | output, err := cmd.CombinedOutput()
157 | if err != nil {
158 | panic(err)
159 | }
160 | pidstr := string(bytes.Fields(output[1:])[0])
161 | pid, err := strconv.Atoi(pidstr)
162 | if err != nil {
163 | panic(fmt.Errorf("cannot convert pid to int: %q, command line: %q", pidstr, cmd.Args))
164 | }
165 | return pid
166 | }
167 |
168 | func (s *S) Freeze(host string) {
169 | err := stop(s.pid(host))
170 | if err != nil {
171 | panic(err)
172 | }
173 | s.frozen = append(s.frozen, host)
174 | }
175 |
176 | func (s *S) Thaw(host string) {
177 | err := cont(s.pid(host))
178 | if err != nil {
179 | panic(err)
180 | }
181 | for i, frozen := range s.frozen {
182 | if frozen == host {
183 | s.frozen[i] = ""
184 | }
185 | }
186 | }
187 |
188 | func (s *S) StartAll() {
189 | if s.stopped {
190 | // Restart any stopped nodes.
191 | run("svc -u _harness/daemons/*")
192 | err := run("mongo --nodb harness/mongojs/wait.js")
193 | if err != nil {
194 | panic(err)
195 | }
196 | s.stopped = false
197 | }
198 | }
199 |
200 | func run(command string) error {
201 | var output []byte
202 | var err error
203 | if runtime.GOOS == "windows" {
204 | output, err = exec.Command("cmd", "/C", command).CombinedOutput()
205 | } else {
206 | output, err = exec.Command("/bin/sh", "-c", command).CombinedOutput()
207 | }
208 |
209 | if err != nil {
210 | msg := fmt.Sprintf("Failed to execute: %s: %s\n%s", command, err.Error(), string(output))
211 | return errors.New(msg)
212 | }
213 | return nil
214 | }
215 |
216 | var supvNames = map[string]string{
217 | "40001": "db1",
218 | "40002": "db2",
219 | "40011": "rs1a",
220 | "40012": "rs1b",
221 | "40013": "rs1c",
222 | "40021": "rs2a",
223 | "40022": "rs2b",
224 | "40023": "rs2c",
225 | "40031": "rs3a",
226 | "40032": "rs3b",
227 | "40033": "rs3c",
228 | "40041": "rs4a",
229 | "40101": "cfg1",
230 | "40102": "cfg2",
231 | "40103": "cfg3",
232 | "40201": "s1",
233 | "40202": "s2",
234 | "40203": "s3",
235 | }
236 |
237 | // supvName returns the daemon name for the given host address.
238 | func supvName(host string) string {
239 | host, port, err := net.SplitHostPort(host)
240 | if err != nil {
241 | panic(err)
242 | }
243 | name, ok := supvNames[port]
244 | if !ok {
245 | panic("Unknown host: " + host)
246 | }
247 | return name
248 | }
249 |
250 | func hostPort(host string) string {
251 | _, port, err := net.SplitHostPort(host)
252 | if err != nil {
253 | panic(err)
254 | }
255 | return port
256 | }
257 |
258 | func panicOnWindows() {
259 | if runtime.GOOS == "windows" {
260 | panic("the test suite is not yet fully supported on Windows")
261 | }
262 | }
263 |
--------------------------------------------------------------------------------
/txn/sim_test.go:
--------------------------------------------------------------------------------
1 | package txn_test
2 |
3 | import (
4 | "flag"
5 | "gopkg.in/mgo.v2"
6 | "gopkg.in/mgo.v2/bson"
7 | "gopkg.in/mgo.v2/dbtest"
8 | "gopkg.in/mgo.v2/txn"
9 | . "gopkg.in/check.v1"
10 | "math/rand"
11 | "time"
12 | )
13 |
14 | var (
15 | duration = flag.Duration("duration", 200*time.Millisecond, "duration for each simulation")
16 | seed = flag.Int64("seed", 0, "seed for rand")
17 | )
18 |
19 | type params struct {
20 | killChance float64
21 | slowdownChance float64
22 | slowdown time.Duration
23 |
24 | unsafe bool
25 | workers int
26 | accounts int
27 | changeHalf bool
28 | reinsertCopy bool
29 | reinsertZeroed bool
30 | changelog bool
31 |
32 | changes int
33 | }
34 |
35 | func (s *S) TestSim1Worker(c *C) {
36 | simulate(c, &s.server, params{
37 | workers: 1,
38 | accounts: 4,
39 | killChance: 0.01,
40 | slowdownChance: 0.3,
41 | slowdown: 100 * time.Millisecond,
42 | })
43 | }
44 |
45 | func (s *S) TestSim4WorkersDense(c *C) {
46 | simulate(c, &s.server, params{
47 | workers: 4,
48 | accounts: 2,
49 | killChance: 0.01,
50 | slowdownChance: 0.3,
51 | slowdown: 100 * time.Millisecond,
52 | })
53 | }
54 |
55 | func (s *S) TestSim4WorkersSparse(c *C) {
56 | simulate(c, &s.server, params{
57 | workers: 4,
58 | accounts: 10,
59 | killChance: 0.01,
60 | slowdownChance: 0.3,
61 | slowdown: 100 * time.Millisecond,
62 | })
63 | }
64 |
65 | func (s *S) TestSimHalf1Worker(c *C) {
66 | simulate(c, &s.server, params{
67 | workers: 1,
68 | accounts: 4,
69 | changeHalf: true,
70 | killChance: 0.01,
71 | slowdownChance: 0.3,
72 | slowdown: 100 * time.Millisecond,
73 | })
74 | }
75 |
76 | func (s *S) TestSimHalf4WorkersDense(c *C) {
77 | simulate(c, &s.server, params{
78 | workers: 4,
79 | accounts: 2,
80 | changeHalf: true,
81 | killChance: 0.01,
82 | slowdownChance: 0.3,
83 | slowdown: 100 * time.Millisecond,
84 | })
85 | }
86 |
87 | func (s *S) TestSimHalf4WorkersSparse(c *C) {
88 | simulate(c, &s.server, params{
89 | workers: 4,
90 | accounts: 10,
91 | changeHalf: true,
92 | killChance: 0.01,
93 | slowdownChance: 0.3,
94 | slowdown: 100 * time.Millisecond,
95 | })
96 | }
97 |
98 | func (s *S) TestSimReinsertCopy1Worker(c *C) {
99 | simulate(c, &s.server, params{
100 | workers: 1,
101 | accounts: 10,
102 | reinsertCopy: true,
103 | killChance: 0.01,
104 | slowdownChance: 0.3,
105 | slowdown: 100 * time.Millisecond,
106 | })
107 | }
108 |
109 | func (s *S) TestSimReinsertCopy4Workers(c *C) {
110 | simulate(c, &s.server, params{
111 | workers: 4,
112 | accounts: 10,
113 | reinsertCopy: true,
114 | killChance: 0.01,
115 | slowdownChance: 0.3,
116 | slowdown: 100 * time.Millisecond,
117 | })
118 | }
119 |
120 | func (s *S) TestSimReinsertZeroed1Worker(c *C) {
121 | simulate(c, &s.server, params{
122 | workers: 1,
123 | accounts: 10,
124 | reinsertZeroed: true,
125 | killChance: 0.01,
126 | slowdownChance: 0.3,
127 | slowdown: 100 * time.Millisecond,
128 | })
129 | }
130 |
131 | func (s *S) TestSimReinsertZeroed4Workers(c *C) {
132 | simulate(c, &s.server, params{
133 | workers: 4,
134 | accounts: 10,
135 | reinsertZeroed: true,
136 | killChance: 0.01,
137 | slowdownChance: 0.3,
138 | slowdown: 100 * time.Millisecond,
139 | })
140 | }
141 |
142 | func (s *S) TestSimChangeLog(c *C) {
143 | simulate(c, &s.server, params{
144 | workers: 4,
145 | accounts: 10,
146 | killChance: 0.01,
147 | slowdownChance: 0.3,
148 | slowdown: 100 * time.Millisecond,
149 | changelog: true,
150 | })
151 | }
152 |
153 | type balanceChange struct {
154 | id bson.ObjectId
155 | origin int
156 | target int
157 | amount int
158 | }
159 |
160 | func simulate(c *C, server *dbtest.DBServer, params params) {
161 | seed := *seed
162 | if seed == 0 {
163 | seed = time.Now().UnixNano()
164 | }
165 | rand.Seed(seed)
166 | c.Logf("Seed: %v", seed)
167 |
168 | txn.SetChaos(txn.Chaos{
169 | KillChance: params.killChance,
170 | SlowdownChance: params.slowdownChance,
171 | Slowdown: params.slowdown,
172 | })
173 | defer txn.SetChaos(txn.Chaos{})
174 |
175 | session := server.Session()
176 | defer session.Close()
177 |
178 | db := session.DB("test")
179 | tc := db.C("tc")
180 |
181 | runner := txn.NewRunner(tc)
182 |
183 | tclog := db.C("tc.log")
184 | if params.changelog {
185 | info := mgo.CollectionInfo{
186 | Capped: true,
187 | MaxBytes: 1000000,
188 | }
189 | err := tclog.Create(&info)
190 | c.Assert(err, IsNil)
191 | runner.ChangeLog(tclog)
192 | }
193 |
194 | accounts := db.C("accounts")
195 | for i := 0; i < params.accounts; i++ {
196 | err := accounts.Insert(M{"_id": i, "balance": 300})
197 | c.Assert(err, IsNil)
198 | }
199 | var stop time.Time
200 | if params.changes <= 0 {
201 | stop = time.Now().Add(*duration)
202 | }
203 |
204 | max := params.accounts
205 | if params.reinsertCopy || params.reinsertZeroed {
206 | max = int(float64(params.accounts) * 1.5)
207 | }
208 |
209 | changes := make(chan balanceChange, 1024)
210 |
211 | //session.SetMode(mgo.Eventual, true)
212 | for i := 0; i < params.workers; i++ {
213 | go func() {
214 | n := 0
215 | for {
216 | if n > 0 && n == params.changes {
217 | break
218 | }
219 | if !stop.IsZero() && time.Now().After(stop) {
220 | break
221 | }
222 |
223 | change := balanceChange{
224 | id: bson.NewObjectId(),
225 | origin: rand.Intn(max),
226 | target: rand.Intn(max),
227 | amount: 100,
228 | }
229 |
230 | var old Account
231 | var oldExists bool
232 | if params.reinsertCopy || params.reinsertZeroed {
233 | if err := accounts.FindId(change.origin).One(&old); err != mgo.ErrNotFound {
234 | c.Check(err, IsNil)
235 | change.amount = old.Balance
236 | oldExists = true
237 | }
238 | }
239 |
240 | var ops []txn.Op
241 | switch {
242 | case params.reinsertCopy && oldExists:
243 | ops = []txn.Op{{
244 | C: "accounts",
245 | Id: change.origin,
246 | Assert: M{"balance": change.amount},
247 | Remove: true,
248 | }, {
249 | C: "accounts",
250 | Id: change.target,
251 | Assert: txn.DocMissing,
252 | Insert: M{"balance": change.amount},
253 | }}
254 | case params.reinsertZeroed && oldExists:
255 | ops = []txn.Op{{
256 | C: "accounts",
257 | Id: change.target,
258 | Assert: txn.DocMissing,
259 | Insert: M{"balance": 0},
260 | }, {
261 | C: "accounts",
262 | Id: change.origin,
263 | Assert: M{"balance": change.amount},
264 | Remove: true,
265 | }, {
266 | C: "accounts",
267 | Id: change.target,
268 | Assert: txn.DocExists,
269 | Update: M{"$inc": M{"balance": change.amount}},
270 | }}
271 | case params.changeHalf:
272 | ops = []txn.Op{{
273 | C: "accounts",
274 | Id: change.origin,
275 | Assert: M{"balance": M{"$gte": change.amount}},
276 | Update: M{"$inc": M{"balance": -change.amount / 2}},
277 | }, {
278 | C: "accounts",
279 | Id: change.target,
280 | Assert: txn.DocExists,
281 | Update: M{"$inc": M{"balance": change.amount / 2}},
282 | }, {
283 | C: "accounts",
284 | Id: change.origin,
285 | Update: M{"$inc": M{"balance": -change.amount / 2}},
286 | }, {
287 | C: "accounts",
288 | Id: change.target,
289 | Update: M{"$inc": M{"balance": change.amount / 2}},
290 | }}
291 | default:
292 | ops = []txn.Op{{
293 | C: "accounts",
294 | Id: change.origin,
295 | Assert: M{"balance": M{"$gte": change.amount}},
296 | Update: M{"$inc": M{"balance": -change.amount}},
297 | }, {
298 | C: "accounts",
299 | Id: change.target,
300 | Assert: txn.DocExists,
301 | Update: M{"$inc": M{"balance": change.amount}},
302 | }}
303 | }
304 |
305 | err := runner.Run(ops, change.id, nil)
306 | if err != nil && err != txn.ErrAborted && err != txn.ErrChaos {
307 | c.Check(err, IsNil)
308 | }
309 | n++
310 | changes <- change
311 | }
312 | changes <- balanceChange{}
313 | }()
314 | }
315 |
316 | alive := params.workers
317 | changeLog := make([]balanceChange, 0, 1024)
318 | for alive > 0 {
319 | change := <-changes
320 | if change.id == "" {
321 | alive--
322 | } else {
323 | changeLog = append(changeLog, change)
324 | }
325 | }
326 | c.Check(len(changeLog), Not(Equals), 0, Commentf("No operations were even attempted."))
327 |
328 | txn.SetChaos(txn.Chaos{})
329 | err := runner.ResumeAll()
330 | c.Assert(err, IsNil)
331 |
332 | n, err := accounts.Count()
333 | c.Check(err, IsNil)
334 | c.Check(n, Equals, params.accounts, Commentf("Number of accounts has changed."))
335 |
336 | n, err = accounts.Find(M{"balance": M{"$lt": 0}}).Count()
337 | c.Check(err, IsNil)
338 | c.Check(n, Equals, 0, Commentf("There are %d accounts with negative balance.", n))
339 |
340 | globalBalance := 0
341 | iter := accounts.Find(nil).Iter()
342 | account := Account{}
343 | for iter.Next(&account) {
344 | globalBalance += account.Balance
345 | }
346 | c.Check(iter.Close(), IsNil)
347 | c.Check(globalBalance, Equals, params.accounts*300, Commentf("Total amount of money should be constant."))
348 |
349 | // Compute and verify the exact final state of all accounts.
350 | balance := make(map[int]int)
351 | for i := 0; i < params.accounts; i++ {
352 | balance[i] += 300
353 | }
354 | var applied, aborted int
355 | for _, change := range changeLog {
356 | err := runner.Resume(change.id)
357 | if err == txn.ErrAborted {
358 | aborted++
359 | continue
360 | } else if err != nil {
361 | c.Fatalf("resuming %s failed: %v", change.id, err)
362 | }
363 | balance[change.origin] -= change.amount
364 | balance[change.target] += change.amount
365 | applied++
366 | }
367 | iter = accounts.Find(nil).Iter()
368 | for iter.Next(&account) {
369 | c.Assert(account.Balance, Equals, balance[account.Id])
370 | }
371 | c.Check(iter.Close(), IsNil)
372 | c.Logf("Total transactions: %d (%d applied, %d aborted)", len(changeLog), applied, aborted)
373 |
374 | if params.changelog {
375 | n, err := tclog.Count()
376 | c.Assert(err, IsNil)
377 | // Check if the capped collection is full.
378 | dummy := make([]byte, 1024)
379 | tclog.Insert(M{"_id": bson.NewObjectId(), "dummy": dummy})
380 | m, err := tclog.Count()
381 | c.Assert(err, IsNil)
382 | if m == n+1 {
383 | // Wasn't full, so it must have seen it all.
384 | c.Assert(err, IsNil)
385 | c.Assert(n, Equals, applied)
386 | }
387 | }
388 | }
389 |
--------------------------------------------------------------------------------
/txn/txn.go:
--------------------------------------------------------------------------------
1 | // The txn package implements support for multi-document transactions.
2 | //
3 | // For details check the following blog post:
4 | //
5 | // http://blog.labix.org/2012/08/22/multi-doc-transactions-for-mongodb
6 | //
7 | package txn
8 |
9 | import (
10 | "encoding/binary"
11 | "fmt"
12 | "reflect"
13 | "sort"
14 | "strings"
15 | "sync"
16 |
17 | "gopkg.in/mgo.v2"
18 | "gopkg.in/mgo.v2/bson"
19 |
20 | crand "crypto/rand"
21 | mrand "math/rand"
22 | )
23 |
24 | type state int
25 |
26 | const (
27 | tpreparing state = 1 // One or more documents not prepared
28 | tprepared state = 2 // Prepared but not yet ready to run
29 | taborting state = 3 // Assertions failed, cleaning up
30 | tapplying state = 4 // Changes are in progress
31 | taborted state = 5 // Pre-conditions failed, nothing done
32 | tapplied state = 6 // All changes applied
33 | )
34 |
35 | func (s state) String() string {
36 | switch s {
37 | case tpreparing:
38 | return "preparing"
39 | case tprepared:
40 | return "prepared"
41 | case taborting:
42 | return "aborting"
43 | case tapplying:
44 | return "applying"
45 | case taborted:
46 | return "aborted"
47 | case tapplied:
48 | return "applied"
49 | }
50 | panic(fmt.Errorf("unknown state: %d", s))
51 | }
52 |
53 | var rand *mrand.Rand
54 | var randmu sync.Mutex
55 |
56 | func init() {
57 | var seed int64
58 | err := binary.Read(crand.Reader, binary.BigEndian, &seed)
59 | if err != nil {
60 | panic(err)
61 | }
62 | rand = mrand.New(mrand.NewSource(seed))
63 | }
64 |
65 | type transaction struct {
66 | Id bson.ObjectId `bson:"_id"`
67 | State state `bson:"s"`
68 | Info interface{} `bson:"i,omitempty"`
69 | Ops []Op `bson:"o"`
70 | Nonce string `bson:"n,omitempty"`
71 | Revnos []int64 `bson:"r,omitempty"`
72 |
73 | docKeysCached docKeys
74 | }
75 |
76 | func (t *transaction) String() string {
77 | if t.Nonce == "" {
78 | return t.Id.Hex()
79 | }
80 | return string(t.token())
81 | }
82 |
83 | func (t *transaction) done() bool {
84 | return t.State == tapplied || t.State == taborted
85 | }
86 |
87 | func (t *transaction) token() token {
88 | if t.Nonce == "" {
89 | panic("transaction has no nonce")
90 | }
91 | return tokenFor(t)
92 | }
93 |
94 | func (t *transaction) docKeys() docKeys {
95 | if t.docKeysCached != nil {
96 | return t.docKeysCached
97 | }
98 | dkeys := make(docKeys, 0, len(t.Ops))
99 | NextOp:
100 | for _, op := range t.Ops {
101 | dkey := op.docKey()
102 | for i := range dkeys {
103 | if dkey == dkeys[i] {
104 | continue NextOp
105 | }
106 | }
107 | dkeys = append(dkeys, dkey)
108 | }
109 | sort.Sort(dkeys)
110 | t.docKeysCached = dkeys
111 | return dkeys
112 | }
113 |
114 | // tokenFor returns a unique transaction token that
115 | // is composed of t's id and a nonce. If t already has
116 | // a nonce assigned to it, it will be used, otherwise
117 | // a new nonce will be generated.
118 | func tokenFor(t *transaction) token {
119 | nonce := t.Nonce
120 | if nonce == "" {
121 | nonce = newNonce()
122 | }
123 | return token(t.Id.Hex() + "_" + nonce)
124 | }
125 |
126 | func newNonce() string {
127 | randmu.Lock()
128 | r := rand.Uint32()
129 | randmu.Unlock()
130 | n := make([]byte, 8)
131 | for i := uint(0); i < 8; i++ {
132 | n[i] = "0123456789abcdef"[(r>>(4*i))&0xf]
133 | }
134 | return string(n)
135 | }
136 |
137 | type token string
138 |
139 | func (tt token) id() bson.ObjectId { return bson.ObjectIdHex(string(tt[:24])) }
140 | func (tt token) nonce() string { return string(tt[25:]) }
141 |
142 | // Op represents an operation to a single document that may be
143 | // applied as part of a transaction with other operations.
144 | type Op struct {
145 | // C and Id identify the collection and document this operation
146 | // refers to. Id is matched against the "_id" document field.
147 | C string `bson:"c"`
148 | Id interface{} `bson:"d"`
149 |
150 | // Assert optionally holds a query document that is used to
151 | // test the operation document at the time the transaction is
152 | // going to be applied. The assertions for all operations in
153 | // a transaction are tested before any changes take place,
154 | // and the transaction is entirely aborted if any of them
155 | // fails. This is also the only way to prevent a transaction
156 | // from being being applied (the transaction continues despite
157 | // the outcome of Insert, Update, and Remove).
158 | Assert interface{} `bson:"a,omitempty"`
159 |
160 | // The Insert, Update and Remove fields describe the mutation
161 | // intended by the operation. At most one of them may be set
162 | // per operation. If none are set, Assert must be set and the
163 | // operation becomes a read-only test.
164 | //
165 | // Insert holds the document to be inserted at the time the
166 | // transaction is applied. The Id field will be inserted
167 | // into the document automatically as its _id field. The
168 | // transaction will continue even if the document already
169 | // exists. Use Assert with txn.DocMissing if the insertion is
170 | // required.
171 | //
172 | // Update holds the update document to be applied at the time
173 | // the transaction is applied. The transaction will continue
174 | // even if a document with Id is missing. Use Assert to
175 | // test for the document presence or its contents.
176 | //
177 | // Remove indicates whether to remove the document with Id.
178 | // The transaction continues even if the document doesn't yet
179 | // exist at the time the transaction is applied. Use Assert
180 | // with txn.DocExists to make sure it will be removed.
181 | Insert interface{} `bson:"i,omitempty"`
182 | Update interface{} `bson:"u,omitempty"`
183 | Remove bool `bson:"r,omitempty"`
184 | }
185 |
186 | func (op *Op) isChange() bool {
187 | return op.Update != nil || op.Insert != nil || op.Remove
188 | }
189 |
190 | func (op *Op) docKey() docKey {
191 | return docKey{op.C, op.Id}
192 | }
193 |
194 | func (op *Op) name() string {
195 | switch {
196 | case op.Update != nil:
197 | return "update"
198 | case op.Insert != nil:
199 | return "insert"
200 | case op.Remove:
201 | return "remove"
202 | case op.Assert != nil:
203 | return "assert"
204 | }
205 | return "none"
206 | }
207 |
208 | const (
209 | // DocExists and DocMissing may be used on an operation's
210 | // Assert value to assert that the document with the given
211 | // Id exists or does not exist, respectively.
212 | DocExists = "d+"
213 | DocMissing = "d-"
214 | )
215 |
216 | // A Runner applies operations as part of a transaction onto any number
217 | // of collections within a database. See the Run method for details.
218 | type Runner struct {
219 | tc *mgo.Collection // txns
220 | sc *mgo.Collection // stash
221 | lc *mgo.Collection // log
222 | }
223 |
224 | // NewRunner returns a new transaction runner that uses tc to hold its
225 | // transactions.
226 | //
227 | // Multiple transaction collections may exist in a single database, but
228 | // all collections that are touched by operations in a given transaction
229 | // collection must be handled exclusively by it.
230 | //
231 | // A second collection with the same name as tc but suffixed by ".stash"
232 | // will be used for implementing the transactional behavior of insert
233 | // and remove operations.
234 | func NewRunner(tc *mgo.Collection) *Runner {
235 | return &Runner{tc, tc.Database.C(tc.Name + ".stash"), nil}
236 | }
237 |
238 | var ErrAborted = fmt.Errorf("transaction aborted")
239 |
240 | // Run creates a new transaction with ops and runs it immediately.
241 | // The id parameter specifies the transaction id, and may be written
242 | // down ahead of time to later verify the success of the change and
243 | // resume it, when the procedure is interrupted for any reason. If
244 | // empty, a random id will be generated.
245 | // The info parameter, if not nil, is included under the "i"
246 | // field of the transaction document.
247 | //
248 | // Operations across documents are not atomically applied, but are
249 | // guaranteed to be eventually all applied in the order provided or
250 | // all aborted, as long as the affected documents are only modified
251 | // through transactions. If documents are simultaneously modified
252 | // by transactions and out of transactions the behavior is undefined.
253 | //
254 | // If Run returns no errors, all operations were applied successfully.
255 | // If it returns ErrAborted, one or more operations can't be applied
256 | // and the transaction was entirely aborted with no changes performed.
257 | // Otherwise, if the transaction is interrupted while running for any
258 | // reason, it may be resumed explicitly or by attempting to apply
259 | // another transaction on any of the documents targeted by ops, as
260 | // long as the interruption was made after the transaction document
261 | // itself was inserted. Run Resume with the obtained transaction id
262 | // to confirm whether the transaction was applied or not.
263 | //
264 | // Any number of transactions may be run concurrently, with one
265 | // runner or many.
266 | func (r *Runner) Run(ops []Op, id bson.ObjectId, info interface{}) (err error) {
267 | const efmt = "error in transaction op %d: %s"
268 | for i := range ops {
269 | op := &ops[i]
270 | if op.C == "" || op.Id == nil {
271 | return fmt.Errorf(efmt, i, "C or Id missing")
272 | }
273 | changes := 0
274 | if op.Insert != nil {
275 | changes++
276 | }
277 | if op.Update != nil {
278 | changes++
279 | }
280 | if op.Remove {
281 | changes++
282 | }
283 | if changes > 1 {
284 | return fmt.Errorf(efmt, i, "more than one of Insert/Update/Remove set")
285 | }
286 | if changes == 0 && op.Assert == nil {
287 | return fmt.Errorf(efmt, i, "none of Assert/Insert/Update/Remove set")
288 | }
289 | }
290 | if id == "" {
291 | id = bson.NewObjectId()
292 | }
293 |
294 | // Insert transaction sooner rather than later, to stay on the safer side.
295 | t := transaction{
296 | Id: id,
297 | Ops: ops,
298 | State: tpreparing,
299 | Info: info,
300 | }
301 | if err = r.tc.Insert(&t); err != nil {
302 | return err
303 | }
304 | if err = flush(r, &t); err != nil {
305 | return err
306 | }
307 | if t.State == taborted {
308 | return ErrAborted
309 | } else if t.State != tapplied {
310 | panic(fmt.Errorf("invalid state for %s after flush: %q", &t, t.State))
311 | }
312 | return nil
313 | }
314 |
315 | // ResumeAll resumes all pending transactions. All ErrAborted errors
316 | // from individual transactions are ignored.
317 | func (r *Runner) ResumeAll() (err error) {
318 | debugf("Resuming all unfinished transactions")
319 | iter := r.tc.Find(bson.D{{"s", bson.D{{"$in", []state{tpreparing, tprepared, tapplying}}}}}).Iter()
320 | var t transaction
321 | for iter.Next(&t) {
322 | if t.State == tapplied || t.State == taborted {
323 | continue
324 | }
325 | debugf("Resuming %s from %q", t.Id, t.State)
326 | if err := flush(r, &t); err != nil {
327 | return err
328 | }
329 | if !t.done() {
330 | panic(fmt.Errorf("invalid state for %s after flush: %q", &t, t.State))
331 | }
332 | }
333 | return nil
334 | }
335 |
336 | // Resume resumes the transaction with id. It returns mgo.ErrNotFound
337 | // if the transaction is not found. Otherwise, it has the same semantics
338 | // as the Run method after the transaction is inserted.
339 | func (r *Runner) Resume(id bson.ObjectId) (err error) {
340 | t, err := r.load(id)
341 | if err != nil {
342 | return err
343 | }
344 | if !t.done() {
345 | debugf("Resuming %s from %q", t, t.State)
346 | if err := flush(r, t); err != nil {
347 | return err
348 | }
349 | }
350 | if t.State == taborted {
351 | return ErrAborted
352 | } else if t.State != tapplied {
353 | panic(fmt.Errorf("invalid state for %s after flush: %q", t, t.State))
354 | }
355 | return nil
356 | }
357 |
358 | // ChangeLog enables logging of changes to the given collection
359 | // every time a transaction that modifies content is done being
360 | // applied.
361 | //
362 | // Saved documents are in the format:
363 | //
364 | //     {"_id": <txn id>, <collection>: {"d": [<doc id>, ...], "r": [<doc revno>, ...]}}
365 | //
366 | // The document revision is the value of the txn-revno field after
367 | // the change has been applied. Negative values indicate the document
368 | // was not present in the collection. Revisions will not change when
369 | // updates or removes are applied to missing documents or inserts are
370 | // attempted when the document isn't present.
371 | func (r *Runner) ChangeLog(logc *mgo.Collection) {
372 | r.lc = logc
373 | }
374 |
375 | // PurgeMissing removes from collections any state that refers to transaction
376 | // documents that for whatever reason have been lost from the system (removed
377 | // by accident or lost in a hard crash, for example).
378 | //
379 | // This method should very rarely be needed, if at all, and should never be
380 | // used during the normal operation of an application. Its purpose is to put
381 | // a system that has seen unavoidable corruption back in a working state.
382 | func (r *Runner) PurgeMissing(collections ...string) error {
383 | type M map[string]interface{}
384 | type S []interface{}
385 |
386 | type TDoc struct {
387 | Id interface{} "_id"
388 | TxnQueue []string "txn-queue"
389 | }
390 |
391 | found := make(map[bson.ObjectId]bool)
392 |
393 | sort.Strings(collections)
394 | for _, collection := range collections {
395 | c := r.tc.Database.C(collection)
396 | iter := c.Find(nil).Select(bson.M{"_id": 1, "txn-queue": 1}).Iter()
397 | var tdoc TDoc
398 | for iter.Next(&tdoc) {
399 | for _, txnToken := range tdoc.TxnQueue {
400 | txnId := bson.ObjectIdHex(txnToken[:24])
401 | if found[txnId] {
402 | continue
403 | }
404 | if r.tc.FindId(txnId).One(nil) == nil {
405 | found[txnId] = true
406 | continue
407 | }
408 | logf("WARNING: purging from document %s/%v the missing transaction id %s", collection, tdoc.Id, txnId)
409 | err := c.UpdateId(tdoc.Id, M{"$pull": M{"txn-queue": M{"$regex": "^" + txnId.Hex() + "_*"}}})
410 | if err != nil {
411 | return fmt.Errorf("error purging missing transaction %s: %v", txnId.Hex(), err)
412 | }
413 | }
414 | }
415 | if err := iter.Close(); err != nil {
416 | return fmt.Errorf("transaction queue iteration error for %s: %v", collection, err)
417 | }
418 | }
419 |
420 | type StashTDoc struct {
421 | Id docKey "_id"
422 | TxnQueue []string "txn-queue"
423 | }
424 |
425 | iter := r.sc.Find(nil).Select(bson.M{"_id": 1, "txn-queue": 1}).Iter()
426 | var stdoc StashTDoc
427 | for iter.Next(&stdoc) {
428 | for _, txnToken := range stdoc.TxnQueue {
429 | txnId := bson.ObjectIdHex(txnToken[:24])
430 | if found[txnId] {
431 | continue
432 | }
433 | if r.tc.FindId(txnId).One(nil) == nil {
434 | found[txnId] = true
435 | continue
436 | }
437 | logf("WARNING: purging from stash document %s/%v the missing transaction id %s", stdoc.Id.C, stdoc.Id.Id, txnId)
438 | err := r.sc.UpdateId(stdoc.Id, M{"$pull": M{"txn-queue": M{"$regex": "^" + txnId.Hex() + "_*"}}})
439 | if err != nil {
440 | return fmt.Errorf("error purging missing transaction %s: %v", txnId.Hex(), err)
441 | }
442 | }
443 | }
444 | if err := iter.Close(); err != nil {
445 | return fmt.Errorf("transaction stash iteration error: %v", err)
446 | }
447 |
448 | return nil
449 | }
450 |
451 | func (r *Runner) load(id bson.ObjectId) (*transaction, error) {
452 | var t transaction
453 | err := r.tc.FindId(id).One(&t)
454 | if err == mgo.ErrNotFound {
455 | return nil, fmt.Errorf("cannot find transaction %s", id)
456 | } else if err != nil {
457 | return nil, err
458 | }
459 | return &t, nil
460 | }
461 |
462 | type typeNature int
463 |
464 | const (
465 | // The order of these values matters. Transactions
466 | // from applications using different ordering will
467 | // be incompatible with each other.
468 | _ typeNature = iota
469 | natureString
470 | natureInt
471 | natureFloat
472 | natureBool
473 | natureStruct
474 | )
475 |
476 | func valueNature(v interface{}) (value interface{}, nature typeNature) {
477 | rv := reflect.ValueOf(v)
478 | switch rv.Kind() {
479 | case reflect.String:
480 | return rv.String(), natureString
481 | case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
482 | return rv.Int(), natureInt
483 | case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
484 | return int64(rv.Uint()), natureInt
485 | case reflect.Float32, reflect.Float64:
486 | return rv.Float(), natureFloat
487 | case reflect.Bool:
488 | return rv.Bool(), natureBool
489 | case reflect.Struct:
490 | return v, natureStruct
491 | }
492 | panic("document id type unsupported by txn: " + rv.Kind().String())
493 | }
494 |
495 | type docKey struct {
496 | C string
497 | Id interface{}
498 | }
499 |
500 | type docKeys []docKey
501 |
502 | func (ks docKeys) Len() int { return len(ks) }
503 | func (ks docKeys) Swap(i, j int) { ks[i], ks[j] = ks[j], ks[i] }
504 | func (ks docKeys) Less(i, j int) bool {
505 | a, b := ks[i], ks[j]
506 | if a.C != b.C {
507 | return a.C < b.C
508 | }
509 | return valuecmp(a.Id, b.Id) == -1
510 | }
511 |
512 | func valuecmp(a, b interface{}) int {
513 | av, an := valueNature(a)
514 | bv, bn := valueNature(b)
515 | if an < bn {
516 | return -1
517 | }
518 | if an > bn {
519 | return 1
520 | }
521 |
522 | if av == bv {
523 | return 0
524 | }
525 | var less bool
526 | switch an {
527 | case natureString:
528 | less = av.(string) < bv.(string)
529 | case natureInt:
530 | less = av.(int64) < bv.(int64)
531 | case natureFloat:
532 | less = av.(float64) < bv.(float64)
533 | case natureBool:
534 | less = !av.(bool) && bv.(bool)
535 | case natureStruct:
536 | less = structcmp(av, bv) == -1
537 | default:
538 | panic("unreachable")
539 | }
540 | if less {
541 | return -1
542 | }
543 | return 1
544 | }
545 |
546 | func structcmp(a, b interface{}) int {
547 | av := reflect.ValueOf(a)
548 | bv := reflect.ValueOf(b)
549 |
550 | var ai, bi = 0, 0
551 | var an, bn = av.NumField(), bv.NumField()
552 | var avi, bvi interface{}
553 | var af, bf reflect.StructField
554 | for {
555 | for ai < an {
556 | af = av.Type().Field(ai)
557 | if isExported(af.Name) {
558 | avi = av.Field(ai).Interface()
559 | ai++
560 | break
561 | }
562 | ai++
563 | }
564 | for bi < bn {
565 | bf = bv.Type().Field(bi)
566 | if isExported(bf.Name) {
567 | bvi = bv.Field(bi).Interface()
568 | bi++
569 | break
570 | }
571 | bi++
572 | }
573 | if n := valuecmp(avi, bvi); n != 0 {
574 | return n
575 | }
576 | nameA := getFieldName(af)
577 | nameB := getFieldName(bf)
578 | if nameA < nameB {
579 | return -1
580 | }
581 | if nameA > nameB {
582 | return 1
583 | }
584 | if ai == an && bi == bn {
585 | return 0
586 | }
587 | if ai == an || bi == bn {
588 | if ai == an {
589 | return -1
590 | }
591 | return 1
592 | }
593 | }
594 | panic("unreachable")
595 | }
596 |
597 | func isExported(name string) bool {
598 | a := name[0]
599 | return a >= 'A' && a <= 'Z'
600 | }
601 |
602 | func getFieldName(f reflect.StructField) string {
603 | name := f.Tag.Get("bson")
604 | if i := strings.Index(name, ","); i >= 0 {
605 | name = name[:i]
606 | }
607 | if name == "" {
608 | name = strings.ToLower(f.Name)
609 | }
610 | return name
611 | }
612 |
--------------------------------------------------------------------------------
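A minimal usage sketch of the Runner API documented above. The database address, the "accounts" collection, and its balance field are illustrative assumptions, not part of the package:

package main

import (
	"log"

	"gopkg.in/mgo.v2"
	"gopkg.in/mgo.v2/bson"
	"gopkg.in/mgo.v2/txn"
)

func main() {
	session, err := mgo.Dial("localhost:27017")
	if err != nil {
		log.Fatal(err)
	}
	defer session.Close()

	db := session.DB("test")
	runner := txn.NewRunner(db.C("tc")) // the "tc.stash" collection is managed automatically

	// Transfer 100 from account 0 to account 1. The assertion on the
	// origin's balance makes the whole transaction abort rather than
	// drive the balance negative.
	ops := []txn.Op{{
		C:      "accounts",
		Id:     0,
		Assert: bson.M{"balance": bson.M{"$gte": 100}},
		Update: bson.M{"$inc": bson.M{"balance": -100}},
	}, {
		C:      "accounts",
		Id:     1,
		Assert: txn.DocExists,
		Update: bson.M{"$inc": bson.M{"balance": 100}},
	}}

	id := bson.NewObjectId() // keep this around to Resume after an interruption
	switch err := runner.Run(ops, id, nil); err {
	case nil:
		log.Printf("transaction %s applied", id.Hex())
	case txn.ErrAborted:
		log.Printf("transaction %s aborted: an assertion failed", id.Hex())
	default:
		log.Fatal(err) // interrupted; Resume(id) can later finish or abort it
	}
}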
/txn/txn_test.go:
--------------------------------------------------------------------------------
1 | package txn_test
2 |
3 | import (
4 | "flag"
5 | "fmt"
6 | "sync"
7 | "testing"
8 | "time"
9 |
10 | . "gopkg.in/check.v1"
11 | "gopkg.in/mgo.v2"
12 | "gopkg.in/mgo.v2/bson"
13 | "gopkg.in/mgo.v2/dbtest"
14 | "gopkg.in/mgo.v2/txn"
15 | )
16 |
17 | func TestAll(t *testing.T) {
18 | TestingT(t)
19 | }
20 |
21 | type S struct {
22 | server dbtest.DBServer
23 | session *mgo.Session
24 | db *mgo.Database
25 | tc, sc *mgo.Collection
26 | accounts *mgo.Collection
27 | runner *txn.Runner
28 | }
29 |
30 | var _ = Suite(&S{})
31 |
32 | type M map[string]interface{}
33 |
34 | func (s *S) SetUpSuite(c *C) {
35 | s.server.SetPath(c.MkDir())
36 | }
37 |
38 | func (s *S) TearDownSuite(c *C) {
39 | s.server.Stop()
40 | }
41 |
42 | func (s *S) SetUpTest(c *C) {
43 | s.server.Wipe()
44 |
45 | txn.SetChaos(txn.Chaos{})
46 | txn.SetLogger(c)
47 | txn.SetDebug(true)
48 |
49 | s.session = s.server.Session()
50 | s.db = s.session.DB("test")
51 | s.tc = s.db.C("tc")
52 | s.sc = s.db.C("tc.stash")
53 | s.accounts = s.db.C("accounts")
54 | s.runner = txn.NewRunner(s.tc)
55 | }
56 |
57 | func (s *S) TearDownTest(c *C) {
58 | txn.SetLogger(nil)
59 | txn.SetDebug(false)
60 | s.session.Close()
61 | }
62 |
63 | type Account struct {
64 | Id int `bson:"_id"`
65 | Balance int
66 | }
67 |
68 | func (s *S) TestDocExists(c *C) {
69 | err := s.accounts.Insert(M{"_id": 0, "balance": 300})
70 | c.Assert(err, IsNil)
71 |
72 | exists := []txn.Op{{
73 | C: "accounts",
74 | Id: 0,
75 | Assert: txn.DocExists,
76 | }}
77 | missing := []txn.Op{{
78 | C: "accounts",
79 | Id: 0,
80 | Assert: txn.DocMissing,
81 | }}
82 |
83 | err = s.runner.Run(exists, "", nil)
84 | c.Assert(err, IsNil)
85 | err = s.runner.Run(missing, "", nil)
86 | c.Assert(err, Equals, txn.ErrAborted)
87 |
88 | err = s.accounts.RemoveId(0)
89 | c.Assert(err, IsNil)
90 |
91 | err = s.runner.Run(exists, "", nil)
92 | c.Assert(err, Equals, txn.ErrAborted)
93 | err = s.runner.Run(missing, "", nil)
94 | c.Assert(err, IsNil)
95 | }
96 |
97 | func (s *S) TestInsert(c *C) {
98 | err := s.accounts.Insert(M{"_id": 0, "balance": 300})
99 | c.Assert(err, IsNil)
100 |
101 | ops := []txn.Op{{
102 | C: "accounts",
103 | Id: 0,
104 | Insert: M{"balance": 200},
105 | }}
106 |
107 | err = s.runner.Run(ops, "", nil)
108 | c.Assert(err, IsNil)
109 |
110 | var account Account
111 | err = s.accounts.FindId(0).One(&account)
112 | c.Assert(err, IsNil)
113 | c.Assert(account.Balance, Equals, 300)
114 |
115 | ops[0].Id = 1
116 | err = s.runner.Run(ops, "", nil)
117 | c.Assert(err, IsNil)
118 |
119 | err = s.accounts.FindId(1).One(&account)
120 | c.Assert(err, IsNil)
121 | c.Assert(account.Balance, Equals, 200)
122 | }
123 |
124 | func (s *S) TestInsertStructID(c *C) {
125 | type id struct {
126 | FirstName string
127 | LastName string
128 | }
129 | ops := []txn.Op{{
130 | C: "accounts",
131 | Id: id{FirstName: "John", LastName: "Jones"},
132 | Assert: txn.DocMissing,
133 | Insert: M{"balance": 200},
134 | }, {
135 | C: "accounts",
136 | Id: id{FirstName: "Sally", LastName: "Smith"},
137 | Assert: txn.DocMissing,
138 | Insert: M{"balance": 800},
139 | }}
140 |
141 | err := s.runner.Run(ops, "", nil)
142 | c.Assert(err, IsNil)
143 |
144 | n, err := s.accounts.Find(nil).Count()
145 | c.Assert(err, IsNil)
146 | c.Assert(n, Equals, 2)
147 | }
148 |
149 | func (s *S) TestRemove(c *C) {
150 | err := s.accounts.Insert(M{"_id": 0, "balance": 300})
151 | c.Assert(err, IsNil)
152 |
153 | ops := []txn.Op{{
154 | C: "accounts",
155 | Id: 0,
156 | Remove: true,
157 | }}
158 |
159 | err = s.runner.Run(ops, "", nil)
160 | c.Assert(err, IsNil)
161 |
162 | err = s.accounts.FindId(0).One(nil)
163 | c.Assert(err, Equals, mgo.ErrNotFound)
164 |
165 | err = s.runner.Run(ops, "", nil)
166 | c.Assert(err, IsNil)
167 | }
168 |
169 | func (s *S) TestUpdate(c *C) {
170 | var err error
171 | err = s.accounts.Insert(M{"_id": 0, "balance": 200})
172 | c.Assert(err, IsNil)
173 | err = s.accounts.Insert(M{"_id": 1, "balance": 200})
174 | c.Assert(err, IsNil)
175 |
176 | ops := []txn.Op{{
177 | C: "accounts",
178 | Id: 0,
179 | Update: M{"$inc": M{"balance": 100}},
180 | }}
181 |
182 | err = s.runner.Run(ops, "", nil)
183 | c.Assert(err, IsNil)
184 |
185 | var account Account
186 | err = s.accounts.FindId(0).One(&account)
187 | c.Assert(err, IsNil)
188 | c.Assert(account.Balance, Equals, 300)
189 |
190 | ops[0].Id = 1
191 |
192 | err = s.accounts.FindId(1).One(&account)
193 | c.Assert(err, IsNil)
194 | c.Assert(account.Balance, Equals, 200)
195 | }
196 |
197 | func (s *S) TestInsertUpdate(c *C) {
198 | ops := []txn.Op{{
199 | C: "accounts",
200 | Id: 0,
201 | Insert: M{"_id": 0, "balance": 200},
202 | }, {
203 | C: "accounts",
204 | Id: 0,
205 | Update: M{"$inc": M{"balance": 100}},
206 | }}
207 |
208 | err := s.runner.Run(ops, "", nil)
209 | c.Assert(err, IsNil)
210 |
211 | var account Account
212 | err = s.accounts.FindId(0).One(&account)
213 | c.Assert(err, IsNil)
214 | c.Assert(account.Balance, Equals, 300)
215 |
216 | err = s.runner.Run(ops, "", nil)
217 | c.Assert(err, IsNil)
218 |
219 | err = s.accounts.FindId(0).One(&account)
220 | c.Assert(err, IsNil)
221 | c.Assert(account.Balance, Equals, 400)
222 | }
223 |
224 | func (s *S) TestUpdateInsert(c *C) {
225 | ops := []txn.Op{{
226 | C: "accounts",
227 | Id: 0,
228 | Update: M{"$inc": M{"balance": 100}},
229 | }, {
230 | C: "accounts",
231 | Id: 0,
232 | Insert: M{"_id": 0, "balance": 200},
233 | }}
234 |
235 | err := s.runner.Run(ops, "", nil)
236 | c.Assert(err, IsNil)
237 |
238 | var account Account
239 | err = s.accounts.FindId(0).One(&account)
240 | c.Assert(err, IsNil)
241 | c.Assert(account.Balance, Equals, 200)
242 |
243 | err = s.runner.Run(ops, "", nil)
244 | c.Assert(err, IsNil)
245 |
246 | err = s.accounts.FindId(0).One(&account)
247 | c.Assert(err, IsNil)
248 | c.Assert(account.Balance, Equals, 300)
249 | }
250 |
251 | func (s *S) TestInsertRemoveInsert(c *C) {
252 | ops := []txn.Op{{
253 | C: "accounts",
254 | Id: 0,
255 | Insert: M{"_id": 0, "balance": 200},
256 | }, {
257 | C: "accounts",
258 | Id: 0,
259 | Remove: true,
260 | }, {
261 | C: "accounts",
262 | Id: 0,
263 | Insert: M{"_id": 0, "balance": 300},
264 | }}
265 |
266 | err := s.runner.Run(ops, "", nil)
267 | c.Assert(err, IsNil)
268 |
269 | var account Account
270 | err = s.accounts.FindId(0).One(&account)
271 | c.Assert(err, IsNil)
272 | c.Assert(account.Balance, Equals, 300)
273 | }
274 |
275 | func (s *S) TestQueueStashing(c *C) {
276 | txn.SetChaos(txn.Chaos{
277 | KillChance: 1,
278 | Breakpoint: "set-applying",
279 | })
280 |
281 | opses := [][]txn.Op{{{
282 | C: "accounts",
283 | Id: 0,
284 | Insert: M{"balance": 100},
285 | }}, {{
286 | C: "accounts",
287 | Id: 0,
288 | Remove: true,
289 | }}, {{
290 | C: "accounts",
291 | Id: 0,
292 | Insert: M{"balance": 200},
293 | }}, {{
294 | C: "accounts",
295 | Id: 0,
296 | Update: M{"$inc": M{"balance": 100}},
297 | }}}
298 |
299 | var last bson.ObjectId
300 | for _, ops := range opses {
301 | last = bson.NewObjectId()
302 | err := s.runner.Run(ops, last, nil)
303 | c.Assert(err, Equals, txn.ErrChaos)
304 | }
305 |
306 | txn.SetChaos(txn.Chaos{})
307 | err := s.runner.Resume(last)
308 | c.Assert(err, IsNil)
309 |
310 | var account Account
311 | err = s.accounts.FindId(0).One(&account)
312 | c.Assert(err, IsNil)
313 | c.Assert(account.Balance, Equals, 300)
314 | }
315 |
316 | func (s *S) TestInfo(c *C) {
317 | ops := []txn.Op{{
318 | C: "accounts",
319 | Id: 0,
320 | Assert: txn.DocMissing,
321 | }}
322 |
323 | id := bson.NewObjectId()
324 | err := s.runner.Run(ops, id, M{"n": 42})
325 | c.Assert(err, IsNil)
326 |
327 | var t struct{ I struct{ N int } }
328 | err = s.tc.FindId(id).One(&t)
329 | c.Assert(err, IsNil)
330 | c.Assert(t.I.N, Equals, 42)
331 | }
332 |
333 | func (s *S) TestErrors(c *C) {
334 | doc := bson.M{"foo": 1}
335 | tests := []txn.Op{{
336 | C: "c",
337 | Id: 0,
338 | }, {
339 | C: "c",
340 | Id: 0,
341 | Insert: doc,
342 | Remove: true,
343 | }, {
344 | C: "c",
345 | Id: 0,
346 | Insert: doc,
347 | Update: doc,
348 | }, {
349 | C: "c",
350 | Id: 0,
351 | Update: doc,
352 | Remove: true,
353 | }, {
354 | C: "c",
355 | Assert: doc,
356 | }, {
357 | Id: 0,
358 | Assert: doc,
359 | }}
360 |
361 | txn.SetChaos(txn.Chaos{KillChance: 1.0})
362 | for _, op := range tests {
363 | c.Logf("op: %v", op)
364 | err := s.runner.Run([]txn.Op{op}, "", nil)
365 | c.Assert(err, ErrorMatches, "error in transaction op 0: .*")
366 | }
367 | }
368 |
369 | func (s *S) TestAssertNestedOr(c *C) {
370 | // Assert uses $or internally. Ensure nesting works.
371 | err := s.accounts.Insert(M{"_id": 0, "balance": 300})
372 | c.Assert(err, IsNil)
373 |
374 | ops := []txn.Op{{
375 | C: "accounts",
376 | Id: 0,
377 | Assert: bson.D{{"$or", []bson.D{{{"balance", 100}}, {{"balance", 300}}}}},
378 | Update: bson.D{{"$inc", bson.D{{"balance", 100}}}},
379 | }}
380 |
381 | err = s.runner.Run(ops, "", nil)
382 | c.Assert(err, IsNil)
383 |
384 | var account Account
385 | err = s.accounts.FindId(0).One(&account)
386 | c.Assert(err, IsNil)
387 | c.Assert(account.Balance, Equals, 400)
388 | }
389 |
390 | func (s *S) TestVerifyFieldOrdering(c *C) {
391 | 	// Certain operations used to be built with a map, which meant
392 | 	// the ordering of fields could get messed up.
393 | fields := bson.D{{"a", 1}, {"b", 2}, {"c", 3}}
394 | ops := []txn.Op{{
395 | C: "accounts",
396 | Id: 0,
397 | Insert: fields,
398 | }}
399 |
400 | err := s.runner.Run(ops, "", nil)
401 | c.Assert(err, IsNil)
402 |
403 | var d bson.D
404 | err = s.accounts.FindId(0).One(&d)
405 | c.Assert(err, IsNil)
406 |
407 | var filtered bson.D
408 | for _, e := range d {
409 | switch e.Name {
410 | case "a", "b", "c":
411 | filtered = append(filtered, e)
412 | }
413 | }
414 | c.Assert(filtered, DeepEquals, fields)
415 | }
416 |
417 | func (s *S) TestChangeLog(c *C) {
418 | chglog := s.db.C("chglog")
419 | s.runner.ChangeLog(chglog)
420 |
421 | ops := []txn.Op{{
422 | C: "debts",
423 | Id: 0,
424 | Assert: txn.DocMissing,
425 | }, {
426 | C: "accounts",
427 | Id: 0,
428 | Insert: M{"balance": 300},
429 | }, {
430 | C: "accounts",
431 | Id: 1,
432 | Insert: M{"balance": 300},
433 | }, {
434 | C: "people",
435 | Id: "joe",
436 | Insert: M{"accounts": []int64{0, 1}},
437 | }}
438 | id := bson.NewObjectId()
439 | err := s.runner.Run(ops, id, nil)
440 | c.Assert(err, IsNil)
441 |
442 | type IdList []interface{}
443 | type Log struct {
444 | Docs IdList "d"
445 | Revnos []int64 "r"
446 | }
447 | var m map[string]*Log
448 | err = chglog.FindId(id).One(&m)
449 | c.Assert(err, IsNil)
450 |
451 | c.Assert(m["accounts"], DeepEquals, &Log{IdList{0, 1}, []int64{2, 2}})
452 | c.Assert(m["people"], DeepEquals, &Log{IdList{"joe"}, []int64{2}})
453 | c.Assert(m["debts"], IsNil)
454 |
455 | ops = []txn.Op{{
456 | C: "accounts",
457 | Id: 0,
458 | Update: M{"$inc": M{"balance": 100}},
459 | }, {
460 | C: "accounts",
461 | Id: 1,
462 | Update: M{"$inc": M{"balance": 100}},
463 | }}
464 | id = bson.NewObjectId()
465 | err = s.runner.Run(ops, id, nil)
466 | c.Assert(err, IsNil)
467 |
468 | m = nil
469 | err = chglog.FindId(id).One(&m)
470 | c.Assert(err, IsNil)
471 |
472 | c.Assert(m["accounts"], DeepEquals, &Log{IdList{0, 1}, []int64{3, 3}})
473 | c.Assert(m["people"], IsNil)
474 |
475 | ops = []txn.Op{{
476 | C: "accounts",
477 | Id: 0,
478 | Remove: true,
479 | }, {
480 | C: "people",
481 | Id: "joe",
482 | Remove: true,
483 | }}
484 | id = bson.NewObjectId()
485 | err = s.runner.Run(ops, id, nil)
486 | c.Assert(err, IsNil)
487 |
488 | m = nil
489 | err = chglog.FindId(id).One(&m)
490 | c.Assert(err, IsNil)
491 |
492 | c.Assert(m["accounts"], DeepEquals, &Log{IdList{0}, []int64{-4}})
493 | c.Assert(m["people"], DeepEquals, &Log{IdList{"joe"}, []int64{-3}})
494 | }
495 |
496 | func (s *S) TestPurgeMissing(c *C) {
497 | txn.SetChaos(txn.Chaos{
498 | KillChance: 1,
499 | Breakpoint: "set-applying",
500 | })
501 |
502 | err := s.accounts.Insert(M{"_id": 0, "balance": 100})
503 | c.Assert(err, IsNil)
504 | err = s.accounts.Insert(M{"_id": 1, "balance": 100})
505 | c.Assert(err, IsNil)
506 |
507 | ops1 := []txn.Op{{
508 | C: "accounts",
509 | Id: 3,
510 | Insert: M{"balance": 100},
511 | }}
512 |
513 | ops2 := []txn.Op{{
514 | C: "accounts",
515 | Id: 0,
516 | Remove: true,
517 | }, {
518 | C: "accounts",
519 | Id: 1,
520 | Update: M{"$inc": M{"balance": 100}},
521 | }, {
522 | C: "accounts",
523 | Id: 2,
524 | Insert: M{"balance": 100},
525 | }}
526 |
527 | first := bson.NewObjectId()
528 | c.Logf("---- Running ops1 under transaction %q, to be canceled by chaos", first.Hex())
529 | err = s.runner.Run(ops1, first, nil)
530 | c.Assert(err, Equals, txn.ErrChaos)
531 |
532 | last := bson.NewObjectId()
533 | c.Logf("---- Running ops2 under transaction %q, to be canceled by chaos", last.Hex())
534 | err = s.runner.Run(ops2, last, nil)
535 | c.Assert(err, Equals, txn.ErrChaos)
536 |
537 | c.Logf("---- Removing transaction %q", last.Hex())
538 | err = s.tc.RemoveId(last)
539 | c.Assert(err, IsNil)
540 |
541 | c.Logf("---- Disabling chaos and attempting to resume all")
542 | txn.SetChaos(txn.Chaos{})
543 | err = s.runner.ResumeAll()
544 | c.Assert(err, IsNil)
545 |
546 | again := bson.NewObjectId()
547 | c.Logf("---- Running ops2 again under transaction %q, to fail for missing transaction", again.Hex())
548 | err = s.runner.Run(ops2, again, nil)
549 | c.Assert(err, ErrorMatches, "cannot find transaction .*")
550 |
551 | c.Logf("---- Purging missing transactions")
552 | err = s.runner.PurgeMissing("accounts")
553 | c.Assert(err, IsNil)
554 |
555 | c.Logf("---- Resuming pending transactions")
556 | err = s.runner.ResumeAll()
557 | c.Assert(err, IsNil)
558 |
559 | expect := []struct{ Id, Balance int }{
560 | {0, -1},
561 | {1, 200},
562 | {2, 100},
563 | {3, 100},
564 | }
565 | var got Account
566 | for _, want := range expect {
567 | err = s.accounts.FindId(want.Id).One(&got)
568 | if want.Balance == -1 {
569 | if err != mgo.ErrNotFound {
570 | 				c.Errorf("Account %d should not exist, find got err=%#v", want.Id, err)
571 | }
572 | } else if err != nil {
573 | c.Errorf("Account %d should have balance of %d, but wasn't found", want.Id, want.Balance)
574 | } else if got.Balance != want.Balance {
575 | c.Errorf("Account %d should have balance of %d, got %d", want.Id, want.Balance, got.Balance)
576 | }
577 | }
578 | }
579 |
580 | func (s *S) TestTxnQueueStashStressTest(c *C) {
581 | txn.SetChaos(txn.Chaos{
582 | SlowdownChance: 0.3,
583 | Slowdown: 50 * time.Millisecond,
584 | })
585 | defer txn.SetChaos(txn.Chaos{})
586 |
587 | // So we can run more iterations of the test in less time.
588 | txn.SetDebug(false)
589 |
590 | const runners = 10
591 | const inserts = 10
592 | const repeat = 100
593 |
594 | for r := 0; r < repeat; r++ {
595 | var wg sync.WaitGroup
596 | wg.Add(runners)
597 | for i := 0; i < runners; i++ {
598 | go func(i, r int) {
599 | defer wg.Done()
600 |
601 | session := s.session.New()
602 | defer session.Close()
603 | runner := txn.NewRunner(s.tc.With(session))
604 |
605 | for j := 0; j < inserts; j++ {
606 | ops := []txn.Op{{
607 | C: "accounts",
608 | Id: fmt.Sprintf("insert-%d-%d", r, j),
609 | Insert: bson.M{
610 | "added-by": i,
611 | },
612 | }}
613 | err := runner.Run(ops, "", nil)
614 | if err != txn.ErrAborted {
615 | c.Check(err, IsNil)
616 | }
617 | }
618 | }(i, r)
619 | }
620 | wg.Wait()
621 | }
622 | }
623 |
624 | func (s *S) TestPurgeMissingPipelineSizeLimit(c *C) {
625 | // This test ensures that PurgeMissing can handle very large
626 | // txn-queue fields. Previous iterations of PurgeMissing would
627 | // trigger a 16MB aggregation pipeline result size limit when run
628 | 	// against documents or stashes with large numbers of txn-queue
629 | 	// entries. PurgeMissing no longer uses aggregation pipelines in
630 | 	// order to work around this limit.
631 |
632 | 	// The pipeline result size limitation was removed in MongoDB 2.6, so
633 | 	// this test is only run for older MongoDB versions.
634 | build, err := s.session.BuildInfo()
635 | c.Assert(err, IsNil)
636 | if build.VersionAtLeast(2, 6) {
637 | c.Skip("This tests a problem that can only happen with MongoDB < 2.6 ")
638 | }
639 |
640 | // Insert a single document to work with.
641 | err = s.accounts.Insert(M{"_id": 0, "balance": 100})
642 | c.Assert(err, IsNil)
643 |
644 | ops := []txn.Op{{
645 | C: "accounts",
646 | Id: 0,
647 | Update: M{"$inc": M{"balance": 100}},
648 | }}
649 |
650 | // Generate one successful transaction.
651 | good := bson.NewObjectId()
652 | c.Logf("---- Running ops under transaction %q", good.Hex())
653 | err = s.runner.Run(ops, good, nil)
654 | c.Assert(err, IsNil)
655 |
656 | 	// Generate another transaction which will go missing.
657 | missing := bson.NewObjectId()
658 | c.Logf("---- Running ops under transaction %q (which will go missing)", missing.Hex())
659 | err = s.runner.Run(ops, missing, nil)
660 | c.Assert(err, IsNil)
661 |
662 | err = s.tc.RemoveId(missing)
663 | c.Assert(err, IsNil)
664 |
665 | // Generate a txn-queue on the test document that's large enough
666 | // that it used to cause PurgeMissing to exceed MongoDB's pipeline
667 | // result 16MB size limit (MongoDB 2.4 and older only).
668 | //
669 | 	// The contents of the txn-queue field don't matter, only that
670 | // it's big enough to trigger the size limit. The required size
671 | // can also be achieved by using multiple documents as long as the
672 | // cumulative size of all the txn-queue fields exceeds the
673 | // pipeline limit. A single document is easier to work with for
674 | // this test however.
675 | //
676 | 	// The txn id of the successful transaction is used to fill the
677 | // txn-queue because this takes advantage of a short circuit in
678 | // PurgeMissing, dramatically speeding up the test run time.
679 | const fakeQueueLen = 250000
680 | fakeTxnQueue := make([]string, fakeQueueLen)
681 | token := good.Hex() + "_12345678" // txn id + nonce
682 | for i := 0; i < fakeQueueLen; i++ {
683 | fakeTxnQueue[i] = token
684 | }
685 |
686 | err = s.accounts.UpdateId(0, bson.M{
687 | "$set": bson.M{"txn-queue": fakeTxnQueue},
688 | })
689 | c.Assert(err, IsNil)
690 |
691 | // PurgeMissing could hit the same pipeline result size limit when
692 | 	// processing the txn-queue fields of stash documents, so insert
693 | // the large txn-queue there too to ensure that no longer happens.
694 | err = s.sc.Insert(
695 | bson.D{{"c", "accounts"}, {"id", 0}},
696 | bson.M{"txn-queue": fakeTxnQueue},
697 | )
698 | c.Assert(err, IsNil)
699 |
700 | c.Logf("---- Purging missing transactions")
701 | err = s.runner.PurgeMissing("accounts")
702 | c.Assert(err, IsNil)
703 | }
704 |
705 | var flaky = flag.Bool("flaky", false, "Include flaky tests")
706 |
707 | func (s *S) TestTxnQueueStressTest(c *C) {
708 | // This fails about 20% of the time on Mongo 3.2 (I haven't tried
709 | // other versions) with account balance being 3999 instead of
710 | // 4000. That implies that some updates are being lost. This is
711 | // bad and we'll need to chase it down in the near future - the
712 | // only reason it's being skipped now is that it's already failing
713 | // and it's better to have the txn tests running without this one
714 | // than to have them not running at all.
715 | if !*flaky {
716 | c.Skip("Fails intermittently - disabling until fixed")
717 | }
718 | txn.SetChaos(txn.Chaos{
719 | SlowdownChance: 0.3,
720 | Slowdown: 50 * time.Millisecond,
721 | })
722 | defer txn.SetChaos(txn.Chaos{})
723 |
724 | // So we can run more iterations of the test in less time.
725 | txn.SetDebug(false)
726 |
727 | err := s.accounts.Insert(M{"_id": 0, "balance": 0}, M{"_id": 1, "balance": 0})
728 | c.Assert(err, IsNil)
729 |
730 | // Run half of the operations changing account 0 and then 1,
731 | // and the other half in the opposite order.
732 | ops01 := []txn.Op{{
733 | C: "accounts",
734 | Id: 0,
735 | Update: M{"$inc": M{"balance": 1}},
736 | }, {
737 | C: "accounts",
738 | Id: 1,
739 | Update: M{"$inc": M{"balance": 1}},
740 | }}
741 |
742 | ops10 := []txn.Op{{
743 | C: "accounts",
744 | Id: 1,
745 | Update: M{"$inc": M{"balance": 1}},
746 | }, {
747 | C: "accounts",
748 | Id: 0,
749 | Update: M{"$inc": M{"balance": 1}},
750 | }}
751 |
752 | ops := [][]txn.Op{ops01, ops10}
753 |
754 | const runners = 4
755 | const changes = 1000
756 |
757 | var wg sync.WaitGroup
758 | wg.Add(runners)
759 | for n := 0; n < runners; n++ {
760 | n := n
761 | go func() {
762 | defer wg.Done()
763 | for i := 0; i < changes; i++ {
764 | err = s.runner.Run(ops[n%2], "", nil)
765 | c.Assert(err, IsNil)
766 | }
767 | }()
768 | }
769 | wg.Wait()
770 |
771 | for id := 0; id < 2; id++ {
772 | var account Account
773 | err = s.accounts.FindId(id).One(&account)
774 | if account.Balance != runners*changes {
775 | c.Errorf("Account should have balance of %d, got %d", runners*changes, account.Balance)
776 | }
777 | }
778 | }
779 |
--------------------------------------------------------------------------------
/txn/flusher.go:
--------------------------------------------------------------------------------
1 | package txn
2 |
3 | import (
4 | "fmt"
5 |
6 | "gopkg.in/mgo.v2"
7 | "gopkg.in/mgo.v2/bson"
8 | )
9 |
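    | // flush drives transaction t, along with any other transactions found
    | // queued on the documents it touches, to a final applied or aborted state.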
10 | func flush(r *Runner, t *transaction) error {
11 | f := &flusher{
12 | Runner: r,
13 | goal: t,
14 | goalKeys: make(map[docKey]bool),
15 | queue: make(map[docKey][]token),
16 | debugId: debugPrefix(),
17 | }
18 | for _, dkey := range f.goal.docKeys() {
19 | f.goalKeys[dkey] = true
20 | }
21 | return f.run()
22 | }
23 |
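    | // A flusher holds the state used while driving one goal transaction and
    | // its pre-requisites forward. queue caches the txn-queue tokens observed
    | // for each document key during preparation.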
24 | type flusher struct {
25 | *Runner
26 | goal *transaction
27 | goalKeys map[docKey]bool
28 | queue map[docKey][]token
29 | debugId string
30 | }
31 |
32 | func (f *flusher) run() (err error) {
33 | if chaosEnabled {
34 | defer f.handleChaos(&err)
35 | }
36 |
37 | f.debugf("Processing %s", f.goal)
38 | seen := make(map[bson.ObjectId]*transaction)
39 | if err := f.recurse(f.goal, seen); err != nil {
40 | return err
41 | }
42 | if f.goal.done() {
43 | return nil
44 | }
45 |
46 | // Sparse workloads will generally be managed entirely by recurse.
47 | // Getting here means one or more transactions have dependencies
48 | // and perhaps cycles.
49 |
50 | // Build successors data for Tarjan's sort. Must consider
51 | // that entries in txn-queue are not necessarily valid.
52 | successors := make(map[bson.ObjectId][]bson.ObjectId)
53 | ready := true
54 | for _, dqueue := range f.queue {
55 | NextPair:
56 | for i := 0; i < len(dqueue); i++ {
57 | pred := dqueue[i]
58 | predid := pred.id()
59 | predt := seen[predid]
60 | if predt == nil || predt.Nonce != pred.nonce() {
61 | continue
62 | }
63 | predsuccids, ok := successors[predid]
64 | if !ok {
65 | successors[predid] = nil
66 | }
67 |
68 | for j := i + 1; j < len(dqueue); j++ {
69 | succ := dqueue[j]
70 | succid := succ.id()
71 | succt := seen[succid]
72 | if succt == nil || succt.Nonce != succ.nonce() {
73 | continue
74 | }
75 | if _, ok := successors[succid]; !ok {
76 | successors[succid] = nil
77 | }
78 |
79 | // Found a valid pred/succ pair.
80 | i = j - 1
81 | for _, predsuccid := range predsuccids {
82 | if predsuccid == succid {
83 | continue NextPair
84 | }
85 | }
86 | successors[predid] = append(predsuccids, succid)
87 | if succid == f.goal.Id {
88 | // There are still pre-requisites to handle.
89 | ready = false
90 | }
91 | continue NextPair
92 | }
93 | }
94 | }
95 | f.debugf("Queues: %v", f.queue)
96 | f.debugf("Successors: %v", successors)
97 | if ready {
98 | f.debugf("Goal %s has no real pre-requisites", f.goal)
99 | return f.advance(f.goal, nil, true)
100 | }
101 |
102 | // Robert Tarjan's algorithm for detecting strongly-connected
103 | // components is used for topological sorting and detecting
104 | 	// cycles at once. The order in which transactions are applied
105 | 	// to commonly affected documents must be agreed upon globally.
106 | sorted := tarjanSort(successors)
107 | if debugEnabled {
108 | f.debugf("Tarjan output: %v", sorted)
109 | }
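    | 	// pull accumulates transactions already handled so that their leftover
    | 	// tokens may be pulled out of txn-queues while later components advance.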
110 | pull := make(map[bson.ObjectId]*transaction)
111 | for i := len(sorted) - 1; i >= 0; i-- {
112 | scc := sorted[i]
113 | f.debugf("Flushing %v", scc)
114 | if len(scc) == 1 {
115 | pull[scc[0]] = seen[scc[0]]
116 | }
117 | for _, id := range scc {
118 | if err := f.advance(seen[id], pull, true); err != nil {
119 | return err
120 | }
121 | }
122 | if len(scc) > 1 {
123 | for _, id := range scc {
124 | pull[id] = seen[id]
125 | }
126 | }
127 | }
128 | return nil
129 | }
130 |
131 | func (f *flusher) recurse(t *transaction, seen map[bson.ObjectId]*transaction) error {
132 | seen[t.Id] = t
133 | err := f.advance(t, nil, false)
134 | if err != errPreReqs {
135 | return err
136 | }
137 | for _, dkey := range t.docKeys() {
138 | for _, dtt := range f.queue[dkey] {
139 | id := dtt.id()
140 | if seen[id] != nil {
141 | continue
142 | }
143 | qt, err := f.load(id)
144 | if err != nil {
145 | return err
146 | }
147 | err = f.recurse(qt, seen)
148 | if err != nil {
149 | return err
150 | }
151 | }
152 | }
153 | return nil
154 | }
155 |
156 | func (f *flusher) advance(t *transaction, pull map[bson.ObjectId]*transaction, force bool) error {
157 | for {
158 | switch t.State {
159 | case tpreparing, tprepared:
160 | revnos, err := f.prepare(t, force)
161 | if err != nil {
162 | return err
163 | }
164 | if t.State != tprepared {
165 | continue
166 | }
167 | if err = f.assert(t, revnos, pull); err != nil {
168 | return err
169 | }
170 | if t.State != tprepared {
171 | continue
172 | }
173 | if err = f.checkpoint(t, revnos); err != nil {
174 | return err
175 | }
176 | case tapplying:
177 | return f.apply(t, pull)
178 | case taborting:
179 | return f.abortOrReload(t, nil, pull)
180 | case tapplied, taborted:
181 | return nil
182 | default:
183 | panic(fmt.Errorf("transaction in unknown state: %q", t.State))
184 | }
185 | }
186 | panic("unreachable")
187 | }
188 |
189 | type stash string
190 |
191 | const (
192 | stashStable stash = ""
193 | stashInsert stash = "insert"
194 | stashRemove stash = "remove"
195 | )
196 |
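    | // txnInfo holds the transaction bookkeeping fields read back from a
    | // document or from its stash entry.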
197 | type txnInfo struct {
198 | Queue []token `bson:"txn-queue"`
199 | Revno int64 `bson:"txn-revno,omitempty"`
200 | Insert bson.ObjectId `bson:"txn-insert,omitempty"`
201 | Remove bson.ObjectId `bson:"txn-remove,omitempty"`
202 | }
203 |
204 | type stashState string
205 |
206 | const (
207 | stashNew stashState = ""
208 | stashInserting stashState = "inserting"
209 | )
210 |
211 | var txnFields = bson.D{{"txn-queue", 1}, {"txn-revno", 1}, {"txn-remove", 1}, {"txn-insert", 1}}
212 |
213 | var errPreReqs = fmt.Errorf("transaction has pre-requisites and force is false")
214 |
215 | // prepare injects t's id onto txn-queue for all affected documents
216 | // and collects the current txn-queue and txn-revno values during
217 | // the process. If the prepared txn-queue indicates that there are
218 | // pre-requisite transactions to be applied and the force parameter
219 | // is false, errPreReqs will be returned. Otherwise, the current
220 | // tip revision numbers for all the documents are returned.
221 | func (f *flusher) prepare(t *transaction, force bool) (revnos []int64, err error) {
222 | if t.State != tpreparing {
223 | return f.rescan(t, force)
224 | }
225 | f.debugf("Preparing %s", t)
226 |
227 | // dkeys being sorted means stable iteration across all runners. This
228 | // isn't strictly required, but reduces the chances of cycles.
229 | dkeys := t.docKeys()
230 |
231 | revno := make(map[docKey]int64)
232 | info := txnInfo{}
233 | tt := tokenFor(t)
234 | NextDoc:
235 | for _, dkey := range dkeys {
236 | change := mgo.Change{
237 | Update: bson.D{{"$addToSet", bson.D{{"txn-queue", tt}}}},
238 | ReturnNew: true,
239 | }
240 | c := f.tc.Database.C(dkey.C)
241 | cquery := c.FindId(dkey.Id).Select(txnFields)
242 |
243 | RetryDoc:
244 | change.Upsert = false
245 | chaos("")
246 | if _, err := cquery.Apply(change, &info); err == nil {
247 | if info.Remove == "" {
248 | // Fast path, unless workload is insert/remove heavy.
249 | revno[dkey] = info.Revno
250 | f.queue[dkey] = info.Queue
251 | f.debugf("[A] Prepared document %v with revno %d and queue: %v", dkey, info.Revno, info.Queue)
252 | continue NextDoc
253 | } else {
254 | // Handle remove in progress before preparing it.
255 | if err := f.loadAndApply(info.Remove); err != nil {
256 | return nil, err
257 | }
258 | goto RetryDoc
259 | }
260 | } else if err != mgo.ErrNotFound {
261 | return nil, err
262 | }
263 |
264 | // Document missing. Use stash collection.
265 | change.Upsert = true
266 | chaos("")
267 | _, err := f.sc.FindId(dkey).Apply(change, &info)
268 | if err != nil {
269 | return nil, err
270 | }
271 | if info.Insert != "" {
272 | // Handle insert in progress before preparing it.
273 | if err := f.loadAndApply(info.Insert); err != nil {
274 | return nil, err
275 | }
276 | goto RetryDoc
277 | }
278 |
279 | // Must confirm stash is still in use and is the same one
280 | // prepared, since applying a remove overwrites the stash.
281 | docFound := false
282 | stashFound := false
283 | if err = c.FindId(dkey.Id).Select(txnFields).One(&info); err == nil {
284 | docFound = true
285 | } else if err != mgo.ErrNotFound {
286 | return nil, err
287 | } else if err = f.sc.FindId(dkey).One(&info); err == nil {
288 | stashFound = true
289 | if info.Revno == 0 {
290 | // Missing revno in the stash only happens when it
291 | // has been upserted, in which case it defaults to -1.
292 | // Txn-inserted documents get revno -1 while in the stash
293 | 			// for the first time, and -revno+1 == 2 when they go live.
294 | info.Revno = -1
295 | }
296 | } else if err != mgo.ErrNotFound {
297 | return nil, err
298 | }
299 |
300 | if docFound && info.Remove == "" || stashFound && info.Insert == "" {
301 | for _, dtt := range info.Queue {
302 | if dtt != tt {
303 | continue
304 | }
305 | // Found tt properly prepared.
306 | if stashFound {
307 | f.debugf("[B] Prepared document %v on stash with revno %d and queue: %v", dkey, info.Revno, info.Queue)
308 | } else {
309 | f.debugf("[B] Prepared document %v with revno %d and queue: %v", dkey, info.Revno, info.Queue)
310 | }
311 | revno[dkey] = info.Revno
312 | f.queue[dkey] = info.Queue
313 | continue NextDoc
314 | }
315 | }
316 |
317 | // The stash wasn't valid and tt got overwritten. Try again.
318 | f.unstashToken(tt, dkey)
319 | goto RetryDoc
320 | }
321 |
322 | // Save the prepared nonce onto t.
323 | nonce := tt.nonce()
324 | qdoc := bson.D{{"_id", t.Id}, {"s", tpreparing}}
325 | udoc := bson.D{{"$set", bson.D{{"s", tprepared}, {"n", nonce}}}}
326 | chaos("set-prepared")
327 | err = f.tc.Update(qdoc, udoc)
328 | if err == nil {
329 | t.State = tprepared
330 | t.Nonce = nonce
331 | } else if err == mgo.ErrNotFound {
332 | f.debugf("Can't save nonce of %s: LOST RACE", tt)
333 | if err := f.reload(t); err != nil {
334 | return nil, err
335 | } else if t.State == tpreparing {
336 | panic("can't save nonce yet transaction is still preparing")
337 | } else if t.State != tprepared {
338 | return t.Revnos, nil
339 | }
340 | tt = t.token()
341 | } else if err != nil {
342 | return nil, err
343 | }
344 |
345 | prereqs, found := f.hasPreReqs(tt, dkeys)
346 | if !found {
347 | // Must only happen when reloading above.
348 | return f.rescan(t, force)
349 | } else if prereqs && !force {
350 | f.debugf("Prepared queue with %s [has prereqs & not forced].", tt)
351 | return nil, errPreReqs
352 | }
353 | revnos = assembledRevnos(t.Ops, revno)
354 | if !prereqs {
355 | f.debugf("Prepared queue with %s [no prereqs]. Revnos: %v", tt, revnos)
356 | } else {
357 | f.debugf("Prepared queue with %s [forced] Revnos: %v", tt, revnos)
358 | }
359 | return revnos, nil
360 | }
361 |
362 | func (f *flusher) unstashToken(tt token, dkey docKey) error {
363 | qdoc := bson.D{{"_id", dkey}, {"txn-queue", tt}}
364 | udoc := bson.D{{"$pull", bson.D{{"txn-queue", tt}}}}
365 | chaos("")
366 | if err := f.sc.Update(qdoc, udoc); err == nil {
367 | chaos("")
368 | err = f.sc.Remove(bson.D{{"_id", dkey}, {"txn-queue", bson.D{}}})
369 | } else if err != mgo.ErrNotFound {
370 | return err
371 | }
372 | return nil
373 | }
374 |
375 | func (f *flusher) rescan(t *transaction, force bool) (revnos []int64, err error) {
376 | f.debugf("Rescanning %s", t)
377 | if t.State != tprepared {
378 | panic(fmt.Errorf("rescanning transaction in invalid state: %q", t.State))
379 | }
380 |
381 | // dkeys being sorted means stable iteration across all
382 | // runners. This isn't strictly required, but reduces the chances
383 | // of cycles.
384 | dkeys := t.docKeys()
385 |
386 | tt := t.token()
387 | if !force {
388 | prereqs, found := f.hasPreReqs(tt, dkeys)
389 | if found && prereqs {
390 | // Its state is already known.
391 | return nil, errPreReqs
392 | }
393 | }
394 |
395 | revno := make(map[docKey]int64)
396 | info := txnInfo{}
397 | for _, dkey := range dkeys {
398 | const retries = 3
399 | retry := -1
400 |
401 | RetryDoc:
402 | retry++
403 | c := f.tc.Database.C(dkey.C)
404 | if err := c.FindId(dkey.Id).Select(txnFields).One(&info); err == mgo.ErrNotFound {
405 | // Document is missing. Look in stash.
406 | chaos("")
407 | if err := f.sc.FindId(dkey).One(&info); err == mgo.ErrNotFound {
408 | // Stash also doesn't exist. Maybe someone applied it.
409 | if err := f.reload(t); err != nil {
410 | return nil, err
411 | } else if t.State != tprepared {
412 | return t.Revnos, err
413 | }
414 | // Not applying either.
415 | if retry < retries {
416 | // Retry since there might be an insert/remove race.
417 | goto RetryDoc
418 | }
419 | // Neither the doc nor the stash seem to exist.
420 | return nil, fmt.Errorf("cannot find document %v for applying transaction %s", dkey, t)
421 | } else if err != nil {
422 | return nil, err
423 | }
424 | // Stash found.
425 | if info.Insert != "" {
426 | // Handle insert in progress before assuming ordering is good.
427 | if err := f.loadAndApply(info.Insert); err != nil {
428 | return nil, err
429 | }
430 | goto RetryDoc
431 | }
432 | if info.Revno == 0 {
433 | // Missing revno in the stash means -1.
434 | info.Revno = -1
435 | }
436 | } else if err != nil {
437 | return nil, err
438 | } else if info.Remove != "" {
439 | // Handle remove in progress before assuming ordering is good.
440 | if err := f.loadAndApply(info.Remove); err != nil {
441 | return nil, err
442 | }
443 | goto RetryDoc
444 | }
445 | revno[dkey] = info.Revno
446 |
447 | found := false
448 | for _, id := range info.Queue {
449 | if id == tt {
450 | found = true
451 | break
452 | }
453 | }
454 | f.queue[dkey] = info.Queue
455 | if !found {
456 | // Rescanned transaction id was not in the queue. This could mean one
457 | // of three things:
458 | // 1) The transaction was applied and popped by someone else. This is
459 | // the common case.
460 | // 2) We've read an out-of-date queue from the stash. This can happen
461 | // when someone else was paused for a long while preparing another
462 | // transaction for this document, and improperly upserted to the
463 | // stash when unpaused (after someone else inserted the document).
464 | // This is rare but possible.
465 | // 3) There's an actual bug somewhere, or outside interference. Worst
466 | // possible case.
467 | f.debugf("Rescanned document %v misses %s in queue: %v", dkey, tt, info.Queue)
468 | err := f.reload(t)
469 | if t.State == tpreparing || t.State == tprepared {
470 | if retry < retries {
471 | // Case 2.
472 | goto RetryDoc
473 | }
474 | // Case 3.
475 | return nil, fmt.Errorf("cannot find transaction %s in queue for document %v", t, dkey)
476 | }
477 | // Case 1.
478 | return t.Revnos, err
479 | }
480 | }
481 |
482 | prereqs, found := f.hasPreReqs(tt, dkeys)
483 | if !found {
484 | panic("rescanning loop guarantees that this can't happen")
485 | } else if prereqs && !force {
486 | f.debugf("Rescanned queue with %s: has prereqs, not forced", tt)
487 | return nil, errPreReqs
488 | }
489 | revnos = assembledRevnos(t.Ops, revno)
490 | if !prereqs {
491 | f.debugf("Rescanned queue with %s: no prereqs, revnos: %v", tt, revnos)
492 | } else {
493 | f.debugf("Rescanned queue with %s: has prereqs, forced, revnos: %v", tt, revnos)
494 | }
495 | return revnos, nil
496 | }
497 |
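    | // assembledRevnos returns the revno each op's document must be at when the
    | // transaction applies, advancing the working revno map to reflect each op's
    | // own effect: an insert turns a negative (stashed) revno into a positive
    | // live one, an update bumps a live revno, and a remove flips it negative.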
498 | func assembledRevnos(ops []Op, revno map[docKey]int64) []int64 {
499 | revnos := make([]int64, len(ops))
500 | for i, op := range ops {
501 | dkey := op.docKey()
502 | revnos[i] = revno[dkey]
503 | drevno := revno[dkey]
504 | switch {
505 | case op.Insert != nil && drevno < 0:
506 | revno[dkey] = -drevno + 1
507 | case op.Update != nil && drevno >= 0:
508 | revno[dkey] = drevno + 1
509 | case op.Remove && drevno >= 0:
510 | revno[dkey] = -drevno - 1
511 | }
512 | }
513 | return revnos
514 | }
515 |
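    | // hasPreReqs reports whether any queue for dkeys holds a token from a
    | // different transaction ahead of tt (prereqs), and whether tt itself was
    | // found in every queue (found).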
516 | func (f *flusher) hasPreReqs(tt token, dkeys docKeys) (prereqs, found bool) {
517 | found = true
518 | NextDoc:
519 | for _, dkey := range dkeys {
520 | for _, dtt := range f.queue[dkey] {
521 | if dtt == tt {
522 | continue NextDoc
523 | } else if dtt.id() != tt.id() {
524 | prereqs = true
525 | }
526 | }
527 | found = false
528 | }
529 | return
530 | }
531 |
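    | // reload refreshes t's state, nonce and revnos from the transaction
    | // collection.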
532 | func (f *flusher) reload(t *transaction) error {
533 | var newt transaction
534 | query := f.tc.FindId(t.Id)
535 | query.Select(bson.D{{"s", 1}, {"n", 1}, {"r", 1}})
536 | if err := query.One(&newt); err != nil {
537 | return fmt.Errorf("failed to reload transaction: %v", err)
538 | }
539 | t.State = newt.State
540 | t.Nonce = newt.Nonce
541 | t.Revnos = newt.Revnos
542 | f.debugf("Reloaded %s: %q", t, t.State)
543 | return nil
544 | }
545 |
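    | // loadAndApply loads the transaction with the given id and forces it
    | // forward until it is applied or aborted.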
546 | func (f *flusher) loadAndApply(id bson.ObjectId) error {
547 | t, err := f.load(id)
548 | if err != nil {
549 | return err
550 | }
551 | return f.advance(t, nil, true)
552 | }
553 |
554 | // assert verifies that all assertions in t match the content that t
555 | // will be applied upon. If an assertion fails, the transaction state
556 | // is changed to aborted.
557 | func (f *flusher) assert(t *transaction, revnos []int64, pull map[bson.ObjectId]*transaction) error {
558 | f.debugf("Asserting %s with revnos %v", t, revnos)
559 | if t.State != tprepared {
560 | panic(fmt.Errorf("asserting transaction in invalid state: %q", t.State))
561 | }
562 | qdoc := make(bson.D, 3)
563 | revno := make(map[docKey]int64)
564 | for i, op := range t.Ops {
565 | dkey := op.docKey()
566 | if _, ok := revno[dkey]; !ok {
567 | revno[dkey] = revnos[i]
568 | }
569 | if op.Assert == nil {
570 | continue
571 | }
572 | if op.Assert == DocMissing {
573 | if revnos[i] >= 0 {
574 | return f.abortOrReload(t, revnos, pull)
575 | }
576 | continue
577 | }
578 | if op.Insert != nil {
579 | 			return fmt.Errorf("Insert can only Assert txn.DocMissing, not %v", op.Assert)
580 | }
581 | // if revnos[i] < 0 { abort }?
582 |
583 | qdoc = append(qdoc[:0], bson.DocElem{"_id", op.Id})
584 | if op.Assert != DocMissing {
585 | var revnoq interface{}
586 | if n := revno[dkey]; n == 0 {
587 | revnoq = bson.D{{"$exists", false}}
588 | } else {
589 | revnoq = n
590 | }
591 | // XXX Add tt to the query here, once we're sure it's all working.
592 | // Not having it increases the chances of breaking on bad logic.
593 | qdoc = append(qdoc, bson.DocElem{"txn-revno", revnoq})
594 | if op.Assert != DocExists {
595 | qdoc = append(qdoc, bson.DocElem{"$or", []interface{}{op.Assert}})
596 | }
597 | }
598 |
599 | c := f.tc.Database.C(op.C)
600 | if err := c.Find(qdoc).Select(bson.D{{"_id", 1}}).One(nil); err == mgo.ErrNotFound {
601 | // Assertion failed or someone else started applying.
602 | return f.abortOrReload(t, revnos, pull)
603 | } else if err != nil {
604 | return err
605 | }
606 | }
607 | f.debugf("Asserting %s succeeded", t)
608 | return nil
609 | }
610 |
611 | func (f *flusher) abortOrReload(t *transaction, revnos []int64, pull map[bson.ObjectId]*transaction) (err error) {
612 | f.debugf("Aborting or reloading %s (was %q)", t, t.State)
613 | if t.State == tprepared {
614 | qdoc := bson.D{{"_id", t.Id}, {"s", tprepared}}
615 | udoc := bson.D{{"$set", bson.D{{"s", taborting}}}}
616 | chaos("set-aborting")
617 | if err = f.tc.Update(qdoc, udoc); err == nil {
618 | t.State = taborting
619 | } else if err == mgo.ErrNotFound {
620 | if err = f.reload(t); err != nil || t.State != taborting {
621 | f.debugf("Won't abort %s. Reloaded state: %q", t, t.State)
622 | return err
623 | }
624 | } else {
625 | return err
626 | }
627 | } else if t.State != taborting {
628 | panic(fmt.Errorf("aborting transaction in invalid state: %q", t.State))
629 | }
630 |
631 | if len(revnos) > 0 {
632 | if pull == nil {
633 | pull = map[bson.ObjectId]*transaction{t.Id: t}
634 | }
635 | seen := make(map[docKey]bool)
636 | for i, op := range t.Ops {
637 | dkey := op.docKey()
638 | if seen[op.docKey()] {
639 | continue
640 | }
641 | seen[dkey] = true
642 |
643 | pullAll := tokensToPull(f.queue[dkey], pull, "")
644 | if len(pullAll) == 0 {
645 | continue
646 | }
647 | udoc := bson.D{{"$pullAll", bson.D{{"txn-queue", pullAll}}}}
648 | chaos("")
649 | if revnos[i] < 0 {
650 | err = f.sc.UpdateId(dkey, udoc)
651 | } else {
652 | c := f.tc.Database.C(dkey.C)
653 | err = c.UpdateId(dkey.Id, udoc)
654 | }
655 | if err != nil && err != mgo.ErrNotFound {
656 | return err
657 | }
658 | }
659 | }
660 | udoc := bson.D{{"$set", bson.D{{"s", taborted}}}}
661 | chaos("set-aborted")
662 | if err := f.tc.UpdateId(t.Id, udoc); err != nil && err != mgo.ErrNotFound {
663 | return err
664 | }
665 | t.State = taborted
666 | f.debugf("Aborted %s", t)
667 | return nil
668 | }
669 |
670 | func (f *flusher) checkpoint(t *transaction, revnos []int64) error {
671 | var debugRevnos map[docKey][]int64
672 | if debugEnabled {
673 | debugRevnos = make(map[docKey][]int64)
674 | for i, op := range t.Ops {
675 | dkey := op.docKey()
676 | debugRevnos[dkey] = append(debugRevnos[dkey], revnos[i])
677 | }
678 | f.debugf("Ready to apply %s. Saving revnos %v", t, debugRevnos)
679 | }
680 |
681 | // Save in t the txn-revno values the transaction must run on.
682 | qdoc := bson.D{{"_id", t.Id}, {"s", tprepared}}
683 | udoc := bson.D{{"$set", bson.D{{"s", tapplying}, {"r", revnos}}}}
684 | chaos("set-applying")
685 | err := f.tc.Update(qdoc, udoc)
686 | if err == nil {
687 | t.State = tapplying
688 | t.Revnos = revnos
689 | f.debugf("Ready to apply %s. Saving revnos %v: DONE", t, debugRevnos)
690 | } else if err == mgo.ErrNotFound {
691 | f.debugf("Ready to apply %s. Saving revnos %v: LOST RACE", t, debugRevnos)
692 | return f.reload(t)
693 | }
694 | return nil
695 | }
696 |
697 | func (f *flusher) apply(t *transaction, pull map[bson.ObjectId]*transaction) error {
698 | f.debugf("Applying transaction %s", t)
699 | if t.State != tapplying {
700 | panic(fmt.Errorf("applying transaction in invalid state: %q", t.State))
701 | }
702 | if pull == nil {
703 | pull = map[bson.ObjectId]*transaction{t.Id: t}
704 | }
705 |
706 | logRevnos := append([]int64(nil), t.Revnos...)
707 | logDoc := bson.D{{"_id", t.Id}}
708 |
709 | tt := tokenFor(t)
710 | for i := range t.Ops {
711 | op := &t.Ops[i]
712 | dkey := op.docKey()
713 | dqueue := f.queue[dkey]
714 | revno := t.Revnos[i]
715 |
716 | var opName string
717 | if debugEnabled {
718 | opName = op.name()
719 | f.debugf("Applying %s op %d (%s) on %v with txn-revno %d", t, i, opName, dkey, revno)
720 | }
721 |
722 | c := f.tc.Database.C(op.C)
723 |
724 | qdoc := bson.D{{"_id", dkey.Id}, {"txn-revno", revno}, {"txn-queue", tt}}
725 | if op.Insert != nil {
726 | qdoc[0].Value = dkey
727 | if revno == -1 {
728 | qdoc[1].Value = bson.D{{"$exists", false}}
729 | }
730 | } else if revno == 0 {
731 | // There's no document with revno 0. The only way to see it is
732 | // when an existent document participates in a transaction the
733 | // first time. Txn-inserted documents get revno -1 while in the
734 | 		// stash for the first time, and -revno+1 == 2 when they go live.
735 | qdoc[1].Value = bson.D{{"$exists", false}}
736 | }
737 |
738 | pullAll := tokensToPull(dqueue, pull, tt)
739 |
740 | var d bson.D
741 | var outcome string
742 | var err error
743 | switch {
744 | case op.Update != nil:
745 | if revno < 0 {
746 | err = mgo.ErrNotFound
747 | f.debugf("Won't try to apply update op; negative revision means the document is missing or stashed")
748 | } else {
749 | newRevno := revno + 1
750 | logRevnos[i] = newRevno
751 | if d, err = objToDoc(op.Update); err != nil {
752 | return err
753 | }
754 | if d, err = addToDoc(d, "$pullAll", bson.D{{"txn-queue", pullAll}}); err != nil {
755 | return err
756 | }
757 | if d, err = addToDoc(d, "$set", bson.D{{"txn-revno", newRevno}}); err != nil {
758 | return err
759 | }
760 | chaos("")
761 | err = c.Update(qdoc, d)
762 | }
763 | case op.Remove:
764 | if revno < 0 {
765 | err = mgo.ErrNotFound
766 | } else {
767 | newRevno := -revno - 1
768 | logRevnos[i] = newRevno
769 | nonce := newNonce()
770 | stash := txnInfo{}
771 | change := mgo.Change{
772 | Update: bson.D{{"$push", bson.D{{"n", nonce}}}},
773 | Upsert: true,
774 | ReturnNew: true,
775 | }
776 | if _, err = f.sc.FindId(dkey).Apply(change, &stash); err != nil {
777 | return err
778 | }
779 | change = mgo.Change{
780 | Update: bson.D{{"$set", bson.D{{"txn-remove", t.Id}}}},
781 | ReturnNew: true,
782 | }
783 | var info txnInfo
784 | if _, err = c.Find(qdoc).Apply(change, &info); err == nil {
785 | // The document still exists so the stash previously
786 | // observed was either out of date or necessarily
787 | // contained the token being applied.
788 | f.debugf("Marked document %v to be removed on revno %d with queue: %v", dkey, info.Revno, info.Queue)
789 | updated := false
790 | if !hasToken(stash.Queue, tt) {
791 | var set, unset bson.D
792 | if revno == 0 {
793 | // Missing revno in stash means -1.
794 | set = bson.D{{"txn-queue", info.Queue}}
795 | unset = bson.D{{"n", 1}, {"txn-revno", 1}}
796 | } else {
797 | set = bson.D{{"txn-queue", info.Queue}, {"txn-revno", newRevno}}
798 | unset = bson.D{{"n", 1}}
799 | }
800 | qdoc := bson.D{{"_id", dkey}, {"n", nonce}}
801 | udoc := bson.D{{"$set", set}, {"$unset", unset}}
802 | if err = f.sc.Update(qdoc, udoc); err == nil {
803 | updated = true
804 | } else if err != mgo.ErrNotFound {
805 | return err
806 | }
807 | }
808 | if updated {
809 | f.debugf("Updated stash for document %v with revno %d and queue: %v", dkey, newRevno, info.Queue)
810 | } else {
811 | f.debugf("Stash for document %v was up-to-date", dkey)
812 | }
813 | err = c.Remove(qdoc)
814 | }
815 | }
816 | case op.Insert != nil:
817 | if revno >= 0 {
818 | err = mgo.ErrNotFound
819 | } else {
820 | newRevno := -revno + 1
821 | logRevnos[i] = newRevno
822 | if d, err = objToDoc(op.Insert); err != nil {
823 | return err
824 | }
825 | change := mgo.Change{
826 | Update: bson.D{{"$set", bson.D{{"txn-insert", t.Id}}}},
827 | ReturnNew: true,
828 | }
829 | chaos("")
830 | var info txnInfo
831 | if _, err = f.sc.Find(qdoc).Apply(change, &info); err == nil {
832 | f.debugf("Stash for document %v has revno %d and queue: %v", dkey, info.Revno, info.Queue)
833 | d = setInDoc(d, bson.D{{"_id", op.Id}, {"txn-revno", newRevno}, {"txn-queue", info.Queue}})
834 | // Unlikely yet unfortunate race in here if this gets seriously
835 | // delayed. If someone inserts+removes meanwhile, this will
836 | // reinsert, and there's no way to avoid that while keeping the
837 | // collection clean or compromising sharding. applyOps can solve
838 | // the former, but it can't shard (SERVER-1439).
839 | chaos("insert")
840 | err = c.Insert(d)
841 | if err == nil || mgo.IsDup(err) {
842 | if err == nil {
843 | f.debugf("New document %v inserted with revno %d and queue: %v", dkey, info.Revno, info.Queue)
844 | } else {
845 | f.debugf("Document %v already existed", dkey)
846 | }
847 | chaos("")
848 | if err = f.sc.Remove(qdoc); err == nil {
849 | f.debugf("Stash for document %v removed", dkey)
850 | }
851 | }
852 | }
853 | }
854 | case op.Assert != nil:
855 | // Pure assertion. No changes to apply.
856 | }
857 | if err == nil {
858 | outcome = "DONE"
859 | } else if err == mgo.ErrNotFound || mgo.IsDup(err) {
860 | outcome = "MISS"
861 | err = nil
862 | } else {
863 | outcome = err.Error()
864 | }
865 | if debugEnabled {
866 | f.debugf("Applying %s op %d (%s) on %v with txn-revno %d: %s", t, i, opName, dkey, revno, outcome)
867 | }
868 | if err != nil {
869 | return err
870 | }
871 |
872 | if f.lc != nil && op.isChange() {
873 | // Add change to the log document.
874 | var dr bson.D
875 | for li := range logDoc {
876 | elem := &logDoc[li]
877 | if elem.Name == op.C {
878 | dr = elem.Value.(bson.D)
879 | break
880 | }
881 | }
882 | if dr == nil {
883 | logDoc = append(logDoc, bson.DocElem{op.C, bson.D{{"d", []interface{}{}}, {"r", []int64{}}}})
884 | dr = logDoc[len(logDoc)-1].Value.(bson.D)
885 | }
886 | dr[0].Value = append(dr[0].Value.([]interface{}), op.Id)
887 | dr[1].Value = append(dr[1].Value.([]int64), logRevnos[i])
888 | }
889 | }
890 | t.State = tapplied
891 |
892 | if f.lc != nil {
893 | // Insert log document into the changelog collection.
894 | f.debugf("Inserting %s into change log", t)
895 | err := f.lc.Insert(logDoc)
896 | if err != nil && !mgo.IsDup(err) {
897 | return err
898 | }
899 | }
900 |
901 | // It's been applied, so errors are ignored here. It's fine for someone
902 | // else to win the race and mark it as applied, and it's also fine for
903 | // it to remain pending until a later point when someone will perceive
904 | 	// it has been applied and mark it as such.
905 | f.debugf("Marking %s as applied", t)
906 | chaos("set-applied")
907 | f.tc.Update(bson.D{{"_id", t.Id}, {"s", tapplying}}, bson.D{{"$set", bson.D{{"s", tapplied}}}})
908 | return nil
909 | }
910 |
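    | // tokensToPull returns the tokens in dqueue, other than dontPull, whose
    | // transactions are present in pull and are therefore leftovers that may be
    | // removed from the document's txn-queue.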
911 | func tokensToPull(dqueue []token, pull map[bson.ObjectId]*transaction, dontPull token) []token {
912 | var result []token
913 | for j := len(dqueue) - 1; j >= 0; j-- {
914 | dtt := dqueue[j]
915 | if dtt == dontPull {
916 | continue
917 | }
918 | if _, ok := pull[dtt.id()]; ok {
919 | // It was handled before and this is a leftover invalid
920 | // nonce in the queue. Cherry-pick it out.
921 | result = append(result, dtt)
922 | }
923 | }
924 | return result
925 | }
926 |
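    | // objToDoc round-trips obj through bson marshalling to obtain an ordered
    | // bson.D representation of it.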
927 | func objToDoc(obj interface{}) (d bson.D, err error) {
928 | data, err := bson.Marshal(obj)
929 | if err != nil {
930 | return nil, err
931 | }
932 | err = bson.Unmarshal(data, &d)
933 | if err != nil {
934 | return nil, err
935 | }
936 | return d, err
937 | }
938 |
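    | // addToDoc merges add into the value doc holds for key, appending a new
    | // element when key is not present in doc yet.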
939 | func addToDoc(doc bson.D, key string, add bson.D) (bson.D, error) {
940 | for i := range doc {
941 | elem := &doc[i]
942 | if elem.Name != key {
943 | continue
944 | }
945 | if old, ok := elem.Value.(bson.D); ok {
946 | elem.Value = append(old, add...)
947 | return doc, nil
948 | } else {
949 | return nil, fmt.Errorf("invalid %q value in change document: %#v", key, elem.Value)
950 | }
951 | }
952 | return append(doc, bson.DocElem{key, add}), nil
953 | }
954 |
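    | // setInDoc sets each element of set in doc, replacing an element with the
    | // same name when present and appending it otherwise.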
955 | func setInDoc(doc bson.D, set bson.D) bson.D {
956 | dlen := len(doc)
957 | NextS:
958 | for s := range set {
959 | sname := set[s].Name
960 | for d := 0; d < dlen; d++ {
961 | if doc[d].Name == sname {
962 | doc[d].Value = set[s].Value
963 | continue NextS
964 | }
965 | }
966 | doc = append(doc, set[s])
967 | }
968 | return doc
969 | }
970 |
971 | func hasToken(tokens []token, tt token) bool {
972 | for _, ttt := range tokens {
973 | if ttt == tt {
974 | return true
975 | }
976 | }
977 | return false
978 | }
979 |
980 | func (f *flusher) debugf(format string, args ...interface{}) {
981 | if !debugEnabled {
982 | return
983 | }
984 | debugf(f.debugId+format, args...)
985 | }
986 |
--------------------------------------------------------------------------------