@@ -15,6 +15,7 @@ import (
	"github.com/couchbase/sync_gateway/base"
	"github.com/couchbase/sync_gateway/db"
+	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
)
@@ -60,14 +61,122 @@ func waitForVersionAndBody(t *testing.T, dsName base.ScopeAndCollectionName, doc
}

// waitForCVAndBody waits for a document to reach a specific cv on all peers.
+// This is used when asserting on the full HLV is impossible. If XDCR is running, asserting on the full HLV is
+// possible for non-CBL peers. However, conflict resolution on Couchbase Lite means that Couchbase Lite can contain
+// previous versions of a document.
+//
+// See the following example:
+//
+//	+- - - - - - -+        +- - - - - - -+
+//	'  cluster A  '        '  cluster B  '
+//	' +---------+ '        ' +---------+ '
+//	' |  cbs1   | '  <-->  ' |  cbs2   | '
+//	' +---------+ '        ' +---------+ '
+//	' +---------+ '        ' +---------+ '
+//	' |   sg1   | '        ' |   sg2   | '
+//	' +---------+ '        ' +---------+ '
+//	+- - - - - - -+        +- - - - - - -+
+//	       ^                      ^
+//	       |                      |
+//	       |                      |
+//	       v                      v
+//	  +---------+            +---------+
+//	  |  cbl1   |            |  cbl2   |
+//	  +---------+            +---------+
+//
+// Couchbase Lite peers can retain previous versions (pv) that are no longer present on Couchbase Server, since
+// conflict resolution in XDCR will overwrite the HLV.
+// 1. sg1 creates unique document cv:1@rosmar1
+// 2. sg2 creates unique document cv:2@rosmar2
+// 3. cbl1 pulls 1@rosmar1
+// 4. cbl2 pulls 2@rosmar2
+// 5. cbs1 pulls 2@rosmar2, overwriting cv:1@rosmar1
+// 6. cbl1 pulls 2@rosmar2, creating cv:2@rosmar2, pv:1@rosmar1, overwriting its local cv:1@rosmar1
+// Final state:
+// - cv:2@rosmar2 on cbs1, cbs2, cbl2
+// - cv:2@rosmar2, pv:1@rosmar1 on cbl1
func waitForCVAndBody(t *testing.T, dsName base.ScopeAndCollectionName, docID string, expectedVersion BodyAndVersion, topology Topology) {
	t.Logf("waiting for doc version on all peers, written from %s: %#v", expectedVersion.updatePeer, expectedVersion)
	for _, peer := range topology.SortedPeers() {
		t.Logf("waiting for doc version on peer %s, written from %s: %#v", peer, expectedVersion.updatePeer, expectedVersion)
-		body := peer.WaitForCV(dsName, docID, expectedVersion.docMeta, topology)
+		var body db.Body
+		if peer.Type() == PeerTypeCouchbaseLite {
+			body = peer.WaitForCV(dsName, docID, expectedVersion.docMeta, topology)
+		} else {
+			body = peer.WaitForDocVersion(dsName, docID, expectedVersion.docMeta, topology)
+		}
		requireBodyEqual(t, expectedVersion.body, body)
	}
}
+
+// waitForConvergingTombstones waits for all peers to have a tombstone document for a given doc ID. A matching HLV is
+// guaranteed for all Couchbase Server / Sync Gateway versions, but not CBL versions.
+//
+// See the following example:
+//
+//	+- - - - - - -+        +- - - - - - -+
+//	'  cluster A  '        '  cluster B  '
+//	' +---------+ '        ' +---------+ '
+//	' |  cbs1   | '  <-->  ' |  cbs2   | '
+//	' +---------+ '        ' +---------+ '
+//	' +---------+ '        ' +---------+ '
+//	' |   sg1   | '        ' |   sg2   | '
+//	' +---------+ '        ' +---------+ '
+//	+- - - - - - -+        +- - - - - - -+
+//	       ^                      ^
+//	       |                      |
+//	       |                      |
+//	       v                      v
+//	  +---------+            +---------+
+//	  |  cbl1   |            |  cbl2   |
+//	  +---------+            +---------+
+//
+// Initially there is a converged document + HLV on all peers.
+// 1. cbl1 deletes document cv:5@cbl1
+// 2. cbl2 deletes document cv:6@cbl2
+// 3. sg1 deletes document cv:7@rosmar1
+// 4. sg2 deletes document cv:8@rosmar2
+// 5. cbl2 pulls from sg2, creates 8@rosmar2;6@cbl2
+// 6. cbl1 pulls from sg1, creates 7@rosmar1;5@cbl1
+// 7. cbs1 pulls from cbs2, creating cv:8@rosmar2. This version isn't imported, so it doesn't get recognized as
+//    needing to replicate to Couchbase Lite.
+//
+// Final state:
+// - CBS1, CBS2: 8@rosmar2
+// - CBL1: 7@rosmar1;5@cbl1
+// - CBL2: 8@rosmar2;6@cbl2
+func waitForConvergingTombstones(t *testing.T, dsName base.ScopeAndCollectionName, docID string, topology Topology) {
+	t.Logf("waiting for converging tombstones")
+	require.EventuallyWithT(t, func(c *assert.CollectT) {
+		nonCBLVersions := make(map[string]DocMetadata)
+		CBLVersions := make(map[string]DocMetadata)
+		for peerName, peer := range topology.SortedPeers() {
+			meta, body, exists := peer.GetDocumentIfExists(dsName, docID)
+			if !assert.True(c, exists, "doc %s does not exist on peer %s", docID, peer) {
+				return
+			}
+			if !assert.Nil(c, body, "expected tombstone for doc %s on peer %s", docID, peer) {
+				return
+			}
+			switch peer.Type() {
+			case PeerTypeCouchbaseLite:
+				CBLVersions[peerName] = meta
+			default:
+				nonCBLVersions[peerName] = meta
+			}
+		}
+		var nonCBLVersion *DocMetadata
+		for peer, version := range nonCBLVersions {
+			if nonCBLVersion == nil {
+				nonCBLVersion = &version
+				continue
+			}
+			assertHLVEqual(c, dsName, docID, peer, version, nil, *nonCBLVersion, topology)
+		}
+		// Is there a way to do any assertion on the CBL tombstone versions?
+	}, totalWaitTime, pollInterval)
+}
+
+// waitForTombstoneVersion waits for a tombstone document with a particular HLV to be present on all peers.
func waitForTombstoneVersion(t *testing.T, dsName base.ScopeAndCollectionName, docID string, expectedVersion BodyAndVersion, topology Topology) {
	t.Logf("waiting for tombstone version on all peers, written from %s: %#v", expectedVersion.updatePeer, expectedVersion)
	for _, peer := range topology.SortedPeers() {
@@ -78,16 +187,11 @@ func waitForTombstoneVersion(t *testing.T, dsName base.ScopeAndCollectionName, d
// createConflictingDocs will create a doc on each peer of the same doc ID to create conflicting documents, then
// returns the last peer to have a doc created on it
-func createConflictingDocs(t *testing.T, dsName base.ScopeAndCollectionName, docID string, topology Topology) (lastWrite BodyAndVersion) {
+func createConflictingDocs(dsName base.ScopeAndCollectionName, docID string, topology Topology) (lastWrite BodyAndVersion) {
	var documentVersion []BodyAndVersion
	for peerName, peer := range topology.peers.NonImportSortedPeers() {
-		if peer.Type() == PeerTypeCouchbaseLite {
-			// FIXME: Skipping Couchbase Lite tests for multi actor conflicts, CBG-4434
-			continue
-		}
		docBody := fmt.Sprintf(`{"activePeer": "%s", "topology": "%s", "action": "create"}`, peerName, topology.specDescription)
		docVersion := peer.CreateDocument(dsName, docID, []byte(docBody))
-		t.Logf("%s - createVersion: %#v", peerName, docVersion.docMeta)
		documentVersion = append(documentVersion, docVersion)
	}
	index := len(documentVersion) - 1
@@ -98,12 +202,11 @@ func createConflictingDocs(t *testing.T, dsName base.ScopeAndCollectionName, doc
// updateConflictingDocs will update a doc on each peer of the same doc ID to create conflicting document mutations, then
// returns the last peer to have a doc updated on it.
-func updateConflictingDocs(t *testing.T, dsName base.ScopeAndCollectionName, docID string, topology Topology) (lastWrite BodyAndVersion) {
+func updateConflictingDocs(dsName base.ScopeAndCollectionName, docID string, topology Topology) (lastWrite BodyAndVersion) {
	var documentVersion []BodyAndVersion
	for peerName, peer := range topology.peers.NonImportSortedPeers() {
		docBody := fmt.Sprintf(`{"activePeer": "%s", "topology": "%s", "action": "update"}`, peerName, topology.specDescription)
		docVersion := peer.WriteDocument(dsName, docID, []byte(docBody))
-		t.Logf("updateVersion: %#v", docVersion.docMeta)
		documentVersion = append(documentVersion, docVersion)
	}
	index := len(documentVersion) - 1
@@ -114,11 +217,10 @@ func updateConflictingDocs(t *testing.T, dsName base.ScopeAndCollectionName, doc
// deleteConflictDocs will delete a doc on each peer of the same doc ID to create conflicting document deletions, then
// returns the last peer to have a doc deleted on it
-func deleteConflictDocs(t *testing.T, dsName base.ScopeAndCollectionName, docID string, topology Topology) (lastWrite BodyAndVersion) {
+func deleteConflictDocs(dsName base.ScopeAndCollectionName, docID string, topology Topology) (lastWrite BodyAndVersion) {
	var documentVersion []BodyAndVersion
	for peerName, peer := range topology.peers.NonImportSortedPeers() {
		deleteVersion := peer.DeleteDocument(dsName, docID)
-		t.Logf("deleteVersion: %#v", deleteVersion)
		documentVersion = append(documentVersion, BodyAndVersion{docMeta: deleteVersion, updatePeer: peerName})
	}
	index := len(documentVersion) - 1
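For orientation, the helpers above could be wired together in a multi-actor conflict test roughly as sketched below. This is a hypothetical sketch, not part of the diff: the test name, docID, and the setupTopologyTest helper are illustrative assumptions; only the called helpers match the signatures shown above.

	func TestMultiActorConflictSketch(t *testing.T) {
		// setupTopologyTest is an assumed helper (not in this diff) returning a collection name and a running topology.
		dsName, topology := setupTopologyTest(t)
		docID := "doc1"

		// Write a conflicting version of the same doc on every non-import peer; the last write is returned.
		lastWrite := createConflictingDocs(dsName, docID, topology)

		// Assuming the last write wins conflict resolution: CBL peers are asserted on cv only,
		// other peers on the full HLV (see waitForCVAndBody above).
		waitForCVAndBody(t, dsName, docID, lastWrite, topology)

		// Delete on every non-import peer, then wait for tombstones to converge
		// without requiring a single HLV on Couchbase Lite.
		deleteConflictDocs(dsName, docID, topology)
		waitForConvergingTombstones(t, dsName, docID, topology)
	}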