@@ -15,6 +15,7 @@ import (

	"github.com/couchbase/sync_gateway/base"
	"github.com/couchbase/sync_gateway/db"
+	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
)

@@ -60,14 +61,85 @@ func waitForVersionAndBody(t *testing.T, dsName base.ScopeAndCollectionName, doc
}

// waitForCVAndBody waits for a document to reach a specific cv on all peers.
+// This is used when asserting on the full HLV is impossible. If XDCR is running, then asserting on the full HLV for
+// non-CBL peers is possible. However, conflict resolution on Couchbase Lite means that Couchbase Lite can contain
+// previous versions of a document.
+//
+// See the following example:
+//	+- - - - - - -+        +- - - - - - -+
+//	'  cluster A  '        '  cluster B  '
+//	' +---------+ '        ' +---------+ '
+//	' |  cbs1   | '  <-->  ' |  cbs2   | '
+//	' +---------+ '        ' +---------+ '
+//	' +---------+ '        ' +---------+ '
+//	' |  sg1    | '        ' |  sg2    | '
+//	' +---------+ '        ' +---------+ '
+//	+- - - - - - -+        +- - - - - - -+
+//	      ^                       ^
+//	      |                       |
+//	      |                       |
+//	      v                       v
+//	 +---------+             +---------+
+//	 |  cbl1   |             |  cbl2   |
+//	 +---------+             +---------+
+//
+// The full HLV can be asserted on Couchbase Server, since conflict resolution in XDCR will overwrite the HLV:
+//  1. sg1 creates a unique document, cv:1@rosmar1
+//  2. sg2 creates a unique document, cv:2@rosmar2
+//  3. cbl1 pulls 1@rosmar1
+//  4. cbl2 pulls 2@rosmar2
+//  5. cbs1 pulls 2@rosmar2, overwriting cv:1@rosmar1
+//  6. cbl1 pulls 2@rosmar2, creating cv:2@rosmar2 and moving its old cv to pv:1@rosmar1
+// Final state:
+//   - cv:2@rosmar2 on cbs1, cbs2, cbl2
+//   - cv:2@rosmar2, pv:1@rosmar1 on cbl1
func waitForCVAndBody(t *testing.T, dsName base.ScopeAndCollectionName, docID string, expectedVersion BodyAndVersion, topology Topology) {
	t.Logf("waiting for doc version on all peers, written from %s: %#v", expectedVersion.updatePeer, expectedVersion)
	for _, peer := range topology.SortedPeers() {
		t.Logf("waiting for doc version on peer %s, written from %s: %#v", peer, expectedVersion.updatePeer, expectedVersion)
-		body := peer.WaitForCV(dsName, docID, expectedVersion.docMeta, topology)
+		var body db.Body
+		if peer.Type() == PeerTypeCouchbaseLite {
+			body = peer.WaitForCV(dsName, docID, expectedVersion.docMeta, topology)
+		} else {
+			body = peer.WaitForDocVersion(dsName, docID, expectedVersion.docMeta, topology)
+		}
		requireBodyEqual(t, expectedVersion.body, body)
	}
}
+
+func waitForConvergingTombstones(t *testing.T, dsName base.ScopeAndCollectionName, docID string, topology Topology) {
+	t.Logf("waiting for converging tombstones")
+	require.EventuallyWithT(t, func(c *assert.CollectT) {
+		nonCBLVersions := make(map[string]DocMetadata)
+		CBLVersions := make(map[string]DocMetadata)
+		for peerName, peer := range topology.SortedPeers() {
+			meta, body, exists := peer.GetDocumentIfExists(dsName, docID)
+			if !assert.True(c, exists, "doc %s does not exist on peer %s", docID, peer) {
+				return
+			}
+			if !assert.Nil(c, body, "expected tombstone for doc %s on peer %s", docID, peer) {
+				return
+			}
+			switch peer.Type() {
+			case PeerTypeCouchbaseLite:
+				CBLVersions[peerName] = meta
+			default:
+				nonCBLVersions[peerName] = meta
+			}
+		}
+		var nonCBLVersion *DocMetadata
+		for peer, version := range nonCBLVersions {
+			if nonCBLVersion == nil {
+				nonCBLVersion = &version
+				continue
+			}
+			assertHLVEqual(c, dsName, docID, peer, version, nil, *nonCBLVersion, topology)
+		}
+		// TODO: assert on CBL versions?
+	}, totalWaitTime, pollInterval)
+}
+
+// waitForTombstoneVersion waits for a tombstone document with a particular HLV to be present on all peers.
func waitForTombstoneVersion(t *testing.T, dsName base.ScopeAndCollectionName, docID string, expectedVersion BodyAndVersion, topology Topology) {
	t.Logf("waiting for tombstone version on all peers, written from %s: %#v", expectedVersion.updatePeer, expectedVersion)
	for _, peer := range topology.SortedPeers() {
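
Note on the cv/pv example in the new comment above: the sketch below simply restates its final state with an illustrative struct, to make clear why only the cv is compared on Couchbase Lite peers. The type and field names (hlvSketch, cv, pv) are made up for this note and are not this package's DocMetadata API.

// Illustrative only: a simplified HLV holding a current version (cv) and any
// previous versions (pv) retained after conflict resolution.
type hlvSketch struct {
	cv string
	pv []string
}

// Final state from the example above. Every peer converges on cv 2@rosmar2;
// only cbl1 also keeps the losing write 1@rosmar1 as a pv, which is why
// waitForCVAndBody uses WaitForCV (cv-only) on Couchbase Lite peers and
// WaitForDocVersion (full document version) everywhere else.
var finalState = map[string]hlvSketch{
	"cbs1": {cv: "2@rosmar2"},
	"cbs2": {cv: "2@rosmar2"},
	"cbl2": {cv: "2@rosmar2"},
	"cbl1": {cv: "2@rosmar2", pv: []string{"1@rosmar1"}},
}
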
@@ -78,16 +150,11 @@ func waitForTombstoneVersion(t *testing.T, dsName base.ScopeAndCollectionName, d

// createConflictingDocs will create a doc on each peer of the same doc ID to create conflicting documents, then
// returns the last peer to have a doc created on it
-func createConflictingDocs(t *testing.T, dsName base.ScopeAndCollectionName, docID string, topology Topology) (lastWrite BodyAndVersion) {
+func createConflictingDocs(dsName base.ScopeAndCollectionName, docID string, topology Topology) (lastWrite BodyAndVersion) {
	var documentVersion []BodyAndVersion
	for peerName, peer := range topology.peers.NonImportSortedPeers() {
-		if peer.Type() == PeerTypeCouchbaseLite {
-			// FIXME: Skipping Couchbase Lite tests for multi actor conflicts, CBG-4434
-			continue
-		}
		docBody := fmt.Sprintf(`{"activePeer": "%s", "topology": "%s", "action": "create"}`, peerName, topology.specDescription)
		docVersion := peer.CreateDocument(dsName, docID, []byte(docBody))
-		t.Logf("%s - createVersion: %#v", peerName, docVersion.docMeta)
		documentVersion = append(documentVersion, docVersion)
	}
	index := len(documentVersion) - 1
@@ -98,12 +165,11 @@ func createConflictingDocs(t *testing.T, dsName base.ScopeAndCollectionName, doc

// updateConflictingDocs will update a doc on each peer of the same doc ID to create conflicting document mutations, then
// returns the last peer to have a doc updated on it.
-func updateConflictingDocs(t *testing.T, dsName base.ScopeAndCollectionName, docID string, topology Topology) (lastWrite BodyAndVersion) {
+func updateConflictingDocs(dsName base.ScopeAndCollectionName, docID string, topology Topology) (lastWrite BodyAndVersion) {
	var documentVersion []BodyAndVersion
	for peerName, peer := range topology.peers.NonImportSortedPeers() {
		docBody := fmt.Sprintf(`{"activePeer": "%s", "topology": "%s", "action": "update"}`, peerName, topology.specDescription)
		docVersion := peer.WriteDocument(dsName, docID, []byte(docBody))
-		t.Logf("updateVersion: %#v", docVersion.docMeta)
		documentVersion = append(documentVersion, docVersion)
	}
	index := len(documentVersion) - 1
@@ -114,11 +180,10 @@ func updateConflictingDocs(t *testing.T, dsName base.ScopeAndCollectionName, doc

// deleteConflictDocs will delete a doc on each peer of the same doc ID to create conflicting document deletions, then
// returns the last peer to have a doc deleted on it
-func deleteConflictDocs(t *testing.T, dsName base.ScopeAndCollectionName, docID string, topology Topology) (lastWrite BodyAndVersion) {
+func deleteConflictDocs(dsName base.ScopeAndCollectionName, docID string, topology Topology) (lastWrite BodyAndVersion) {
	var documentVersion []BodyAndVersion
	for peerName, peer := range topology.peers.NonImportSortedPeers() {
		deleteVersion := peer.DeleteDocument(dsName, docID)
-		t.Logf("deleteVersion: %#v", deleteVersion)
		documentVersion = append(documentVersion, BodyAndVersion{docMeta: deleteVersion, updatePeer: peerName})
	}
	index := len(documentVersion) - 1
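
Taken together, the helpers in this diff suggest a lifecycle like the sketch below. This is a hypothetical, editor-added example (runConflictLifecycle is not part of this PR): it assumes it lives in the same package as these helpers, with dsName and topology supplied by the test harness, and it only calls functions whose signatures appear above.

// runConflictLifecycle is a hypothetical helper showing how the conflict
// helpers compose; dsName and topology come from the surrounding test.
func runConflictLifecycle(t *testing.T, dsName base.ScopeAndCollectionName, docID string, topology Topology) {
	// Write a conflicting version of the doc on every non-import peer; the
	// returned value is the last write, which the peers should converge on.
	lastWrite := createConflictingDocs(dsName, docID, topology)

	// CBL peers may retain losing writes as previous versions (pv), so this
	// asserts only the cv there and the full document version elsewhere.
	waitForCVAndBody(t, dsName, docID, lastWrite, topology)

	// Update the same doc on every peer again and wait for re-convergence.
	lastUpdate := updateConflictingDocs(dsName, docID, topology)
	waitForCVAndBody(t, dsName, docID, lastUpdate, topology)

	// Finally, tombstone the doc on every peer and wait for the tombstones to
	// converge across the non-CBL peers.
	deleteConflictDocs(dsName, docID, topology)
	waitForConvergingTombstones(t, dsName, docID, topology)
}

When the expected tombstone HLV is known up front, waitForTombstoneVersion could be used in place of waitForConvergingTombstones in a flow like this.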