From c48a4eb71f168af92511b3a8405c4050cb7e155c Mon Sep 17 00:00:00 2001 From: Tor Colvin Date: Fri, 15 Aug 2025 09:18:53 -0400 Subject: [PATCH 1/3] CBG-4434 enable conflict tests --- rest/utilities_testing_blip_client.go | 39 +++--- topologytest/couchbase_lite_mock_peer_test.go | 6 +- topologytest/hlv_test.go | 124 ++++++++++++++++-- topologytest/multi_actor_conflict_test.go | 117 +++++++---------- topologytest/peer_test.go | 2 +- topologytest/sync_gateway_peer_test.go | 6 +- 6 files changed, 190 insertions(+), 104 deletions(-) diff --git a/rest/utilities_testing_blip_client.go b/rest/utilities_testing_blip_client.go index 8a759aa7e2..0c686bb16c 100644 --- a/rest/utilities_testing_blip_client.go +++ b/rest/utilities_testing_blip_client.go @@ -413,6 +413,9 @@ func (btcc *BlipTesterCollectionClient) GetDoc(docID string) ([]byte, *db.Hybrid if latestRev == nil { return nil, nil, nil } + if latestRev.isDelete { + return nil, &latestRev.HLV, &latestRev.version + } return latestRev.body, &latestRev.HLV, &latestRev.version } @@ -491,13 +494,12 @@ func (btr *BlipTesterReplicator) Close() { } // initHandlers sets up the blip client side handles for each message type. -func (btr *BlipTesterReplicator) initHandlers(btc *BlipTesterClient) { +func (btr *BlipTesterReplicator) initHandlers(ctx context.Context, btc *BlipTesterClient) { if btr.replicationStats == nil { btr.replicationStats = db.NewBlipSyncStats() } - ctx := base.DatabaseLogCtx(base.TestCtx(btr.bt.restTester.TB()), btr.bt.restTester.GetDatabase().Name, nil) btr.bt.blipContext.DefaultHandler = btr.defaultHandler() handlers := map[string]func(*blip.Message){ db.MessageNoRev: btr.handleNoRev(ctx, btc), @@ -960,7 +962,8 @@ func (btcc *BlipTesterCollectionClient) updateLastReplicatedRev(docID string, ve rev.message = msg } -func newBlipTesterReplication(tb testing.TB, id string, btc *BlipTesterClient, skipCollectionsInitialization bool) *BlipTesterReplicator { +// newBlipTesterReplication creates a new BlipTesterReplicator with the given id and BlipTesterClient. Used to instantiate a push or pull replication for the client. +func newBlipTesterReplication(ctx context.Context, id string, btc *BlipTesterClient, skipCollectionsInitialization bool) *BlipTesterReplicator { bt := NewBlipTesterFromSpecWithRT(btc.rt, &BlipTesterSpec{ connectingUsername: btc.Username, blipProtocols: btc.SupportedBLIPProtocols, @@ -974,13 +977,13 @@ func newBlipTesterReplication(tb testing.TB, id string, btc *BlipTesterClient, s messages: make(map[blip.MessageNumber]*blip.Message), } - r.initHandlers(btc) + r.initHandlers(ctx, btc) return r } // getCollectionsForBLIP returns collections configured by a single database instance on a restTester. If only default collection exists, it will skip returning it to test "legacy" blip mode. 
-func getCollectionsForBLIP(_ testing.TB, rt *RestTester) []string { +func getCollectionsForBLIP(rt *RestTester) []string { dbc := rt.GetDatabase() var collections []string for _, collection := range dbc.CollectionByID { @@ -995,6 +998,10 @@ func getCollectionsForBLIP(_ testing.TB, rt *RestTester) []string { } func (btcRunner *BlipTestClientRunner) NewBlipTesterClientOptsWithRT(rt *RestTester, opts *BlipTesterClientOpts) (client *BlipTesterClient) { + return btcRunner.NewBlipTesterClientOptsWithRTAndContext(rt.Context(), rt, opts) +} + +func (btcRunner *BlipTestClientRunner) NewBlipTesterClientOptsWithRTAndContext(ctx context.Context, rt *RestTester, opts *BlipTesterClientOpts) (client *BlipTesterClient) { if opts == nil { opts = &BlipTesterClientOpts{} } @@ -1017,7 +1024,7 @@ func (btcRunner *BlipTestClientRunner) NewBlipTesterClientOptsWithRT(rt *RestTes hlc: rosmar.NewHybridLogicalClock(0), } btcRunner.clients[client.id] = client - client.createBlipTesterReplications() + client.createBlipTesterReplications(ctx) return client } @@ -1072,21 +1079,21 @@ func (btc *BlipTesterClient) tearDownBlipClientReplications() { } // createBlipTesterReplications creates the push and pull replications for the client. -func (btc *BlipTesterClient) createBlipTesterReplications() { +func (btc *BlipTesterClient) createBlipTesterReplications(ctx context.Context) { id, err := uuid.NewRandom() require.NoError(btc.TB(), err) - btc.pushReplication = newBlipTesterReplication(btc.TB(), "push"+id.String(), btc, btc.BlipTesterClientOpts.SkipCollectionsInitialization) - btc.pullReplication = newBlipTesterReplication(btc.TB(), "pull"+id.String(), btc, btc.BlipTesterClientOpts.SkipCollectionsInitialization) + btc.pushReplication = newBlipTesterReplication(ctx, "push"+id.String(), btc, btc.BlipTesterClientOpts.SkipCollectionsInitialization) + btc.pullReplication = newBlipTesterReplication(ctx, "pull"+id.String(), btc, btc.BlipTesterClientOpts.SkipCollectionsInitialization) - collections := getCollectionsForBLIP(btc.TB(), btc.rt) + collections := getCollectionsForBLIP(btc.rt) if !btc.BlipTesterClientOpts.SkipCollectionsInitialization && len(collections) > 0 { btc.collectionClients = make([]*BlipTesterCollectionClient, len(collections)) for i, collection := range collections { - btc.initCollectionReplication(collection, i) + btc.initCollectionReplication(ctx, collection, i) } } else { - btc.nonCollectionAwareClient = NewBlipTesterCollectionClient(btc) + btc.nonCollectionAwareClient = NewBlipTesterCollectionClient(ctx, btc) } btc.pullReplication.bt.avoidRestTesterClose = true @@ -1094,8 +1101,8 @@ func (btc *BlipTesterClient) createBlipTesterReplications() { } // initCollectionReplication initializes a BlipTesterCollectionClient for the given collection. 
-func (btc *BlipTesterClient) initCollectionReplication(collection string, collectionIdx int) { - btcReplicator := NewBlipTesterCollectionClient(btc) +func (btc *BlipTesterClient) initCollectionReplication(ctx context.Context, collection string, collectionIdx int) { + btcReplicator := NewBlipTesterCollectionClient(ctx, btc) btcReplicator.collection = collection btcReplicator.collectionIdx = collectionIdx btc.collectionClients[collectionIdx] = btcReplicator @@ -1466,8 +1473,8 @@ func (btcc *BlipTesterCollectionClient) UnsubPullChanges() { } // NewBlipTesterCollectionClient creates a collection specific client from a BlipTesterClient -func NewBlipTesterCollectionClient(btc *BlipTesterClient) *BlipTesterCollectionClient { - ctx, ctxCancel := context.WithCancel(btc.rt.Context()) +func NewBlipTesterCollectionClient(ctx context.Context, btc *BlipTesterClient) *BlipTesterCollectionClient { + ctx, ctxCancel := context.WithCancel(ctx) l := sync.RWMutex{} c := &BlipTesterCollectionClient{ ctx: ctx, diff --git a/topologytest/couchbase_lite_mock_peer_test.go b/topologytest/couchbase_lite_mock_peer_test.go index 5cd919477c..60ba906ce9 100644 --- a/topologytest/couchbase_lite_mock_peer_test.go +++ b/topologytest/couchbase_lite_mock_peer_test.go @@ -226,8 +226,10 @@ func (p *CouchbaseLiteMockPeer) CreateReplication(peer Peer, config PeerReplicat } const username = "user" sg.rt.CreateUser(username, []string{"*"}) - replication.btc = replication.btcRunner.NewBlipTesterClientOptsWithRT(sg.rt, &rest.BlipTesterClientOpts{ - Username: username, + // intentionally do not use base.TestCtx to drop test name for readability + ctx := base.CorrelationIDLogCtx(sg.rt.TB().Context(), p.name) + replication.btc = replication.btcRunner.NewBlipTesterClientOptsWithRTAndContext(ctx, sg.rt, &rest.BlipTesterClientOpts{ + Username: "user", SupportedBLIPProtocols: []string{db.CBMobileReplicationV4.SubprotocolString()}, AllowCreationWithoutBlipTesterClientRunner: true, SourceID: p.SourceID(), diff --git a/topologytest/hlv_test.go b/topologytest/hlv_test.go index a2e3db45c9..0fe3687068 100644 --- a/topologytest/hlv_test.go +++ b/topologytest/hlv_test.go @@ -15,6 +15,7 @@ import ( "github.com/couchbase/sync_gateway/base" "github.com/couchbase/sync_gateway/db" + "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) @@ -60,14 +61,122 @@ func waitForVersionAndBody(t *testing.T, dsName base.ScopeAndCollectionName, doc } // waitForCVAndBody waits for a document to reach a specific cv on all peers. +// This is used when asserting on the full HLV is impossible. If XDCR is running, then asserting on the full HLV for +// non CBL peers is possible. However, conflict resolution on Couchbase Lite means that Couchbase Lite can contain +// previous versions of a document. +// +// See following example: +// +// +- - - - - - -+ +- - - - - - -+ +// ' cluster A ' ' cluster B ' +// ' +---------+ ' ' +---------+ ' +// ' | cbs1 | ' <--> ' | cbs2 | ' +// ' +---------+ ' ' +---------+ ' +// ' +---------+ ' ' +---------+ ' +// ' | sg1 | ' ' | sg2 | ' +// ' +---------+ ' ' +---------+ ' +// +- - - - -- - + +- - - - - - -+ +// ^ ^ +// | | +// | | +// v v +// +---------+ +---------+ +// | cbl1 | | cbl2 | +// +---------+ +---------+ +// +// Couchbase Server, since conflict resolution in XDCR will overwrite the HLV. +// 1. sg1 creates unique document cv: 1@rosmar1 +// 2. sg2 creates unique document cv: 2@rosmar2 +// 3. cbl1 pulls 1@rosmar1 +// 4. cbl2 pull 2@rosmar2 +// 5. cbs1 pulls 2@rosmar2, overwriting cv:1@rosmar1 +// 6. 
cbl1 pulls 2@rosmar2, creating cv: 2@rosmar2, pv:1@rosmar1 overwriting +// Final state: +// - cv:2@rosmar2 on cbs1, cbs2, cbl2 +// - cv:2@rosmar2, pv:1@rosmar1 on cbl1 func waitForCVAndBody(t *testing.T, dsName base.ScopeAndCollectionName, docID string, expectedVersion BodyAndVersion, topology Topology) { t.Logf("waiting for doc version on all peers, written from %s: %#v", expectedVersion.updatePeer, expectedVersion) for _, peer := range topology.SortedPeers() { t.Logf("waiting for doc version on peer %s, written from %s: %#v", peer, expectedVersion.updatePeer, expectedVersion) - body := peer.WaitForCV(dsName, docID, expectedVersion.docMeta, topology) + var body db.Body + if peer.Type() == PeerTypeCouchbaseLite { + body = peer.WaitForCV(dsName, docID, expectedVersion.docMeta, topology) + } else { + body = peer.WaitForDocVersion(dsName, docID, expectedVersion.docMeta, topology) + } requireBodyEqual(t, expectedVersion.body, body) } } + +// waitForConvergingTombstones waits for all peers to have a tombstone document for a given doc ID. A matching HLV import ( +// guaranteed for all Couchbase Server / Sync Gateway versions, but not CBL versions. +// +// See following example: +// +// +- - - - - - -+ +- - - - - - -+ +// ' cluster A ' ' cluster B ' +// ' +---------+ ' ' +---------+ ' +// ' | cbs1 | ' <--> ' | cbs2 | ' +// ' +---------+ ' ' +---------+ ' +// ' +---------+ ' ' +---------+ ' +// ' | sg1 | ' ' | sg2 | ' +// ' +---------+ ' ' +---------+ ' +// +- - - - - - -+ +- - - - - - -+ +// ^ ^ +// | | +// | | +// v v +// +---------+ +---------+ +// | cbl1 | | cbl2 | +// +---------+ +---------+ +// +// There is a converging document + HLV on all peers. +// 1. cbl1 deletes document cv: 5@cbl1 +// 2. cbl2 deletes document cv: 6@cbl2 +// 3. sg1 deletes document cv: 7@rosmar1 +// 4. sg2 deletes document cv: 8@rosmar2 +// 5. cbl2 pulls from sg2, creates 8@rosmar2;6@cbl2 +// 6. cbl1 pulls from sg1, creates 7@rosmar1;5@cbl1 +// 7. cbs1 pulls from cbs2, creating cv:8@rosmar2. This version isn't imported, so doesn't get recognized as needing +// to replicate to Couchbase Lite. +// +// Final state: +// - CBS1, CBS2: 8@rosmar2 +// - CBL1: 7@rosmar1;5@cbl1 +// - CBL2: 8@rosmar2;6@cbl2 +func waitForConvergingTombstones(t *testing.T, dsName base.ScopeAndCollectionName, docID string, topology Topology) { + t.Logf("waiting for converging tombstones") + require.EventuallyWithT(t, func(c *assert.CollectT) { + nonCBLVersions := make(map[string]DocMetadata) + CBLVersions := make(map[string]DocMetadata) + for peerName, peer := range topology.SortedPeers() { + meta, body, exists := peer.GetDocumentIfExists(dsName, docID) + if !assert.True(c, exists, "doc %s does not exist on peer %s", docID, peer) { + return + } + if !assert.Nil(c, body, "expected tombstone for doc %s on peer %s", docID, peer) { + return + } + switch peer.Type() { + case PeerTypeCouchbaseLite: + CBLVersions[peerName] = meta + default: + nonCBLVersions[peerName] = meta + } + } + var nonCBLVersion *DocMetadata + for peer, version := range nonCBLVersions { + if nonCBLVersion == nil { + nonCBLVersion = &version + continue + } + assertHLVEqual(c, dsName, docID, peer, version, nil, *nonCBLVersion, topology) + } + // Is there a way to do any assertion on the CBL tombstone versions? + }, totalWaitTime, pollInterval) +} + +// waitForTombstoneVersion waits for a tombstone document with a particular HLV to be present on all peers. 
func waitForTombstoneVersion(t *testing.T, dsName base.ScopeAndCollectionName, docID string, expectedVersion BodyAndVersion, topology Topology) { t.Logf("waiting for tombstone version on all peers, written from %s: %#v", expectedVersion.updatePeer, expectedVersion) for _, peer := range topology.SortedPeers() { @@ -78,16 +187,11 @@ func waitForTombstoneVersion(t *testing.T, dsName base.ScopeAndCollectionName, d // createConflictingDocs will create a doc on each peer of the same doc ID to create conflicting documents, then // returns the last peer to have a doc created on it -func createConflictingDocs(t *testing.T, dsName base.ScopeAndCollectionName, docID string, topology Topology) (lastWrite BodyAndVersion) { +func createConflictingDocs(dsName base.ScopeAndCollectionName, docID string, topology Topology) (lastWrite BodyAndVersion) { var documentVersion []BodyAndVersion for peerName, peer := range topology.peers.NonImportSortedPeers() { - if peer.Type() == PeerTypeCouchbaseLite { - // FIXME: Skipping Couchbase Lite tests for multi actor conflicts, CBG-4434 - continue - } docBody := fmt.Sprintf(`{"activePeer": "%s", "topology": "%s", "action": "create"}`, peerName, topology.specDescription) docVersion := peer.CreateDocument(dsName, docID, []byte(docBody)) - t.Logf("%s - createVersion: %#v", peerName, docVersion.docMeta) documentVersion = append(documentVersion, docVersion) } index := len(documentVersion) - 1 @@ -98,12 +202,11 @@ func createConflictingDocs(t *testing.T, dsName base.ScopeAndCollectionName, doc // updateConflictingDocs will update a doc on each peer of the same doc ID to create conflicting document mutations, then // returns the last peer to have a doc updated on it. -func updateConflictingDocs(t *testing.T, dsName base.ScopeAndCollectionName, docID string, topology Topology) (lastWrite BodyAndVersion) { +func updateConflictingDocs(dsName base.ScopeAndCollectionName, docID string, topology Topology) (lastWrite BodyAndVersion) { var documentVersion []BodyAndVersion for peerName, peer := range topology.peers.NonImportSortedPeers() { docBody := fmt.Sprintf(`{"activePeer": "%s", "topology": "%s", "action": "update"}`, peerName, topology.specDescription) docVersion := peer.WriteDocument(dsName, docID, []byte(docBody)) - t.Logf("updateVersion: %#v", docVersion.docMeta) documentVersion = append(documentVersion, docVersion) } index := len(documentVersion) - 1 @@ -114,11 +217,10 @@ func updateConflictingDocs(t *testing.T, dsName base.ScopeAndCollectionName, doc // deleteConflictDocs will delete a doc on each peer of the same doc ID to create conflicting document deletions, then // returns the last peer to have a doc deleted on it -func deleteConflictDocs(t *testing.T, dsName base.ScopeAndCollectionName, docID string, topology Topology) (lastWrite BodyAndVersion) { +func deleteConflictDocs(dsName base.ScopeAndCollectionName, docID string, topology Topology) (lastWrite BodyAndVersion) { var documentVersion []BodyAndVersion for peerName, peer := range topology.peers.NonImportSortedPeers() { deleteVersion := peer.DeleteDocument(dsName, docID) - t.Logf("deleteVersion: %#v", deleteVersion) documentVersion = append(documentVersion, BodyAndVersion{docMeta: deleteVersion, updatePeer: peerName}) } index := len(documentVersion) - 1 diff --git a/topologytest/multi_actor_conflict_test.go b/topologytest/multi_actor_conflict_test.go index 82f0b82588..740275a34c 100644 --- a/topologytest/multi_actor_conflict_test.go +++ b/topologytest/multi_actor_conflict_test.go @@ -9,80 +9,52 @@ package topologytest 
import ( - "strings" "testing" ) // TestMultiActorConflictCreate -// 1. create document on each peer with different contents -// 2. start replications -// 3. wait for documents to exist with hlv sources equal to the number of active peers +// 1. create document on each peer with different contents +// 2. start replications +// 3. wait for documents to exist with a matching CV for Couchbase Lite peers, and a full HLV match for non Couchbase +// Lite peers. The body should match. func TestMultiActorConflictCreate(t *testing.T) { for _, topologySpec := range append(simpleTopologySpecifications, TopologySpecifications...) { t.Run(topologySpec.description, func(t *testing.T) { collectionName, topology := setupTests(t, topologySpec) docID := getDocID(t) - docVersion := createConflictingDocs(t, collectionName, docID, topology) + docVersion := createConflictingDocs(collectionName, docID, topology) topology.StartReplications() - // Can not assert on full HLV here. CV should converge, but CBL actors can have PV that does not match that of the other peers. - // + - - - - - - + +- - - - - - -+ - // ' cluster A ' ' cluster B ' - // ' +---------+ ' ' +---------+ ' - // ' | cbs1 | ' <--> ' | cbs2 | ' - // ' +---------+ ' ' +---------+ ' - // ' +---------+ ' ' +---------+ ' - // ' | sg1 | ' ' | sg2 | ' - // ' +---------+ ' ' +---------+ ' - // + - - - - - - + +- - - - - - -+ - // ^ ^ - // | | - // | | - // v v - // +---------+ +---------+ - // | cbl1 | | cbl2 | - // +---------+ +---------+ - // Couchbase Server, since conflict resolution in XDCR will overwrite the HLV. - // 1. sg1 creates unique document cv: 1@rosmar1 - // 2. sg2 creates unique document cv: 2@rosmar2 - // 3. cbl1 pulls 1@rosmar1 - // 4. cbl2 pull 2@rosmar2 - // 5. cbs1 pulls 2@rosmar2, overwriting cv:1@rosmar1 - // 6. cbl1 pulls 2@rosmar2, creating cv: 2@rosmar2, pv:1@rosmar1 overwriting - // Final state: - // - cv:2@rosmar2 on cbs1, cbs2, cbl2 - // - cv:2@rosmar2, pv:1@rosmar1 on cbl1 waitForCVAndBody(t, collectionName, docID, docVersion, topology) }) } } // TestMultiActorConflictUpdate -// 1. create document on each peer with different contents -// 2. start replications -// 3. wait for documents to exist with hlv sources equal to the number of active peers -// 4. stop replications -// 5. update documents on all peers -// 6. start replications -// 7. assert that the documents are deleted on all peers and have hlv sources equal to the number of active peers +// 1. create document on each peer with different contents +// 2. start replications +// 3. wait for documents to exist with a matching CV for Couchbase Lite peers, and a full HLV match for non Couchbase +// Lite peers. The body should match. +// 4. stop replications +// 5. update documents on all peers, with unique body contents. +// 6. start replications +// 7. wait for documents to exist with a matching CV for Couchbase Lite peers, and a full HLV match for non Couchbase +// Lite peers. The body should match. func TestMultiActorConflictUpdate(t *testing.T) { for _, topologySpec := range append(simpleTopologySpecifications, TopologySpecifications...) 
{ - if strings.Contains(topologySpec.description, "CBL") { - t.Skip("CBL actor can generate conflicts and push replication fails with conflict for doc in blip tester CBL-4267") - } t.Run(topologySpec.description, func(t *testing.T) { collectionName, topology := setupTests(t, topologySpec) docID := getDocID(t) - docVersion := createConflictingDocs(t, collectionName, docID, topology) + docVersion := createConflictingDocs(collectionName, docID, topology) topology.StartReplications() - waitForVersionAndBody(t, collectionName, docID, docVersion, topology) + waitForCVAndBody(t, collectionName, docID, docVersion, topology) topology.StopReplications() - docVersion = updateConflictingDocs(t, collectionName, docID, topology) + docVersion = updateConflictingDocs(collectionName, docID, topology) topology.StartReplications() - waitForVersionAndBody(t, collectionName, docID, docVersion, topology) + waitForCVAndBody(t, collectionName, docID, docVersion, topology) }) } } @@ -97,64 +69,63 @@ func TestMultiActorConflictUpdate(t *testing.T) { // 7. assert that the documents are deleted on all peers and have hlv sources equal to the number of active peers func TestMultiActorConflictDelete(t *testing.T) { for _, topologySpec := range append(simpleTopologySpecifications, TopologySpecifications...) { - if strings.Contains(topologySpec.description, "CBL") { - t.Skip("CBL actor can generate conflicts and push replication fails with conflict for doc in blip tester CBL-4267") - } t.Run(topologySpec.description, func(t *testing.T) { collectionName, topology := setupTests(t, topologySpec) docID := getDocID(t) - docVersion := createConflictingDocs(t, collectionName, docID, topology) + docVersion := createConflictingDocs(collectionName, docID, topology) topology.StartReplications() - waitForVersionAndBody(t, collectionName, docID, docVersion, topology) + waitForCVAndBody(t, collectionName, docID, docVersion, topology) topology.StopReplications() - lastWrite := deleteConflictDocs(t, collectionName, docID, topology) + deleteConflictDocs(collectionName, docID, topology) topology.StartReplications() - waitForTombstoneVersion(t, collectionName, docID, lastWrite, topology) + waitForConvergingTombstones(t, collectionName, docID, topology) }) } } // TestMultiActorConflictResurrect -// 1. create document on each peer with different contents -// 2. start replications -// 3. wait for documents to exist with hlv sources equal to the number of active peers and the document body is equivalent to the last write -// 4. stop replications -// 5. delete documents on all peers -// 6. start replications -// 7. assert that the documents are deleted on all peers and have hlv sources equal to the number of active peers -// 8. stop replications -// 9. resurrect documents on all peers with unique contents -// 10. start replications -// 11. assert that the documents are resurrected on all peers and have hlv sources equal to the number of active peers and the document body is equivalent to the last write +// 1. create document on each peer with different contents +// 2. start replications +// 3. wait for documents to exist with hlv sources equal to the number of active peers and the document body is +// equivalent to the last write. The assertion is presently only on CV for Couchbase Lite peers, and full HLV +// for other peer types. +// 4. stop replications +// 5. delete documents on all peers +// 6. start replications +// 7. assert that the documents are deleted on all peers and that there is a converging tombstone. 
In this case, +// there is no assertion other than deletion for Couchbase Lite peers, but there is a full HLV assertion for other +// peer types. +// 8. stop replications +// 9. resurrect documents on all peers with unique contents +// 10. start replications +// 11. assert that the documents are resurrected on all peers and have matching hlvs for non Couchbase Lite peers and +// matching CV for Couchbase Lite peers. func TestMultiActorConflictResurrect(t *testing.T) { for _, topologySpec := range append(simpleTopologySpecifications, TopologySpecifications...) { - if strings.Contains(topologySpec.description, "CBL") { - t.Skip("CBL actor can generate conflicts and push replication fails with conflict for doc in blip tester CBL-4267") - } t.Run(topologySpec.description, func(t *testing.T) { collectionName, topology := setupTests(t, topologySpec) docID := getDocID(t) - docVersion := createConflictingDocs(t, collectionName, docID, topology) + docVersion := createConflictingDocs(collectionName, docID, topology) topology.StartReplications() - waitForVersionAndBody(t, collectionName, docID, docVersion, topology) + waitForCVAndBody(t, collectionName, docID, docVersion, topology) topology.StopReplications() - lastWrite := deleteConflictDocs(t, collectionName, docID, topology) + deleteConflictDocs(collectionName, docID, topology) topology.StartReplications() - waitForTombstoneVersion(t, collectionName, docID, lastWrite, topology) + waitForConvergingTombstones(t, collectionName, docID, topology) topology.StopReplications() - lastWriteVersion := updateConflictingDocs(t, collectionName, docID, topology) + resurrectVersion := updateConflictingDocs(collectionName, docID, topology) topology.StartReplications() - waitForVersionAndBody(t, collectionName, docID, lastWriteVersion, topology) + waitForCVAndBody(t, collectionName, docID, resurrectVersion, topology) }) } } diff --git a/topologytest/peer_test.go b/topologytest/peer_test.go index 61ea79c27b..ad6398a6cf 100644 --- a/topologytest/peer_test.go +++ b/topologytest/peer_test.go @@ -419,7 +419,7 @@ func createPeers(t *testing.T, peersOptions map[string]PeerOptions) Peers { // setupTests returns a map of peers and a list of replications. The peers will be closed and the buckets will be destroyed by t.Cleanup. func setupTests(t *testing.T, topology TopologySpecification) (base.ScopeAndCollectionName, Topology) { - base.SetUpTestLogging(t, base.LevelDebug, base.KeyImport, base.KeyVV, base.KeyCRUD, base.KeySync) + base.SetUpTestLogging(t, base.LevelDebug, base.KeyImport, base.KeyVV, base.KeyCRUD, base.KeySync, base.KeySGTest) peers := createPeers(t, topology.peers) replications := createPeerReplications(t, peers, topology.replications) return getSingleDsName(), Topology{peers: peers, replications: replications} diff --git a/topologytest/sync_gateway_peer_test.go b/topologytest/sync_gateway_peer_test.go index 6ea92c0c12..f2656636e6 100644 --- a/topologytest/sync_gateway_peer_test.go +++ b/topologytest/sync_gateway_peer_test.go @@ -74,7 +74,11 @@ func (p *SyncGatewayPeer) GetDocumentIfExists(dsName sgbucket.DataStoreName, doc return DocMetadata{}, nil, false } require.NoError(p.TB(), err) - return DocMetadataFromDocument(doc), base.Ptr(doc.Body(ctx)), true + meta = DocMetadataFromDocument(doc) + if doc.IsDeleted() { + return meta, nil, true + } + return meta, base.Ptr(doc.Body(ctx)), true } // CreateDocument creates a document on the peer. The test will fail if the document already exists. 
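
For illustration only (not part of the patch series): a minimal sketch of how the context-aware constructor added in patch 1 is meant to be used, mirroring the CouchbaseLiteMockPeer change above. The surrounding scaffolding (rt, btcRunner, the pre-created "user" account) and the peer name "cbl1" are assumptions taken from the existing blip tester tests rather than from this patch.

	// Intentionally drop the test name from the log context for readability and tag the
	// client's logging with a peer-style correlation ID instead.
	ctx := base.CorrelationIDLogCtx(rt.TB().Context(), "cbl1")
	btc := btcRunner.NewBlipTesterClientOptsWithRTAndContext(ctx, rt, &rest.BlipTesterClientOpts{
		Username:                                   "user",
		SupportedBLIPProtocols:                     []string{db.CBMobileReplicationV4.SubprotocolString()},
		AllowCreationWithoutBlipTesterClientRunner: true,
		SourceID:                                   "cbl1",
	})
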
From 1953efcdff662090f7594f2aef7eb9e9182c921e Mon Sep 17 00:00:00 2001
From: Tor Colvin
Date: Tue, 19 Aug 2025 08:57:31 -0400
Subject: [PATCH 2/3] Apply suggestion from @adamcfraser

Co-authored-by: Adam Fraser
---
 topologytest/hlv_test.go | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/topologytest/hlv_test.go b/topologytest/hlv_test.go
index 0fe3687068..d3060e0136 100644
--- a/topologytest/hlv_test.go
+++ b/topologytest/hlv_test.go
@@ -61,9 +61,9 @@ func waitForVersionAndBody(t *testing.T, dsName base.ScopeAndCollectionName, doc
 }
 
 // waitForCVAndBody waits for a document to reach a specific cv on all peers.
-// This is used when asserting on the full HLV is impossible. If XDCR is running, then asserting on the full HLV for
-// non CBL peers is possible. However, conflict resolution on Couchbase Lite means that Couchbase Lite can contain
-// previous versions of a document.
+// This is used for scenarios where it's valid for the full HLV to not converge.  This includes cases where
+// CBL conflict resolution results in additional history in the CBL version of the HLV that may not be pushed
+// to CBS (e.g. remote wins)
 //
 // See following example:
 //

From 7b2723046090a3b1984222b06f8c66c13faf7b92 Mon Sep 17 00:00:00 2001
From: Tor Colvin
Date: Tue, 19 Aug 2025 09:11:04 -0400
Subject: [PATCH 3/3] improve comments

---
 topologytest/hlv_test.go                  | 21 +++++++++++----------
 topologytest/multi_actor_conflict_test.go |  3 +--
 2 files changed, 12 insertions(+), 12 deletions(-)

diff --git a/topologytest/hlv_test.go b/topologytest/hlv_test.go
index d3060e0136..dda15e9ee9 100644
--- a/topologytest/hlv_test.go
+++ b/topologytest/hlv_test.go
@@ -61,7 +61,7 @@ func waitForVersionAndBody(t *testing.T, dsName base.ScopeAndCollectionName, doc
 }
 
 // waitForCVAndBody waits for a document to reach a specific cv on all peers.
-// This is used for scenarios where it's valid for the full HLV to not converge.  This includes cases where
+// This is used for scenarios where it's valid for the full HLV to not converge. This includes cases where
 // CBL conflict resolution results in additional history in the CBL version of the HLV that may not be pushed
 // to CBS (e.g. remote wins)
 //
@@ -108,10 +108,16 @@ func waitForCVAndBody(t *testing.T, dsName base.ScopeAndCollectionName, docID st
 	}
 }
 
-// waitForConvergingTombstones waits for all peers to have a tombstone document for a given doc ID. A matching HLV import (
-// guaranteed for all Couchbase Server / Sync Gateway versions, but not CBL versions.
+// waitForConvergingTombstones waits for all peers to have a tombstone document for a given doc ID. This is the
+// equivalent function to waitForCVAndBody if the expected document is a tombstone.
 //
-// See following example:
+// Couchbase Server and Sync Gateway peers will have matching HLVs due to XDCR conflict resolution always overwriting
+// HLVs. However, it is possible that XDCR will replicate a tombstone from one Couchbase Server to another Couchbase
+// Server and update its HLV. Since tombstones are not imported by Sync Gateway, this CV will not be replicated to
+// Couchbase Lite.
+//
+// In this case, all peers will have a tombstone for this document, but no assertions can be made on Couchbase Lite
+// peers.
See following example: // // +- - - - - - -+ +- - - - - - -+ // ' cluster A ' ' cluster B ' @@ -148,7 +154,6 @@ func waitForConvergingTombstones(t *testing.T, dsName base.ScopeAndCollectionNam t.Logf("waiting for converging tombstones") require.EventuallyWithT(t, func(c *assert.CollectT) { nonCBLVersions := make(map[string]DocMetadata) - CBLVersions := make(map[string]DocMetadata) for peerName, peer := range topology.SortedPeers() { meta, body, exists := peer.GetDocumentIfExists(dsName, docID) if !assert.True(c, exists, "doc %s does not exist on peer %s", docID, peer) { @@ -157,10 +162,7 @@ func waitForConvergingTombstones(t *testing.T, dsName base.ScopeAndCollectionNam if !assert.Nil(c, body, "expected tombstone for doc %s on peer %s", docID, peer) { return } - switch peer.Type() { - case PeerTypeCouchbaseLite: - CBLVersions[peerName] = meta - default: + if peer.Type() != PeerTypeCouchbaseLite { nonCBLVersions[peerName] = meta } } @@ -172,7 +174,6 @@ func waitForConvergingTombstones(t *testing.T, dsName base.ScopeAndCollectionNam } assertHLVEqual(c, dsName, docID, peer, version, nil, *nonCBLVersion, topology) } - // Is there a way to do any assertion on the CBL tombstone versions? }, totalWaitTime, pollInterval) } diff --git a/topologytest/multi_actor_conflict_test.go b/topologytest/multi_actor_conflict_test.go index 740275a34c..1e1982f1f6 100644 --- a/topologytest/multi_actor_conflict_test.go +++ b/topologytest/multi_actor_conflict_test.go @@ -90,8 +90,7 @@ func TestMultiActorConflictDelete(t *testing.T) { // 1. create document on each peer with different contents // 2. start replications // 3. wait for documents to exist with hlv sources equal to the number of active peers and the document body is -// equivalent to the last write. The assertion is presently only on CV for Couchbase Lite peers, and full HLV -// for other peer types. +// equivalent to the last write. // 4. stop replications // 5. delete documents on all peers // 6. start replications
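
As a reading aid (not part of the patch series): the order of operations that the converging-tombstone helpers support, pulled together from the scattered hunks above. Everything below uses only functions introduced or touched in patch 1; the wrapper name and the idea of parameterising it on a single TopologySpecification are hypothetical.

// testConflictingDeleteConverges is a hypothetical wrapper showing the intended flow for a
// multi-actor delete: create conflicting docs, converge on a CV, then delete everywhere and
// wait for tombstones to converge.
func testConflictingDeleteConverges(t *testing.T, spec TopologySpecification) {
	collectionName, topology := setupTests(t, spec)
	docID := getDocID(t)

	// Write a conflicting version of the doc on every non-import peer, then let replication run.
	docVersion := createConflictingDocs(collectionName, docID, topology)
	topology.StartReplications()
	waitForCVAndBody(t, collectionName, docID, docVersion, topology)

	// Delete on every peer while replication is stopped, then check that tombstones converge.
	topology.StopReplications()
	deleteConflictDocs(collectionName, docID, topology)
	topology.StartReplications()
	waitForConvergingTombstones(t, collectionName, docID, topology)
}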