
Commit bac31f7

ring: reduce SetAddrs shards lock contention
Introduces a new lock to make `SetAddrs` calls exclusive. This enables removal of `ringSharding` field locking for the duration of the potentially long `newRingShards` call. Newly created shards are closed if `ringSharding` was closed while `SetAddrs` was running. With this change, `TestRingSetAddrsContention` observes the number of pings increase from ~200 to ~30_000. See the related discussion in redis#2190 (comment).
1 parent 69a9494 commit bac31f7
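
The pattern in this commit, reduced to a minimal standalone sketch (types and helpers are simplified stand-ins, not the exact go-redis internals): an outer mutex serializes `SetAddrs` callers, the inner `RWMutex` is held only for short reads and the final swap, and `closed` is re-checked after the slow, unlocked build step.

```go
package main

import "sync"

// ringShard stands in for go-redis's ringShard; only what the sketch needs.
type ringShard struct{ addr string }

// sharding is a simplified stand-in for ringSharding.
type sharding struct {
	addrsMu sync.Mutex            // serializes SetAddrs callers
	mu      sync.RWMutex          // guards only shards and closed
	shards  map[string]*ringShard // shard name -> shard
	closed  bool
}

func (c *sharding) SetAddrs(addrs map[string]string) {
	// Outer lock: at most one SetAddrs runs at a time, but readers
	// holding mu.RLock are never blocked for the whole rebuild.
	c.addrsMu.Lock()
	defer c.addrsMu.Unlock()

	// Snapshot current state under a short read lock.
	c.mu.RLock()
	if c.closed {
		c.mu.RUnlock()
		return
	}
	existing := c.shards
	c.mu.RUnlock()

	// The potentially slow part (dialing new shards) runs unlocked.
	// Index existing shards by address so they can be reused.
	unused := make(map[string]*ringShard)
	for _, shard := range existing {
		unused[shard.addr] = shard
	}
	next := make(map[string]*ringShard, len(addrs))
	for name, addr := range addrs {
		if shard, ok := unused[addr]; ok {
			next[name] = shard // reuse
			delete(unused, addr)
		} else {
			next[name] = &ringShard{addr: addr} // "created"
		}
	}

	// Swap in the result, re-checking closed: Close may have run while
	// we were building, in which case the new shards must be discarded.
	c.mu.Lock()
	if c.closed {
		c.mu.Unlock()
		// a real implementation closes the newly created shards here
		return
	}
	c.shards = next
	c.mu.Unlock()
	// the real code rebalances, then closes the leftover unused shards
}

func main() {
	c := &sharding{shards: map[string]*ringShard{}}
	c.SetAddrs(map[string]string{"one": ":6390", "two": ":6391"})
}
```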

3 files changed: +46 −32 lines changed

export_test.go

Lines changed: 2 additions & 5 deletions

```diff
@@ -95,9 +95,6 @@ func GetSlavesAddrByName(ctx context.Context, c *SentinelClient, name string) []
 }
 
 func (c *Ring) ShardByName(name string) *ringShard {
-	return c.sharding.ShardByName(name)
-}
-
-func (c *ringSharding) ShardByName(name string) *ringShard {
-	return c.shards.m[name]
+	shard, _ := c.sharding.GetByName(name)
+	return shard
 }
```

ring.go

Lines changed: 34 additions & 21 deletions

```diff
@@ -213,6 +213,7 @@ func (shard *ringShard) Vote(up bool) bool {
 type ringSharding struct {
 	opt *RingOptions
 
+	addrsMu sync.Mutex
 	mu      sync.RWMutex
 	shards  *ringShards
 	closed  bool
@@ -245,46 +246,64 @@ func (c *ringSharding) OnNewNode(fn func(rdb *Client)) {
 // decrease number of shards, that you use. It will reuse shards that
 // existed before and close the ones that will not be used anymore.
 func (c *ringSharding) SetAddrs(addrs map[string]string) {
-	c.mu.Lock()
+	c.addrsMu.Lock()
+	defer c.addrsMu.Unlock()
 
+	closeShards := func(shards map[string]*ringShard) {
+		for addr, shard := range shards {
+			if err := shard.Client.Close(); err != nil {
+				internal.Logger.Printf(context.Background(), "shard.Close %s failed: %s", addr, err)
+			}
+		}
+	}
+
+	c.mu.RLock()
 	if c.closed {
-		c.mu.Unlock()
+		c.mu.RUnlock()
 		return
 	}
+	shards := c.shards
+	c.mu.RUnlock()
 
-	shards, cleanup := c.newRingShards(addrs, c.shards)
+	shards, created, unused := c.newRingShards(addrs, shards)
+
+	c.mu.Lock()
+	if c.closed {
+		closeShards(created)
+		c.mu.Unlock()
+		return
+	}
 	c.shards = shards
 	c.mu.Unlock()
 
 	c.rebalance()
-	cleanup()
+	closeShards(unused)
 }
 
 func (c *ringSharding) newRingShards(
 	addrs map[string]string, existingShards *ringShards,
-) (*ringShards, func()) {
-	shardMap := make(map[string]*ringShard)     // indexed by addr
-	unusedShards := make(map[string]*ringShard) // indexed by addr
+) (shards *ringShards, created, unused map[string]*ringShard) {
+	created = make(map[string]*ringShard)
+	unused = make(map[string]*ringShard)
 
 	if existingShards != nil {
 		for _, shard := range existingShards.list {
-			addr := shard.Client.opt.Addr
-			shardMap[addr] = shard
-			unusedShards[addr] = shard
+			unused[shard.addr] = shard
 		}
 	}
 
-	shards := &ringShards{
-		m: make(map[string]*ringShard),
+	shards = &ringShards{
+		m: make(map[string]*ringShard, len(addrs)),
 	}
 
 	for name, addr := range addrs {
-		if shard, ok := shardMap[addr]; ok {
+		if shard, ok := unused[addr]; ok {
 			shards.m[name] = shard
-			delete(unusedShards, addr)
+			delete(unused, addr)
 		} else {
 			shard := newRingShard(c.opt, addr)
 			shards.m[name] = shard
+			created[addr] = shard
 
 			for _, fn := range c.onNewNode {
 				fn(shard.Client)
@@ -296,13 +315,7 @@ func (c *ringSharding) newRingShards(
 		shards.list = append(shards.list, shard)
 	}
 
-	return shards, func() {
-		for addr, shard := range unusedShards {
-			if err := shard.Client.Close(); err != nil {
-				internal.Logger.Printf(context.Background(), "shard.Close %s failed: %s", addr, err)
-			}
-		}
-	}
+	return
 }
 
 func (c *ringSharding) List() []*ringShard {
```

ring_test.go

Lines changed: 10 additions & 6 deletions

```diff
@@ -123,16 +123,18 @@ var _ = Describe("Redis Ring", func() {
 			"ringShardOne": ":" + ringShard1Port,
 		})
 		Expect(ring.Len(), 1)
+
 		gotShard := ring.ShardByName("ringShardOne")
-		Expect(gotShard).To(Equal(wantShard))
+		Expect(gotShard).To(BeIdenticalTo(wantShard))
 
 		ring.SetAddrs(map[string]string{
 			"ringShardOne": ":" + ringShard1Port,
 			"ringShardTwo": ":" + ringShard2Port,
 		})
 		Expect(ring.Len(), 2)
+
 		gotShard = ring.ShardByName("ringShardOne")
-		Expect(gotShard).To(Equal(wantShard))
+		Expect(gotShard).To(BeIdenticalTo(wantShard))
 	})
 
 	It("uses 3 shards after setting it to 3 shards", func() {
@@ -153,23 +155,25 @@ var _ = Describe("Redis Ring", func() {
 			shardName3: shardAddr3,
 		})
 		Expect(ring.Len(), 3)
+
 		gotShard1 := ring.ShardByName(shardName1)
 		gotShard2 := ring.ShardByName(shardName2)
 		gotShard3 := ring.ShardByName(shardName3)
-		Expect(gotShard1).To(Equal(wantShard1))
-		Expect(gotShard2).To(Equal(wantShard2))
+		Expect(gotShard1).To(BeIdenticalTo(wantShard1))
+		Expect(gotShard2).To(BeIdenticalTo(wantShard2))
 		Expect(gotShard3).ToNot(BeNil())
 
 		ring.SetAddrs(map[string]string{
 			shardName1: shardAddr1,
 			shardName2: shardAddr2,
 		})
 		Expect(ring.Len(), 2)
+
 		gotShard1 = ring.ShardByName(shardName1)
 		gotShard2 = ring.ShardByName(shardName2)
 		gotShard3 = ring.ShardByName(shardName3)
-		Expect(gotShard1).To(Equal(wantShard1))
-		Expect(gotShard2).To(Equal(wantShard2))
+		Expect(gotShard1).To(BeIdenticalTo(wantShard1))
+		Expect(gotShard2).To(BeIdenticalTo(wantShard2))
 		Expect(gotShard3).To(BeNil())
 	})
 })
```
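
The `TestRingSetAddrsContention` test cited in the commit message is not part of this diff. A rough sketch of what such a contention check could look like, assuming a Redis server on `:6379`, the `github.com/go-redis/redis/v9` import path, and the public `Ring.SetAddrs` added in this change series (the function name, duration, and counting scheme are illustrative, not the actual test):

```go
package redis_test

import (
	"context"
	"sync"
	"testing"
	"time"

	"github.com/go-redis/redis/v9"
)

// Illustrative contention check: count how many pings complete while
// SetAddrs churns concurrently for one second.
func TestSetAddrsContentionSketch(t *testing.T) {
	ring := redis.NewRing(&redis.RingOptions{
		Addrs: map[string]string{"one": ":6379"},
	})
	defer ring.Close()

	ctx, cancel := context.WithTimeout(context.Background(), time.Second)
	defer cancel()

	var wg sync.WaitGroup
	wg.Add(1)
	go func() { // writer: keep reassigning the same address set
		defer wg.Done()
		for ctx.Err() == nil {
			ring.SetAddrs(map[string]string{"one": ":6379"})
		}
	}()

	pings := 0 // reader: ping as fast as possible
	for ctx.Err() == nil {
		if err := ring.Ping(ctx).Err(); err == nil {
			pings++
		}
	}
	wg.Wait()
	t.Logf("pings completed in 1s: %d", pings)
}
```

Before this commit, each `SetAddrs` held the sharding lock across shard construction, so the reader goroutine stalled behind it; with the outer `addrsMu` the long build runs unlocked, which is what the ~200 → ~30_000 ping counts in the commit message reflect.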
