// Copyright (C) 2016 The GoHBase Authors. All rights reserved.
// This file is part of GoHBase.
// Use of this source code is governed by the Apache License 2.0
// that can be found in the COPYING file.

package gohbase

import (
	"bytes"
	"fmt"
	"io"
	"log/slog"
	"sync"

	"github.com/tsuna/gohbase/hrpc"
	"modernc.org/b/v2"
)

// clientRegionCache is a client -> region cache. Used to quickly
// look up all the RegionInfos that map to a specific client.
type clientRegionCache struct {
	m sync.RWMutex

	logger *slog.Logger

	regions map[hrpc.RegionClient]map[hrpc.RegionInfo]struct{}
}

// put associates a region with the client for the provided address. It returns the client
// if it's already in the cache, or otherwise instantiates a new one by calling newClient.
// TODO: obvious place for optimization (use map with address as key to look up existing clients)
func (rcc *clientRegionCache) put(addr string, r hrpc.RegionInfo,
	newClient func() hrpc.RegionClient) hrpc.RegionClient {
	rcc.m.Lock()
	for existingClient, regions := range rcc.regions {
		// check if the client already exists, comparing by host and port
		// because concurrent callers might try to put the same client
		if addr == existingClient.Addr() {
			// check whether the client already knows about the region; checking
			// by pointer is enough because we make sure that there are
			// no regions with the same name around
			if _, ok := regions[r]; !ok {
				regions[r] = struct{}{}
			}
			rcc.m.Unlock()
			rcc.logger.Debug("region client is already in client's cache",
				"client", existingClient)
			return existingClient
		}
	}
	// no such client yet
	c := newClient()
	rcc.regions[c] = map[hrpc.RegionInfo]struct{}{r: {}}
	rcc.m.Unlock()
	rcc.logger.Info("added new region client", "client", c)
	return c
}
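
// An illustrative sketch of driving put (newRegionClientFn is a hypothetical
// factory, not part of this file): the closure is only invoked when no client
// for the address is cached yet, so concurrent callers converge on one client.
//
//	c := rcc.put("regionserver-1:16020", reg, func() hrpc.RegionClient {
//		return newRegionClientFn("regionserver-1:16020")
//	})
//	reg.SetClient(c)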

func (rcc *clientRegionCache) del(r hrpc.RegionInfo) {
	rcc.m.Lock()
	c := r.Client()
	if c != nil {
		r.SetClient(nil)

		regions := rcc.regions[c]
		delete(regions, r)
	}
	rcc.m.Unlock()
}

func (rcc *clientRegionCache) closeAll() {
	rcc.m.Lock()
	for client, regions := range rcc.regions {
		for region := range regions {
			region.MarkUnavailable()
			region.SetClient(nil)
		}
		client.Close()
	}
	rcc.m.Unlock()
}

func (rcc *clientRegionCache) clientDown(c hrpc.RegionClient) map[hrpc.RegionInfo]struct{} {
	rcc.m.Lock()
	downregions, ok := rcc.regions[c]
	delete(rcc.regions, c)
	rcc.m.Unlock()

	if ok {
		rcc.logger.Info("removed region client", "client", c)
	}
	return downregions
}
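
// A minimal sketch of how a caller might react to a dead client: every region
// that the client served gets re-resolved (reestablishRegion is a hypothetical
// helper, not defined in this file).
//
//	for reg := range rcc.clientDown(c) {
//		reg.MarkUnavailable()
//		go reestablishRegion(reg) // hypothetical re-resolution routine
//	}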

// debugInfo collects information about the clientRegionCache and appends it to the
// two given maps to reduce duplication of data. We do this in one function to avoid
// running the iterations twice.
func (rcc *clientRegionCache) debugInfo(
	regions map[string]hrpc.RegionInfo,
	clients map[string]hrpc.RegionClient) map[string][]string {
	// key = RegionClient memory address, value = list of RegionInfo addresses
	clientRegionCacheMap := map[string][]string{}

	rcc.m.RLock()
	for client, reginfos := range rcc.regions {
		clientRegionInfoMap := make([]string, len(reginfos))

		// put all of this client's region infos into the regions map as well, because
		// it's not guaranteed that rcc and krc will have the same infos
		clients[fmt.Sprintf("%p", client)] = client
		i := 0
		for regionInfo := range reginfos {
			clientRegionInfoMap[i] = fmt.Sprintf("%p", regionInfo)
			regions[fmt.Sprintf("%p", regionInfo)] = regionInfo
			i++
		}
		clientRegionCacheMap[fmt.Sprintf("%p", client)] = clientRegionInfoMap
	}
	rcc.m.RUnlock()
	return clientRegionCacheMap
}

// keyRegionCache is a key -> region cache.
type keyRegionCache struct {
	m sync.RWMutex

	logger *slog.Logger

	// maps a region name (a []byte that starts with the table name and the
	// region's start key) to its hrpc.RegionInfo
	regions *b.Tree[[]byte, hrpc.RegionInfo]
}
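
// Note on ordering: because a region name begins with the table name followed by
// the region's start key, seeking a search key built from (table, row) lands just
// past the region that could contain the row, so stepping the enumerator back one
// entry yields the candidate region. Both get and getOverlaps rely on this.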

func (krc *keyRegionCache) get(key []byte) ([]byte, hrpc.RegionInfo) {
	krc.m.RLock()
	enum, ok := krc.regions.Seek(key)
	if ok {
		krc.m.RUnlock()
		panic(fmt.Errorf("WTF: got exact match for region search key %q", key))
	}
	k, v, err := enum.Prev()
	enum.Close()
	krc.m.RUnlock()

	if err == io.EOF {
		// we are at the beginning of the tree
		return nil, nil
	}
	return k, v
}
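
// For example (an illustrative sketch, assuming a single cached region of table
// "t" spanning ["a", "z")): a lookup for row "m" finds no exact match, Seek
// positions the enumerator just past that region's name, and Prev returns it.
//
//	name, reg := krc.get(createRegionSearchKey([]byte("t"), []byte("m")))
//	// name is the cached region's name; reg is the region covering ["a", "z")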

// debugInfo reads the whole B-tree in the keyRegionCache and gathers debug info,
// appending that information to the given map.
func (krc *keyRegionCache) debugInfo(
	regions map[string]hrpc.RegionInfo) map[string]string {
	regionCacheMap := map[string]string{}

	krc.m.RLock()
	enum, err := krc.regions.SeekFirst()
	if err != nil {
		krc.m.RUnlock()
		return regionCacheMap
	}
	krc.m.RUnlock()

	for {
		krc.m.RLock()
		k, v, err := enum.Next()
		// release the lock after each iteration to give other goroutines a chance to grab it
		krc.m.RUnlock()
		if err == io.EOF {
			break
		}
		regions[fmt.Sprintf("%p", v)] = v
		regionCacheMap[string(k)] = fmt.Sprintf("%p", v)
	}
	return regionCacheMap
}

func isRegionOverlap(regA, regB hrpc.RegionInfo) bool {
	// if a region's stop key is empty, it's assumed to be the greatest key
	return bytes.Equal(regA.Namespace(), regB.Namespace()) &&
		bytes.Equal(regA.Table(), regB.Table()) &&
		(len(regB.StopKey()) == 0 || bytes.Compare(regA.StartKey(), regB.StopKey()) < 0) &&
		(len(regA.StopKey()) == 0 || bytes.Compare(regA.StopKey(), regB.StartKey()) > 0)
}
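
// A concrete illustration of the half-open interval check: within the same
// table, ["a", "c") and ["b", "d") overlap, while ["a", "c") and ["c", "e") do
// not, because stop keys are exclusive. An empty stop key acts as +infinity, so
// ["d", "") overlaps every region whose stop key sorts after "d" (or is empty).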

func (krc *keyRegionCache) getOverlaps(reg hrpc.RegionInfo) []hrpc.RegionInfo {
	var overlaps []hrpc.RegionInfo
	var v hrpc.RegionInfo
	var err error

	// deal with an empty tree at the beginning so that we don't have to check
	// EOF errors for enum later
	if krc.regions.Len() == 0 {
		return overlaps
	}

	// check if a key created from the new region falls into any cached regions
	key := createRegionSearchKey(fullyQualifiedTable(reg), reg.StartKey())
	enum, ok := krc.regions.Seek(key)
	if ok {
		panic(fmt.Errorf("WTF: found a region with the exact name as the search key %q", key))
	}

	// case 1: landed before the first region in cache
	//	enum.Prev() returns io.EOF
	//	enum.Next() returns io.EOF
	//	SeekFirst() + enum.Next() returns the first region, which has a larger start key
	// case 2: landed before the second region in cache
	//	enum.Prev() returns the first region X and moves the pointer to -infinity
	//	enum.Next() returns io.EOF
	//	SeekFirst() + enum.Next() returns the first region X, which has a smaller start key
	// case 3: landed anywhere after the second region
	//	enum.Prev() returns the region X before where we landed and moves the pointer to X - 1
	//	enum.Next() returns X - 1 and moves the pointer to X, which has a smaller start key
	_, _, _ = enum.Prev()
	_, _, err = enum.Next()
	if err == io.EOF {
		// we are at the beginning of the tree, get a new enum starting
		// from the first region
		enum.Close()
		enum, err = krc.regions.SeekFirst()
		if err != nil {
			panic(fmt.Errorf(
				"error seeking first region when getting overlaps for region %v: %v", reg, err))
		}
	}

	_, v, err = enum.Next()
	if err != nil {
		panic(fmt.Errorf(
			"error accessing first region when getting overlaps for region %v: %v", reg, err))
	}
	if isRegionOverlap(v, reg) {
		overlaps = append(overlaps, v)
	}

	_, v, err = enum.Next()
	// now append all regions that overlap until the end of the tree
	// or until they don't overlap
	for err != io.EOF && isRegionOverlap(v, reg) {
		overlaps = append(overlaps, v)
		_, v, err = enum.Next()
	}
	enum.Close()
	return overlaps
}
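
// A worked example (illustrative values): with regions of one table cached as
// ["", "b"), ["b", "d") and ["d", ""), calling getOverlaps for a new region
// ["a", "c") seeks between the first two names, steps back, and walks forward
// collecting ["", "b") and ["b", "d"); iteration stops at ["d", "") because a
// start key of "d" is not less than the new region's stop key "c".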

// put checks whether there's already a region with this name in the regions cache;
// if there is, it returns it in overlaps and doesn't modify the cache. Otherwise,
// it puts the region and removes all overlaps in case all of them are older.
// Returns a slice of overlapping regions and whether the passed region was put in
// the cache.
func (krc *keyRegionCache) put(reg hrpc.RegionInfo) (overlaps []hrpc.RegionInfo, replaced bool) {
	krc.m.Lock()
	defer krc.m.Unlock()

	// update the region cache metric
	beforeLen := krc.regions.Len()
	defer func() {
		afterLen := krc.regions.Len()
		cachedRegionTotal.Add(float64(afterLen - beforeLen))
	}()

	krc.regions.Put(reg.Name(), func(v hrpc.RegionInfo, exists bool) (hrpc.RegionInfo, bool) {
		if exists {
			// region is already in the cache;
			// note: regions with the same name have the same age
			overlaps = []hrpc.RegionInfo{v}
			return nil, false
		}
		// find all entries that overlap with the range of the new region
		overlaps = krc.getOverlaps(reg)
		for _, o := range overlaps {
			if o.ID() > reg.ID() {
				// an overlapping region is younger,
				// don't replace any regions
				// TODO: figure out if there can be a case where we might
				// have both older and younger overlapping regions; for
				// now we only replace if all overlaps are older
				return nil, false
			}
		}
		// all overlaps are older, put the new region
		replaced = true
		return reg, true
	})
	if !replaced {
		krc.logger.Debug("region is already in cache",
			"region", reg, "overlaps", overlaps, "replaced", replaced)
		return
	}

	// delete the overlapping regions
	// TODO: in case overlaps are always either younger or older,
	// we can just greedily remove them in the Put function
	for _, o := range overlaps {
		krc.regions.Delete(o.Name())
		// let region establishers know that they can give up
		o.MarkDead()
	}
	krc.logger.Info("added new region",
		"region", reg, "overlaps", overlaps, "replaced", replaced)
	return
}
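
// An illustrative sketch of put during a region split (hypothetical values):
// when a daughter region with a newer ID arrives, the stale parent comes back
// in overlaps and is evicted, and the caller can drop it from the client cache.
//
//	overlaps, replaced := krc.put(daughter)
//	if replaced {
//		for _, o := range overlaps {
//			rcc.del(o) // also remove the stale region from the client cache
//		}
//	}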

func (krc *keyRegionCache) del(reg hrpc.RegionInfo) bool {
	krc.m.Lock()
	success := krc.regions.Delete(reg.Name())
	krc.m.Unlock()

	// let region establishers know that they can give up
	reg.MarkDead()

	if success {
		cachedRegionTotal.Dec()
	}
	krc.logger.Debug("removed region", "region", reg)
	return success
}