[release-17.0] [VTAdmin API] Fix schema cache flag, add documentation (#15704) #15718

Merged
2 commits merged on Apr 15, 2024
21 changes: 21 additions & 0 deletions doc/vtadmin/clusters.yaml
@@ -54,3 +54,24 @@ defaults:
# - schema-read-pool => for GetSchema, GetSchemas, and FindSchema api methods
# - topo-read-pool => for generic topo methods (e.g. GetKeyspace, FindAllShardsInKeyspace)
# - workflow-read-pool => for GetWorkflow/GetWorkflows api methods.

# How long to keep values in schema cache by default (duration passed to Add takes precedence).
# A value of "0m" means values will never be cached, a positive duration "1m" means items will be cached
# for that duration, and passing nothing will default to "NoExpiration".
schema-cache-default-expiration: 1m
# How many outstanding backfill requests to permit in the schema cache.
# If the queue is full, calls to backfill schemas will return false, and those requests will be discarded.
# A value of "0" means that the underlying channel will have a size of 0,
# and every send to the backfill queue will block until the queue is "empty" again.
schema-cache-backfill-queue-size: 0
# How often expired values are removed from schema cache.
schema-cache-cleanup-interval: 5m
# How long a backfill request is considered valid.
# If the backfill goroutine encounters a request older than this, it is discarded.
schema-cache-backfill-request-ttl: 100ms
# How much time must pass before the backfill goroutine will re-backfill the same key.
# Used to prevent multiple callers from queueing up too many requests for the same key,
# when one backfill would satisfy all of them.
schema-cache-backfill-request-duplicate-interval: 1m
# How long to wait when attempting to enqueue a backfill request before giving up.
schema-cache-backfill-enqueue-wait-time: 50ms
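
The backfill-queue-size semantics above follow ordinary Go channel buffering: a size of 0 gives an unbuffered channel, so every enqueue blocks until the backfill goroutine receives, and the enqueue-wait-time bounds how long a caller waits before giving up. A minimal sketch of that behaviour under those assumptions; the type and function names below are illustrative, not vtadmin's actual internals:

package main

import (
	"fmt"
	"time"
)

// backfillRequest is a stand-in for whatever the cache enqueues internally.
type backfillRequest struct {
	key       string
	requested time.Time
}

// tryEnqueue mimics the documented behaviour: wait up to enqueueWaitTime for
// room in the queue and report false (discarding the request) if it is full.
func tryEnqueue(queue chan backfillRequest, req backfillRequest, enqueueWaitTime time.Duration) bool {
	select {
	case queue <- req:
		return true
	case <-time.After(enqueueWaitTime):
		return false
	}
}

func main() {
	// schema-cache-backfill-queue-size: 0 => an unbuffered channel, so a send
	// only succeeds while the backfill goroutine is actively receiving.
	queue := make(chan backfillRequest, 0)

	ok := tryEnqueue(queue, backfillRequest{key: "commerce", requested: time.Now()}, 50*time.Millisecond)
	fmt.Println("enqueued:", ok) // false here, because nothing is receiving
}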
8 changes: 8 additions & 0 deletions go/vt/vtadmin/cache/cache.go
@@ -54,6 +54,9 @@ const (
	// backfill requests to still process, if a config is passed with a
	// non-positive BackfillRequestTTL.
	DefaultBackfillRequestTTL = time.Millisecond * 100
	// DefaultBackfillQueueSize is the default value used for the size of the
	// backfill queue, if a config is passed with a non-positive BackfillQueueSize.
	DefaultBackfillQueueSize = 0
)

// Config is the configuration for a cache.
@@ -125,6 +128,11 @@ func New[Key Keyer, Value any](fillFunc func(ctx context.Context, req Key) (Valu
		cfg.BackfillRequestTTL = DefaultBackfillRequestTTL
	}

	if cfg.BackfillQueueSize < 0 {
		log.Warningf("BackfillQueueSize (%v) must be non-negative, defaulting to %v", cfg.BackfillQueueSize, DefaultBackfillQueueSize)
		cfg.BackfillQueueSize = DefaultBackfillQueueSize
	}

	c := &Cache[Key, Value]{
		cache:    cache.New(cfg.DefaultExpiration, cfg.CleanupInterval),
		lastFill: map[string]time.Time{},
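
For reference, here is a sketch of how the clusters.yaml keys documented earlier would populate this Config when a cluster builds its schema cache. DefaultExpiration, CleanupInterval, BackfillRequestTTL, and BackfillQueueSize are visible in this diff; the duplicate-interval and enqueue-wait-time field names are assumptions inferred from the flag names:

package cacheexample

import (
	"time"

	"vitess.io/vitess/go/vt/vtadmin/cache"
)

// schemaCacheConfig mirrors the documented clusters.yaml defaults.
func schemaCacheConfig() cache.Config {
	return cache.Config{
		DefaultExpiration:                time.Minute,            // schema-cache-default-expiration: 1m
		CleanupInterval:                  5 * time.Minute,        // schema-cache-cleanup-interval: 5m
		BackfillRequestTTL:               100 * time.Millisecond, // schema-cache-backfill-request-ttl: 100ms
		BackfillQueueSize:                0,                      // schema-cache-backfill-queue-size: 0
		BackfillRequestDuplicateInterval: time.Minute,            // assumed field name; schema-cache-backfill-request-duplicate-interval: 1m
		BackfillEnqueueWaitTime:          50 * time.Millisecond,  // assumed field name; schema-cache-backfill-enqueue-wait-time: 50ms
	}
}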
39 changes: 39 additions & 0 deletions go/vt/vtadmin/cache/cache_test.go
@@ -93,6 +93,45 @@ func TestBackfillDuplicates(t *testing.T) {
	}
}

func TestBackfillQueueSize(t *testing.T) {
	t.Parallel()

	tests := []struct {
		name                        string
		configuredBackfillQueueSize int
		expectedBackfillQueueSize   int
	}{
		{
			name:                        "configured negative backfill queue size",
			configuredBackfillQueueSize: -1,
			expectedBackfillQueueSize:   0,
		}, {
			name:                        "configured 0 backfill queue size",
			configuredBackfillQueueSize: 0,
			expectedBackfillQueueSize:   0,
		}, {
			name:                        "configured positive backfill queue size",
			configuredBackfillQueueSize: 1,
			expectedBackfillQueueSize:   1,
		},
	}
	for _, tt := range tests {
		tt := tt

		t.Run(tt.name, func(t *testing.T) {
			t.Parallel()

			c := cache.New(func(ctx context.Context, req intkey) (any, error) {
				return nil, nil
			}, cache.Config{
				BackfillQueueSize: tt.configuredBackfillQueueSize,
			})
			var config cache.Config = c.Debug()["config"].(cache.Config)
			assert.Equal(t, tt.expectedBackfillQueueSize, config.BackfillQueueSize)
		})
	}
}

func TestBackfillTTL(t *testing.T) {
	t.Parallel()

3 changes: 3 additions & 0 deletions go/vt/vtadmin/cluster/cluster.go
@@ -1424,7 +1424,10 @@ func (c *Cluster) GetSchemas(ctx context.Context, opts GetSchemaOptions) ([]*vta

span.Annotate("cache_hit", ok)
if ok {
log.Infof("GetSchemas(cluster = %s) fetching schemas from schema cache", c.ID)
return schemas, err
} else {
log.Infof("GetSchemas(cluster = %s) bypassing schema cache", c.ID)
}
}
