From 3cf178f1aaf6971ecf03989d667a6bd357ac7227 Mon Sep 17 00:00:00 2001
From: "vitess-bot[bot]" <108069721+vitess-bot[bot]@users.noreply.github.com>
Date: Mon, 15 Apr 2024 09:28:58 -0700
Subject: [PATCH 1/2] [VTAdmin API] Fix schema cache flag, add documentation
 (#15704)

Signed-off-by: notfelineit
---
 doc/vtadmin/clusters.yaml         | 21 ++++++++++++++++++
 go/vt/vtadmin/cache/cache.go      |  8 +++++++
 go/vt/vtadmin/cache/cache_test.go | 37 +++++++++++++++++++++++++++++++
 go/vt/vtadmin/cluster/cluster.go  |  3 +++
 4 files changed, 69 insertions(+)

diff --git a/doc/vtadmin/clusters.yaml b/doc/vtadmin/clusters.yaml
index 55779df60b2..0a7728e4352 100644
--- a/doc/vtadmin/clusters.yaml
+++ b/doc/vtadmin/clusters.yaml
@@ -54,3 +54,24 @@ defaults:
   # - schema-read-pool => for GetSchema, GetSchemas, and FindSchema api methods
   # - topo-read-pool => for generic topo methods (e.g. GetKeyspace, FindAllShardsInKeyspace)
   # - workflow-read-pool => for GetWorkflow/GetWorkflows api methods.
+
+  # How long to keep values in the schema cache by default (the duration passed to Add takes precedence).
+  # A value of "0m" means values will never be cached, a positive duration "1m" means items will be cached
+  # for that duration, and passing nothing will default to "NoExpiration".
+  schema-cache-default-expiration: 1m
+  # How many outstanding backfill requests to permit in the schema cache.
+  # If the queue is full, calls to backfill schemas will return false, and those requests will be discarded.
+  # A value of "0" means that the underlying channel will have a size of 0,
+  # and every send to the backfill queue will block until the queue is "empty" again.
+  schema-cache-backfill-queue-size: 0
+  # How often expired values are removed from the schema cache.
+  schema-cache-cleanup-interval: 5m
+  # How long a backfill request is considered valid.
+  # If the backfill goroutine encounters a request older than this, it is discarded.
+  schema-cache-backfill-request-ttl: 100ms
+  # How much time must pass before the backfill goroutine will re-backfill the same key.
+  # Used to prevent multiple callers from queueing up too many requests for the same key,
+  # when one backfill would satisfy all of them.
+  schema-cache-backfill-request-duplicate-interval: 1m
+  # How long to wait when attempting to enqueue a backfill request before giving up.
+  schema-cache-backfill-enqueue-wait-time: 50ms
diff --git a/go/vt/vtadmin/cache/cache.go b/go/vt/vtadmin/cache/cache.go
index 1768ce1f924..bc53efb80db 100644
--- a/go/vt/vtadmin/cache/cache.go
+++ b/go/vt/vtadmin/cache/cache.go
@@ -54,6 +54,9 @@ const (
 	// backfill requests to still process, if a config is passed with a
 	// non-positive BackfillRequestTTL.
 	DefaultBackfillRequestTTL = time.Millisecond * 100
+	// DefaultBackfillQueueSize is the default value used for the size of the
+	// backfill queue, if a config is passed with a negative BackfillQueueSize.
+	DefaultBackfillQueueSize = 0
 )
 
 // Config is the configuration for a cache.
@@ -125,6 +128,11 @@ func New[Key Keyer, Value any](fillFunc func(ctx context.Context, req Key) (Valu
 		cfg.BackfillRequestTTL = DefaultBackfillRequestTTL
 	}
 
+	if cfg.BackfillQueueSize < 0 {
+		log.Warningf("BackfillQueueSize (%v) must be non-negative, defaulting to %v", cfg.BackfillQueueSize, DefaultBackfillQueueSize)
+		cfg.BackfillQueueSize = DefaultBackfillQueueSize
+	}
+
 	c := &Cache[Key, Value]{
 		cache:    cache.New(cfg.DefaultExpiration, cfg.CleanupInterval),
 		lastFill: map[string]time.Time{},
diff --git a/go/vt/vtadmin/cache/cache_test.go b/go/vt/vtadmin/cache/cache_test.go
index 93a6898db5d..4fe28b38f2f 100644
--- a/go/vt/vtadmin/cache/cache_test.go
+++ b/go/vt/vtadmin/cache/cache_test.go
@@ -93,6 +93,43 @@ func TestBackfillDuplicates(t *testing.T) {
 	}
 }
 
+func TestBackfillQueueSize(t *testing.T) {
+	t.Parallel()
+
+	tests := []struct {
+		name                        string
+		configuredBackfillQueueSize int
+		expectedBackfillQueueSize   int
+	}{
+		{
+			name:                        "configured negative backfill queue size",
+			configuredBackfillQueueSize: -1,
+			expectedBackfillQueueSize:   0,
+		}, {
+			name:                        "configured 0 backfill queue size",
+			configuredBackfillQueueSize: 0,
+			expectedBackfillQueueSize:   0,
+		}, {
+			name:                        "configured positive backfill queue size",
+			configuredBackfillQueueSize: 1,
+			expectedBackfillQueueSize:   1,
+		},
+	}
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			t.Parallel()
+
+			c := cache.New(func(ctx context.Context, req intkey) (any, error) {
+				return nil, nil
+			}, cache.Config{
+				BackfillQueueSize: tt.configuredBackfillQueueSize,
+			})
+			var config cache.Config = c.Debug()["config"].(cache.Config)
+			assert.Equal(t, tt.expectedBackfillQueueSize, config.BackfillQueueSize)
+		})
+	}
+}
+
 func TestBackfillTTL(t *testing.T) {
 	t.Parallel()
 
diff --git a/go/vt/vtadmin/cluster/cluster.go b/go/vt/vtadmin/cluster/cluster.go
index 917b64ff4d9..2acc336b406 100644
--- a/go/vt/vtadmin/cluster/cluster.go
+++ b/go/vt/vtadmin/cluster/cluster.go
@@ -1424,7 +1424,10 @@ func (c *Cluster) GetSchemas(ctx context.Context, opts GetSchemaOptions) ([]*vta
 		span.Annotate("cache_hit", ok)
 
 		if ok {
+			log.Infof("GetSchemas(cluster = %s) fetching schemas from schema cache", c.ID)
 			return schemas, err
+		} else {
+			log.Infof("GetSchemas(cluster = %s) bypassing schema cache", c.ID)
 		}
 	}
 

From ad51083a0e56cc7af50c81c7d834652dd3e17e81 Mon Sep 17 00:00:00 2001
From: notfelineit
Date: Mon, 15 Apr 2024 13:35:14 -0700
Subject: [PATCH 2/2] Fix lint error

---
 go/vt/vtadmin/cache/cache_test.go | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/go/vt/vtadmin/cache/cache_test.go b/go/vt/vtadmin/cache/cache_test.go
index 4fe28b38f2f..d92bb32949c 100644
--- a/go/vt/vtadmin/cache/cache_test.go
+++ b/go/vt/vtadmin/cache/cache_test.go
@@ -116,6 +116,8 @@ func TestBackfillQueueSize(t *testing.T) {
 		},
 	}
 	for _, tt := range tests {
+		tt := tt
+
 		t.Run(tt.name, func(t *testing.T) {
 			t.Parallel()
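
Illustration (not part of either patch): a minimal, self-contained sketch of the BackfillQueueSize clamping that PATCH 1/2 adds to cache.New, mirroring TestBackfillQueueSize above. It assumes the cache.Keyer constraint is satisfied by a single Key() string method and that the package is importable as vitess.io/vitess/go/vt/vtadmin/cache; the strkey type and the main function are hypothetical and exist only for this example.

package main

import (
	"context"
	"fmt"

	"vitess.io/vitess/go/vt/vtadmin/cache"
)

// strkey is a hypothetical key type, assuming the cache.Keyer constraint only
// requires a Key() string method.
type strkey string

func (k strkey) Key() string { return string(k) }

func main() {
	// Construct a cache with a negative BackfillQueueSize. Per the patch, New
	// clamps it to DefaultBackfillQueueSize (0) and logs a warning instead of
	// creating a backfill channel with a negative size.
	c := cache.New(func(ctx context.Context, req strkey) (string, error) {
		// Fill function stub; in VTAdmin this is where schemas would be fetched.
		return "value for " + req.Key(), nil
	}, cache.Config{
		BackfillQueueSize: -1,
	})

	// Debug() exposes the effective config, as used in TestBackfillQueueSize.
	cfg := c.Debug()["config"].(cache.Config)
	fmt.Println("effective BackfillQueueSize:", cfg.BackfillQueueSize) // expected: 0
}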