diff --git a/go.mod b/go.mod index 91e88fd43..d7d100d19 100644 --- a/go.mod +++ b/go.mod @@ -1,13 +1,13 @@ module github.com/envoyproxy/ratelimit -go 1.21.11 +go 1.22.8 require ( github.com/DataDog/datadog-go/v5 v5.5.0 github.com/alicebob/miniredis/v2 v2.33.0 github.com/bradfitz/gomemcache v0.0.0-20230905024940-24af94b03874 github.com/coocood/freecache v1.2.4 - github.com/envoyproxy/go-control-plane v0.12.1-0.20240123181358-841e293a220b + github.com/envoyproxy/go-control-plane v0.13.2-0.20241219025321-f011ad88ec17 github.com/go-kit/log v0.2.1 github.com/golang/mock v1.6.0 github.com/google/uuid v1.6.0 @@ -23,7 +23,7 @@ require ( github.com/prometheus/client_model v0.6.0 github.com/prometheus/statsd_exporter v0.26.1 github.com/sirupsen/logrus v1.9.3 - github.com/stretchr/testify v1.9.0 + github.com/stretchr/testify v1.10.0 go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.53.0 go.opentelemetry.io/otel v1.28.0 go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.28.0 @@ -47,14 +47,14 @@ require ( github.com/cespare/xxhash/v2 v2.3.0 // indirect github.com/cncf/xds/go v0.0.0-20240423153145-555b57ec207b // indirect github.com/davecgh/go-spew v1.1.1 // indirect - github.com/envoyproxy/protoc-gen-validate v1.0.4 // indirect + github.com/envoyproxy/protoc-gen-validate v1.1.0 // indirect github.com/fsnotify/fsnotify v1.7.0 // indirect github.com/go-logfmt/logfmt v0.6.0 // indirect github.com/go-logr/logr v1.4.2 // indirect github.com/go-logr/stdr v1.2.2 // indirect github.com/golang/protobuf v1.5.4 // indirect github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0 // indirect - github.com/planetscale/vtprotobuf v0.5.1-0.20231212170721-e7d721933795 // indirect + github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect github.com/prometheus/common v0.48.0 // indirect github.com/prometheus/procfs v0.12.0 // indirect diff --git a/go.sum b/go.sum index 9777a08ad..7d4ecf6e9 100644 --- a/go.sum +++ b/go.sum @@ -37,9 +37,15 @@ github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.m github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= github.com/envoyproxy/go-control-plane v0.12.1-0.20240123181358-841e293a220b h1:M0BhcNaW04UV1haQO8IFSDB64dAeiBSsTMZks/sYDcQ= github.com/envoyproxy/go-control-plane v0.12.1-0.20240123181358-841e293a220b/go.mod h1:lFu6itz1hckLR2A3aJ+ZKf3lu8HpjTsJSsqvVF6GL6g= +github.com/envoyproxy/go-control-plane v0.13.1 h1:vPfJZCkob6yTMEgS+0TwfTUfbHjfy/6vOJ8hUWX/uXE= +github.com/envoyproxy/go-control-plane v0.13.1/go.mod h1:X45hY0mufo6Fd0KW3rqsGvQMw58jvjymeCzBU3mWyHw= +github.com/envoyproxy/go-control-plane v0.13.2-0.20241219025321-f011ad88ec17 h1:vJbk97KFgBX0QdyydT18FDmwqCeRZzUYUdm/o338h8I= +github.com/envoyproxy/go-control-plane v0.13.2-0.20241219025321-f011ad88ec17/go.mod h1:lHUJZHyVI6Q4Vr6qjD60ZHBybFRLzqoKVZGIJi0/i8s= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= github.com/envoyproxy/protoc-gen-validate v1.0.4 h1:gVPz/FMfvh57HdSJQyvBtF00j8JU4zdyUgIUNhlgg0A= github.com/envoyproxy/protoc-gen-validate v1.0.4/go.mod h1:qys6tmnRsYrQqIhm2bvKZH4Blx/1gTIZ2UKVY1M+Yew= +github.com/envoyproxy/protoc-gen-validate v1.1.0 h1:tntQDh69XqOCOZsDz0lVJQez/2L6Uu2PdjCQwWCJ3bM= +github.com/envoyproxy/protoc-gen-validate v1.1.0/go.mod h1:sXRDRVmzEbkM7CVcM06s9shE/m23dg3wzjl0UWqJ2q4= github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= 
github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA= github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM= @@ -107,6 +113,8 @@ github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINE github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/planetscale/vtprotobuf v0.5.1-0.20231212170721-e7d721933795 h1:pH+U6pJP0BhxqQ4njBUjOg0++WMMvv3eByWzB+oATBY= github.com/planetscale/vtprotobuf v0.5.1-0.20231212170721-e7d721933795/go.mod h1:t/avpk3KcrXxUnYOhZhMXJlSEyie6gQbtLq5NM3loB8= +github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10 h1:GFCKgmp0tecUJ0sJuv4pzYCqS9+RGSn52M3FUwPs+uo= +github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10/go.mod h1:t/avpk3KcrXxUnYOhZhMXJlSEyie6gQbtLq5NM3loB8= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/prometheus/client_golang v1.19.1 h1:wZWJDwK+NameRJuPGDhlnFgx8e8HN3XHQeLaYJFJBOE= @@ -142,6 +150,8 @@ github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg= github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= +github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA= +github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= diff --git a/src/limiter/base_limiter.go b/src/limiter/base_limiter.go index 18f0b83d3..6aec1ac8a 100644 --- a/src/limiter/base_limiter.go +++ b/src/limiter/base_limiter.go @@ -26,14 +26,14 @@ type BaseRateLimiter struct { type LimitInfo struct { limit *config.RateLimit - limitBeforeIncrease uint32 - limitAfterIncrease uint32 - nearLimitThreshold uint32 - overLimitThreshold uint32 + limitBeforeIncrease uint64 + limitAfterIncrease uint64 + nearLimitThreshold uint64 + overLimitThreshold uint64 } -func NewRateLimitInfo(limit *config.RateLimit, limitBeforeIncrease uint32, limitAfterIncrease uint32, - nearLimitThreshold uint32, overLimitThreshold uint32, +func NewRateLimitInfo(limit *config.RateLimit, limitBeforeIncrease uint64, limitAfterIncrease uint64, + nearLimitThreshold uint64, overLimitThreshold uint64, ) *LimitInfo { return &LimitInfo{ limit: limit, limitBeforeIncrease: limitBeforeIncrease, limitAfterIncrease: limitAfterIncrease, @@ -44,7 +44,7 @@ func NewRateLimitInfo(limit *config.RateLimit, limitBeforeIncrease uint32, limit // Generates cache keys for given rate limit request. Each cache key is represented by a concatenation of // domain, descriptor and current timestamp. 
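+// For example (mirroring the unit tests in test/limiter/base_limiter_test.go), a request in domain "domain" with +// descriptor ("key", "value") evaluated at unix time 1234 against a per-second limit yields the cache key +// "domain_key_value_1234", or "prefix:domain_key_value_1234" when a cache key prefix is configured.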
func (this *BaseRateLimiter) GenerateCacheKeys(request *pb.RateLimitRequest, - limits []*config.RateLimit, hitsAddend uint32, + limits []*config.RateLimit, hitsAddends []uint64, ) []CacheKey { assert.Assert(len(request.Descriptors) == len(limits)) cacheKeys := make([]CacheKey, len(request.Descriptors)) @@ -55,7 +55,7 @@ func (this *BaseRateLimiter) GenerateCacheKeys(request *pb.RateLimitRequest, cacheKeys[i] = this.cacheKeyGenerator.GenerateCacheKey(request.Domain, request.Descriptors[i], limits[i], now) // Increase statistics for limits hit by their respective requests. if limits[i] != nil { - limits[i].Stats.TotalHits.Add(uint64(hitsAddend)) + limits[i].Stats.TotalHits.Add(hitsAddends[i]) } } return cacheKeys @@ -74,14 +74,14 @@ func (this *BaseRateLimiter) IsOverLimitWithLocalCache(key string) bool { } func (this *BaseRateLimiter) IsOverLimitThresholdReached(limitInfo *LimitInfo) bool { - limitInfo.overLimitThreshold = limitInfo.limit.Limit.RequestsPerUnit + limitInfo.overLimitThreshold = uint64(limitInfo.limit.Limit.RequestsPerUnit) return limitInfo.limitAfterIncrease > limitInfo.overLimitThreshold } // Generates response descriptor status based on cache key, over the limit with local cache, over the limit and // near the limit thresholds. Thresholds are checked in order and are mutually exclusive. func (this *BaseRateLimiter) GetResponseDescriptorStatus(key string, limitInfo *LimitInfo, - isOverLimitWithLocalCache bool, hitsAddend uint32, + isOverLimitWithLocalCache bool, hitsAddend uint64, ) *pb.RateLimitResponse_DescriptorStatus { if key == "" { return this.generateResponseDescriptorStatus(pb.RateLimitResponse_OK, @@ -91,15 +91,15 @@ func (this *BaseRateLimiter) GetResponseDescriptorStatus(key string, limitInfo * isOverLimit := false if isOverLimitWithLocalCache { isOverLimit = true - limitInfo.limit.Stats.OverLimit.Add(uint64(hitsAddend)) - limitInfo.limit.Stats.OverLimitWithLocalCache.Add(uint64(hitsAddend)) + limitInfo.limit.Stats.OverLimit.Add(hitsAddend) + limitInfo.limit.Stats.OverLimitWithLocalCache.Add(hitsAddend) responseDescriptorStatus = this.generateResponseDescriptorStatus(pb.RateLimitResponse_OVER_LIMIT, limitInfo.limit.Limit, 0) } else { - limitInfo.overLimitThreshold = limitInfo.limit.Limit.RequestsPerUnit + limitInfo.overLimitThreshold = uint64(limitInfo.limit.Limit.RequestsPerUnit) // The nearLimitThreshold is the number of requests that can be made before hitting the nearLimitRatio. // We need to know it in both the OK and OVER_LIMIT scenarios. - limitInfo.nearLimitThreshold = uint32(math.Floor(float64(float32(limitInfo.overLimitThreshold) * this.nearLimitRatio))) + limitInfo.nearLimitThreshold = uint64(math.Floor(float64(float32(limitInfo.overLimitThreshold) * this.nearLimitRatio))) logger.Debugf("cache key: %s current: %d", key, limitInfo.limitAfterIncrease) if limitInfo.limitAfterIncrease > limitInfo.overLimitThreshold { isOverLimit = true @@ -123,7 +123,7 @@ func (this *BaseRateLimiter) GetResponseDescriptorStatus(key string, limitInfo * } } else { responseDescriptorStatus = this.generateResponseDescriptorStatus(pb.RateLimitResponse_OK, - limitInfo.limit.Limit, limitInfo.overLimitThreshold-limitInfo.limitAfterIncrease) + limitInfo.limit.Limit, uint32(limitInfo.overLimitThreshold-limitInfo.limitAfterIncrease)) // The limit is OK but we additionally want to know if we are near the limit. 
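+ // Illustrative arithmetic, assuming the default near-limit ratio of 0.8: with a limit of 10 requests per unit, + // nearLimitThreshold is 8, so a request taking the counter from 7 to 9 is still OK but records 9 - 8 = 1 near-limit + // hit in checkNearLimitThreshold below.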
this.checkNearLimitThreshold(limitInfo, hitsAddend) @@ -156,38 +156,38 @@ func NewBaseRateLimit(timeSource utils.TimeSource, jitterRand *rand.Rand, expira } } -func (this *BaseRateLimiter) checkOverLimitThreshold(limitInfo *LimitInfo, hitsAddend uint32) { +func (this *BaseRateLimiter) checkOverLimitThreshold(limitInfo *LimitInfo, hitsAddend uint64) { // Increase over limit statistics. Because we support += behavior for increasing the limit, we need to // assess if the entire hitsAddend were over the limit. That is, if the limit's value before adding the // N hits was over the limit, then all the N hits were over limit. // Otherwise, only the difference between the current limit value and the over limit threshold // were over limit hits. if limitInfo.limitBeforeIncrease >= limitInfo.overLimitThreshold { - limitInfo.limit.Stats.OverLimit.Add(uint64(hitsAddend)) + limitInfo.limit.Stats.OverLimit.Add(hitsAddend) } else { - limitInfo.limit.Stats.OverLimit.Add(uint64(limitInfo.limitAfterIncrease - limitInfo.overLimitThreshold)) + limitInfo.limit.Stats.OverLimit.Add(limitInfo.limitAfterIncrease - limitInfo.overLimitThreshold) // If the limit before increase was below the over limit value, then some of the hits were // in the near limit range. - limitInfo.limit.Stats.NearLimit.Add(uint64(limitInfo.overLimitThreshold - utils.Max(limitInfo.nearLimitThreshold, limitInfo.limitBeforeIncrease))) + limitInfo.limit.Stats.NearLimit.Add(limitInfo.overLimitThreshold - max(limitInfo.nearLimitThreshold, limitInfo.limitBeforeIncrease)) } } -func (this *BaseRateLimiter) checkNearLimitThreshold(limitInfo *LimitInfo, hitsAddend uint32) { +func (this *BaseRateLimiter) checkNearLimitThreshold(limitInfo *LimitInfo, hitsAddend uint64) { if limitInfo.limitAfterIncrease > limitInfo.nearLimitThreshold { // Here we also need to assess which portion of the hitsAddend were in the near limit range. // If all the hits were over the nearLimitThreshold, then all hits are near limit. Otherwise, // only the difference between the current limit value and the near limit threshold were near // limit hits. if limitInfo.limitBeforeIncrease >= limitInfo.nearLimitThreshold { - limitInfo.limit.Stats.NearLimit.Add(uint64(hitsAddend)) + limitInfo.limit.Stats.NearLimit.Add(hitsAddend) } else { - limitInfo.limit.Stats.NearLimit.Add(uint64(limitInfo.limitAfterIncrease - limitInfo.nearLimitThreshold)) + limitInfo.limit.Stats.NearLimit.Add(limitInfo.limitAfterIncrease - limitInfo.nearLimitThreshold) } } } -func (this *BaseRateLimiter) increaseShadowModeStats(isOverLimitWithLocalCache bool, limitInfo *LimitInfo, hitsAddend uint32) { +func (this *BaseRateLimiter) increaseShadowModeStats(isOverLimitWithLocalCache bool, limitInfo *LimitInfo, hitsAddend uint64) { // Increase shadow mode statistics. For the same reason as over limit stats, // if the limit value before adding the N hits was over the limit, then all N hits were over limit. if isOverLimitWithLocalCache || limitInfo.limitBeforeIncrease >= limitInfo.overLimitThreshold { diff --git a/src/memcached/cache_impl.go b/src/memcached/cache_impl.go index d4f65f75b..bce17baee 100644 --- a/src/memcached/cache_impl.go +++ b/src/memcached/cache_impl.go @@ -71,10 +71,10 @@ func (this *rateLimitMemcacheImpl) DoLimit( logger.Debugf("starting cache lookup") // request.HitsAddend could be 0 (default value) if not specified by the caller in the RateLimit request.
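+ // Each descriptor may now carry its own hits_addend (a wrapped uint64). GetHitsAddends resolves one addend per + // descriptor, falling back to max(1, request.HitsAddend) when the per-descriptor value is unset (see + // src/utils/utilities.go).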
- hitsAddend := utils.Max(1, request.HitsAddend) + hitsAddends := utils.GetHitsAddends(request) // First build a list of all cache keys that we are actually going to hit. - cacheKeys := this.baseRateLimiter.GenerateCacheKeys(request, limits, hitsAddend) + cacheKeys := this.baseRateLimiter.GenerateCacheKeys(request, limits, hitsAddends) isOverLimitWithLocalCache := make([]bool, len(request.Descriptors)) @@ -121,27 +121,27 @@ func (this *rateLimitMemcacheImpl) DoLimit( for i, cacheKey := range cacheKeys { rawMemcacheValue, ok := memcacheValues[cacheKey.Key] - var limitBeforeIncrease uint32 + var limitBeforeIncrease uint64 if ok { - decoded, err := strconv.ParseInt(string(rawMemcacheValue.Value), 10, 32) + decoded, err := strconv.ParseInt(string(rawMemcacheValue.Value), 10, 64) if err != nil { logger.Errorf("Unexpected non-numeric value in memcached: %v", rawMemcacheValue) } else { - limitBeforeIncrease = uint32(decoded) + limitBeforeIncrease = uint64(decoded) } } - limitAfterIncrease := limitBeforeIncrease + hitsAddend + limitAfterIncrease := limitBeforeIncrease + hitsAddends[i] limitInfo := limiter.NewRateLimitInfo(limits[i], limitBeforeIncrease, limitAfterIncrease, 0, 0) responseDescriptorStatuses[i] = this.baseRateLimiter.GetResponseDescriptorStatus(cacheKey.Key, - limitInfo, isOverLimitWithLocalCache[i], hitsAddend) + limitInfo, isOverLimitWithLocalCache[i], hitsAddends[i]) } this.waitGroup.Add(1) - runAsync(func() { this.increaseAsync(cacheKeys, isOverLimitWithLocalCache, limits, uint64(hitsAddend)) }) + runAsync(func() { this.increaseAsync(cacheKeys, isOverLimitWithLocalCache, limits, hitsAddends) }) if AutoFlushForIntegrationTests { this.Flush() } @@ -150,7 +150,7 @@ func (this *rateLimitMemcacheImpl) DoLimit( } func (this *rateLimitMemcacheImpl) increaseAsync(cacheKeys []limiter.CacheKey, isOverLimitWithLocalCache []bool, - limits []*config.RateLimit, hitsAddend uint64, + limits []*config.RateLimit, hitsAddends []uint64, ) { defer this.waitGroup.Done() for i, cacheKey := range cacheKeys { if isOverLimitWithLocalCache[i] { continue } - _, err := this.client.Increment(cacheKey.Key, hitsAddend) + _, err := this.client.Increment(cacheKey.Key, hitsAddends[i]) if err == memcache.ErrCacheMiss { expirationSeconds := utils.UnitToDivider(limits[i].Limit.Unit) if this.expirationJitterMaxSeconds > 0 { @@ -168,13 +168,13 @@ func (this *rateLimitMemcacheImpl) increaseAsync(cacheKeys []limiter.CacheKey, i // Need to add instead of increment. err = this.client.Add(&memcache.Item{ Key: cacheKey.Key, - Value: []byte(strconv.FormatUint(hitsAddend, 10)), + Value: []byte(strconv.FormatUint(hitsAddends[i], 10)), Expiration: int32(expirationSeconds), }) if err == memcache.ErrNotStored { // There was a race condition to do this add. We should be able to increment // now instead.
- _, err := this.client.Increment(cacheKey.Key, hitsAddend) + _, err := this.client.Increment(cacheKey.Key, hitsAddends[i]) if err != nil { logger.Errorf("Failed to increment key %s after failing to add: %s", cacheKey.Key, err) continue diff --git a/src/redis/fixed_cache_impl.go b/src/redis/fixed_cache_impl.go index 8c551e0ce..1764e873b 100644 --- a/src/redis/fixed_cache_impl.go +++ b/src/redis/fixed_cache_impl.go @@ -32,15 +32,44 @@ type fixedRateLimitCacheImpl struct { baseRateLimiter *limiter.BaseRateLimiter } -func pipelineAppend(client Client, pipeline *Pipeline, key string, hitsAddend uint32, result *uint32, expirationSeconds int64) { +func pipelineAppend(client Client, pipeline *Pipeline, key string, hitsAddend uint64, result *uint64, expirationSeconds int64) { *pipeline = client.PipeAppend(*pipeline, result, "INCRBY", key, hitsAddend) *pipeline = client.PipeAppend(*pipeline, nil, "EXPIRE", key, expirationSeconds) } -func pipelineAppendtoGet(client Client, pipeline *Pipeline, key string, result *uint32) { +func pipelineAppendtoGet(client Client, pipeline *Pipeline, key string, result *uint64) { *pipeline = client.PipeAppend(*pipeline, result, "GET", key) } +func (this *fixedRateLimitCacheImpl) getHitsAddend(hitsAddend uint64, isCacheKeyOverlimit, isCacheKeyNearlimit, + isNearLimit bool) uint64 { + // If stopCacheKeyIncrementWhenOverlimit is false, then we always increment the cache key. + if !this.stopCacheKeyIncrementWhenOverlimit { + return hitsAddend + } + + // If stopCacheKeyIncrementWhenOverlimit is true, and one of the keys is over limit, then + // we do not increment the cache key. + if isCacheKeyOverlimit { + return 0 + } + + // If stopCacheKeyIncrementWhenOverlimit is true, and none of the keys are over limit, then + // we check if any of the keys are near limit. If none of the keys are near limit, + // then we increment the cache key. + if !isCacheKeyNearlimit { + return hitsAddend + } + + // If stopCacheKeyIncrementWhenOverlimit is true, and some of the keys are near limit, then + // we only increment the cache key if the key is near limit. + if isNearLimit { + return hitsAddend + } + + return 0 +} + func (this *fixedRateLimitCacheImpl) DoLimit( ctx context.Context, request *pb.RateLimitRequest, @@ -48,103 +77,86 @@ func (this *fixedRateLimitCacheImpl) DoLimit( ) []*pb.RateLimitResponse_DescriptorStatus { logger.Debugf("starting cache lookup") - // request.HitsAddend could be 0 (default value) if not specified by the caller in the RateLimit request. - hitsAddend := utils.Max(1, request.HitsAddend) + hitsAddends := utils.GetHitsAddends(request) // First build a list of all cache keys that we are actually going to hit. - cacheKeys := this.baseRateLimiter.GenerateCacheKeys(request, limits, hitsAddend) + cacheKeys := this.baseRateLimiter.GenerateCacheKeys(request, limits, hitsAddends) isOverLimitWithLocalCache := make([]bool, len(request.Descriptors)) - results := make([]uint32, len(request.Descriptors)) - currentCount := make([]uint32, len(request.Descriptors)) + results := make([]uint64, len(request.Descriptors)) + currentCount := make([]uint64, len(request.Descriptors)) var pipeline, perSecondPipeline, pipelineToGet, perSecondPipelineToGet Pipeline - hitsAddendForRedis := hitsAddend overlimitIndexes := make([]bool, len(request.Descriptors)) nearlimitIndexes := make([]bool, len(request.Descriptors)) isCacheKeyOverlimit := false + isCacheKeyNearlimit := false
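+ + // How getHitsAddend resolves the increments below (illustrative, assuming stopCacheKeyIncrementWhenOverlimit is + // enabled and a hits addend of 5 for every key): if any key is over its limit in the local cache, the over-limit keys + // are skipped entirely and every remaining key gets INCRBY 0; if some keys are near their limit, those keys get + // INCRBY 5 and the rest get INCRBY 0; if no key is near or over its limit, every key gets INCRBY 5. With the flag + // disabled, every key that is not skipped is incremented by its own addend. + + // Check if any of the keys are already over the limit in the local cache.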
+ for i, cacheKey := range cacheKeys { + if cacheKey.Key == "" { + continue + } + + // Check if key is over the limit in local cache. + if this.baseRateLimiter.IsOverLimitWithLocalCache(cacheKey.Key) { + if limits[i].ShadowMode { + logger.Debugf("Cache key %s would be rate limited but shadow mode is enabled on this rule", cacheKey.Key) + } else { + logger.Debugf("cache key is over the limit: %s", cacheKey.Key) + } + isCacheKeyOverlimit = true + isOverLimitWithLocalCache[i] = true + overlimitIndexes[i] = true + } + } - if this.stopCacheKeyIncrementWhenOverlimit { - // Check if any of the keys are reaching to the over limit in redis cache. + // If none of the keys are over limit in local cache and the stopCacheKeyIncrementWhenOverlimit is true, + // then we check if any of the keys are near limit in redis cache. + if this.stopCacheKeyIncrementWhenOverlimit && !isCacheKeyOverlimit { for i, cacheKey := range cacheKeys { if cacheKey.Key == "" { continue } - // Check if key is over the limit in local cache. - if this.baseRateLimiter.IsOverLimitWithLocalCache(cacheKey.Key) { - if limits[i].ShadowMode { - logger.Debugf("Cache key %s would be rate limited but shadow mode is enabled on this rule", cacheKey.Key) - } else { - logger.Debugf("cache key is over the limit: %s", cacheKey.Key) + if this.perSecondClient != nil && cacheKey.PerSecond { + if perSecondPipelineToGet == nil { + perSecondPipelineToGet = Pipeline{} } - isOverLimitWithLocalCache[i] = true - hitsAddendForRedis = 0 - overlimitIndexes[i] = true - isCacheKeyOverlimit = true - continue + pipelineAppendtoGet(this.perSecondClient, &perSecondPipelineToGet, cacheKey.Key, ¤tCount[i]) } else { - if this.perSecondClient != nil && cacheKey.PerSecond { - if perSecondPipelineToGet == nil { - perSecondPipelineToGet = Pipeline{} - } - pipelineAppendtoGet(this.perSecondClient, &perSecondPipelineToGet, cacheKey.Key, ¤tCount[i]) - } else { - if pipelineToGet == nil { - pipelineToGet = Pipeline{} - } - pipelineAppendtoGet(this.client, &pipelineToGet, cacheKey.Key, ¤tCount[i]) + if pipelineToGet == nil { + pipelineToGet = Pipeline{} } + pipelineAppendtoGet(this.client, &pipelineToGet, cacheKey.Key, ¤tCount[i]) } } - // Only if none of the cache keys exceed the limit, call Redis to check whether the cache keys are becoming overlimited. - if len(cacheKeys) > 1 && !isCacheKeyOverlimit { - if pipelineToGet != nil { - checkError(this.client.PipeDo(pipelineToGet)) - } - if perSecondPipelineToGet != nil { - checkError(this.perSecondClient.PipeDo(perSecondPipelineToGet)) - } - - for i, cacheKey := range cacheKeys { - if cacheKey.Key == "" { - continue - } - // Now fetch the pipeline. - limitBeforeIncrease := currentCount[i] - limitAfterIncrease := limitBeforeIncrease + hitsAddend - - limitInfo := limiter.NewRateLimitInfo(limits[i], limitBeforeIncrease, limitAfterIncrease, 0, 0) - - if this.baseRateLimiter.IsOverLimitThresholdReached(limitInfo) { - hitsAddendForRedis = 0 - nearlimitIndexes[i] = true - } - } + if pipelineToGet != nil { + checkError(this.client.PipeDo(pipelineToGet)) } - } else { - // Check if any of the keys are reaching to the over limit in redis cache. + if perSecondPipelineToGet != nil { + checkError(this.perSecondClient.PipeDo(perSecondPipelineToGet)) + } + for i, cacheKey := range cacheKeys { if cacheKey.Key == "" { continue } + // Now fetch the pipeline. + limitBeforeIncrease := currentCount[i] + limitAfterIncrease := limitBeforeIncrease + hitsAddends[i] - // Check if key is over the limit in local cache. 
- if this.baseRateLimiter.IsOverLimitWithLocalCache(cacheKey.Key) { - if limits[i].ShadowMode { - logger.Debugf("Cache key %s would be rate limited but shadow mode is enabled on this rule", cacheKey.Key) - } else { - logger.Debugf("cache key is over the limit: %s", cacheKey.Key) - } - isOverLimitWithLocalCache[i] = true - overlimitIndexes[i] = true - continue + limitInfo := limiter.NewRateLimitInfo(limits[i], limitBeforeIncrease, limitAfterIncrease, 0, 0) + + if this.baseRateLimiter.IsOverLimitThresholdReached(limitInfo) { + nearlimitIndexes[i] = true + isCacheKeyNearlimit = true } } } - // Now, actually setup the pipeline, skipping empty cache keys. + // Now, actually set up the pipeline to increase the usage of each cache key, skipping empty cache keys. for i, cacheKey := range cacheKeys { if cacheKey.Key == "" || overlimitIndexes[i] { continue @@ -162,20 +174,14 @@ func (this *fixedRateLimitCacheImpl) DoLimit( if perSecondPipeline == nil { perSecondPipeline = Pipeline{} } - if nearlimitIndexes[i] { - pipelineAppend(this.perSecondClient, &perSecondPipeline, cacheKey.Key, hitsAddend, &results[i], expirationSeconds) - } else { - pipelineAppend(this.perSecondClient, &perSecondPipeline, cacheKey.Key, hitsAddendForRedis, &results[i], expirationSeconds) - } + pipelineAppend(this.perSecondClient, &perSecondPipeline, cacheKey.Key, this.getHitsAddend(hitsAddends[i], + isCacheKeyOverlimit, isCacheKeyNearlimit, nearlimitIndexes[i]), &results[i], expirationSeconds) } else { if pipeline == nil { pipeline = Pipeline{} } - if nearlimitIndexes[i] { - pipelineAppend(this.client, &pipeline, cacheKey.Key, hitsAddend, &results[i], expirationSeconds) - } else { - pipelineAppend(this.client, &pipeline, cacheKey.Key, hitsAddendForRedis, &results[i], expirationSeconds) - } + pipelineAppend(this.client, &pipeline, cacheKey.Key, this.getHitsAddend(hitsAddends[i], isCacheKeyOverlimit, + isCacheKeyNearlimit, nearlimitIndexes[i]), &results[i], expirationSeconds) } } @@ -201,12 +207,12 @@ func (this *fixedRateLimitCacheImpl) DoLimit( for i, cacheKey := range cacheKeys { limitAfterIncrease := results[i] - limitBeforeIncrease := limitAfterIncrease - hitsAddend + limitBeforeIncrease := limitAfterIncrease - hitsAddends[i] limitInfo := limiter.NewRateLimitInfo(limits[i], limitBeforeIncrease, limitAfterIncrease, 0, 0) responseDescriptorStatuses[i] = this.baseRateLimiter.GetResponseDescriptorStatus(cacheKey.Key, - limitInfo, isOverLimitWithLocalCache[i], hitsAddend) + limitInfo, isOverLimitWithLocalCache[i], hitsAddends[i]) } diff --git a/src/service/ratelimit.go b/src/service/ratelimit.go index ea26bf0d2..ca0b7e8a9 100644 --- a/src/service/ratelimit.go +++ b/src/service/ratelimit.go @@ -185,6 +185,9 @@ func (this *service) shouldRateLimitWorker( snappedConfig, globalShadowMode := this.GetCurrentConfig() limitsToCheck, isUnlimited := this.constructLimitsToCheck(request, ctx, snappedConfig) + assert.Assert(len(limitsToCheck) == len(isUnlimited)) + assert.Assert(len(limitsToCheck) == len(request.Descriptors)) + responseDescriptorStatuses := this.cache.DoLimit(ctx, request, limitsToCheck) assert.Assert(len(limitsToCheck) == len(responseDescriptorStatuses)) diff --git a/src/utils/utilities.go b/src/utils/utilities.go index 83f55979a..d01bed708 100644 --- a/src/utils/utilities.go +++ b/src/utils/utilities.go @@ -41,13 +41,6 @@ func CalculateReset(unit *pb.RateLimitResponse_RateLimit_Unit, timeSource TimeSo return &durationpb.Duration{Seconds: sec - now%sec} } -func Max(a uint32, b uint32) uint32 { - if a > b { - return a - } - return
b -} - // Mask credentials from a redis connection string like // foo,redis://user:pass@redisurl1,redis://user:pass@redisurl2 // resulting in @@ -71,3 +64,20 @@ func SanitizeStatName(s string) string { r := strings.NewReplacer(":", "_", "|", "_") return r.Replace(s) } + +func GetHitsAddends(request *pb.RateLimitRequest) []uint64 { + hitsAddends := make([]uint64, len(request.Descriptors)) + + for i, descriptor := range request.Descriptors { + if descriptor.HitsAddend != nil { + // If the per-descriptor hits_addend is set, use it. It is allowed to be zero; a zero value + // means check-only: the limit is evaluated without incrementing the hits. + hitsAddends[i] = descriptor.HitsAddend.Value + } else { + // If the per-descriptor hits_addend is not set, use the request's hits_addend. If the value is + // zero (default value if not specified by the caller), use 1 for backward compatibility. + hitsAddends[i] = max(1, uint64(request.HitsAddend)) + } + } + return hitsAddends +} diff --git a/test/common/common.go b/test/common/common.go index 1062a8995..afd07296b 100644 --- a/test/common/common.go +++ b/test/common/common.go @@ -14,6 +14,7 @@ import ( "github.com/stretchr/testify/assert" "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/types/known/wrapperspb" pb_struct "github.com/envoyproxy/go-control-plane/envoy/extensions/common/ratelimit/v3" pb "github.com/envoyproxy/go-control-plane/envoy/service/ratelimit/v3" @@ -64,6 +65,15 @@ func NewRateLimitRequest(domain string, descriptors [][][2]string, hitsAddend ui return request } +func NewRateLimitRequestWithPerDescriptorHitsAddend(domain string, descriptors [][][2]string, + hitsAddends []uint64) *pb.RateLimitRequest { + request := NewRateLimitRequest(domain, descriptors, 1) + for i, hitsAddend := range hitsAddends { + request.Descriptors[i].HitsAddend = &wrapperspb.UInt64Value{Value: hitsAddend} + } + return request +} + func AssertProtoEqual(assert *assert.Assertions, expected proto.Message, actual proto.Message) { assert.True(proto.Equal(expected, actual), fmt.Sprintf("These two protobuf messages are not equal:\nexpected: %v\nactual: %v", expected, actual)) diff --git a/test/limiter/base_limiter_test.go b/test/limiter/base_limiter_test.go index b3babcee9..8fe154369 100644 --- a/test/limiter/base_limiter_test.go +++ b/test/limiter/base_limiter_test.go @@ -31,7 +31,7 @@ func TestGenerateCacheKeys(t *testing.T) { request := common.NewRateLimitRequest("domain", [][][2]string{{{"key", "value"}}}, 1) limits := []*config.RateLimit{config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_SECOND, sm.NewStats("key_value"), false, false, "", nil, false)} assert.Equal(uint64(0), limits[0].Stats.TotalHits.Value()) - cacheKeys := baseRateLimit.GenerateCacheKeys(request, limits, 1) + cacheKeys := baseRateLimit.GenerateCacheKeys(request, limits, []uint64{1}) assert.Equal(1, len(cacheKeys)) assert.Equal("domain_key_value_1234", cacheKeys[0].Key) assert.Equal(uint64(1), limits[0].Stats.TotalHits.Value()) @@ -50,7 +50,7 @@ func TestGenerateCacheKeysPrefix(t *testing.T) { request := common.NewRateLimitRequest("domain", [][][2]string{{{"key", "value"}}}, 1) limits := []*config.RateLimit{config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_SECOND, sm.NewStats("key_value"), false, false, "", nil, false)} assert.Equal(uint64(0), limits[0].Stats.TotalHits.Value()) - cacheKeys := baseRateLimit.GenerateCacheKeys(request, limits, 1) + cacheKeys := baseRateLimit.GenerateCacheKeys(request, limits, []uint64{1}) assert.Equal(1, len(cacheKeys))
assert.Equal("prefix:domain_key_value_1234", cacheKeys[0].Key) assert.Equal(uint64(1), limits[0].Stats.TotalHits.Value()) diff --git a/test/memcached/cache_impl_test.go b/test/memcached/cache_impl_test.go index 15d90fd47..f5fd0ceb6 100644 --- a/test/memcached/cache_impl_test.go +++ b/test/memcached/cache_impl_test.go @@ -102,14 +102,14 @@ func TestMemcached(t *testing.T) { nil, ) client.EXPECT().Increment("domain_key3_value3_997200", uint64(1)).Return(uint64(11), nil) - client.EXPECT().Increment("domain_key3_value3_subkey3_subvalue3_950400", uint64(1)).Return(uint64(13), nil) + client.EXPECT().Increment("domain_key3_value3_subkey3_subvalue3_950400", uint64(2)).Return(uint64(13), nil) - request = common.NewRateLimitRequest( + request = common.NewRateLimitRequestWithPerDescriptorHitsAddend( "domain", [][][2]string{ {{"key3", "value3"}}, {{"key3", "value3"}, {"subkey3", "subvalue3"}}, - }, 1) + }, []uint64{1, 2}) limits = []*config.RateLimit{ config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_HOUR, sm.NewStats("key3_value3"), false, false, "", nil, false), config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_DAY, sm.NewStats("key3_value3_subkey3_subvalue3"), false, false, "", nil, false), @@ -124,10 +124,10 @@ func TestMemcached(t *testing.T) { assert.Equal(uint64(1), limits[0].Stats.OverLimit.Value()) assert.Equal(uint64(0), limits[0].Stats.NearLimit.Value()) assert.Equal(uint64(0), limits[0].Stats.WithinLimit.Value()) - assert.Equal(uint64(1), limits[0].Stats.TotalHits.Value()) - assert.Equal(uint64(1), limits[0].Stats.OverLimit.Value()) - assert.Equal(uint64(0), limits[0].Stats.NearLimit.Value()) - assert.Equal(uint64(0), limits[0].Stats.WithinLimit.Value()) + assert.Equal(uint64(2), limits[1].Stats.TotalHits.Value()) + assert.Equal(uint64(2), limits[1].Stats.OverLimit.Value()) + assert.Equal(uint64(0), limits[1].Stats.NearLimit.Value()) + assert.Equal(uint64(0), limits[1].Stats.WithinLimit.Value()) cache.Flush() } diff --git a/test/redis/fixed_cache_impl_test.go b/test/redis/fixed_cache_impl_test.go index 45fe87b21..300dc3de2 100644 --- a/test/redis/fixed_cache_impl_test.go +++ b/test/redis/fixed_cache_impl_test.go @@ -64,7 +64,7 @@ func testRedis(usePerSecondRedis bool) func(*testing.T) { clientUsed = client } - clientUsed.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "INCRBY", "domain_key_value_1234", uint32(1)).SetArg(1, uint32(5)).DoAndReturn(pipeAppend) + clientUsed.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "INCRBY", "domain_key_value_1234", uint64(1)).SetArg(1, uint64(5)).DoAndReturn(pipeAppend) clientUsed.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "EXPIRE", "domain_key_value_1234", int64(1)).DoAndReturn(pipeAppend) clientUsed.EXPECT().PipeDo(gomock.Any()).Return(nil) @@ -81,17 +81,17 @@ func testRedis(usePerSecondRedis bool) func(*testing.T) { clientUsed = client timeSource.EXPECT().UnixNow().Return(int64(1234)).MaxTimes(3) - clientUsed.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "INCRBY", "domain_key2_value2_subkey2_subvalue2_1200", uint32(1)).SetArg(1, uint32(11)).DoAndReturn(pipeAppend) + clientUsed.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "INCRBY", "domain_key2_value2_subkey2_subvalue2_1200", uint64(1)).SetArg(1, uint64(11)).DoAndReturn(pipeAppend) clientUsed.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "EXPIRE", "domain_key2_value2_subkey2_subvalue2_1200", int64(60)).DoAndReturn(pipeAppend) clientUsed.EXPECT().PipeDo(gomock.Any()).Return(nil) - request = common.NewRateLimitRequest( + request = 
common.NewRateLimitRequestWithPerDescriptorHitsAddend( "domain", [][][2]string{ {{"key2", "value2"}}, {{"key2", "value2"}, {"subkey2", "subvalue2"}}, - }, 1) + }, []uint64{0, 1}) limits = []*config.RateLimit{ nil, config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_MINUTE, sm.NewStats("key2_value2_subkey2_subvalue2"), false, false, "", nil, false), @@ -109,20 +109,20 @@ func testRedis(usePerSecondRedis bool) func(*testing.T) { clientUsed = client timeSource.EXPECT().UnixNow().Return(int64(1000000)).MaxTimes(5) - clientUsed.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "INCRBY", "domain_key3_value3_997200", uint32(1)).SetArg(1, uint32(11)).DoAndReturn(pipeAppend) + clientUsed.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "INCRBY", "domain_key3_value3_997200", uint64(0)).SetArg(1, uint64(11)).DoAndReturn(pipeAppend) clientUsed.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "EXPIRE", "domain_key3_value3_997200", int64(3600)).DoAndReturn(pipeAppend) - clientUsed.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "INCRBY", "domain_key3_value3_subkey3_subvalue3_950400", uint32(1)).SetArg(1, uint32(13)).DoAndReturn(pipeAppend) + clientUsed.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "INCRBY", "domain_key3_value3_subkey3_subvalue3_950400", uint64(1)).SetArg(1, uint64(13)).DoAndReturn(pipeAppend) clientUsed.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "EXPIRE", "domain_key3_value3_subkey3_subvalue3_950400", int64(86400)).DoAndReturn(pipeAppend) clientUsed.EXPECT().PipeDo(gomock.Any()).Return(nil) - request = common.NewRateLimitRequest( + request = common.NewRateLimitRequestWithPerDescriptorHitsAddend( "domain", [][][2]string{ {{"key3", "value3"}}, {{"key3", "value3"}, {"subkey3", "subvalue3"}}, - }, 1) + }, []uint64{0, 1}) limits = []*config.RateLimit{ config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_HOUR, sm.NewStats("key3_value3"), false, false, "", nil, false), config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_DAY, sm.NewStats("key3_value3_subkey3_subvalue3"), false, false, "", nil, false), @@ -133,14 +133,14 @@ func testRedis(usePerSecondRedis bool) func(*testing.T) { {Code: pb.RateLimitResponse_OVER_LIMIT, CurrentLimit: limits[1].Limit, LimitRemaining: 0, DurationUntilReset: utils.CalculateReset(&limits[1].Limit.Unit, timeSource)}, }, cache.DoLimit(context.Background(), request, limits)) - assert.Equal(uint64(1), limits[0].Stats.TotalHits.Value()) - assert.Equal(uint64(1), limits[0].Stats.OverLimit.Value()) - assert.Equal(uint64(0), limits[0].Stats.NearLimit.Value()) - assert.Equal(uint64(0), limits[0].Stats.WithinLimit.Value()) - assert.Equal(uint64(1), limits[0].Stats.TotalHits.Value()) - assert.Equal(uint64(1), limits[0].Stats.OverLimit.Value()) + assert.Equal(uint64(0), limits[0].Stats.TotalHits.Value()) + assert.Equal(uint64(0), limits[0].Stats.OverLimit.Value()) assert.Equal(uint64(0), limits[0].Stats.NearLimit.Value()) assert.Equal(uint64(0), limits[0].Stats.WithinLimit.Value()) + assert.Equal(uint64(1), limits[1].Stats.TotalHits.Value()) + assert.Equal(uint64(1), limits[1].Stats.OverLimit.Value()) + assert.Equal(uint64(0), limits[1].Stats.NearLimit.Value()) + assert.Equal(uint64(0), limits[1].Stats.WithinLimit.Value()) } } @@ -197,7 +197,7 @@ func TestOverLimitWithLocalCache(t *testing.T) { // Test Near Limit Stats. 
Under Near Limit Ratio timeSource.EXPECT().UnixNow().Return(int64(1000000)).MaxTimes(3) - client.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "INCRBY", "domain_key4_value4_997200", uint32(1)).SetArg(1, uint32(11)).DoAndReturn(pipeAppend) + client.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "INCRBY", "domain_key4_value4_997200", uint64(1)).SetArg(1, uint64(11)).DoAndReturn(pipeAppend) client.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "EXPIRE", "domain_key4_value4_997200", int64(3600)).DoAndReturn(pipeAppend) client.EXPECT().PipeDo(gomock.Any()).Return(nil) @@ -224,7 +224,7 @@ func TestOverLimitWithLocalCache(t *testing.T) { // Test Near Limit Stats. At Near Limit Ratio, still OK timeSource.EXPECT().UnixNow().Return(int64(1000000)).MaxTimes(3) - client.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "INCRBY", "domain_key4_value4_997200", uint32(1)).SetArg(1, uint32(13)).DoAndReturn(pipeAppend) + client.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "INCRBY", "domain_key4_value4_997200", uint64(1)).SetArg(1, uint64(13)).DoAndReturn(pipeAppend) client.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "EXPIRE", "domain_key4_value4_997200", int64(3600)).DoAndReturn(pipeAppend) client.EXPECT().PipeDo(gomock.Any()).Return(nil) @@ -245,7 +245,7 @@ func TestOverLimitWithLocalCache(t *testing.T) { // Test Over limit stats timeSource.EXPECT().UnixNow().Return(int64(1000000)).MaxTimes(3) - client.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "INCRBY", "domain_key4_value4_997200", uint32(1)).SetArg(1, uint32(16)).DoAndReturn(pipeAppend) + client.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "INCRBY", "domain_key4_value4_997200", uint64(1)).SetArg(1, uint64(16)).DoAndReturn(pipeAppend) client.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "EXPIRE", "domain_key4_value4_997200", int64(3600)).DoAndReturn(pipeAppend) client.EXPECT().PipeDo(gomock.Any()).Return(nil) @@ -266,7 +266,7 @@ func TestOverLimitWithLocalCache(t *testing.T) { // Test Over limit stats with local cache timeSource.EXPECT().UnixNow().Return(int64(1000000)).MaxTimes(3) - client.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "INCRBY", "domain_key4_value4_997200", uint32(1)).Times(0) + client.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "INCRBY", "domain_key4_value4_997200", uint64(1)).Times(0) client.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "EXPIRE", "domain_key4_value4_997200", int64(3600)).Times(0) assert.Equal( @@ -297,7 +297,7 @@ func TestNearLimit(t *testing.T) { // Test Near Limit Stats. Under Near Limit Ratio timeSource.EXPECT().UnixNow().Return(int64(1000000)).MaxTimes(3) - client.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "INCRBY", "domain_key4_value4_997200", uint32(1)).SetArg(1, uint32(11)).DoAndReturn(pipeAppend) + client.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "INCRBY", "domain_key4_value4_997200", uint64(1)).SetArg(1, uint64(11)).DoAndReturn(pipeAppend) client.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "EXPIRE", "domain_key4_value4_997200", int64(3600)).DoAndReturn(pipeAppend) client.EXPECT().PipeDo(gomock.Any()).Return(nil) @@ -320,7 +320,7 @@ func TestNearLimit(t *testing.T) { // Test Near Limit Stats. 
At Near Limit Ratio, still OK timeSource.EXPECT().UnixNow().Return(int64(1000000)).MaxTimes(3) - client.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "INCRBY", "domain_key4_value4_997200", uint32(1)).SetArg(1, uint32(13)).DoAndReturn(pipeAppend) + client.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "INCRBY", "domain_key4_value4_997200", uint64(1)).SetArg(1, uint64(13)).DoAndReturn(pipeAppend) client.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "EXPIRE", "domain_key4_value4_997200", int64(3600)).DoAndReturn(pipeAppend) client.EXPECT().PipeDo(gomock.Any()).Return(nil) @@ -338,7 +338,7 @@ func TestNearLimit(t *testing.T) { // Test Near Limit Stats. We went OVER_LIMIT, but the near_limit counter only increases // when we are near limit, not after we have passed the limit. timeSource.EXPECT().UnixNow().Return(int64(1000000)).MaxTimes(3) - client.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "INCRBY", "domain_key4_value4_997200", uint32(1)).SetArg(1, uint32(16)).DoAndReturn(pipeAppend) + client.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "INCRBY", "domain_key4_value4_997200", uint64(1)).SetArg(1, uint64(16)).DoAndReturn(pipeAppend) client.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "EXPIRE", "domain_key4_value4_997200", int64(3600)).DoAndReturn(pipeAppend) client.EXPECT().PipeDo(gomock.Any()).Return(nil) @@ -356,7 +356,7 @@ func TestNearLimit(t *testing.T) { // Now test hitsAddend that is greater than 1 // All of it under limit, under near limit timeSource.EXPECT().UnixNow().Return(int64(1234)).MaxTimes(3) - client.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "INCRBY", "domain_key5_value5_1234", uint32(3)).SetArg(1, uint32(5)).DoAndReturn(pipeAppend) + client.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "INCRBY", "domain_key5_value5_1234", uint64(3)).SetArg(1, uint64(5)).DoAndReturn(pipeAppend) client.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "EXPIRE", "domain_key5_value5_1234", int64(1)).DoAndReturn(pipeAppend) client.EXPECT().PipeDo(gomock.Any()).Return(nil) @@ -373,7 +373,7 @@ func TestNearLimit(t *testing.T) { // All of it under limit, some over near limit timeSource.EXPECT().UnixNow().Return(int64(1234)).MaxTimes(3) - client.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "INCRBY", "domain_key6_value6_1234", uint32(2)).SetArg(1, uint32(7)).DoAndReturn(pipeAppend) + client.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "INCRBY", "domain_key6_value6_1234", uint64(2)).SetArg(1, uint64(7)).DoAndReturn(pipeAppend) client.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "EXPIRE", "domain_key6_value6_1234", int64(1)).DoAndReturn(pipeAppend) client.EXPECT().PipeDo(gomock.Any()).Return(nil) @@ -390,7 +390,7 @@ func TestNearLimit(t *testing.T) { // All of it under limit, all of it over near limit timeSource.EXPECT().UnixNow().Return(int64(1234)).MaxTimes(3) - client.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "INCRBY", "domain_key7_value7_1234", uint32(3)).SetArg(1, uint32(19)).DoAndReturn(pipeAppend) + client.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "INCRBY", "domain_key7_value7_1234", uint64(3)).SetArg(1, uint64(19)).DoAndReturn(pipeAppend) client.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "EXPIRE", "domain_key7_value7_1234", int64(1)).DoAndReturn(pipeAppend) client.EXPECT().PipeDo(gomock.Any()).Return(nil) @@ -407,7 +407,7 @@ func TestNearLimit(t *testing.T) { // Some of it over limit, all of it over near limit timeSource.EXPECT().UnixNow().Return(int64(1234)).MaxTimes(3) - client.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "INCRBY", 
"domain_key8_value8_1234", uint32(3)).SetArg(1, uint32(22)).DoAndReturn(pipeAppend) + client.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "INCRBY", "domain_key8_value8_1234", uint64(3)).SetArg(1, uint64(22)).DoAndReturn(pipeAppend) client.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "EXPIRE", "domain_key8_value8_1234", int64(1)).DoAndReturn(pipeAppend) client.EXPECT().PipeDo(gomock.Any()).Return(nil) @@ -424,7 +424,7 @@ func TestNearLimit(t *testing.T) { // Some of it in all three places timeSource.EXPECT().UnixNow().Return(int64(1234)).MaxTimes(3) - client.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "INCRBY", "domain_key9_value9_1234", uint32(7)).SetArg(1, uint32(22)).DoAndReturn(pipeAppend) + client.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "INCRBY", "domain_key9_value9_1234", uint64(7)).SetArg(1, uint64(22)).DoAndReturn(pipeAppend) client.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "EXPIRE", "domain_key9_value9_1234", int64(1)).DoAndReturn(pipeAppend) client.EXPECT().PipeDo(gomock.Any()).Return(nil) @@ -441,7 +441,7 @@ func TestNearLimit(t *testing.T) { // all of it over limit timeSource.EXPECT().UnixNow().Return(int64(1234)).MaxTimes(3) - client.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "INCRBY", "domain_key10_value10_1234", uint32(3)).SetArg(1, uint32(30)).DoAndReturn(pipeAppend) + client.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "INCRBY", "domain_key10_value10_1234", uint64(3)).SetArg(1, uint64(30)).DoAndReturn(pipeAppend) client.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "EXPIRE", "domain_key10_value10_1234", int64(1)).DoAndReturn(pipeAppend) client.EXPECT().PipeDo(gomock.Any()).Return(nil) @@ -471,7 +471,7 @@ func TestRedisWithJitter(t *testing.T) { timeSource.EXPECT().UnixNow().Return(int64(1234)).MaxTimes(3) jitterSource.EXPECT().Int63().Return(int64(100)) - client.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "INCRBY", "domain_key_value_1234", uint32(1)).SetArg(1, uint32(5)).DoAndReturn(pipeAppend) + client.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "INCRBY", "domain_key_value_1234", uint64(1)).SetArg(1, uint64(5)).DoAndReturn(pipeAppend) client.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "EXPIRE", "domain_key_value_1234", int64(101)).DoAndReturn(pipeAppend) client.EXPECT().PipeDo(gomock.Any()).Return(nil) @@ -503,7 +503,7 @@ func TestOverLimitWithLocalCacheShadowRule(t *testing.T) { // Test Near Limit Stats. Under Near Limit Ratio timeSource.EXPECT().UnixNow().Return(int64(1000000)).MaxTimes(3) - client.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "INCRBY", "domain_key4_value4_997200", uint32(1)).SetArg(1, uint32(11)).DoAndReturn(pipeAppend) + client.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "INCRBY", "domain_key4_value4_997200", uint64(1)).SetArg(1, uint64(11)).DoAndReturn(pipeAppend) client.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "EXPIRE", "domain_key4_value4_997200", int64(3600)).DoAndReturn(pipeAppend) client.EXPECT().PipeDo(gomock.Any()).Return(nil) @@ -530,7 +530,7 @@ func TestOverLimitWithLocalCacheShadowRule(t *testing.T) { // Test Near Limit Stats. 
At Near Limit Ratio, still OK timeSource.EXPECT().UnixNow().Return(int64(1000000)).MaxTimes(3) - client.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "INCRBY", "domain_key4_value4_997200", uint32(1)).SetArg(1, uint32(13)).DoAndReturn(pipeAppend) + client.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "INCRBY", "domain_key4_value4_997200", uint64(1)).SetArg(1, uint64(13)).DoAndReturn(pipeAppend) client.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "EXPIRE", "domain_key4_value4_997200", int64(3600)).DoAndReturn(pipeAppend) client.EXPECT().PipeDo(gomock.Any()).Return(nil) @@ -551,7 +551,7 @@ func TestOverLimitWithLocalCacheShadowRule(t *testing.T) { // Test Over limit stats timeSource.EXPECT().UnixNow().Return(int64(1000000)).MaxTimes(3) - client.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "INCRBY", "domain_key4_value4_997200", uint32(1)).SetArg(1, uint32(16)).DoAndReturn(pipeAppend) + client.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "INCRBY", "domain_key4_value4_997200", uint64(1)).SetArg(1, uint64(16)).DoAndReturn(pipeAppend) client.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "EXPIRE", "domain_key4_value4_997200", int64(3600)).DoAndReturn(pipeAppend) client.EXPECT().PipeDo(gomock.Any()).Return(nil) @@ -574,7 +574,7 @@ func TestOverLimitWithLocalCacheShadowRule(t *testing.T) { // Test Over limit stats with local cache timeSource.EXPECT().UnixNow().Return(int64(1000000)).MaxTimes(3) - client.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "INCRBY", "domain_key4_value4_997200", uint32(1)).Times(0) + client.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "INCRBY", "domain_key4_value4_997200", uint64(1)).Times(0) client.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "EXPIRE", "domain_key4_value4_997200", int64(3600)).Times(0) @@ -614,7 +614,7 @@ func TestRedisTracer(t *testing.T) { timeSource.EXPECT().UnixNow().Return(int64(1234)).MaxTimes(3) - client.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "INCRBY", "domain_key_value_1234", uint32(1)).SetArg(1, uint32(5)).DoAndReturn(pipeAppend) + client.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "INCRBY", "domain_key_value_1234", uint64(1)).SetArg(1, uint64(5)).DoAndReturn(pipeAppend) client.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "EXPIRE", "domain_key_value_1234", int64(1)).DoAndReturn(pipeAppend) client.EXPECT().PipeDo(gomock.Any()).Return(nil) @@ -644,19 +644,19 @@ func TestOverLimitWithStopCacheKeyIncrementWhenOverlimitConfig(t *testing.T) { // Test Near Limit Stats. 
Under Near Limit Ratio timeSource.EXPECT().UnixNow().Return(int64(1000000)).MaxTimes(5) - client.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "GET", "domain_key4_value4_997200").SetArg(1, uint32(11)).DoAndReturn(pipeAppend) - client.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "GET", "domain_key5_value5_997200").SetArg(1, uint32(11)).DoAndReturn(pipeAppend) + client.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "GET", "domain_key4_value4_997200").SetArg(1, uint64(10)).DoAndReturn(pipeAppend) + client.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "GET", "domain_key5_value5_997200").SetArg(1, uint64(10)).DoAndReturn(pipeAppend) - client.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "INCRBY", "domain_key4_value4_997200", uint32(1)).SetArg(1, uint32(11)).DoAndReturn(pipeAppend) + client.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "INCRBY", "domain_key4_value4_997200", uint64(1)).SetArg(1, uint64(11)).DoAndReturn(pipeAppend) client.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "EXPIRE", "domain_key4_value4_997200", int64(3600)).DoAndReturn(pipeAppend) - client.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "INCRBY", "domain_key5_value5_997200", uint32(1)).SetArg(1, uint32(11)).DoAndReturn(pipeAppend) + client.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "INCRBY", "domain_key5_value5_997200", uint64(1)).SetArg(1, uint64(11)).DoAndReturn(pipeAppend) client.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "EXPIRE", "domain_key5_value5_997200", int64(3600)).DoAndReturn(pipeAppend) client.EXPECT().PipeDo(gomock.Any()).Return(nil).Times(2) - request := common.NewRateLimitRequest("domain", [][][2]string{{{"key4", "value4"}}, {{"key5", "value5"}}}, 1) + request := common.NewRateLimitRequestWithPerDescriptorHitsAddend("domain", [][][2]string{{{"key4", "value4"}}, {{"key5", "value5"}}}, []uint64{1, 1}) limits := []*config.RateLimit{ config.NewRateLimit(15, pb.RateLimitResponse_RateLimit_HOUR, sm.NewStats("key4_value4"), false, false, "", nil, false), @@ -683,65 +683,67 @@ func TestOverLimitWithStopCacheKeyIncrementWhenOverlimitConfig(t *testing.T) { // Check the local cache stats. testLocalCacheStats(localCacheStats, statsStore, sink, 0, 1, 1, 0, 0) - // Test Near Limit Stats. At Near Limit Ratio, still OK + // Test Near Limit Stats. Some hits at Near Limit Ratio, but still OK. 
timeSource.EXPECT().UnixNow().Return(int64(1000000)).MaxTimes(5) - client.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "GET", "domain_key4_value4_997200").SetArg(1, uint32(13)).DoAndReturn(pipeAppend) - client.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "GET", "domain_key5_value5_997200").SetArg(1, uint32(13)).DoAndReturn(pipeAppend) - client.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "INCRBY", "domain_key4_value4_997200", uint32(1)).SetArg(1, uint32(13)).DoAndReturn(pipeAppend) + client.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "GET", "domain_key4_value4_997200").SetArg(1, uint64(11)).DoAndReturn(pipeAppend) + client.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "GET", "domain_key5_value5_997200").SetArg(1, uint64(11)).DoAndReturn(pipeAppend) + client.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "INCRBY", "domain_key4_value4_997200", uint64(2)).SetArg(1, uint64(13)).DoAndReturn(pipeAppend) client.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "EXPIRE", "domain_key4_value4_997200", int64(3600)).DoAndReturn(pipeAppend) - client.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "INCRBY", "domain_key5_value5_997200", uint32(1)).SetArg(1, uint32(13)).DoAndReturn(pipeAppend) + client.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "INCRBY", "domain_key5_value5_997200", uint64(2)).SetArg(1, uint64(13)).DoAndReturn(pipeAppend) client.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "EXPIRE", "domain_key5_value5_997200", int64(3600)).DoAndReturn(pipeAppend) client.EXPECT().PipeDo(gomock.Any()).Return(nil).Times(2) + request = common.NewRateLimitRequestWithPerDescriptorHitsAddend("domain", [][][2]string{{{"key4", "value4"}}, {{"key5", "value5"}}}, []uint64{2, 2}) + assert.Equal( []*pb.RateLimitResponse_DescriptorStatus{ {Code: pb.RateLimitResponse_OK, CurrentLimit: limits[0].Limit, LimitRemaining: 2, DurationUntilReset: utils.CalculateReset(&limits[0].Limit.Unit, timeSource)}, {Code: pb.RateLimitResponse_OK, CurrentLimit: limits[1].Limit, LimitRemaining: 1, DurationUntilReset: utils.CalculateReset(&limits[1].Limit.Unit, timeSource)}, }, cache.DoLimit(context.Background(), request, limits)) - assert.Equal(uint64(2), limits[0].Stats.TotalHits.Value()) + assert.Equal(uint64(3), limits[0].Stats.TotalHits.Value()) assert.Equal(uint64(0), limits[0].Stats.OverLimit.Value()) assert.Equal(uint64(0), limits[0].Stats.OverLimitWithLocalCache.Value()) assert.Equal(uint64(1), limits[0].Stats.NearLimit.Value()) - assert.Equal(uint64(2), limits[0].Stats.WithinLimit.Value()) - assert.Equal(uint64(2), limits[1].Stats.TotalHits.Value()) + assert.Equal(uint64(3), limits[0].Stats.WithinLimit.Value()) + assert.Equal(uint64(3), limits[1].Stats.TotalHits.Value()) assert.Equal(uint64(0), limits[1].Stats.OverLimit.Value()) assert.Equal(uint64(0), limits[1].Stats.OverLimitWithLocalCache.Value()) - assert.Equal(uint64(1), limits[1].Stats.NearLimit.Value()) - assert.Equal(uint64(2), limits[1].Stats.WithinLimit.Value()) + assert.Equal(uint64(2), limits[1].Stats.NearLimit.Value()) + assert.Equal(uint64(3), limits[1].Stats.WithinLimit.Value()) // Check the local cache stats. 
testLocalCacheStats(localCacheStats, statsStore, sink, 0, 2, 2, 0, 0) // Test one key is reaching to the Overlimit threshold timeSource.EXPECT().UnixNow().Return(int64(1000000)).MaxTimes(5) - client.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "GET", "domain_key4_value4_997200").SetArg(1, uint32(14)).DoAndReturn(pipeAppend) - client.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "GET", "domain_key5_value5_997200").SetArg(1, uint32(14)).DoAndReturn(pipeAppend) - client.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "INCRBY", "domain_key4_value4_997200", uint32(0)).SetArg(1, uint32(14)).DoAndReturn(pipeAppend) + client.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "GET", "domain_key4_value4_997200").SetArg(1, uint64(13)).DoAndReturn(pipeAppend) + client.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "GET", "domain_key5_value5_997200").SetArg(1, uint64(13)).DoAndReturn(pipeAppend) + client.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "INCRBY", "domain_key4_value4_997200", uint64(0)).SetArg(1, uint64(13)).DoAndReturn(pipeAppend) client.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "EXPIRE", "domain_key4_value4_997200", int64(3600)).DoAndReturn(pipeAppend) - client.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "INCRBY", "domain_key5_value5_997200", uint32(1)).SetArg(1, uint32(14)).DoAndReturn(pipeAppend) + client.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "INCRBY", "domain_key5_value5_997200", uint64(2)).SetArg(1, uint64(15)).DoAndReturn(pipeAppend) client.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "EXPIRE", "domain_key5_value5_997200", int64(3600)).DoAndReturn(pipeAppend) client.EXPECT().PipeDo(gomock.Any()).Return(nil).Times(2) assert.Equal( []*pb.RateLimitResponse_DescriptorStatus{ - {Code: pb.RateLimitResponse_OK, CurrentLimit: limits[0].Limit, LimitRemaining: 1, DurationUntilReset: utils.CalculateReset(&limits[0].Limit.Unit, timeSource)}, - {Code: pb.RateLimitResponse_OK, CurrentLimit: limits[1].Limit, LimitRemaining: 0, DurationUntilReset: utils.CalculateReset(&limits[1].Limit.Unit, timeSource)}, + {Code: pb.RateLimitResponse_OK, CurrentLimit: limits[0].Limit, LimitRemaining: 2, DurationUntilReset: utils.CalculateReset(&limits[0].Limit.Unit, timeSource)}, + {Code: pb.RateLimitResponse_OVER_LIMIT, CurrentLimit: limits[1].Limit, LimitRemaining: 0, DurationUntilReset: utils.CalculateReset(&limits[1].Limit.Unit, timeSource)}, }, cache.DoLimit(context.Background(), request, limits)) - assert.Equal(uint64(3), limits[0].Stats.TotalHits.Value()) + assert.Equal(uint64(5), limits[0].Stats.TotalHits.Value()) assert.Equal(uint64(0), limits[0].Stats.OverLimit.Value()) assert.Equal(uint64(0), limits[0].Stats.OverLimitWithLocalCache.Value()) assert.Equal(uint64(2), limits[0].Stats.NearLimit.Value()) - assert.Equal(uint64(3), limits[0].Stats.WithinLimit.Value()) - assert.Equal(uint64(3), limits[1].Stats.TotalHits.Value()) - assert.Equal(uint64(0), limits[1].Stats.OverLimit.Value()) + assert.Equal(uint64(5), limits[0].Stats.WithinLimit.Value()) + assert.Equal(uint64(5), limits[1].Stats.TotalHits.Value()) + assert.Equal(uint64(1), limits[1].Stats.OverLimit.Value()) assert.Equal(uint64(0), limits[1].Stats.OverLimitWithLocalCache.Value()) - assert.Equal(uint64(2), limits[1].Stats.NearLimit.Value()) + assert.Equal(uint64(3), limits[1].Stats.NearLimit.Value()) assert.Equal(uint64(3), limits[1].Stats.WithinLimit.Value()) // Check the local cache stats.