Add period for rate limiter middleware

mpl 2020-01-08 11:44:04 +01:00 committed by Traefiker Bot
parent 377c219fd9
commit 6f4aefffe7
12 changed files with 197 additions and 21 deletions

View file

@ -3,7 +3,7 @@
To Control the Number of Requests Going to a Service
{: .subtitle }
The RateLimit middleware ensures that services will receive a _fair_ number of requests, and allows you define what is fair.
The RateLimit middleware ensures that services will receive a _fair_ number of requests, and allows one to define what fair is.
## Configuration Example
@ -24,8 +24,8 @@ metadata:
name: test-ratelimit
spec:
rateLimit:
average: 100
burst: 50
```
```yaml tab="Consul Catalog"
@ -74,25 +74,32 @@ http:
### `average`
Average is the maximum rate, in requests/s, allowed for the given source.
It defaults to 0, which means no rate limiting.
`average` is the maximum rate, by default in requests per second, allowed for the given source.
It defaults to `0`, which means no rate limiting.
The rate is actually defined by dividing `average` by `period`.
So for a rate below 1 req/s, one needs to define a `period` larger than a second.
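For instance (illustrative values, not taken from the configuration examples below), `average: 1` combined with `period: 1m` yields a rate of one request per minute:

```go
r = 1 / 1m   // ≈ 0.0167 req/s, i.e. 1 request per minute
```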
```yaml tab="Docker"
# 100 reqs/s
labels:
- "traefik.http.middlewares.test-ratelimit.ratelimit.average=100"
```
```yaml tab="Kubernetes"
# 100 reqs/s
apiVersion: traefik.containo.us/v1alpha1
kind: Middleware
metadata:
name: test-ratelimit
spec:
rateLimit:
average: 100
```
```yaml tab="Consul Catalog"
# 100 reqs/s
- "traefik.http.middlewares.test-ratelimit.ratelimit.average=100"
```
@ -108,12 +115,14 @@ labels:
```
```toml tab="File (TOML)"
# 100 reqs/s
[http.middlewares]
[http.middlewares.test-ratelimit.rateLimit]
average = 100
```
```yaml tab="File (YAML)"
# 100 reqs/s
http:
middlewares:
test-ratelimit:
@ -121,10 +130,78 @@ http:
average: 100
```
### `period`
`period`, in combination with `average`, defines the actual maximum rate, as follows:
```go
r = average / period
```
It defaults to `1` second.
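Two worked instances of that formula, one with the default period and one with the `1m` period used in the examples below:

```go
r = 100 / 1s   // 100 req/s (the default period)
r = 6 / 1m     // 0.1 req/s, i.e. 6 req/minute
```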
```yaml tab="Docker"
# 6 reqs/minute
labels:
- "traefik.http.middlewares.test-ratelimit.ratelimit.average=6"
- "traefik.http.middlewares.test-ratelimit.ratelimit.period=1m"
```
```yaml tab="Kubernetes"
# 6 reqs/minute
apiVersion: traefik.containo.us/v1alpha1
kind: Middleware
metadata:
name: test-ratelimit
spec:
rateLimit:
period: 1m
average: 6
```
```yaml tab="Consul Catalog"
# 6 reqs/minute
- "traefik.http.middlewares.test-ratelimit.ratelimit.average=6"
- "traefik.http.middlewares.test-ratelimit.ratelimit.period=1m"
```
```json tab="Marathon"
"labels": {
"traefik.http.middlewares.test-ratelimit.ratelimit.average": "6",
"traefik.http.middlewares.test-ratelimit.ratelimit.period": "1m",
}
```
```yaml tab="Rancher"
# 6 reqs/minute
labels:
- "traefik.http.middlewares.test-ratelimit.ratelimit.average=6"
- "traefik.http.middlewares.test-ratelimit.ratelimit.period=1m"
```
```toml tab="File (TOML)"
# 6 reqs/minute
[http.middlewares]
[http.middlewares.test-ratelimit.rateLimit]
average = 6
period = "1m"
```
```yaml tab="File (YAML)"
# 6 reqs/minute
http:
middlewares:
test-ratelimit:
rateLimit:
average: 6
period: 1m
```
### `burst`
Burst is the maximum number of requests allowed to go through in the same arbitrarily small period of time.
It defaults to 1.
`burst` is the maximum number of requests allowed to go through in the same arbitrarily small period of time.
It defaults to `1`.
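For intuition, `burst` corresponds to the bucket size of a token-bucket limiter. Below is a minimal, standalone sketch using `golang.org/x/time/rate` (the package the middleware builds on, per the code further down in this commit); the values are illustrative only:

```go
package main

import (
	"fmt"

	"golang.org/x/time/rate"
)

func main() {
	// average=100 req/s with burst=50: up to 50 requests can be admitted back to back,
	// while the long-run rate stays at 100 req/s.
	limiter := rate.NewLimiter(rate.Limit(100), 50)
	fmt.Println(limiter.Allow()) // true as long as tokens remain in the bucket
}
```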
```yaml tab="Docker"
labels:
@ -138,7 +215,7 @@ metadata:
name: test-ratelimit
spec:
rateLimit:
burst: 100
```
```yaml tab="Consul Catalog"

View file

@ -88,6 +88,7 @@
- "traefik.http.middlewares.middleware12.passtlsclientcert.info.subject.serialnumber=true"
- "traefik.http.middlewares.middleware12.passtlsclientcert.pem=true"
- "traefik.http.middlewares.middleware13.ratelimit.average=42"
- "traefik.http.middlewares.middleware13.ratelimit.period=42"
- "traefik.http.middlewares.middleware13.ratelimit.burst=42"
- "traefik.http.middlewares.middleware13.ratelimit.sourcecriterion.ipstrategy.depth=42"
- "traefik.http.middlewares.middleware13.ratelimit.sourcecriterion.ipstrategy.excludedips=foobar, foobar"

View file

@ -216,6 +216,7 @@
[http.middlewares.Middleware13]
[http.middlewares.Middleware13.rateLimit]
average = 42
period = 42
burst = 42
[http.middlewares.Middleware13.rateLimit.sourceCriterion]
requestHeaderName = "foobar"

View file

@ -243,6 +243,7 @@ http:
Middleware13:
rateLimit:
average: 42
period: 42
burst: 42
sourceCriterion:
ipstrategy:

View file

@ -102,6 +102,7 @@
| `traefik/http/middlewares/Middleware12/passTLSClientCert/info/subject/serialNumber` | `true` |
| `traefik/http/middlewares/Middleware12/passTLSClientCert/pem` | `true` |
| `traefik/http/middlewares/Middleware13/rateLimit/average` | `42` |
| `traefik/http/middlewares/Middleware13/rateLimit/period` | `42` |
| `traefik/http/middlewares/Middleware13/rateLimit/burst` | `42` |
| `traefik/http/middlewares/Middleware13/rateLimit/sourceCriterion/ipStrategy/depth` | `42` |
| `traefik/http/middlewares/Middleware13/rateLimit/sourceCriterion/ipStrategy/excludedIPs/0` | `foobar` |

View file

@ -88,6 +88,7 @@
"traefik.http.middlewares.middleware12.passtlsclientcert.info.subject.serialnumber": "true",
"traefik.http.middlewares.middleware12.passtlsclientcert.pem": "true",
"traefik.http.middlewares.middleware13.ratelimit.average": "42",
"traefik.http.middlewares.middleware13.ratelimit.period": "42",
"traefik.http.middlewares.middleware13.ratelimit.burst": "42",
"traefik.http.middlewares.middleware13.ratelimit.sourcecriterion.ipstrategy.depth": "42",
"traefik.http.middlewares.middleware13.ratelimit.sourcecriterion.ipstrategy.excludedips": "foobar, foobar",

View file

@ -250,6 +250,7 @@
[http.middlewares.Middleware10]
[http.middlewares.Middleware10.rateLimit]
average = 42
period = "1s"
burst = 42
[http.middlewares.Middleware10.rateLimit.sourceCriterion]
requestHeaderName = "foobar"

View file

@ -6,8 +6,10 @@ import (
"fmt"
"io/ioutil"
"os"
"time"
"github.com/containous/traefik/v2/pkg/ip"
"github.com/containous/traefik/v2/pkg/types"
)
// +k8s:deepcopy-gen=true
@ -296,9 +298,14 @@ type SourceCriterion struct {
// RateLimit holds the rate limiting configuration for a given router.
type RateLimit struct {
// Average is the maximum rate, in requests/s, allowed for the given source.
// Average is the maximum rate, by default in requests/s, allowed for the given source.
// It defaults to 0, which means no rate limiting.
// The rate is actually defined by dividing Average by Period. So for a rate below 1req/s,
// one needs to define a Period larger than a second.
Average int64 `json:"average,omitempty" toml:"average,omitempty" yaml:"average,omitempty"`
// Period, in combination with Average, defines the actual maximum rate, such as:
// r = Average / Period. It defaults to a second.
Period types.Duration
// Burst is the maximum number of requests allowed to arrive in the same arbitrarily small period of time.
// It defaults to 1.
Burst int64 `json:"burst,omitempty" toml:"burst,omitempty" yaml:"burst,omitempty"`
@ -308,6 +315,7 @@ type RateLimit struct {
// SetDefaults sets the default values on a RateLimit.
func (r *RateLimit) SetDefaults() {
r.Burst = 1
r.Period = types.Duration(time.Second)
r.SourceCriterion = &SourceCriterion{
IPStrategy: &IPStrategy{},
}
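A hypothetical usage sketch (not part of this commit) showing what the new default means in practice:

```go
package main

import (
	"fmt"
	"time"

	"github.com/containous/traefik/v2/pkg/config/dynamic"
)

func main() {
	// Illustrative only: after SetDefaults, an otherwise empty RateLimit has
	// Burst=1 and Period=1s, so Average keeps its requests-per-second meaning.
	rl := dynamic.RateLimit{Average: 6}
	rl.SetDefaults()
	fmt.Println(rl.Burst, time.Duration(rl.Period)) // 1 1s
}
```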

View file

@ -3,8 +3,10 @@ package label
import (
"fmt"
"testing"
"time"
"github.com/containous/traefik/v2/pkg/config/dynamic"
"github.com/containous/traefik/v2/pkg/types"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
@ -101,6 +103,7 @@ func TestDecodeConfiguration(t *testing.T) {
"traefik.http.middlewares.Middleware11.passtlsclientcert.info.issuer.serialnumber": "true",
"traefik.http.middlewares.Middleware11.passtlsclientcert.pem": "true",
"traefik.http.middlewares.Middleware12.ratelimit.average": "42",
"traefik.http.middlewares.Middleware12.ratelimit.period": "1s",
"traefik.http.middlewares.Middleware12.ratelimit.burst": "42",
"traefik.http.middlewares.Middleware12.ratelimit.sourcecriterion.requestheadername": "foobar",
"traefik.http.middlewares.Middleware12.ratelimit.sourcecriterion.requesthost": "true",
@ -324,6 +327,7 @@ func TestDecodeConfiguration(t *testing.T) {
RateLimit: &dynamic.RateLimit{
Average: 42,
Burst: 42,
Period: types.Duration(time.Second),
SourceCriterion: &dynamic.SourceCriterion{
IPStrategy: &dynamic.IPStrategy{
Depth: 42,
@ -729,6 +733,7 @@ func TestEncodeConfiguration(t *testing.T) {
RateLimit: &dynamic.RateLimit{
Average: 42,
Burst: 42,
Period: types.Duration(time.Second),
SourceCriterion: &dynamic.SourceCriterion{
IPStrategy: &dynamic.IPStrategy{
Depth: 42,
@ -1081,6 +1086,7 @@ func TestEncodeConfiguration(t *testing.T) {
"traefik.HTTP.Middlewares.Middleware11.PassTLSClientCert.Info.Issuer.DomainComponent": "true",
"traefik.HTTP.Middlewares.Middleware11.PassTLSClientCert.PEM": "true",
"traefik.HTTP.Middlewares.Middleware12.RateLimit.Average": "42",
"traefik.HTTP.Middlewares.Middleware12.RateLimit.Period": "1000000000",
"traefik.HTTP.Middlewares.Middleware12.RateLimit.Burst": "42",
"traefik.HTTP.Middlewares.Middleware12.RateLimit.SourceCriterion.RequestHeaderName": "foobar",
"traefik.HTTP.Middlewares.Middleware12.RateLimit.SourceCriterion.RequestHost": "true",

View file

@ -29,7 +29,7 @@ type rateLimiter struct {
rate rate.Limit // reqs/s
burst int64
// maxDelay is the maximum duration we're willing to wait for a bucket reservation to become effective, in nanoseconds.
// For now it is somewhat arbitrarily set to 1/rate.
// For now it is somewhat arbitrarily set to 1/(2*rate).
maxDelay time.Duration
sourceMatcher utils.SourceExtractor
next http.Handler
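To make the maxDelay comment above concrete, here is a simplified, illustrative sketch (not the middleware's exact code) of how a `golang.org/x/time/rate` reservation and a maxDelay cap typically combine:

```go
package example

import (
	"net/http"
	"time"

	"golang.org/x/time/rate"
)

// serveOrThrottle is an illustrative helper: it waits for a token reservation
// for at most maxDelay, otherwise it answers 429 Too Many Requests.
func serveOrThrottle(limiter *rate.Limiter, maxDelay time.Duration, rw http.ResponseWriter) bool {
	res := limiter.Reserve()
	if !res.OK() {
		http.Error(rw, "rate limit exceeded", http.StatusTooManyRequests)
		return false
	}
	if delay := res.Delay(); delay > maxDelay {
		res.Cancel()
		http.Error(rw, "rate limit exceeded", http.StatusTooManyRequests)
		return false
	} else if delay > 0 {
		time.Sleep(delay)
	}
	return true
}
```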
@ -61,20 +61,34 @@ func New(ctx context.Context, next http.Handler, config dynamic.RateLimit, name
}
burst := config.Burst
if burst <= 0 {
if burst < 1 {
burst = 1
}
// Logically, we should set maxDelay to ~infinity when config.Average == 0 (because it means to rate limiting),
period := time.Duration(config.Period)
if period == 0 {
period = time.Second
}
// Logically, we should set maxDelay to infinity when config.Average == 0 (because it means no rate limiting),
// but since the reservation will give us a delay = 0 anyway in this case, we're good even with any maxDelay >= 0.
var maxDelay time.Duration
if config.Average != 0 {
maxDelay = time.Second / time.Duration(config.Average*2)
var rtl float64
if config.Average > 0 {
rtl = float64(config.Average*int64(time.Second)) / float64(period)
// maxDelay does not scale well for rates below 1,
// so we just cap it to the corresponding value, i.e. 0.5s, in order to keep the effective rate predictable.
// One alternative would be to switch to a no-reservation mode (Allow() method) whenever we are in such a low rate regime.
if rtl < 1 {
maxDelay = 500 * time.Millisecond
} else {
maxDelay = time.Second / (time.Duration(rtl) * 2)
}
}
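// Illustrative numbers (cf. the tests below): Average=2 with Period=10s gives rtl = 0.2 (< 1), so maxDelay is capped at 500ms;
// Average=100 with the default 1s period gives rtl = 100 and maxDelay = 1s/(100*2) = 5ms.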
return &rateLimiter{
name: name,
rate: rate.Limit(config.Average),
rate: rate.Limit(rtl),
burst: burst,
maxDelay: maxDelay,
next: next,

View file

@ -10,6 +10,7 @@ import (
"github.com/containous/traefik/v2/pkg/config/dynamic"
"github.com/containous/traefik/v2/pkg/testhelpers"
"github.com/containous/traefik/v2/pkg/types"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"github.com/vulcand/oxy/utils"
@ -30,6 +31,15 @@ func TestNewRateLimiter(t *testing.T) {
},
expectedMaxDelay: 2500 * time.Microsecond,
},
{
desc: "maxDelay computation, low rate regime",
config: dynamic.RateLimit{
Average: 2,
Period: types.Duration(10 * time.Second),
Burst: 10,
},
expectedMaxDelay: 500 * time.Millisecond,
},
{
desc: "default SourceMatcher is remote address ip strategy",
config: dynamic.RateLimit{
@ -127,6 +137,46 @@ func TestRateLimit(t *testing.T) {
incomingLoad: 200,
burst: 300,
},
{
desc: "lower than 1/s",
config: dynamic.RateLimit{
Average: 5,
Period: types.Duration(10 * time.Second),
},
loadDuration: 2 * time.Second,
incomingLoad: 100,
burst: 0,
},
{
desc: "lower than 1/s, longer",
config: dynamic.RateLimit{
Average: 5,
Period: types.Duration(10 * time.Second),
},
loadDuration: time.Minute,
incomingLoad: 100,
burst: 0,
},
{
desc: "lower than 1/s, longer, harsher",
config: dynamic.RateLimit{
Average: 1,
Period: types.Duration(time.Minute),
},
loadDuration: time.Minute,
incomingLoad: 100,
burst: 0,
},
{
desc: "period below 1 second",
config: dynamic.RateLimit{
Average: 50,
Period: types.Duration(500 * time.Millisecond),
},
loadDuration: 2 * time.Second,
incomingLoad: 300,
burst: 0,
},
// TODO Try to disambiguate when it fails if it is because of too high a load.
// {
// desc: "Zero average ==> no rate limiting",
@ -142,6 +192,9 @@ func TestRateLimit(t *testing.T) {
for _, test := range testCases {
test := test
t.Run(test.desc, func(t *testing.T) {
if test.loadDuration >= time.Minute && testing.Short() {
t.Skip("skipping test in short mode.")
}
t.Parallel()
reqCount := 0
@ -152,10 +205,10 @@ func TestRateLimit(t *testing.T) {
h, err := New(context.Background(), next, test.config, "rate-limiter")
require.NoError(t, err)
period := time.Duration(1e9 / test.incomingLoad)
loadPeriod := time.Duration(1e9 / test.incomingLoad)
start := time.Now()
end := start.Add(test.loadDuration)
ticker := time.NewTicker(period)
ticker := time.NewTicker(loadPeriod)
defer ticker.Stop()
for {
if time.Now().After(end) {
@ -179,6 +232,15 @@ func TestRateLimit(t *testing.T) {
stop := time.Now()
elapsed := stop.Sub(start)
burst := test.config.Burst
if burst < 1 {
// actual default value
burst = 1
}
period := time.Duration(test.config.Period)
if period == 0 {
period = time.Second
}
if test.config.Average == 0 {
if reqCount < 75*test.incomingLoad/100 {
t.Fatalf("we (arbitrarily) expect at least 75%% of the requests to go through with no rate limiting, and yet only %d/%d went through", reqCount, test.incomingLoad)
@ -192,7 +254,8 @@ func TestRateLimit(t *testing.T) {
// Note that even when there is no bursty traffic,
// we take into account the configured burst,
// because it also helps absorbing non-bursty traffic.
wantCount := int(test.config.Average*int64(test.loadDuration/time.Second) + test.config.Burst)
rate := float64(test.config.Average) / float64(period)
wantCount := int(int64(rate*float64(test.loadDuration)) + burst)
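// Numeric check, using the "lower than 1/s" case above: Average=5 over Period=10s is 0.5 req/s,
// so a 2s load with the default burst of 1 gives wantCount = 0.5*2 + 1 = 2.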
// Allow for a 2% leeway
maxCount := wantCount * 102 / 100
// With very high CPU loads,
@ -201,10 +264,10 @@ func TestRateLimit(t *testing.T) {
// Feel free to adjust wrt to the load on e.g. the CI.
minCount := wantCount * 95 / 100
if reqCount < minCount {
t.Fatalf("rate was slower than expected: %d requests in %v", reqCount, elapsed)
t.Fatalf("rate was slower than expected: %d requests (wanted > %d) in %v", reqCount, minCount, elapsed)
}
if reqCount > maxCount {
t.Fatalf("rate was faster than expected: %d requests in %v", reqCount, elapsed)
t.Fatalf("rate was faster than expected: %d requests (wanted < %d) in %v", reqCount, maxCount, elapsed)
}
})
}

View file

@ -166,6 +166,7 @@ func Test_buildConfiguration(t *testing.T) {
"traefik/http/middlewares/Middleware07/errors/service": "foobar",
"traefik/http/middlewares/Middleware07/errors/query": "foobar",
"traefik/http/middlewares/Middleware13/rateLimit/average": "42",
"traefik/http/middlewares/Middleware13/rateLimit/period": "1s",
"traefik/http/middlewares/Middleware13/rateLimit/burst": "42",
"traefik/http/middlewares/Middleware13/rateLimit/sourceCriterion/requestHeaderName": "foobar",
"traefik/http/middlewares/Middleware13/rateLimit/sourceCriterion/requestHost": "true",
@ -331,6 +332,7 @@ func Test_buildConfiguration(t *testing.T) {
RateLimit: &dynamic.RateLimit{
Average: 42,
Burst: 42,
Period: types.Duration(time.Second),
SourceCriterion: &dynamic.SourceCriterion{
IPStrategy: &dynamic.IPStrategy{
Depth: 42,