From 920e82f11a7d048b9480956ffd023206436fe584 Mon Sep 17 00:00:00 2001 From: Alessandro Chitolina Date: Mon, 9 Nov 2020 17:12:05 +0100 Subject: [PATCH 01/13] fix: translate configured server port into correct mapped host port --- pkg/provider/ecs/config.go | 7 ++ pkg/provider/ecs/config_test.go | 135 ++++++++++++++++++++++++++++++-- 2 files changed, 134 insertions(+), 8 deletions(-) diff --git a/pkg/provider/ecs/config.go b/pkg/provider/ecs/config.go index 225669408..02684a07f 100644 --- a/pkg/provider/ecs/config.go +++ b/pkg/provider/ecs/config.go @@ -307,6 +307,13 @@ func (p Provider) getIPAddress(instance ecsInstance) string { func getPort(instance ecsInstance, serverPort string) string { if len(serverPort) > 0 { + for _, port := range instance.machine.ports { + containerPort := strconv.FormatInt(port.containerPort, 10) + if serverPort == containerPort { + return strconv.FormatInt(port.hostPort, 10) + } + } + return serverPort } diff --git a/pkg/provider/ecs/config_test.go b/pkg/provider/ecs/config_test.go index 56aab1869..ad2506694 100644 --- a/pkg/provider/ecs/config_test.go +++ b/pkg/provider/ecs/config_test.go @@ -1721,13 +1721,13 @@ func Test_buildConfiguration(t *testing.T) { name("Test"), labels(map[string]string{ "traefik.http.services.Service1.LoadBalancer.server.scheme": "h2c", - "traefik.http.services.Service1.LoadBalancer.server.port": "8080", + "traefik.http.services.Service1.LoadBalancer.server.port": "80", }), iMachine( mState(ec2.InstanceStateNameRunning), mPrivateIP("127.0.0.1"), mPorts( - mPort(0, 80, "tcp"), + mPort(80, 8080, "tcp"), ), ), ), @@ -1764,6 +1764,125 @@ func Test_buildConfiguration(t *testing.T) { }, }, }, + { + desc: "one container with label port not exposed by container", + containers: []ecsInstance{ + instance( + name("Test"), + labels(map[string]string{ + "traefik.http.services.Service1.LoadBalancer.server.scheme": "h2c", + "traefik.http.services.Service1.LoadBalancer.server.port": "8040", + }), + iMachine( + mState(ec2.InstanceStateNameRunning), + mPrivateIP("127.0.0.1"), + mPorts( + mPort(80, 8080, "tcp"), + ), + ), + ), + }, + expected: &dynamic.Configuration{ + TCP: &dynamic.TCPConfiguration{ + Routers: map[string]*dynamic.TCPRouter{}, + Services: map[string]*dynamic.TCPService{}, + }, + UDP: &dynamic.UDPConfiguration{ + Routers: map[string]*dynamic.UDPRouter{}, + Services: map[string]*dynamic.UDPService{}, + }, + HTTP: &dynamic.HTTPConfiguration{ + Routers: map[string]*dynamic.Router{ + "Test": { + Service: "Service1", + Rule: "Host(`Test.traefik.wtf`)", + }, + }, + Middlewares: map[string]*dynamic.Middleware{}, + Services: map[string]*dynamic.Service{ + "Service1": { + LoadBalancer: &dynamic.ServersLoadBalancer{ + Servers: []dynamic.Server{ + { + URL: "h2c://127.0.0.1:8040", + }, + }, + PassHostHeader: Bool(true), + }, + }, + }, + }, + }, + }, + { + desc: "one container with label and multiple ports", + containers: []ecsInstance{ + instance( + name("Test"), + labels(map[string]string{ + "traefik.http.routers.Test.rule": "Host(`Test.traefik.wtf`)", + "traefik.http.routers.Test.service": "Service1", + "traefik.http.services.Service1.LoadBalancer.server.port": "4445", + "traefik.http.routers.Test2.rule": "Host(`Test.traefik.local`)", + "traefik.http.routers.Test2.service": "Service2", + "traefik.http.services.Service2.LoadBalancer.server.port": "4444", + }), + iMachine( + mState(ec2.InstanceStateNameRunning), + mPrivateIP("127.0.0.1"), + mPorts( + mPort(4444, 32123, "tcp"), + mPort(4445, 32124, "tcp"), + ), + ), + ), + }, + expected: 
&dynamic.Configuration{ + TCP: &dynamic.TCPConfiguration{ + Routers: map[string]*dynamic.TCPRouter{}, + Services: map[string]*dynamic.TCPService{}, + }, + UDP: &dynamic.UDPConfiguration{ + Routers: map[string]*dynamic.UDPRouter{}, + Services: map[string]*dynamic.UDPService{}, + }, + HTTP: &dynamic.HTTPConfiguration{ + Routers: map[string]*dynamic.Router{ + "Test": { + Service: "Service1", + Rule: "Host(`Test.traefik.wtf`)", + }, + "Test2": { + Service: "Service2", + Rule: "Host(`Test.traefik.local`)", + }, + }, + Middlewares: map[string]*dynamic.Middleware{}, + Services: map[string]*dynamic.Service{ + "Service1": { + LoadBalancer: &dynamic.ServersLoadBalancer{ + Servers: []dynamic.Server{ + { + URL: "http://127.0.0.1:32124", + }, + }, + PassHostHeader: Bool(true), + }, + }, + "Service2": { + LoadBalancer: &dynamic.ServersLoadBalancer{ + Servers: []dynamic.Server{ + { + URL: "http://127.0.0.1:32123", + }, + }, + PassHostHeader: Bool(true), + }, + }, + }, + }, + }, + }, { desc: "one container with label port on two services", containers: []ecsInstance{ @@ -2274,13 +2393,13 @@ func Test_buildConfiguration(t *testing.T) { labels(map[string]string{ "traefik.tcp.routers.foo.rule": "HostSNI(`foo.bar`)", "traefik.tcp.routers.foo.tls.options": "foo", - "traefik.tcp.services.foo.loadbalancer.server.port": "8080", + "traefik.tcp.services.foo.loadbalancer.server.port": "80", }), iMachine( mState(ec2.InstanceStateNameRunning), mPrivateIP("127.0.0.1"), mPorts( - mPort(0, 80, "tcp"), + mPort(80, 8080, "tcp"), ), ), ), @@ -2327,13 +2446,13 @@ func Test_buildConfiguration(t *testing.T) { name("Test"), labels(map[string]string{ "traefik.udp.routers.foo.entrypoints": "mydns", - "traefik.udp.services.foo.loadbalancer.server.port": "8080", + "traefik.udp.services.foo.loadbalancer.server.port": "80", }), iMachine( mState(ec2.InstanceStateNameRunning), mPrivateIP("127.0.0.1"), mPorts( - mPort(0, 80, "udp"), + mPort(80, 8080, "udp"), ), ), ), @@ -2506,14 +2625,14 @@ func Test_buildConfiguration(t *testing.T) { instance( name("Test"), labels(map[string]string{ - "traefik.tcp.services.foo.loadbalancer.server.port": "8080", + "traefik.tcp.services.foo.loadbalancer.server.port": "80", "traefik.tcp.services.foo.loadbalancer.terminationdelay": "200", }), iMachine( mState(ec2.InstanceStateNameRunning), mPrivateIP("127.0.0.1"), mPorts( - mPort(0, 80, "tcp"), + mPort(80, 8080, "tcp"), ), ), ), From af22cabc6f81ff86d08a6f4046a1a10b68fafd8f Mon Sep 17 00:00:00 2001 From: james426759 <33891790+james426759@users.noreply.github.com> Date: Wed, 11 Nov 2020 00:28:04 +0800 Subject: [PATCH 02/13] Fix docs for TLS --- docs/content/https/tls.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/content/https/tls.md b/docs/content/https/tls.md index 71fd50222..e8513a746 100644 --- a/docs/content/https/tls.md +++ b/docs/content/https/tls.md @@ -64,7 +64,7 @@ tls: !!! important "Restriction" Any store definition other than the default one (named `default`) will be ignored, - and there is thefore only one globally available TLS store. + and there is therefore only one globally available TLS store. 
In the `tls.certificates` section, a list of stores can then be specified to indicate where the certificates should be stored: From 459200dd01fa4916934a7c5689b14d83e2f51cef Mon Sep 17 00:00:00 2001 From: Alexander Wellbrock Date: Tue, 10 Nov 2020 17:50:04 +0100 Subject: [PATCH 03/13] Forwardauth headers --- docs/content/middlewares/forwardauth.md | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/docs/content/middlewares/forwardauth.md b/docs/content/middlewares/forwardauth.md index c98d9b9aa..dd3f86421 100644 --- a/docs/content/middlewares/forwardauth.md +++ b/docs/content/middlewares/forwardauth.md @@ -61,6 +61,18 @@ http: address: "https://example.com/auth" ``` +## Forward-Request Headers + +The following request properties are provided to the forward-auth target endpoint as `X-Forwarded-` headers. + +| Property | Forward-Request Header | +|-------------------|------------------------| +| HTTP Method | X-Forwarded-Method | +| Protocol | X-Forwarded-Proto | +| Host | X-Forwarded-Host | +| Request URI | X-Forwarded-Uri | +| Source IP-Address | X-Forwarded-For | + ## Configuration Options ### `address` From 598dcf6b62ae2eea68eccde4ad86e0fc67992a6c Mon Sep 17 00:00:00 2001 From: Douglas De Toni Machado Date: Fri, 13 Nov 2020 08:48:04 -0300 Subject: [PATCH 04/13] Improve service name lookup on TCP routers --- pkg/tcp/proxy.go | 24 +++++++++++++- pkg/tcp/proxy_test.go | 73 +++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 96 insertions(+), 1 deletion(-) diff --git a/pkg/tcp/proxy.go b/pkg/tcp/proxy.go index d3401a874..f66380d49 100644 --- a/pkg/tcp/proxy.go +++ b/pkg/tcp/proxy.go @@ -10,8 +10,10 @@ import ( // Proxy forwards a TCP request to a TCP service. type Proxy struct { + address string target *net.TCPAddr terminationDelay time.Duration + refreshTarget bool } // NewProxy creates a new Proxy. @@ -21,7 +23,18 @@ func NewProxy(address string, terminationDelay time.Duration) (*Proxy, error) { return nil, err } - return &Proxy{target: tcpAddr, terminationDelay: terminationDelay}, nil + // enable the refresh of the target only if the address in an IP + refreshTarget := false + if host, _, err := net.SplitHostPort(address); err == nil && net.ParseIP(host) == nil { + refreshTarget = true + } + + return &Proxy{ + address: address, + target: tcpAddr, + refreshTarget: refreshTarget, + terminationDelay: terminationDelay, + }, nil } // ServeTCP forwards the connection to a service. @@ -31,6 +44,15 @@ func (p *Proxy) ServeTCP(conn WriteCloser) { // needed because of e.g. 
server.trackedConnection defer conn.Close() + if p.refreshTarget { + tcpAddr, err := net.ResolveTCPAddr("tcp", p.address) + if err != nil { + log.Errorf("Error resolving tcp address: %v", err) + return + } + p.target = tcpAddr + } + connBackend, err := net.DialTCP("tcp", nil, p.target) if err != nil { log.Errorf("Error while connection to backend: %v", err) diff --git a/pkg/tcp/proxy_test.go b/pkg/tcp/proxy_test.go index aa7dd1eaa..34f42b28d 100644 --- a/pkg/tcp/proxy_test.go +++ b/pkg/tcp/proxy_test.go @@ -5,9 +5,11 @@ import ( "fmt" "io" "net" + "sync" "testing" "time" + "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) @@ -79,3 +81,74 @@ func TestCloseWrite(t *testing.T) { require.Equal(t, int64(4), n) require.Equal(t, "PONG", buffer.String()) } + +func TestLookupAddress(t *testing.T) { + testCases := []struct { + desc string + address string + expectSame assert.ComparisonAssertionFunc + }{ + { + desc: "IP doesn't need refresh", + address: "8.8.4.4:53", + expectSame: assert.Same, + }, + { + desc: "Hostname needs refresh", + address: "dns.google:53", + expectSame: assert.NotSame, + }, + } + + for _, test := range testCases { + test := test + t.Run(test.desc, func(t *testing.T) { + t.Parallel() + + proxy, err := NewProxy(test.address, 10*time.Millisecond) + require.NoError(t, err) + + require.NotNil(t, proxy.target) + + proxyListener, err := net.Listen("tcp", ":0") + require.NoError(t, err) + + var wg sync.WaitGroup + go func(wg *sync.WaitGroup) { + for { + conn, err := proxyListener.Accept() + require.NoError(t, err) + + proxy.ServeTCP(conn.(*net.TCPConn)) + + wg.Done() + } + }(&wg) + + var lastTarget *net.TCPAddr + + for i := 0; i < 3; i++ { + wg.Add(1) + + conn, err := net.Dial("tcp", proxyListener.Addr().String()) + require.NoError(t, err) + + _, err = conn.Write([]byte("ping\n")) + require.NoError(t, err) + + err = conn.Close() + require.NoError(t, err) + + wg.Wait() + + assert.NotNil(t, proxy.target) + + if lastTarget != nil { + test.expectSame(t, lastTarget, proxy.target) + } + + lastTarget = proxy.target + } + }) + } +} From 0fcccd35ff66e022aa2e044a19d12f611795ca36 Mon Sep 17 00:00:00 2001 From: Petyo Kunchev Date: Mon, 16 Nov 2020 16:38:04 +0200 Subject: [PATCH 05/13] /bin/bash replaced with /usr/bin/env bash to match other scripts --- script/crossbinary-default | 2 +- script/update-generated-crd-code.sh | 3 ++- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/script/crossbinary-default b/script/crossbinary-default index aa0466601..3a8d550f6 100755 --- a/script/crossbinary-default +++ b/script/crossbinary-default @@ -1,4 +1,4 @@ -#!/bin/bash +#!/usr/bin/env bash set -e if ! test -e autogen/genstatic/gen.go; then diff --git a/script/update-generated-crd-code.sh b/script/update-generated-crd-code.sh index efebb287f..78b70420c 100755 --- a/script/update-generated-crd-code.sh +++ b/script/update-generated-crd-code.sh @@ -1,4 +1,5 @@ -#!/bin/bash -e +#!/usr/bin/env bash +set -e HACK_DIR="$( cd "$( dirname "${0}" )" && pwd -P)"; export HACK_DIR REPO_ROOT=${HACK_DIR}/.. 
From 52eeff9f9f83fa2e7045d278981f390974e2dac3 Mon Sep 17 00:00:00 2001 From: Yoan Blanc Date: Mon, 16 Nov 2020 20:44:04 +0100 Subject: [PATCH 06/13] fix: consulcatalog to update before the first interval --- pkg/provider/consulcatalog/consul_catalog.go | 35 ++++++++++++++------ 1 file changed, 25 insertions(+), 10 deletions(-) diff --git a/pkg/provider/consulcatalog/consul_catalog.go b/pkg/provider/consulcatalog/consul_catalog.go index acb3f9c8b..6c3fb53fb 100644 --- a/pkg/provider/consulcatalog/consul_catalog.go +++ b/pkg/provider/consulcatalog/consul_catalog.go @@ -108,27 +108,28 @@ func (p *Provider) Provide(configurationChan chan<- dynamic.Message, pool *safe. p.client, err = createClient(p.Endpoint) if err != nil { - return fmt.Errorf("error create consul client, %w", err) + return fmt.Errorf("unable to create consul client: %w", err) } + // get configuration at the provider's startup. + err = p.loadConfiguration(routineCtx, configurationChan) + if err != nil { + return fmt.Errorf("failed to get consul catalog data: %w", err) + } + + // Periodic refreshes. ticker := time.NewTicker(time.Duration(p.RefreshInterval)) + defer ticker.Stop() for { select { case <-ticker.C: - data, err := p.getConsulServicesData(routineCtx) + err = p.loadConfiguration(routineCtx, configurationChan) if err != nil { - logger.Errorf("error get consul catalog data, %v", err) - return err + return fmt.Errorf("failed to refresh consul catalog data: %w", err) } - configuration := p.buildConfiguration(routineCtx, data) - configurationChan <- dynamic.Message{ - ProviderName: "consulcatalog", - Configuration: configuration, - } case <-routineCtx.Done(): - ticker.Stop() return nil } } @@ -147,6 +148,20 @@ func (p *Provider) Provide(configurationChan chan<- dynamic.Message, pool *safe. 
return nil } +func (p *Provider) loadConfiguration(ctx context.Context, configurationChan chan<- dynamic.Message) error { + data, err := p.getConsulServicesData(ctx) + if err != nil { + return err + } + + configurationChan <- dynamic.Message{ + ProviderName: "consulcatalog", + Configuration: p.buildConfiguration(ctx, data), + } + + return nil +} + func (p *Provider) getConsulServicesData(ctx context.Context) ([]itemData, error) { consulServiceNames, err := p.fetchServices(ctx) if err != nil { From 9177982334c9600cd41255b8799859b16acfa0c7 Mon Sep 17 00:00:00 2001 From: Harold Ozouf Date: Tue, 17 Nov 2020 17:30:03 +0100 Subject: [PATCH 07/13] Fix consul catalog panic when health and services are not in sync Co-authored-by: Kevin Pollet --- pkg/provider/consulcatalog/consul_catalog.go | 31 ++++++++++++++++---- 1 file changed, 26 insertions(+), 5 deletions(-) diff --git a/pkg/provider/consulcatalog/consul_catalog.go b/pkg/provider/consulcatalog/consul_catalog.go index 6c3fb53fb..3f2345ec7 100644 --- a/pkg/provider/consulcatalog/consul_catalog.go +++ b/pkg/provider/consulcatalog/consul_catalog.go @@ -170,17 +170,22 @@ func (p *Provider) getConsulServicesData(ctx context.Context) ([]itemData, error var data []itemData for _, name := range consulServiceNames { - consulServices, healthServices, err := p.fetchService(ctx, name) + consulServices, statuses, err := p.fetchService(ctx, name) if err != nil { return nil, err } - for i, consulService := range consulServices { + for _, consulService := range consulServices { address := consulService.ServiceAddress if address == "" { address = consulService.Address } + status, exists := statuses[consulService.ID+consulService.ServiceID] + if !exists { + status = api.HealthAny + } + item := itemData{ ID: consulService.ServiceID, Node: consulService.Node, @@ -189,7 +194,7 @@ func (p *Provider) getConsulServicesData(ctx context.Context) ([]itemData, error Port: strconv.Itoa(consulService.ServicePort), Labels: tagsToNeutralLabels(consulService.ServiceTags, p.Prefix), Tags: consulService.ServiceTags, - Status: healthServices[i].Checks.AggregatedStatus(), + Status: status, } extraConf, err := p.getConfiguration(item) @@ -205,13 +210,14 @@ func (p *Provider) getConsulServicesData(ctx context.Context) ([]itemData, error return data, nil } -func (p *Provider) fetchService(ctx context.Context, name string) ([]*api.CatalogService, []*api.ServiceEntry, error) { +func (p *Provider) fetchService(ctx context.Context, name string) ([]*api.CatalogService, map[string]string, error) { var tagFilter string if !p.ExposedByDefault { tagFilter = p.Prefix + ".enable=true" } opts := &api.QueryOptions{AllowStale: p.Stale, RequireConsistent: p.RequireConsistent, UseCache: p.Cache} + opts = opts.WithContext(ctx) consulServices, _, err := p.client.Catalog().Service(name, tagFilter, opts) if err != nil { @@ -219,7 +225,22 @@ func (p *Provider) fetchService(ctx context.Context, name string) ([]*api.Catalo } healthServices, _, err := p.client.Health().Service(name, tagFilter, false, opts) - return consulServices, healthServices, err + if err != nil { + return nil, nil, err + } + + // Index status by service and node so it can be retrieved from a CatalogService even if the health and services + // are not in sync. 
+ statuses := make(map[string]string) + for _, health := range healthServices { + if health.Service == nil || health.Node == nil { + continue + } + + statuses[health.Node.ID+health.Service.ID] = health.Checks.AggregatedStatus() + } + + return consulServices, statuses, err } func (p *Provider) fetchServices(ctx context.Context) ([]string, error) { From 4f43c9ebb44397de869e3bdcd33427e8216c5949 Mon Sep 17 00:00:00 2001 From: Harold Ozouf Date: Thu, 19 Nov 2020 00:12:03 +0100 Subject: [PATCH 08/13] Fix missing allow-empty tag on ECS and Consul Catalog providers Co-authored-by: Kevin Pollet --- docs/content/providers/ecs.md | 7 +-- .../reference/static-configuration/cli-ref.md | 6 +++ .../reference/static-configuration/env-ref.md | 6 +++ pkg/config/static/static_config.go | 4 +- pkg/provider/ecs/ecs.go | 49 +++++++++---------- 5 files changed, 38 insertions(+), 34 deletions(-) diff --git a/docs/content/providers/ecs.md b/docs/content/providers/ecs.md index feb3f6454..ab860c524 100644 --- a/docs/content/providers/ecs.md +++ b/docs/content/providers/ecs.md @@ -13,18 +13,15 @@ Attach labels to your ECS containers and let Traefik do the rest! ```toml tab="File (TOML)" [providers.ecs] - clusters = ["default"] ``` ```yaml tab="File (YAML)" providers: - ecs: - clusters: - - default + ecs: {} ``` ```bash tab="CLI" - --providers.ecs.clusters=default + --providers.ecs=true ``` ## Policy diff --git a/docs/content/reference/static-configuration/cli-ref.md b/docs/content/reference/static-configuration/cli-ref.md index d7486781a..8ecd20175 100644 --- a/docs/content/reference/static-configuration/cli-ref.md +++ b/docs/content/reference/static-configuration/cli-ref.md @@ -330,6 +330,9 @@ TLS key `--providers.consul.username`: KV Username +`--providers.consulcatalog`: +Enable ConsulCatalog backend with default settings. (Default: ```false```) + `--providers.consulcatalog.cache`: Use local agent caching for catalog reads. (Default: ```false```) @@ -435,6 +438,9 @@ Use the ip address from the bound port, rather than from the inner network. (Def `--providers.docker.watch`: Watch Docker Swarm events. (Default: ```true```) +`--providers.ecs`: +Enable AWS ECS backend with default settings. (Default: ```false```) + `--providers.ecs.accesskeyid`: The AWS credentials access key to use for making requests diff --git a/docs/content/reference/static-configuration/env-ref.md b/docs/content/reference/static-configuration/env-ref.md index 885c29111..83c5e8480 100644 --- a/docs/content/reference/static-configuration/env-ref.md +++ b/docs/content/reference/static-configuration/env-ref.md @@ -303,6 +303,9 @@ Terminating status code (Default: ```503```) `TRAEFIK_PROVIDERS_CONSUL`: Enable Consul backend with default settings. (Default: ```false```) +`TRAEFIK_PROVIDERS_CONSULCATALOG`: +Enable ConsulCatalog backend with default settings. (Default: ```false```) + `TRAEFIK_PROVIDERS_CONSULCATALOG_CACHE`: Use local agent caching for catalog reads. (Default: ```false```) @@ -435,6 +438,9 @@ Use the ip address from the bound port, rather than from the inner network. (Def `TRAEFIK_PROVIDERS_DOCKER_WATCH`: Watch Docker Swarm events. (Default: ```true```) +`TRAEFIK_PROVIDERS_ECS`: +Enable AWS ECS backend with default settings. 
(Default: ```false```) + `TRAEFIK_PROVIDERS_ECS_ACCESSKEYID`: The AWS credentials access key to use for making requests diff --git a/pkg/config/static/static_config.go b/pkg/config/static/static_config.go index dca54c219..65505fbee 100644 --- a/pkg/config/static/static_config.go +++ b/pkg/config/static/static_config.go @@ -176,8 +176,8 @@ type Providers struct { KubernetesCRD *crd.Provider `description:"Enable Kubernetes backend with default settings." json:"kubernetesCRD,omitempty" toml:"kubernetesCRD,omitempty" yaml:"kubernetesCRD,omitempty" export:"true" label:"allowEmpty" file:"allowEmpty"` Rest *rest.Provider `description:"Enable Rest backend with default settings." json:"rest,omitempty" toml:"rest,omitempty" yaml:"rest,omitempty" export:"true" label:"allowEmpty" file:"allowEmpty"` Rancher *rancher.Provider `description:"Enable Rancher backend with default settings." json:"rancher,omitempty" toml:"rancher,omitempty" yaml:"rancher,omitempty" export:"true" label:"allowEmpty" file:"allowEmpty"` - ConsulCatalog *consulcatalog.Provider `description:"Enable ConsulCatalog backend with default settings." json:"consulCatalog,omitempty" toml:"consulCatalog,omitempty" yaml:"consulCatalog,omitempty" export:"true"` - Ecs *ecs.Provider `description:"Enable AWS ECS backend with default settings." json:"ecs,omitempty" toml:"ecs,omitempty" yaml:"ecs,omitempty" export:"true"` + ConsulCatalog *consulcatalog.Provider `description:"Enable ConsulCatalog backend with default settings." json:"consulCatalog,omitempty" toml:"consulCatalog,omitempty" yaml:"consulCatalog,omitempty" label:"allowEmpty" file:"allowEmpty" export:"true"` + Ecs *ecs.Provider `description:"Enable AWS ECS backend with default settings." json:"ecs,omitempty" toml:"ecs,omitempty" yaml:"ecs,omitempty" label:"allowEmpty" file:"allowEmpty" export:"true"` Consul *consul.Provider `description:"Enable Consul backend with default settings." json:"consul,omitempty" toml:"consul,omitempty" yaml:"consul,omitempty" label:"allowEmpty" file:"allowEmpty" export:"true"` Etcd *etcd.Provider `description:"Enable Etcd backend with default settings." 
json:"etcd,omitempty" toml:"etcd,omitempty" yaml:"etcd,omitempty" label:"allowEmpty" file:"allowEmpty" export:"true"` diff --git a/pkg/provider/ecs/ecs.go b/pkg/provider/ecs/ecs.go index 8e1b4b089..73bfe47f8 100644 --- a/pkg/provider/ecs/ecs.go +++ b/pkg/provider/ecs/ecs.go @@ -151,35 +151,25 @@ func (p Provider) Provide(configurationChan chan<- dynamic.Message, pool *safe.P operation := func() error { awsClient, err := p.createClient(logger) if err != nil { - return err + return fmt.Errorf("unable to create AWS client: %w", err) } - configuration, err := p.loadECSConfig(ctxLog, awsClient) + err = p.loadConfiguration(ctxLog, awsClient, configurationChan) if err != nil { - return err + return fmt.Errorf("failed to get ECS configuration: %w", err) } - configurationChan <- dynamic.Message{ - ProviderName: "ecs", - Configuration: configuration, - } - - reload := time.NewTicker(time.Second * time.Duration(p.RefreshSeconds)) - defer reload.Stop() + ticker := time.NewTicker(time.Second * time.Duration(p.RefreshSeconds)) + defer ticker.Stop() for { select { - case <-reload.C: - configuration, err := p.loadECSConfig(ctxLog, awsClient) + case <-ticker.C: + err = p.loadConfiguration(ctxLog, awsClient, configurationChan) if err != nil { - logger.Errorf("Failed to load ECS configuration, error %s", err) - return err + return fmt.Errorf("failed to refresh ECS configuration: %w", err) } - configurationChan <- dynamic.Message{ - ProviderName: "ecs", - Configuration: configuration, - } case <-routineCtx.Done(): return nil } @@ -198,6 +188,20 @@ func (p Provider) Provide(configurationChan chan<- dynamic.Message, pool *safe.P return nil } +func (p *Provider) loadConfiguration(ctx context.Context, client *awsClient, configurationChan chan<- dynamic.Message) error { + instances, err := p.listInstances(ctx, client) + if err != nil { + return err + } + + configurationChan <- dynamic.Message{ + ProviderName: "ecs", + Configuration: p.buildConfiguration(ctx, instances), + } + + return nil +} + // Find all running Provider tasks in a cluster, also collect the task definitions (for docker labels) // and the EC2 instance data. func (p *Provider) listInstances(ctx context.Context, client *awsClient) ([]ecsInstance, error) { @@ -365,15 +369,6 @@ func (p *Provider) listInstances(ctx context.Context, client *awsClient) ([]ecsI return instances, nil } -func (p *Provider) loadECSConfig(ctx context.Context, client *awsClient) (*dynamic.Configuration, error) { - instances, err := p.listInstances(ctx, client) - if err != nil { - return nil, err - } - - return p.buildConfiguration(ctx, instances), nil -} - func (p *Provider) lookupEc2Instances(ctx context.Context, client *awsClient, clusterName *string, ecsDatas map[string]*ecs.Task) (map[string]*ec2.Instance, error) { logger := log.FromContext(ctx) instanceIds := make(map[string]string) From 9fb32a47ca4db37c3608bb6484d1cef4395b45f8 Mon Sep 17 00:00:00 2001 From: Ivor Scott Date: Thu, 19 Nov 2020 10:04:04 +0100 Subject: [PATCH 09/13] Fix grammar in kubernetes ingress controller documentation --- docs/content/providers/kubernetes-ingress.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/content/providers/kubernetes-ingress.md b/docs/content/providers/kubernetes-ingress.md index 873ea5145..c2e1d5346 100644 --- a/docs/content/providers/kubernetes-ingress.md +++ b/docs/content/providers/kubernetes-ingress.md @@ -4,7 +4,7 @@ The Kubernetes Ingress Controller. 
{: .subtitle } The Traefik Kubernetes Ingress provider is a Kubernetes Ingress controller; that is to say, -it manages access to a cluster services by supporting the [Ingress](https://kubernetes.io/docs/concepts/services-networking/ingress/) specification. +it manages access to cluster services by supporting the [Ingress](https://kubernetes.io/docs/concepts/services-networking/ingress/) specification. ## Routing Configuration From a75819cae33d7572d5644d4358f35d5eb2c75335 Mon Sep 17 00:00:00 2001 From: Harold Ozouf Date: Thu, 19 Nov 2020 14:32:03 +0100 Subject: [PATCH 10/13] Filter out Helm secrets from informer caches Co-authored-by: Kevin Pollet --- pkg/provider/kubernetes/crd/client.go | 38 +++++++---- pkg/provider/kubernetes/crd/client_test.go | 63 +++++++++++++++++++ pkg/provider/kubernetes/ingress/client.go | 57 +++++++++++------ .../kubernetes/ingress/client_test.go | 56 +++++++++++++++++ 4 files changed, 183 insertions(+), 31 deletions(-) create mode 100644 pkg/provider/kubernetes/crd/client_test.go diff --git a/pkg/provider/kubernetes/crd/client.go b/pkg/provider/kubernetes/crd/client.go index 78e22e4cc..26f045d54 100644 --- a/pkg/provider/kubernetes/crd/client.go +++ b/pkg/provider/kubernetes/crd/client.go @@ -61,11 +61,12 @@ type Client interface { // TODO: add tests for the clientWrapper (and its methods) itself. type clientWrapper struct { - csCrd *versioned.Clientset - csKube *kubernetes.Clientset + csCrd versioned.Interface + csKube kubernetes.Interface - factoriesCrd map[string]externalversions.SharedInformerFactory - factoriesKube map[string]informers.SharedInformerFactory + factoriesCrd map[string]externalversions.SharedInformerFactory + factoriesKube map[string]informers.SharedInformerFactory + factoriesSecret map[string]informers.SharedInformerFactory labelSelector labels.Selector @@ -87,12 +88,13 @@ func createClientFromConfig(c *rest.Config) (*clientWrapper, error) { return newClientImpl(csKube, csCrd), nil } -func newClientImpl(csKube *kubernetes.Clientset, csCrd *versioned.Clientset) *clientWrapper { +func newClientImpl(csKube kubernetes.Interface, csCrd versioned.Interface) *clientWrapper { return &clientWrapper{ - csCrd: csCrd, - csKube: csKube, - factoriesCrd: make(map[string]externalversions.SharedInformerFactory), - factoriesKube: make(map[string]informers.SharedInformerFactory), + csCrd: csCrd, + csKube: csKube, + factoriesCrd: make(map[string]externalversions.SharedInformerFactory), + factoriesKube: make(map[string]informers.SharedInformerFactory), + factoriesSecret: make(map[string]informers.SharedInformerFactory), } } @@ -155,6 +157,10 @@ func (c *clientWrapper) WatchAll(namespaces []string, stopCh <-chan struct{}) (< } c.watchedNamespaces = namespaces + notOwnedByHelm := func(opts *metav1.ListOptions) { + opts.LabelSelector = "owner!=helm" + } + for _, ns := range namespaces { factoryCrd := externalversions.NewSharedInformerFactoryWithOptions(c.csCrd, resyncPeriod, externalversions.WithNamespace(ns)) factoryCrd.Traefik().V1alpha1().IngressRoutes().Informer().AddEventHandler(eventHandler) @@ -169,15 +175,19 @@ func (c *clientWrapper) WatchAll(namespaces []string, stopCh <-chan struct{}) (< factoryKube.Extensions().V1beta1().Ingresses().Informer().AddEventHandler(eventHandler) factoryKube.Core().V1().Services().Informer().AddEventHandler(eventHandler) factoryKube.Core().V1().Endpoints().Informer().AddEventHandler(eventHandler) - factoryKube.Core().V1().Secrets().Informer().AddEventHandler(eventHandler) + + factorySecret := 
informers.NewSharedInformerFactoryWithOptions(c.csKube, resyncPeriod, informers.WithNamespace(ns), informers.WithTweakListOptions(notOwnedByHelm)) + factorySecret.Core().V1().Secrets().Informer().AddEventHandler(eventHandler) c.factoriesCrd[ns] = factoryCrd c.factoriesKube[ns] = factoryKube + c.factoriesSecret[ns] = factorySecret } for _, ns := range namespaces { c.factoriesCrd[ns].Start(stopCh) c.factoriesKube[ns].Start(stopCh) + c.factoriesSecret[ns].Start(stopCh) } for _, ns := range namespaces { @@ -192,6 +202,12 @@ func (c *clientWrapper) WatchAll(namespaces []string, stopCh <-chan struct{}) (< return nil, fmt.Errorf("timed out waiting for controller caches to sync %s in namespace %q", t.String(), ns) } } + + for t, ok := range c.factoriesSecret[ns].WaitForCacheSync(stopCh) { + if !ok { + return nil, fmt.Errorf("timed out waiting for controller caches to sync %s in namespace %q", t.String(), ns) + } + } } return eventCh, nil @@ -337,7 +353,7 @@ func (c *clientWrapper) GetSecret(namespace, name string) (*corev1.Secret, bool, return nil, false, fmt.Errorf("failed to get secret %s/%s: namespace is not within watched namespaces", namespace, name) } - secret, err := c.factoriesKube[c.lookupNamespace(namespace)].Core().V1().Secrets().Lister().Secrets(namespace).Get(name) + secret, err := c.factoriesSecret[c.lookupNamespace(namespace)].Core().V1().Secrets().Lister().Secrets(namespace).Get(name) exist, err := translateNotFoundError(err) return secret, exist, err } diff --git a/pkg/provider/kubernetes/crd/client_test.go b/pkg/provider/kubernetes/crd/client_test.go new file mode 100644 index 000000000..a4113b56f --- /dev/null +++ b/pkg/provider/kubernetes/crd/client_test.go @@ -0,0 +1,63 @@ +package crd + +import ( + "testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + crdfake "github.com/traefik/traefik/v2/pkg/provider/kubernetes/crd/generated/clientset/versioned/fake" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + kubefake "k8s.io/client-go/kubernetes/fake" +) + +func TestClientIgnoresHelmOwnedSecrets(t *testing.T) { + secret := &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "default", + Name: "secret", + }, + } + helmSecret := &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "default", + Name: "helm-secret", + Labels: map[string]string{ + "owner": "helm", + }, + }, + } + + kubeClient := kubefake.NewSimpleClientset(helmSecret, secret) + crdClient := crdfake.NewSimpleClientset() + + client := newClientImpl(kubeClient, crdClient) + + eventCh, err := client.WatchAll(nil, nil) + require.NoError(t, err) + + select { + case event := <-eventCh: + secret, ok := event.(*corev1.Secret) + require.True(t, ok) + + assert.NotEqual(t, "helm-secret", secret.Name) + case <-time.After(50 * time.Millisecond): + assert.Fail(t, "expected to receive event for secret") + } + + select { + case <-eventCh: + assert.Fail(t, "received more than one event") + case <-time.After(50 * time.Millisecond): + } + + _, found, err := client.GetSecret("default", "secret") + require.NoError(t, err) + assert.True(t, found) + + _, found, err = client.GetSecret("default", "helm-secret") + require.NoError(t, err) + assert.False(t, found) +} diff --git a/pkg/provider/kubernetes/ingress/client.go b/pkg/provider/kubernetes/ingress/client.go index 0012465e5..e54887e75 100644 --- a/pkg/provider/kubernetes/ingress/client.go +++ b/pkg/provider/kubernetes/ingress/client.go @@ -59,8 +59,9 @@ type Client interface { } type clientWrapper 
struct { - clientset *kubernetes.Clientset - factories map[string]informers.SharedInformerFactory + clientset kubernetes.Interface + factoriesKube map[string]informers.SharedInformerFactory + factoriesSecret map[string]informers.SharedInformerFactory clusterFactory informers.SharedInformerFactory ingressLabelSelector labels.Selector isNamespaceAll bool @@ -123,10 +124,11 @@ func createClientFromConfig(c *rest.Config) (*clientWrapper, error) { return newClientImpl(clientset), nil } -func newClientImpl(clientset *kubernetes.Clientset) *clientWrapper { +func newClientImpl(clientset kubernetes.Interface) *clientWrapper { return &clientWrapper{ - clientset: clientset, - factories: make(map[string]informers.SharedInformerFactory), + clientset: clientset, + factoriesKube: make(map[string]informers.SharedInformerFactory), + factoriesSecret: make(map[string]informers.SharedInformerFactory), } } @@ -142,21 +144,36 @@ func (c *clientWrapper) WatchAll(namespaces []string, stopCh <-chan struct{}) (< c.watchedNamespaces = namespaces - for _, ns := range namespaces { - factory := informers.NewSharedInformerFactoryWithOptions(c.clientset, resyncPeriod, informers.WithNamespace(ns)) - factory.Extensions().V1beta1().Ingresses().Informer().AddEventHandler(eventHandler) - factory.Core().V1().Services().Informer().AddEventHandler(eventHandler) - factory.Core().V1().Endpoints().Informer().AddEventHandler(eventHandler) - factory.Core().V1().Secrets().Informer().AddEventHandler(eventHandler) - c.factories[ns] = factory + notOwnedByHelm := func(opts *metav1.ListOptions) { + opts.LabelSelector = "owner!=helm" } for _, ns := range namespaces { - c.factories[ns].Start(stopCh) + factoryKube := informers.NewSharedInformerFactoryWithOptions(c.clientset, resyncPeriod, informers.WithNamespace(ns)) + factoryKube.Extensions().V1beta1().Ingresses().Informer().AddEventHandler(eventHandler) + factoryKube.Core().V1().Services().Informer().AddEventHandler(eventHandler) + factoryKube.Core().V1().Endpoints().Informer().AddEventHandler(eventHandler) + + factorySecret := informers.NewSharedInformerFactoryWithOptions(c.clientset, resyncPeriod, informers.WithNamespace(ns), informers.WithTweakListOptions(notOwnedByHelm)) + factorySecret.Core().V1().Secrets().Informer().AddEventHandler(eventHandler) + + c.factoriesKube[ns] = factoryKube + c.factoriesSecret[ns] = factorySecret } for _, ns := range namespaces { - for typ, ok := range c.factories[ns].WaitForCacheSync(stopCh) { + c.factoriesKube[ns].Start(stopCh) + c.factoriesSecret[ns].Start(stopCh) + } + + for _, ns := range namespaces { + for typ, ok := range c.factoriesKube[ns].WaitForCacheSync(stopCh) { + if !ok { + return nil, fmt.Errorf("timed out waiting for controller caches to sync %s in namespace %q", typ, ns) + } + } + + for typ, ok := range c.factoriesSecret[ns].WaitForCacheSync(stopCh) { if !ok { return nil, fmt.Errorf("timed out waiting for controller caches to sync %s in namespace %q", typ, ns) } @@ -188,7 +205,7 @@ func (c *clientWrapper) WatchAll(namespaces []string, stopCh <-chan struct{}) (< func (c *clientWrapper) GetIngresses() []*networkingv1beta1.Ingress { var results []*networkingv1beta1.Ingress - for ns, factory := range c.factories { + for ns, factory := range c.factoriesKube { // extensions ings, err := factory.Extensions().V1beta1().Ingresses().Lister().List(c.ingressLabelSelector) if err != nil { @@ -239,7 +256,7 @@ func (c *clientWrapper) UpdateIngressStatus(src *networkingv1beta1.Ingress, ip, return c.updateIngressStatusOld(src, ip, hostname) } - ing, err := 
c.factories[c.lookupNamespace(src.Namespace)].Networking().V1beta1().Ingresses().Lister().Ingresses(src.Namespace).Get(src.Name) + ing, err := c.factoriesKube[c.lookupNamespace(src.Namespace)].Networking().V1beta1().Ingresses().Lister().Ingresses(src.Namespace).Get(src.Name) if err != nil { return fmt.Errorf("failed to get ingress %s/%s: %w", src.Namespace, src.Name, err) } @@ -268,7 +285,7 @@ func (c *clientWrapper) UpdateIngressStatus(src *networkingv1beta1.Ingress, ip, } func (c *clientWrapper) updateIngressStatusOld(src *networkingv1beta1.Ingress, ip, hostname string) error { - ing, err := c.factories[c.lookupNamespace(src.Namespace)].Extensions().V1beta1().Ingresses().Lister().Ingresses(src.Namespace).Get(src.Name) + ing, err := c.factoriesKube[c.lookupNamespace(src.Namespace)].Extensions().V1beta1().Ingresses().Lister().Ingresses(src.Namespace).Get(src.Name) if err != nil { return fmt.Errorf("failed to get ingress %s/%s: %w", src.Namespace, src.Name, err) } @@ -302,7 +319,7 @@ func (c *clientWrapper) GetService(namespace, name string) (*corev1.Service, boo return nil, false, fmt.Errorf("failed to get service %s/%s: namespace is not within watched namespaces", namespace, name) } - service, err := c.factories[c.lookupNamespace(namespace)].Core().V1().Services().Lister().Services(namespace).Get(name) + service, err := c.factoriesKube[c.lookupNamespace(namespace)].Core().V1().Services().Lister().Services(namespace).Get(name) exist, err := translateNotFoundError(err) return service, exist, err } @@ -313,7 +330,7 @@ func (c *clientWrapper) GetEndpoints(namespace, name string) (*corev1.Endpoints, return nil, false, fmt.Errorf("failed to get endpoints %s/%s: namespace is not within watched namespaces", namespace, name) } - endpoint, err := c.factories[c.lookupNamespace(namespace)].Core().V1().Endpoints().Lister().Endpoints(namespace).Get(name) + endpoint, err := c.factoriesKube[c.lookupNamespace(namespace)].Core().V1().Endpoints().Lister().Endpoints(namespace).Get(name) exist, err := translateNotFoundError(err) return endpoint, exist, err } @@ -324,7 +341,7 @@ func (c *clientWrapper) GetSecret(namespace, name string) (*corev1.Secret, bool, return nil, false, fmt.Errorf("failed to get secret %s/%s: namespace is not within watched namespaces", namespace, name) } - secret, err := c.factories[c.lookupNamespace(namespace)].Core().V1().Secrets().Lister().Secrets(namespace).Get(name) + secret, err := c.factoriesSecret[c.lookupNamespace(namespace)].Core().V1().Secrets().Lister().Secrets(namespace).Get(name) exist, err := translateNotFoundError(err) return secret, exist, err } diff --git a/pkg/provider/kubernetes/ingress/client_test.go b/pkg/provider/kubernetes/ingress/client_test.go index f582502a7..20173200d 100644 --- a/pkg/provider/kubernetes/ingress/client_test.go +++ b/pkg/provider/kubernetes/ingress/client_test.go @@ -3,10 +3,15 @@ package ingress import ( "fmt" "testing" + "time" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + corev1 "k8s.io/api/core/v1" kubeerror "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime/schema" + kubefake "k8s.io/client-go/kubernetes/fake" ) func TestTranslateNotFoundError(t *testing.T) { @@ -47,3 +52,54 @@ func TestTranslateNotFoundError(t *testing.T) { }) } } + +func TestClientIgnoresHelmOwnedSecrets(t *testing.T) { + secret := &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "default", + Name: "secret", + }, + } + helmSecret := &corev1.Secret{ + 
ObjectMeta: metav1.ObjectMeta{ + Namespace: "default", + Name: "helm-secret", + Labels: map[string]string{ + "owner": "helm", + }, + }, + } + + kubeClient := kubefake.NewSimpleClientset(helmSecret, secret) + + client := newClientImpl(kubeClient) + + stopCh := make(chan struct{}) + + eventCh, err := client.WatchAll(nil, stopCh) + require.NoError(t, err) + + select { + case event := <-eventCh: + secret, ok := event.(*corev1.Secret) + require.True(t, ok) + + assert.NotEqual(t, "helm-secret", secret.Name) + case <-time.After(50 * time.Millisecond): + assert.Fail(t, "expected to receive event for secret") + } + + select { + case <-eventCh: + assert.Fail(t, "received more than one event") + case <-time.After(50 * time.Millisecond): + } + + _, found, err := client.GetSecret("default", "secret") + require.NoError(t, err) + assert.True(t, found) + + _, found, err = client.GetSecret("default", "helm-secret") + require.NoError(t, err) + assert.False(t, found) +} From 08264749f0ea8b99e3a4b621bb589c3e15dbb612 Mon Sep 17 00:00:00 2001 From: Kevin Pollet Date: Thu, 19 Nov 2020 17:56:03 +0100 Subject: [PATCH 11/13] Update Yaegi to v0.9.7 --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index c6b59fd91..04adf5f48 100644 --- a/go.mod +++ b/go.mod @@ -73,7 +73,7 @@ require ( github.com/stvp/go-udp-testing v0.0.0-20191102171040-06b61409b154 github.com/tinylib/msgp v1.0.2 // indirect github.com/traefik/paerser v0.1.0 - github.com/traefik/yaegi v0.9.5 + github.com/traefik/yaegi v0.9.7 github.com/uber/jaeger-client-go v2.25.0+incompatible github.com/uber/jaeger-lib v2.2.0+incompatible github.com/unrolled/render v1.0.2 diff --git a/go.sum b/go.sum index 51ca98e92..d4e35bcf7 100644 --- a/go.sum +++ b/go.sum @@ -766,8 +766,8 @@ github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5 h1:LnC5Kc github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= github.com/traefik/paerser v0.1.0 h1:B4v1tbvd8YnHsA7spwHKEWJoGrRP+2jYpIozsCMHhl0= github.com/traefik/paerser v0.1.0/go.mod h1:yYnAgdEC2wJH5CgG75qGWC8SsFDEapg09o9RrA6FfrE= -github.com/traefik/yaegi v0.9.5 h1:mRJtmV6t/wecIq6Gfs0DpdSC2AjFpPRjoiBXP03OIz0= -github.com/traefik/yaegi v0.9.5/go.mod h1:FAYnRlZyuVlEkvnkHq3bvJ1lW5be6XuwgLdkYgYG6Lk= +github.com/traefik/yaegi v0.9.7 h1:CbeKjEhy3DoSC8xC4TQF2Mhmd7u3Cjqluz1//x6Vtcs= +github.com/traefik/yaegi v0.9.7/go.mod h1:FAYnRlZyuVlEkvnkHq3bvJ1lW5be6XuwgLdkYgYG6Lk= github.com/transip/gotransip/v6 v6.2.0 h1:0Z+qVsyeiQdWfcAUeJyF0IEKAPvhJwwpwPi2WGtBIiE= github.com/transip/gotransip/v6 v6.2.0/go.mod h1:pQZ36hWWRahCUXkFWlx9Hs711gLd8J4qdgLdRzmtY+g= github.com/tv42/httpunix v0.0.0-20150427012821-b75d8614f926/go.mod h1:9ESjWnEqriFuLhtthL60Sar/7RFoluCcXsuvEwTV5KM= From f83a57b3daea1013c7288c78a92fe0dc545fe112 Mon Sep 17 00:00:00 2001 From: Ludovic Fernandez Date: Thu, 19 Nov 2020 18:31:09 +0100 Subject: [PATCH 12/13] Prepare release v2.3.3 --- CHANGELOG.md | 32 ++++++++++++++++++++++++++++++++ script/gcg/traefik-bugfix.toml | 6 +++--- 2 files changed, 35 insertions(+), 3 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 088c82e28..35864f514 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,3 +1,35 @@ +## [v2.3.3](https://github.com/traefik/traefik/tree/v2.3.3) (2020-11-19) +[All Commits](https://github.com/traefik/traefik/compare/v2.3.2...v2.3.3) + +**Bug fixes:** +- **[acme]** Update go-acme/lego to v4.1.0 ([#7526](https://github.com/traefik/traefik/pull/7526) by 
[ldez](https://github.com/ldez)) +- **[consulcatalog,ecs]** Fix missing allow-empty tag on ECS and Consul Catalog providers ([#7561](https://github.com/traefik/traefik/pull/7561) by [jspdown](https://github.com/jspdown)) +- **[consulcatalog]** consulcatalog to update before the first interval ([#7514](https://github.com/traefik/traefik/pull/7514) by [greut](https://github.com/greut)) +- **[consulcatalog]** Fix consul catalog panic when health and services are not in sync ([#7558](https://github.com/traefik/traefik/pull/7558) by [jspdown](https://github.com/jspdown)) +- **[ecs]** Translate configured server port into correct mapped host port ([#7480](https://github.com/traefik/traefik/pull/7480) by [alekitto](https://github.com/alekitto)) +- **[k8s,k8s/crd,k8s/ingress]** Filter out Helm secrets from informer caches ([#7562](https://github.com/traefik/traefik/pull/7562) by [jspdown](https://github.com/jspdown)) +- **[plugins]** Update Yaegi to v0.9.5 ([#7527](https://github.com/traefik/traefik/pull/7527) by [ldez](https://github.com/ldez)) +- **[plugins]** Update Yaegi to v0.9.7 ([#7569](https://github.com/traefik/traefik/pull/7569) by [kevinpollet](https://github.com/kevinpollet)) +- **[plugins]** Update Yaegi to v0.9.4 ([#7451](https://github.com/traefik/traefik/pull/7451) by [ldez](https://github.com/ldez)) +- **[tcp]** Ignore errors when setting keepalive period is not supported by the system ([#7410](https://github.com/traefik/traefik/pull/7410) by [tristan-weil](https://github.com/tristan-weil)) +- **[tcp]** Improve service name lookup on TCP routers ([#7370](https://github.com/traefik/traefik/pull/7370) by [ddtmachado](https://github.com/ddtmachado)) +- Improve anonymize configuration ([#7482](https://github.com/traefik/traefik/pull/7482) by [mmatur](https://github.com/mmatur)) + +**Documentation:** +- **[ecs]** Add ECS menu to dynamic config reference ([#7501](https://github.com/traefik/traefik/pull/7501) by [kevinpollet](https://github.com/kevinpollet)) +- **[k8s,k8s/ingress]** Fix ingress documentation ([#7424](https://github.com/traefik/traefik/pull/7424) by [rtribotte](https://github.com/rtribotte)) +- **[k8s]** fix documentation ([#7469](https://github.com/traefik/traefik/pull/7469) by [jbdoumenjou](https://github.com/jbdoumenjou)) +- **[k8s]** Fix grammar in kubernetes ingress controller documentation ([#7565](https://github.com/traefik/traefik/pull/7565) by [ivorscott](https://github.com/ivorscott)) +- **[logs]** Clarify time-based field units ([#7447](https://github.com/traefik/traefik/pull/7447) by [tomtastic](https://github.com/tomtastic)) +- **[middleware]** Forwardauth headers ([#7506](https://github.com/traefik/traefik/pull/7506) by [w4tsn](https://github.com/w4tsn)) +- **[provider]** fix typo in providers overview documentation ([#7441](https://github.com/traefik/traefik/pull/7441) by [pirey](https://github.com/pirey)) +- **[tls]** Fix docs for TLS ([#7541](https://github.com/traefik/traefik/pull/7541) by [james426759](https://github.com/james426759)) +- fix: exclude protected link from doc verify ([#7477](https://github.com/traefik/traefik/pull/7477) by [jbdoumenjou](https://github.com/jbdoumenjou)) +- Add missed tls config for yaml example ([#7450](https://github.com/traefik/traefik/pull/7450) by [andrew-demb](https://github.com/andrew-demb)) +- Resolve broken URLs causing make docs to fail ([#7444](https://github.com/traefik/traefik/pull/7444) by [tomtastic](https://github.com/tomtastic)) +- Fix Traefik Proxy product nav in docs 
([#7523](https://github.com/traefik/traefik/pull/7523) by [PCM2](https://github.com/PCM2)) +- add links to contributors guide ([#7435](https://github.com/traefik/traefik/pull/7435) by [notsureifkevin](https://github.com/notsureifkevin)) + ## [v2.3.2](https://github.com/traefik/traefik/tree/v2.3.2) (2020-10-19) [All Commits](https://github.com/traefik/traefik/compare/v2.3.1...v2.3.2) diff --git a/script/gcg/traefik-bugfix.toml b/script/gcg/traefik-bugfix.toml index 88ed4b137..0a4e2f244 100644 --- a/script/gcg/traefik-bugfix.toml +++ b/script/gcg/traefik-bugfix.toml @@ -4,11 +4,11 @@ RepositoryName = "traefik" OutputType = "file" FileName = "traefik_changelog.md" -# example new bugfix v2.3.2 +# example new bugfix v2.3.3 CurrentRef = "v2.3" -PreviousRef = "v2.3.1" +PreviousRef = "v2.3.2" BaseBranch = "v2.3" -FutureCurrentRefName = "v2.3.2" +FutureCurrentRefName = "v2.3.3" ThresholdPreviousRef = 10 ThresholdCurrentRef = 10 From be0845af027d974ad2d5a41ab843b2e3c191376a Mon Sep 17 00:00:00 2001 From: Romain Date: Fri, 20 Nov 2020 00:18:04 +0100 Subject: [PATCH 13/13] Apply labelSelector as a TweakListOptions for Kubernetes informers --- docs/content/providers/kubernetes-crd.md | 14 ++- docs/content/providers/kubernetes-ingress.md | 10 +- integration/fixtures/k8s/03-ingress.yml | 2 + integration/fixtures/k8s/03-ingressroute.yml | 2 + integration/fixtures/k8s/03-tlsoption.yml | 2 + integration/fixtures/k8s/03-tlsstore.yml | 2 + .../k8s/06-ingressroute-traefikservices.yml | 2 + .../fixtures/k8s_crd_label_selector.toml | 19 ++++ .../fixtures/k8s_ingress_label_selector.toml | 16 +++ integration/k8s_test.go | 22 ++++ .../testdata/rawdata-crd-label-selector.json | 65 +++++++++++ .../rawdata-ingress-label-selector.json | 106 ++++++++++++++++++ pkg/provider/kubernetes/crd/client.go | 52 +++------ pkg/provider/kubernetes/crd/client_test.go | 4 +- pkg/provider/kubernetes/crd/kubernetes.go | 18 +-- pkg/provider/kubernetes/ingress/client.go | 61 +++++----- pkg/provider/kubernetes/ingress/kubernetes.go | 18 +-- 17 files changed, 317 insertions(+), 98 deletions(-) create mode 100644 integration/fixtures/k8s_crd_label_selector.toml create mode 100644 integration/fixtures/k8s_ingress_label_selector.toml create mode 100644 integration/testdata/rawdata-crd-label-selector.json create mode 100644 integration/testdata/rawdata-ingress-label-selector.json diff --git a/docs/content/providers/kubernetes-crd.md b/docs/content/providers/kubernetes-crd.md index 5f3f06086..41a6c5b89 100644 --- a/docs/content/providers/kubernetes-crd.md +++ b/docs/content/providers/kubernetes-crd.md @@ -177,26 +177,32 @@ _Optional,Default: empty (process all resources)_ ```toml tab="File (TOML)" [providers.kubernetesCRD] - labelselector = "A and not B" + labelselector = "app=traefik" # ... ``` ```yaml tab="File (YAML)" providers: kubernetesCRD: - labelselector: "A and not B" + labelselector: "app=traefik" # ... ``` ```bash tab="CLI" ---providers.kubernetescrd.labelselector="A and not B" +--providers.kubernetescrd.labelselector="app=traefik" ``` By default, Traefik processes all resource objects in the configured namespaces. -A label selector can be defined to filter on specific resource objects only. +A label selector can be defined to filter on specific resource objects only, +this will apply only on Traefik [Custom Resources](../routing/providers/kubernetes-crd.md#custom-resource-definition-crd) +and has no effect on Kubernetes `Secrets`, `Endpoints` and `Services`. 
See [label-selectors](https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors) for details. +!!! warning + + As the LabelSelector is applied to all Traefik Custom Resources, they all must match the filter. + ### `ingressClass` _Optional, Default: empty_ diff --git a/docs/content/providers/kubernetes-ingress.md b/docs/content/providers/kubernetes-ingress.md index c2e1d5346..3e165389e 100644 --- a/docs/content/providers/kubernetes-ingress.md +++ b/docs/content/providers/kubernetes-ingress.md @@ -212,23 +212,23 @@ _Optional,Default: empty (process all Ingresses)_ ```toml tab="File (TOML)" [providers.kubernetesIngress] - labelSelector = "A and not B" + labelSelector = "app=traefik" # ... ``` ```yaml tab="File (YAML)" providers: kubernetesIngress: - labelselector: "A and not B" + labelselector: "app=traefik" # ... ``` ```bash tab="CLI" ---providers.kubernetesingress.labelselector="A and not B" +--providers.kubernetesingress.labelselector="app=traefik" ``` -By default, Traefik processes all Ingress objects in the configured namespaces. -A label selector can be defined to filter on specific Ingress objects only. +By default, Traefik processes all `Ingress` objects in the configured namespaces. +A label selector can be defined to filter on specific `Ingress` objects only. See [label-selectors](https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors) for details. diff --git a/integration/fixtures/k8s/03-ingress.yml b/integration/fixtures/k8s/03-ingress.yml index 077572b81..22dde97cc 100644 --- a/integration/fixtures/k8s/03-ingress.yml +++ b/integration/fixtures/k8s/03-ingress.yml @@ -3,6 +3,8 @@ kind: Ingress metadata: name: test.ingress namespace: default + labels: + app: traefik spec: rules: diff --git a/integration/fixtures/k8s/03-ingressroute.yml b/integration/fixtures/k8s/03-ingressroute.yml index 4c48254a0..c040988b5 100644 --- a/integration/fixtures/k8s/03-ingressroute.yml +++ b/integration/fixtures/k8s/03-ingressroute.yml @@ -3,6 +3,8 @@ kind: IngressRoute metadata: name: test.route namespace: default + labels: + app: traefik spec: entryPoints: diff --git a/integration/fixtures/k8s/03-tlsoption.yml b/integration/fixtures/k8s/03-tlsoption.yml index 5c00a3973..7c5df10fc 100644 --- a/integration/fixtures/k8s/03-tlsoption.yml +++ b/integration/fixtures/k8s/03-tlsoption.yml @@ -3,6 +3,8 @@ kind: TLSOption metadata: name: mytlsoption namespace: default + labels: + app: traefik spec: minVersion: VersionTLS12 diff --git a/integration/fixtures/k8s/03-tlsstore.yml b/integration/fixtures/k8s/03-tlsstore.yml index 4ff03107d..5dea5d0fd 100644 --- a/integration/fixtures/k8s/03-tlsstore.yml +++ b/integration/fixtures/k8s/03-tlsstore.yml @@ -3,6 +3,8 @@ kind: TLSStore metadata: name: mytlsstore namespace: default + labels: + app: traefik spec: defaultCertificate: diff --git a/integration/fixtures/k8s/06-ingressroute-traefikservices.yml b/integration/fixtures/k8s/06-ingressroute-traefikservices.yml index a8cee1bee..3e096d1a7 100644 --- a/integration/fixtures/k8s/06-ingressroute-traefikservices.yml +++ b/integration/fixtures/k8s/06-ingressroute-traefikservices.yml @@ -50,6 +50,8 @@ kind: IngressRoute metadata: name: api.route namespace: default + labels: + app: traefik spec: entryPoints: diff --git a/integration/fixtures/k8s_crd_label_selector.toml b/integration/fixtures/k8s_crd_label_selector.toml new file mode 100644 index 000000000..ca7e51cbd --- /dev/null +++ b/integration/fixtures/k8s_crd_label_selector.toml @@ -0,0 +1,19 @@ +[global] + 
checkNewVersion = false + sendAnonymousUsage = false + +[log] + level = "DEBUG" + +[api] + +[entryPoints] + [entryPoints.footcp] + address = ":8093" + [entryPoints.fooudp] + address = ":8090/udp" + [entryPoints.web] + address = ":8000" + +[providers.kubernetesCRD] + labelSelector = "app=traefik" diff --git a/integration/fixtures/k8s_ingress_label_selector.toml b/integration/fixtures/k8s_ingress_label_selector.toml new file mode 100644 index 000000000..86f411e98 --- /dev/null +++ b/integration/fixtures/k8s_ingress_label_selector.toml @@ -0,0 +1,16 @@ +[global] + checkNewVersion = false + sendAnonymousUsage = false + +[log] + level = "DEBUG" + +[api] + insecure = true + +[entryPoints] + [entryPoints.web] + address = ":8000" + +[providers.kubernetesIngress] + labelSelector = "app=traefik" diff --git a/integration/k8s_test.go b/integration/k8s_test.go index c06d83a0f..60bd25b36 100644 --- a/integration/k8s_test.go +++ b/integration/k8s_test.go @@ -74,6 +74,17 @@ func (s *K8sSuite) TestIngressConfiguration(c *check.C) { testConfiguration(c, "testdata/rawdata-ingress.json", "8080") } +func (s *K8sSuite) TestIngressLabelSelector(c *check.C) { + cmd, display := s.traefikCmd(withConfigFile("fixtures/k8s_ingress_label_selector.toml")) + defer display(c) + + err := cmd.Start() + c.Assert(err, checker.IsNil) + defer s.killCmd(cmd) + + testConfiguration(c, "testdata/rawdata-ingress-label-selector.json", "8080") +} + func (s *K8sSuite) TestCRDConfiguration(c *check.C) { cmd, display := s.traefikCmd(withConfigFile("fixtures/k8s_crd.toml")) defer display(c) @@ -85,6 +96,17 @@ func (s *K8sSuite) TestCRDConfiguration(c *check.C) { testConfiguration(c, "testdata/rawdata-crd.json", "8000") } +func (s *K8sSuite) TestCRDLabelSelector(c *check.C) { + cmd, display := s.traefikCmd(withConfigFile("fixtures/k8s_crd_label_selector.toml")) + defer display(c) + + err := cmd.Start() + c.Assert(err, checker.IsNil) + defer s.killCmd(cmd) + + testConfiguration(c, "testdata/rawdata-crd-label-selector.json", "8000") +} + func testConfiguration(c *check.C, path, apiPort string) { err := try.GetRequest("http://127.0.0.1:"+apiPort+"/api/entrypoints", 20*time.Second, try.BodyContains(`"name":"web"`)) c.Assert(err, checker.IsNil) diff --git a/integration/testdata/rawdata-crd-label-selector.json b/integration/testdata/rawdata-crd-label-selector.json new file mode 100644 index 000000000..d75df29a0 --- /dev/null +++ b/integration/testdata/rawdata-crd-label-selector.json @@ -0,0 +1,65 @@ +{ + "routers": { + "default-api-route-29f28a463fb5d5ba16d2@kubernetescrd": { + "entryPoints": [ + "web" + ], + "service": "api@internal", + "rule": "PathPrefix(`/api`)", + "status": "enabled", + "using": [ + "web" + ] + }, + "default-test-route-6b204d94623b3df4370c@kubernetescrd": { + "entryPoints": [ + "web" + ], + "service": "default-test-route-6b204d94623b3df4370c", + "rule": "Host(`foo.com`) \u0026\u0026 PathPrefix(`/bar`)", + "priority": 12, + "tls": { + "options": "default-mytlsoption" + }, + "status": "enabled", + "using": [ + "web" + ] + } + }, + "services": { + "api@internal": { + "status": "enabled", + "usedBy": [ + "default-api-route-29f28a463fb5d5ba16d2@kubernetescrd" + ] + }, + "dashboard@internal": { + "status": "enabled" + }, + "default-test-route-6b204d94623b3df4370c@kubernetescrd": { + "loadBalancer": { + "servers": [ + { + "url": "http://10.42.0.3:80" + }, + { + "url": "http://10.42.0.4:80" + } + ], + "passHostHeader": true + }, + "status": "enabled", + "usedBy": [ + "default-test-route-6b204d94623b3df4370c@kubernetescrd" + ], + 
"serverStatus": { + "http://10.42.0.3:80": "UP", + "http://10.42.0.4:80": "UP" + } + }, + "noop@internal": { + "status": "enabled" + } + } +} \ No newline at end of file diff --git a/integration/testdata/rawdata-ingress-label-selector.json b/integration/testdata/rawdata-ingress-label-selector.json new file mode 100644 index 000000000..66d3b095e --- /dev/null +++ b/integration/testdata/rawdata-ingress-label-selector.json @@ -0,0 +1,106 @@ +{ + "routers": { + "api@internal": { + "entryPoints": [ + "traefik" + ], + "service": "api@internal", + "rule": "PathPrefix(`/api`)", + "priority": 2147483646, + "status": "enabled", + "using": [ + "traefik" + ] + }, + "dashboard@internal": { + "entryPoints": [ + "traefik" + ], + "middlewares": [ + "dashboard_redirect@internal", + "dashboard_stripprefix@internal" + ], + "service": "dashboard@internal", + "rule": "PathPrefix(`/`)", + "priority": 2147483645, + "status": "enabled", + "using": [ + "traefik" + ] + }, + "test-ingress-default-whoami-test-whoami@kubernetes": { + "entryPoints": [ + "web" + ], + "service": "default-whoami-http", + "rule": "Host(`whoami.test`) \u0026\u0026 PathPrefix(`/whoami`)", + "status": "enabled", + "using": [ + "web" + ] + } + }, + "middlewares": { + "dashboard_redirect@internal": { + "redirectRegex": { + "regex": "^(http:\\/\\/(\\[[\\w:.]+\\]|[\\w\\._-]+)(:\\d+)?)\\/$", + "replacement": "${1}/dashboard/", + "permanent": true + }, + "status": "enabled", + "usedBy": [ + "dashboard@internal" + ] + }, + "dashboard_stripprefix@internal": { + "stripPrefix": { + "prefixes": [ + "/dashboard/", + "/dashboard" + ] + }, + "status": "enabled", + "usedBy": [ + "dashboard@internal" + ] + } + }, + "services": { + "api@internal": { + "status": "enabled", + "usedBy": [ + "api@internal" + ] + }, + "dashboard@internal": { + "status": "enabled", + "usedBy": [ + "dashboard@internal" + ] + }, + "default-whoami-http@kubernetes": { + "loadBalancer": { + "servers": [ + { + "url": "http://10.42.0.2:80" + }, + { + "url": "http://10.42.0.7:80" + } + ], + "passHostHeader": true + }, + "status": "enabled", + "usedBy": [ + "test-ingress-default-whoami-test-whoami@kubernetes" + ], + "serverStatus": { + "http://10.42.0.2:80": "UP", + "http://10.42.0.7:80": "UP" + } + }, + "noop@internal": { + "status": "enabled" + } + } +} \ No newline at end of file diff --git a/pkg/provider/kubernetes/crd/client.go b/pkg/provider/kubernetes/crd/client.go index 26f045d54..09bfc2a66 100644 --- a/pkg/provider/kubernetes/crd/client.go +++ b/pkg/provider/kubernetes/crd/client.go @@ -17,7 +17,6 @@ import ( "k8s.io/client-go/informers" "k8s.io/client-go/kubernetes" "k8s.io/client-go/rest" - "k8s.io/client-go/tools/cache" "k8s.io/client-go/tools/clientcmd" ) @@ -68,7 +67,7 @@ type clientWrapper struct { factoriesKube map[string]informers.SharedInformerFactory factoriesSecret map[string]informers.SharedInformerFactory - labelSelector labels.Selector + labelSelector string isNamespaceAll bool watchedNamespaces []string @@ -149,20 +148,25 @@ func newExternalClusterClient(endpoint, token, caFilePath string) (*clientWrappe // WatchAll starts namespace-specific controllers for all relevant kinds. 
func (c *clientWrapper) WatchAll(namespaces []string, stopCh <-chan struct{}) (<-chan interface{}, error) { eventCh := make(chan interface{}, 1) - eventHandler := c.newResourceEventHandler(eventCh) + eventHandler := &resourceEventHandler{ev: eventCh} if len(namespaces) == 0 { namespaces = []string{metav1.NamespaceAll} c.isNamespaceAll = true } + c.watchedNamespaces = namespaces notOwnedByHelm := func(opts *metav1.ListOptions) { opts.LabelSelector = "owner!=helm" } + matchesLabelSelector := func(opts *metav1.ListOptions) { + opts.LabelSelector = c.labelSelector + } + for _, ns := range namespaces { - factoryCrd := externalversions.NewSharedInformerFactoryWithOptions(c.csCrd, resyncPeriod, externalversions.WithNamespace(ns)) + factoryCrd := externalversions.NewSharedInformerFactoryWithOptions(c.csCrd, resyncPeriod, externalversions.WithNamespace(ns), externalversions.WithTweakListOptions(matchesLabelSelector)) factoryCrd.Traefik().V1alpha1().IngressRoutes().Informer().AddEventHandler(eventHandler) factoryCrd.Traefik().V1alpha1().Middlewares().Informer().AddEventHandler(eventHandler) factoryCrd.Traefik().V1alpha1().IngressRouteTCPs().Informer().AddEventHandler(eventHandler) @@ -172,7 +176,6 @@ func (c *clientWrapper) WatchAll(namespaces []string, stopCh <-chan struct{}) (< factoryCrd.Traefik().V1alpha1().TraefikServices().Informer().AddEventHandler(eventHandler) factoryKube := informers.NewSharedInformerFactoryWithOptions(c.csKube, resyncPeriod, informers.WithNamespace(ns)) - factoryKube.Extensions().V1beta1().Ingresses().Informer().AddEventHandler(eventHandler) factoryKube.Core().V1().Services().Informer().AddEventHandler(eventHandler) factoryKube.Core().V1().Endpoints().Informer().AddEventHandler(eventHandler) @@ -217,7 +220,7 @@ func (c *clientWrapper) GetIngressRoutes() []*v1alpha1.IngressRoute { var result []*v1alpha1.IngressRoute for ns, factory := range c.factoriesCrd { - ings, err := factory.Traefik().V1alpha1().IngressRoutes().Lister().List(c.labelSelector) + ings, err := factory.Traefik().V1alpha1().IngressRoutes().Lister().List(labels.Everything()) if err != nil { log.Errorf("Failed to list ingress routes in namespace %s: %v", ns, err) } @@ -231,7 +234,7 @@ func (c *clientWrapper) GetIngressRouteTCPs() []*v1alpha1.IngressRouteTCP { var result []*v1alpha1.IngressRouteTCP for ns, factory := range c.factoriesCrd { - ings, err := factory.Traefik().V1alpha1().IngressRouteTCPs().Lister().List(c.labelSelector) + ings, err := factory.Traefik().V1alpha1().IngressRouteTCPs().Lister().List(labels.Everything()) if err != nil { log.Errorf("Failed to list tcp ingress routes in namespace %s: %v", ns, err) } @@ -245,7 +248,7 @@ func (c *clientWrapper) GetIngressRouteUDPs() []*v1alpha1.IngressRouteUDP { var result []*v1alpha1.IngressRouteUDP for ns, factory := range c.factoriesCrd { - ings, err := factory.Traefik().V1alpha1().IngressRouteUDPs().Lister().List(c.labelSelector) + ings, err := factory.Traefik().V1alpha1().IngressRouteUDPs().Lister().List(labels.Everything()) if err != nil { log.Errorf("Failed to list udp ingress routes in namespace %s: %v", ns, err) } @@ -259,7 +262,7 @@ func (c *clientWrapper) GetMiddlewares() []*v1alpha1.Middleware { var result []*v1alpha1.Middleware for ns, factory := range c.factoriesCrd { - middlewares, err := factory.Traefik().V1alpha1().Middlewares().Lister().List(c.labelSelector) + middlewares, err := factory.Traefik().V1alpha1().Middlewares().Lister().List(labels.Everything()) if err != nil { log.Errorf("Failed to list middlewares in namespace %s: %v", ns, 
err) } @@ -285,7 +288,7 @@ func (c *clientWrapper) GetTraefikServices() []*v1alpha1.TraefikService { var result []*v1alpha1.TraefikService for ns, factory := range c.factoriesCrd { - ings, err := factory.Traefik().V1alpha1().TraefikServices().Lister().List(c.labelSelector) + ings, err := factory.Traefik().V1alpha1().TraefikServices().Lister().List(labels.Everything()) if err != nil { log.Errorf("Failed to list Traefik services in namespace %s: %v", ns, err) } @@ -300,7 +303,7 @@ func (c *clientWrapper) GetTLSOptions() []*v1alpha1.TLSOption { var result []*v1alpha1.TLSOption for ns, factory := range c.factoriesCrd { - options, err := factory.Traefik().V1alpha1().TLSOptions().Lister().List(c.labelSelector) + options, err := factory.Traefik().V1alpha1().TLSOptions().Lister().List(labels.Everything()) if err != nil { log.Errorf("Failed to list tls options in namespace %s: %v", ns, err) } @@ -315,7 +318,7 @@ func (c *clientWrapper) GetTLSStores() []*v1alpha1.TLSStore { var result []*v1alpha1.TLSStore for ns, factory := range c.factoriesCrd { - stores, err := factory.Traefik().V1alpha1().TLSStores().Lister().List(c.labelSelector) + stores, err := factory.Traefik().V1alpha1().TLSStores().Lister().List(labels.Everything()) if err != nil { log.Errorf("Failed to list tls stores in namespace %s: %v", ns, err) } @@ -371,31 +374,6 @@ func (c *clientWrapper) lookupNamespace(ns string) string { return ns } -func (c *clientWrapper) newResourceEventHandler(events chan<- interface{}) cache.ResourceEventHandler { - return &cache.FilteringResourceEventHandler{ - FilterFunc: func(obj interface{}) bool { - // Ignore Ingresses that do not match our custom label selector. - switch v := obj.(type) { - case *v1alpha1.IngressRoute: - return c.labelSelector.Matches(labels.Set(v.GetLabels())) - case *v1alpha1.IngressRouteTCP: - return c.labelSelector.Matches(labels.Set(v.GetLabels())) - case *v1alpha1.TraefikService: - return c.labelSelector.Matches(labels.Set(v.GetLabels())) - case *v1alpha1.TLSOption: - return c.labelSelector.Matches(labels.Set(v.GetLabels())) - case *v1alpha1.TLSStore: - return c.labelSelector.Matches(labels.Set(v.GetLabels())) - case *v1alpha1.Middleware: - return c.labelSelector.Matches(labels.Set(v.GetLabels())) - default: - return true - } - }, - Handler: &resourceEventHandler{ev: events}, - } -} - // eventHandlerFunc will pass the obj on to the events channel or drop it. // This is so passing the events along won't block in the case of high volume. // The events are only used for signaling anyway so dropping a few is ok. 
diff --git a/pkg/provider/kubernetes/crd/client_test.go b/pkg/provider/kubernetes/crd/client_test.go index a4113b56f..42aa574ee 100644 --- a/pkg/provider/kubernetes/crd/client_test.go +++ b/pkg/provider/kubernetes/crd/client_test.go @@ -34,7 +34,9 @@ func TestClientIgnoresHelmOwnedSecrets(t *testing.T) { client := newClientImpl(kubeClient, crdClient) - eventCh, err := client.WatchAll(nil, nil) + stopCh := make(chan struct{}) + + eventCh, err := client.WatchAll(nil, stopCh) require.NoError(t, err) select { diff --git a/pkg/provider/kubernetes/crd/kubernetes.go b/pkg/provider/kubernetes/crd/kubernetes.go index d5b978a73..3d3678a83 100644 --- a/pkg/provider/kubernetes/crd/kubernetes.go +++ b/pkg/provider/kubernetes/crd/kubernetes.go @@ -49,12 +49,12 @@ type Provider struct { lastConfiguration safe.Safe } -func (p *Provider) newK8sClient(ctx context.Context, labelSelector string) (*clientWrapper, error) { - labelSel, err := labels.Parse(labelSelector) +func (p *Provider) newK8sClient(ctx context.Context) (*clientWrapper, error) { + _, err := labels.Parse(p.LabelSelector) if err != nil { - return nil, fmt.Errorf("invalid label selector: %q", labelSelector) + return nil, fmt.Errorf("invalid label selector: %q", p.LabelSelector) } - log.FromContext(ctx).Infof("label selector is: %q", labelSel) + log.FromContext(ctx).Infof("label selector is: %q", p.LabelSelector) withEndpoint := "" if p.Endpoint != "" { @@ -74,11 +74,12 @@ func (p *Provider) newK8sClient(ctx context.Context, labelSelector string) (*cli client, err = newExternalClusterClient(p.Endpoint, p.Token, p.CertAuthFilePath) } - if err == nil { - client.labelSelector = labelSel + if err != nil { + return nil, err } - return client, err + client.labelSelector = p.LabelSelector + return client, nil } // Init the provider. @@ -92,8 +93,7 @@ func (p *Provider) Provide(configurationChan chan<- dynamic.Message, pool *safe. 
ctxLog := log.With(context.Background(), log.Str(log.ProviderName, providerName)) logger := log.FromContext(ctxLog) - logger.Debugf("Using label selector: %q", p.LabelSelector) - k8sClient, err := p.newK8sClient(ctxLog, p.LabelSelector) + k8sClient, err := p.newK8sClient(ctxLog) if err != nil { return err } diff --git a/pkg/provider/kubernetes/ingress/client.go b/pkg/provider/kubernetes/ingress/client.go index e54887e75..c40dd9a62 100644 --- a/pkg/provider/kubernetes/ingress/client.go +++ b/pkg/provider/kubernetes/ingress/client.go @@ -19,7 +19,6 @@ import ( "k8s.io/client-go/informers" "k8s.io/client-go/kubernetes" "k8s.io/client-go/rest" - "k8s.io/client-go/tools/cache" "k8s.io/client-go/tools/clientcmd" ) @@ -62,8 +61,9 @@ type clientWrapper struct { clientset kubernetes.Interface factoriesKube map[string]informers.SharedInformerFactory factoriesSecret map[string]informers.SharedInformerFactory + factoriesIngress map[string]informers.SharedInformerFactory clusterFactory informers.SharedInformerFactory - ingressLabelSelector labels.Selector + ingressLabelSelector string isNamespaceAll bool watchedNamespaces []string } @@ -126,16 +126,17 @@ func createClientFromConfig(c *rest.Config) (*clientWrapper, error) { func newClientImpl(clientset kubernetes.Interface) *clientWrapper { return &clientWrapper{ - clientset: clientset, - factoriesKube: make(map[string]informers.SharedInformerFactory), - factoriesSecret: make(map[string]informers.SharedInformerFactory), + clientset: clientset, + factoriesSecret: make(map[string]informers.SharedInformerFactory), + factoriesIngress: make(map[string]informers.SharedInformerFactory), + factoriesKube: make(map[string]informers.SharedInformerFactory), } } // WatchAll starts namespace-specific controllers for all relevant kinds. 
func (c *clientWrapper) WatchAll(namespaces []string, stopCh <-chan struct{}) (<-chan interface{}, error) { eventCh := make(chan interface{}, 1) - eventHandler := c.newResourceEventHandler(eventCh) + eventHandler := &resourceEventHandler{eventCh} if len(namespaces) == 0 { namespaces = []string{metav1.NamespaceAll} @@ -148,25 +149,38 @@ func (c *clientWrapper) WatchAll(namespaces []string, stopCh <-chan struct{}) (< opts.LabelSelector = "owner!=helm" } + matchesLabelSelector := func(opts *metav1.ListOptions) { + opts.LabelSelector = c.ingressLabelSelector + } + for _, ns := range namespaces { + factoryIngress := informers.NewSharedInformerFactoryWithOptions(c.clientset, resyncPeriod, informers.WithNamespace(ns), informers.WithTweakListOptions(matchesLabelSelector)) + factoryIngress.Extensions().V1beta1().Ingresses().Informer().AddEventHandler(eventHandler) + c.factoriesIngress[ns] = factoryIngress + factoryKube := informers.NewSharedInformerFactoryWithOptions(c.clientset, resyncPeriod, informers.WithNamespace(ns)) - factoryKube.Extensions().V1beta1().Ingresses().Informer().AddEventHandler(eventHandler) factoryKube.Core().V1().Services().Informer().AddEventHandler(eventHandler) factoryKube.Core().V1().Endpoints().Informer().AddEventHandler(eventHandler) + c.factoriesKube[ns] = factoryKube factorySecret := informers.NewSharedInformerFactoryWithOptions(c.clientset, resyncPeriod, informers.WithNamespace(ns), informers.WithTweakListOptions(notOwnedByHelm)) factorySecret.Core().V1().Secrets().Informer().AddEventHandler(eventHandler) - - c.factoriesKube[ns] = factoryKube c.factoriesSecret[ns] = factorySecret } for _, ns := range namespaces { + c.factoriesIngress[ns].Start(stopCh) c.factoriesKube[ns].Start(stopCh) c.factoriesSecret[ns].Start(stopCh) } for _, ns := range namespaces { + for typ, ok := range c.factoriesIngress[ns].WaitForCacheSync(stopCh) { + if !ok { + return nil, fmt.Errorf("timed out waiting for controller caches to sync %s in namespace %q", typ, ns) + } + } + for typ, ok := range c.factoriesKube[ns].WaitForCacheSync(stopCh) { if !ok { return nil, fmt.Errorf("timed out waiting for controller caches to sync %s in namespace %q", typ, ns) @@ -205,9 +219,9 @@ func (c *clientWrapper) WatchAll(namespaces []string, stopCh <-chan struct{}) (< func (c *clientWrapper) GetIngresses() []*networkingv1beta1.Ingress { var results []*networkingv1beta1.Ingress - for ns, factory := range c.factoriesKube { + for ns, factory := range c.factoriesIngress { // extensions - ings, err := factory.Extensions().V1beta1().Ingresses().Lister().List(c.ingressLabelSelector) + ings, err := factory.Extensions().V1beta1().Ingresses().Lister().List(labels.Everything()) if err != nil { log.Errorf("Failed to list ingresses in namespace %s: %v", ns, err) } @@ -222,7 +236,7 @@ func (c *clientWrapper) GetIngresses() []*networkingv1beta1.Ingress { } // networking - list, err := factory.Networking().V1beta1().Ingresses().Lister().List(c.ingressLabelSelector) + list, err := factory.Networking().V1beta1().Ingresses().Lister().List(labels.Everything()) if err != nil { log.Errorf("Failed to list ingresses in namespace %s: %v", ns, err) } @@ -256,7 +270,7 @@ func (c *clientWrapper) UpdateIngressStatus(src *networkingv1beta1.Ingress, ip, return c.updateIngressStatusOld(src, ip, hostname) } - ing, err := c.factoriesKube[c.lookupNamespace(src.Namespace)].Networking().V1beta1().Ingresses().Lister().Ingresses(src.Namespace).Get(src.Name) + ing, err := 
c.factoriesIngress[c.lookupNamespace(src.Namespace)].Networking().V1beta1().Ingresses().Lister().Ingresses(src.Namespace).Get(src.Name) if err != nil { return fmt.Errorf("failed to get ingress %s/%s: %w", src.Namespace, src.Name, err) } @@ -285,7 +299,7 @@ func (c *clientWrapper) UpdateIngressStatus(src *networkingv1beta1.Ingress, ip, } func (c *clientWrapper) updateIngressStatusOld(src *networkingv1beta1.Ingress, ip, hostname string) error { - ing, err := c.factoriesKube[c.lookupNamespace(src.Namespace)].Extensions().V1beta1().Ingresses().Lister().Ingresses(src.Namespace).Get(src.Name) + ing, err := c.factoriesIngress[c.lookupNamespace(src.Namespace)].Extensions().V1beta1().Ingresses().Lister().Ingresses(src.Namespace).Get(src.Name) if err != nil { return fmt.Errorf("failed to get ingress %s/%s: %w", src.Namespace, src.Name, err) } @@ -378,25 +392,6 @@ func (c *clientWrapper) lookupNamespace(ns string) string { return ns } -func (c *clientWrapper) newResourceEventHandler(events chan<- interface{}) cache.ResourceEventHandler { - return &cache.FilteringResourceEventHandler{ - FilterFunc: func(obj interface{}) bool { - // Ignore Ingresses that do not match our custom label selector. - switch v := obj.(type) { - case *extensionsv1beta1.Ingress: - lbls := labels.Set(v.GetLabels()) - return c.ingressLabelSelector.Matches(lbls) - case *networkingv1beta1.Ingress: - lbls := labels.Set(v.GetLabels()) - return c.ingressLabelSelector.Matches(lbls) - default: - return true - } - }, - Handler: &resourceEventHandler{ev: events}, - } -} - // GetServerVersion returns the cluster server version, or an error. func (c *clientWrapper) GetServerVersion() (*version.Version, error) { serverVersion, err := c.clientset.Discovery().ServerVersion() diff --git a/pkg/provider/kubernetes/ingress/kubernetes.go b/pkg/provider/kubernetes/ingress/kubernetes.go index 5173bbd8b..f5944edb1 100644 --- a/pkg/provider/kubernetes/ingress/kubernetes.go +++ b/pkg/provider/kubernetes/ingress/kubernetes.go @@ -53,15 +53,15 @@ type EndpointIngress struct { PublishedService string `description:"Published Kubernetes Service to copy status from." json:"publishedService,omitempty" toml:"publishedService,omitempty" yaml:"publishedService,omitempty"` } -func (p *Provider) newK8sClient(ctx context.Context, ingressLabelSelector string) (*clientWrapper, error) { - ingLabelSel, err := labels.Parse(ingressLabelSelector) +func (p *Provider) newK8sClient(ctx context.Context) (*clientWrapper, error) { + _, err := labels.Parse(p.LabelSelector) if err != nil { - return nil, fmt.Errorf("invalid ingress label selector: %q", ingressLabelSelector) + return nil, fmt.Errorf("invalid ingress label selector: %q", p.LabelSelector) } logger := log.FromContext(ctx) - logger.Infof("ingress label selector is: %q", ingLabelSel) + logger.Infof("ingress label selector is: %q", p.LabelSelector) withEndpoint := "" if p.Endpoint != "" { @@ -81,11 +81,12 @@ func (p *Provider) newK8sClient(ctx context.Context, ingressLabelSelector string cl, err = newExternalClusterClient(p.Endpoint, p.Token, p.CertAuthFilePath) } - if err == nil { - cl.ingressLabelSelector = ingLabelSel + if err != nil { + return nil, err } - return cl, err + cl.ingressLabelSelector = p.LabelSelector + return cl, nil } // Init the provider. @@ -99,8 +100,7 @@ func (p *Provider) Provide(configurationChan chan<- dynamic.Message, pool *safe. 
ctxLog := log.With(context.Background(), log.Str(log.ProviderName, "kubernetes")) logger := log.FromContext(ctxLog) - logger.Debugf("Using Ingress label selector: %q", p.LabelSelector) - k8sClient, err := p.newK8sClient(ctxLog, p.LabelSelector) + k8sClient, err := p.newK8sClient(ctxLog) if err != nil { return err }
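For reference, the informer-scoping pattern used throughout this patch (passing the configured label selector into the shared informer factory so that filtering happens server-side in the Kubernetes API, instead of in a client-side `FilteringResourceEventHandler`) can be sketched in isolation roughly as follows. This is a minimal, illustrative sketch only: it assumes a standard in-cluster client-go setup and the networking/v1beta1 Ingress informer that the vendored client-go of this patch still provides, and the helper name `buildScopedFactory` is hypothetical rather than part of the Traefik codebase.

```go
package main

import (
	"fmt"
	"time"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/labels"
	"k8s.io/client-go/informers"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/rest"
)

// buildScopedFactory returns an informer factory whose List/Watch calls are
// restricted server-side to objects matching labelSelector, the same pattern
// the provider applies to its Ingress and Traefik CRD factories.
// (Illustrative helper; not part of the Traefik codebase.)
func buildScopedFactory(clientset kubernetes.Interface, namespace, labelSelector string) informers.SharedInformerFactory {
	matchesLabelSelector := func(opts *metav1.ListOptions) {
		opts.LabelSelector = labelSelector
	}

	return informers.NewSharedInformerFactoryWithOptions(
		clientset,
		10*time.Minute, // resync period; Traefik uses its own resyncPeriod constant
		informers.WithNamespace(namespace),
		informers.WithTweakListOptions(matchesLabelSelector),
	)
}

func main() {
	cfg, err := rest.InClusterConfig()
	if err != nil {
		panic(err)
	}

	factory := buildScopedFactory(kubernetes.NewForConfigOrDie(cfg), "default", "app=traefik")

	// Requesting the lister before Start registers the Ingress informer, so
	// the factory actually starts and syncs it.
	ingressLister := factory.Networking().V1beta1().Ingresses().Lister()

	stopCh := make(chan struct{})
	defer close(stopCh)

	factory.Start(stopCh)
	factory.WaitForCacheSync(stopCh)

	// The cache only ever contains Ingresses labelled app=traefik, so listing
	// "everything" already yields the filtered set.
	ingresses, err := ingressLister.List(labels.Everything())
	if err != nil {
		panic(err)
	}
	fmt.Printf("ingresses matching the label selector: %d\n", len(ingresses))
}
```

Because the List/Watch requests themselves carry the selector, the listers can simply use `labels.Everything()` against their already-filtered caches, which is what the updated `GetIngresses` and `GetIngressRoutes` methods above do.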